/* Home | History | Annotate | Line # | Download | only in ntpd */
      1 /*	$NetBSD: ntp_loopfilter.c,v 1.14 2024/08/18 20:47:17 christos Exp $	*/
      2 
      3 /*
      4  * ntp_loopfilter.c - implements the NTP loop filter algorithm
      5  *
      6  * ATTENTION: Get approval from Dave Mills on all changes to this file!
      7  *
      8  */
      9 #ifdef HAVE_CONFIG_H
     10 # include <config.h>
     11 #endif
     12 
     13 #ifdef USE_SNPRINTB
     14 # include <util.h>
     15 #endif
     16 #include "ntpd.h"
     17 #include "ntp_io.h"
     18 #include "ntp_unixtime.h"
     19 #include "ntp_stdlib.h"
     20 #include "timexsup.h"
     21 
     22 #include <limits.h>
     23 #include <stdio.h>
     24 #include <ctype.h>
     25 
     26 #include <signal.h>
     27 #include <setjmp.h>
     28 
     29 #ifdef KERNEL_PLL
     30 #include "ntp_syscall.h"
     31 #endif /* KERNEL_PLL */
     32 
     33 /*
     34  * This is an implementation of the clock discipline algorithm described
     35  * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
     36  * hybrid phase/frequency-lock loop. A number of sanity checks are
     37  * included to protect against timewarps, timespikes and general mayhem.
     38  * All units are in s and s/s, unless noted otherwise.
     39  */
     40 #define CLOCK_MAX	.128	/* default step threshold (s) */
     41 #define CLOCK_MINSTEP	300.	/* default stepout threshold (s) */
     42 #define CLOCK_PANIC	1000.	/* default panic threshold (s) */
     43 #define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
     44 #define CLOCK_PLL	16.	/* PLL loop gain (log2) */
     45 #define CLOCK_AVG	8.	/* parameter averaging constant */
     46 #define CLOCK_FLL	.25	/* FLL loop gain */
     47 #define	CLOCK_FLOOR	.0005	/* startup offset floor (s) */
     48 #define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
     49 #define CLOCK_LIMIT	30	/* poll-adjust threshold */
     50 #define CLOCK_PGATE	4.	/* poll-adjust gate */
     51 #define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
     52 #define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
     53 #define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
     54 
     55 /*
     56  * Clock discipline state machine. This is used to control the
     57  * synchronization behavior during initialization and following a
     58  * timewarp.
     59  *
     60  *	State	< step		> step		Comments
     61  *	========================================================
     62  *	NSET	FREQ		step, FREQ	freq not set
     63  *
     64  *	FSET	SYNC		step, SYNC	freq set
     65  *
     66  *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
     67  *		    ignore	    ignore
     68  *		else		else
     69  *		    freq, SYNC	    freq, step, SYNC
     70  *
     71  *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
     72  *
     73  *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
     74  *				    ignore
     75  *				step, SYNC
     76  */
     77 /*
     78  * Kernel PLL/PPS state machine. This is used with the kernel PLL
     79  * modifications described in the documentation.
     80  *
     81  * If kernel support for the ntp_adjtime() system call is available, the
     82  * ntp_control flag is set. The ntp_enable and kern_enable flags can be
     83  * set at configuration time or run time using ntpdc. If ntp_enable is
     84  * false, the discipline loop is unlocked and no corrections of any kind
     85  * are made. If both ntp_control and kern_enable are set, the kernel
     86  * support is used as described above; if false, the kernel is bypassed
     87  * entirely and the daemon discipline used instead.
     88  *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel) now in Solaris disciplines the microseconds. The
 * second and third (nanokernel) disciplines the clock in nanoseconds.
 * These versions are identified if the symbol STA_PLL is present in the
 * header file /usr/include/sys/timex.h. The third and current version
 * includes TAI offset and is identified by the symbol NTP_API with
 * value 4.
     96  *
     97  * Each PPS time/frequency discipline can be enabled by the atom driver
     98  * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
     99  * set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
    101  *
    102  * If an external clock is present, the clock driver sets STA_CLK in the
    103  * status word. When the local clock driver sees this bit, it updates
    104  * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
    105  * set to zero, in which case the system clock is not adjusted. This is
    106  * also a signal for the external clock driver to discipline the system
    107  * clock. Unless specified otherwise, all times are in seconds.
    108  */
    109 /*
    110  * Program variables that can be tinkered.
    111  */
    112 double	clock_max_back = CLOCK_MAX;	/* step threshold */
    113 double	clock_max_fwd =  CLOCK_MAX;	/* step threshold */
    114 double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
    115 double	clock_panic = CLOCK_PANIC; /* panic threshold */
    116 double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
    117 u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */
    118 
    119 /*
    120  * Program variables
    121  */
    122 static double clock_offset;	/* offset */
    123 double	clock_jitter;		/* offset jitter */
    124 double	drift_comp;		/* frequency (s/s) */
    125 static double init_drift_comp; /* initial frequency (PPM) */
    126 double	clock_stability;	/* frequency stability (wander) (s/s) */
    127 double	clock_codec;		/* audio codec frequency (samples/s) */
    128 static u_long clock_epoch;	/* last update */
    129 u_int	sys_tai;		/* TAI offset from UTC */
    130 static int loop_started;	/* TRUE after LOOP_DRIFTINIT */
    131 static void rstclock (int, double); /* transition function */
    132 static double direct_freq(double); /* direct set frequency */
    133 static void set_freq(double);	/* set frequency */
    134 static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
    135 static char *this_file = NULL;
    136 
    137 #ifdef KERNEL_PLL
    138 static struct timex ntv;	/* ntp_adjtime() parameters */
    139 int	pll_status;		/* last kernel status bits */
    140 #if defined(STA_NANO) && NTP_API == 4
    141 static u_int loop_tai;		/* last TAI offset */
    142 #endif /* STA_NANO */
    143 static	void	start_kern_loop(void);
    144 static	void	stop_kern_loop(void);
    145 #endif /* KERNEL_PLL */
    146 
    147 /*
    148  * Clock state machine control flags
    149  */
    150 int	ntp_enable = TRUE;	/* clock discipline enabled */
    151 int	pll_control;		/* kernel support available */
    152 int	kern_enable = TRUE;	/* kernel support enabled */
    153 int	hardpps_enable;		/* kernel PPS discipline enabled */
    154 int	ext_enable;		/* external clock enabled */
    155 int	pps_stratum;		/* pps stratum */
    156 int	kernel_status;		/* from ntp_adjtime */
    157 int	force_step_once = FALSE; /* always step time once at startup (-G) */
    158 int	mode_ntpdate = FALSE;	/* exit on first clock set (-q) */
    159 int	freq_cnt;		/* initial frequency clamp */
    160 int	freq_set;		/* initial set frequency switch */
    161 
    162 /*
    163  * Clock state machine variables
    164  */
    165 int	state = 0;		/* clock discipline state */
    166 u_char	sys_poll;		/* time constant/poll (log2 s) */
    167 int	tc_counter;		/* jiggle counter */
    168 double	last_offset;		/* last offset (s) */
    169 
    170 u_int	tc_twinlo;		/* TC step down not before this time */
    171 u_int	tc_twinhi;		/* TC step up not before this time */
    172 
    173 /*
    174  * Huff-n'-puff filter variables
    175  */
    176 static double *sys_huffpuff;	/* huff-n'-puff filter */
    177 static int sys_hufflen;		/* huff-n'-puff filter stages */
    178 static int sys_huffptr;		/* huff-n'-puff filter pointer */
    179 static double sys_mindly;	/* huff-n'-puff filter min delay */
    180 
    181 #if defined(KERNEL_PLL)
    182 /* Emacs cc-mode goes nuts if we split the next line... */
    183 #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    184     MOD_STATUS | MOD_TIMECONST)
    185 #ifdef SIGSYS
    186 static void pll_trap (int);	/* configuration trap */
    187 static struct sigaction sigsys;	/* current sigaction status */
    188 static struct sigaction newsigsys; /* new sigaction status */
    189 static sigjmp_buf env;		/* environment var. for pll_trap() */
    190 #endif /* SIGSYS */
    191 #endif /* KERNEL_PLL */
    192 
    193 static void
    194 sync_status(const char *what, int ostatus, int nstatus)
    195 {
    196 	char obuf[256], nbuf[256], tbuf[1024];
    197 #if defined(USE_SNPRINTB) && defined (STA_FMT)
    198 	snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
    199 	snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
    200 #else
    201 	snprintf(obuf, sizeof(obuf), "%04x", ostatus);
    202 	snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
    203 #endif
    204 	snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
    205 	report_event(EVNT_KERN, NULL, tbuf);
    206 }
    207 
    208 /*
    209  * file_name - return pointer to non-relative portion of this C file pathname
    210  */
    211 static char *file_name(void)
    212 {
    213 	if (this_file == NULL) {
    214 	    (void)strncpy(relative_path, __FILE__, PATH_MAX);
    215 	    for (this_file=relative_path;
    216 		*this_file && ! isalnum((unsigned char)*this_file);
    217 		this_file++) ;
    218 	}
    219 	return this_file;
    220 }
    221 
    222 /*
    223  * init_loopfilter - initialize loop filter data
    224  */
    225 void
    226 init_loopfilter(void)
    227 {
    228 	/*
    229 	 * Initialize state variables.
    230 	 */
    231 	sys_poll = ntp_minpoll;
    232 	clock_jitter = LOGTOD(sys_precision);
    233 	freq_cnt = (int)clock_minstep;
    234 }
    235 
    236 #ifdef KERNEL_PLL
    237 /*
    238  * ntp_adjtime_error_handler - process errors from ntp_adjtime
    239  */
static void
ntp_adjtime_error_handler(
	const char *caller,	/* name of calling function */
	struct timex *ptimex,	/* pointer to struct timex */
	int ret,		/* return value from ntp_adjtime */
	int saved_errno,	/* value of errno when ntp_adjtime returned */
	int pps_call,		/* ntp_adjtime call was PPS-related */
	int tai_call,		/* ntp_adjtime call was TAI-related */
	int line		/* line number of ntp_adjtime call */
	)
{
	char des[1024] = "";	/* Decoded Error Status */
	char *dbp, *ebp;	/* append cursor / end-of-buffer for des[] */

	dbp = des;
	ebp = dbp + sizeof(des);

	switch (ret) {
	    /*
	     * ret == -1: the syscall itself failed; decode saved_errno.
	     */
	    case -1:
		switch (saved_errno) {
		    case EFAULT:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
			    caller, file_name(), line,
			    (long)((void *)ptimex)
			);
		    break;
		    case EINVAL:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
			    caller, file_name(), line,
			    (long)(ptimex->constant)
			);
		    break;
		    case EPERM:
			/* Log an extra TAI-specific message when the
			 * failed call was a TAI offset update. */
			if (tai_call) {
			    errno = saved_errno;
			    msyslog(LOG_ERR,
				"%s: ntp_adjtime(TAI) failed: %m",
				caller);
			}
			errno = saved_errno;
			msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
			    caller, file_name(), line
			);
		    break;
		    default:
			msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
			    caller, file_name(), line,
			    saved_errno
			);
		    break;
		}
	    break;
#ifdef TIME_OK
	    case TIME_OK: /* 0: synchronized, no leap second warning */
		/* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
	    break;
#else
# warning TIME_OK is not defined
#endif
#ifdef TIME_INS
	    case TIME_INS: /* 1: positive leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
	    break;
#else
# warning TIME_INS is not defined
#endif
#ifdef TIME_DEL
	    case TIME_DEL: /* 2: negative leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
	    break;
#else
# warning TIME_DEL is not defined
#endif
#ifdef TIME_OOP
	    case TIME_OOP: /* 3: leap second in progress */
		msyslog(LOG_INFO, "kernel reports leap second in progress");
	    break;
#else
# warning TIME_OOP is not defined
#endif
#ifdef TIME_WAIT
	    case TIME_WAIT: /* 4: leap second has occurred */
		msyslog(LOG_INFO, "kernel reports leap second has occurred");
	    break;
#else
# warning TIME_WAIT is not defined
#endif
#ifdef TIME_ERROR
/*
 * Reference excerpts (disabled) showing the conditions under which the
 * kernel returns TIME_ERROR; kept for documentation of the decode below.
 */
#if 0

from the reference implementation of ntp_gettime():

		// Hardware or software error
        if ((time_status & (STA_UNSYNC | STA_CLOCKERR))

	/*
         * PPS signal lost when either time or frequency synchronization
         * requested
         */
	|| (time_status & (STA_PPSFREQ | STA_PPSTIME)
	    && !(time_status & STA_PPSSIGNAL))

        /*
         * PPS jitter exceeded when time synchronization requested
         */
	|| (time_status & STA_PPSTIME &&
            time_status & STA_PPSJITTER)

        /*
         * PPS wander exceeded or calibration error when frequency
         * synchronization requested
         */
	|| (time_status & STA_PPSFREQ &&
            time_status & (STA_PPSWANDER | STA_PPSERROR)))
                return (TIME_ERROR);

or, from ntp_adjtime():

	if (  (time_status & (STA_UNSYNC | STA_CLOCKERR))
	    || (time_status & (STA_PPSFREQ | STA_PPSTIME)
		&& !(time_status & STA_PPSSIGNAL))
	    || (time_status & STA_PPSTIME
		&& time_status & STA_PPSJITTER)
	    || (time_status & STA_PPSFREQ
		&& time_status & (STA_PPSWANDER | STA_PPSERROR))
	   )
		return (TIME_ERROR);
#endif

	    case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
				/* error (see status word) */

		/* Decode each status bit that can contribute to
		 * TIME_ERROR into a "; "-separated summary in des[]. */
		if (ptimex->status & STA_UNSYNC)
			xsbprintf(&dbp, ebp, "%sClock Unsynchronized",
				 (*des) ? "; " : "");

		if (ptimex->status & STA_CLOCKERR)
		    xsbprintf(&dbp, ebp, "%sClock Error",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSFREQ)
		    xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSTIME)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSTIME
		    && ptimex->status & STA_PPSJITTER)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSWANDER)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSERROR)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected",
				  (*des) ? "; " : "");

		if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
			report_event(EVNT_KERN, NULL,
			    "no PPS signal");
		DPRINTF(1, ("kernel loop status %#x (%s)\n",
			ptimex->status, des));
		/*
		 * This code may be returned when ntp_adjtime() has just
		 * been called for the first time, quite a while after
		 * startup, when ntpd just starts to discipline the kernel
		 * time. In this case the occurrence of this message
		 * can be pretty confusing.
		 *
		 * HMS: How about a message when we begin kernel processing:
		 *    Determining kernel clock state...
		 * so an initial TIME_ERROR message is less confusing,
		 * or skipping the first message (ugh),
		 * or ???
		 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
		 */
		msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
			ptimex->status, des);
	    break;
#else
# warning TIME_ERROR is not defined
#endif
	    default:
		msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
		    caller, file_name(), line,
		    ret,
		    __func__, __LINE__
		);
	    break;
	}
	return;
}
    440 #endif
    441 
    442 /*
    443  * local_clock - the NTP logical clock loop filter.
    444  *
    445  * Return codes:
    446  * -1	update ignored: exceeds panic threshold
    447  * 0	update ignored: popcorn or exceeds step threshold
    448  * 1	clock was slewed
    449  * 2	clock was stepped
    450  *
    451  * LOCKCLOCK: The only thing this routine does is set the
    452  * sys_rootdisp variable equal to the peer dispersion.
    453  */
    454 int
    455 local_clock(
    456 	struct	peer *peer,	/* synch source peer structure */
    457 	double	fp_offset	/* clock offset (s) */
    458 	)
    459 {
    460 	int	rval;		/* return code */
    461 	int	osys_poll;	/* old system poll */
    462 	int	ntp_adj_ret;	/* returned by ntp_adjtime */
    463 	double	mu;		/* interval since last update */
    464 	double	clock_frequency; /* clock frequency */
    465 	double	dtemp, etemp;	/* double temps */
    466 	char	tbuf[80];	/* report buffer */
    467 
    468 	(void)ntp_adj_ret; /* not always used below... */
    469 	/*
    470 	 * If the loop is opened or the NIST LOCKCLOCK is in use,
    471 	 * monitor and record the offsets anyway in order to determine
    472 	 * the open-loop response and then go home.
    473 	 */
    474 #ifndef LOCKCLOCK
    475 	if (!ntp_enable)
    476 #endif /* not LOCKCLOCK */
    477 	{
    478 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
    479 		    clock_stability, sys_poll);
    480 		return (0);
    481 	}
    482 
    483 #ifndef LOCKCLOCK
    484 	/*
    485 	 * If the clock is way off, panic is declared. The clock_panic
    486 	 * defaults to 1000 s; if set to zero, the panic will never
    487 	 * occur. The allow_panic defaults to FALSE, so the first panic
    488 	 * will exit. It can be set TRUE by a command line option, in
    489 	 * which case the clock will be set anyway and time marches on.
    490 	 * But, allow_panic will be set FALSE when the update is less
    491 	 * than the step threshold; so, subsequent panics will exit.
    492 	 */
    493 	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
    494 	    !allow_panic) {
    495 		snprintf(tbuf, sizeof(tbuf),
    496 		    "%+.0f s; set clock manually within %.0f s.",
    497 		    fp_offset, clock_panic);
    498 		report_event(EVNT_SYSFAULT, NULL, tbuf);
    499 		return (-1);
    500 	}
    501 
    502 	allow_panic = FALSE;
    503 
    504 	/*
    505 	 * This section simulates ntpdate. If the offset exceeds the
    506 	 * step threshold (128 ms), step the clock to that time and
    507 	 * exit. Otherwise, slew the clock to that time and exit. Note
    508 	 * that the slew will persist and eventually complete beyond the
    509 	 * life of this program. Note that while ntpdate is active, the
    510 	 * terminal does not detach, so the termination message prints
    511 	 * directly to the terminal.
    512 	 */
    513 	if (mode_ntpdate) {
    514 		if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
    515 		   || (-fp_offset > clock_max_back && clock_max_back > 0)) {
    516 			step_systime(fp_offset);
    517 			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
    518 			    fp_offset);
    519 			printf("ntpd: time set %+.6fs\n", fp_offset);
    520 		} else {
    521 			adj_systime(fp_offset);
    522 			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
    523 			    fp_offset);
    524 			printf("ntpd: time slew %+.6fs\n", fp_offset);
    525 		}
    526 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
    527 		    clock_stability, sys_poll);
    528 		exit (0);
    529 	}
    530 
    531 	/*
    532 	 * The huff-n'-puff filter finds the lowest delay in the recent
    533 	 * interval. This is used to correct the offset by one-half the
    534 	 * difference between the sample delay and minimum delay. This
	 * is most effective if the delays are highly asymmetric and
    536 	 * clockhopping is avoided and the clock frequency wander is
    537 	 * relatively small.
    538 	 */
    539 	if (sys_huffpuff != NULL) {
    540 		if (peer->delay < sys_huffpuff[sys_huffptr])
    541 			sys_huffpuff[sys_huffptr] = peer->delay;
    542 		if (peer->delay < sys_mindly)
    543 			sys_mindly = peer->delay;
    544 		if (fp_offset > 0)
    545 			dtemp = -(peer->delay - sys_mindly) / 2;
    546 		else
    547 			dtemp = (peer->delay - sys_mindly) / 2;
    548 		fp_offset += dtemp;
    549 		DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
    550 			    sys_hufflen, sys_mindly, dtemp));
    551 	}
    552 
    553 	/*
    554 	 * Clock state machine transition function which defines how the
    555 	 * system reacts to large phase and frequency excursion. There
    556 	 * are two main regimes: when the offset exceeds the step
    557 	 * threshold (128 ms) and when it does not. Under certain
	 * conditions updates are suspended until the stepout threshold
    559 	 * (900 s) is exceeded. See the documentation on how these
    560 	 * thresholds interact with commands and command line options.
    561 	 *
    562 	 * Note the kernel is disabled if step is disabled or greater
    563 	 * than 0.5 s or in ntpdate mode.
    564 	 */
    565 	osys_poll = sys_poll;
    566 	if (sys_poll < peer->minpoll)
    567 		sys_poll = peer->minpoll;
    568 	if (sys_poll > peer->maxpoll)
    569 		sys_poll = peer->maxpoll;
    570 	mu = current_time - clock_epoch;
    571 	clock_frequency = drift_comp;
    572 	rval = 1;
    573 	if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
    574 	   || (-fp_offset > clock_max_back && clock_max_back > 0)
    575 	   || force_step_once ) {
    576 		if (force_step_once) {
    577 			force_step_once = FALSE;  /* we want this only once after startup */
    578 			msyslog(LOG_NOTICE, "Doing intital time step" );
    579 		}
    580 
    581 		switch (state) {
    582 
    583 		/*
    584 		 * In SYNC state we ignore the first outlier and switch
    585 		 * to SPIK state.
    586 		 */
    587 		case EVNT_SYNC:
    588 			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
    589 			    fp_offset);
    590 			report_event(EVNT_SPIK, NULL, tbuf);
    591 			state = EVNT_SPIK;
    592 			return (0);
    593 
    594 		/*
    595 		 * In FREQ state we ignore outliers and inlyers. At the
    596 		 * first outlier after the stepout threshold, compute
    597 		 * the apparent frequency correction and step the phase.
    598 		 */
    599 		case EVNT_FREQ:
    600 			if (mu < clock_minstep)
    601 				return (0);
    602 
    603 			clock_frequency = direct_freq(fp_offset);
    604 
    605 			/*FALLTHROUGH*/
    606 
    607 		/*
    608 		 * In SPIK state we ignore succeeding outliers until
    609 		 * either an inlyer is found or the stepout threshold is
    610 		 * exceeded.
    611 		 */
    612 		case EVNT_SPIK:
    613 			if (mu < clock_minstep)
    614 				return (0);
    615 
    616 			/*FALLTHROUGH*/
    617 
    618 		/*
    619 		 * We get here by default in NSET and FSET states and
    620 		 * from above in FREQ or SPIK states.
    621 		 *
    622 		 * In NSET state an initial frequency correction is not
    623 		 * available, usually because the frequency file has not
    624 		 * yet been written. Since the time is outside the step
    625 		 * threshold, the clock is stepped. The frequency will
    626 		 * be set directly following the stepout interval.
    627 		 *
    628 		 * In FSET state the initial frequency has been set from
    629 		 * the frequency file. Since the time is outside the
    630 		 * step threshold, the clock is stepped immediately,
    631 		 * rather than after the stepout interval. Guys get
    632 		 * nervous if it takes 15 minutes to set the clock for
    633 		 * the first time.
    634 		 *
    635 		 * In FREQ and SPIK states the stepout threshold has
    636 		 * expired and the phase is still above the step
    637 		 * threshold. Note that a single spike greater than the
    638 		 * step threshold is always suppressed, even with a
    639 		 * long time constant.
    640 		 */
    641 		default:
    642 			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
    643 			    fp_offset);
    644 			report_event(EVNT_CLOCKRESET, NULL, tbuf);
    645 			step_systime(fp_offset);
    646 			reinit_timer();
    647 			tc_counter = 0;
    648 			clock_jitter = LOGTOD(sys_precision);
    649 			rval = 2;
    650 			if (state == EVNT_NSET) {
    651 				rstclock(EVNT_FREQ, 0);
    652 				return (rval);
    653 			}
    654 			break;
    655 		}
    656 		rstclock(EVNT_SYNC, 0);
    657 	} else {
    658 		/*
    659 		 * The offset is less than the step threshold. Calculate
    660 		 * the jitter as the exponentially weighted offset
    661 		 * differences.
    662 		 */
    663 		etemp = SQUARE(clock_jitter);
    664 		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
    665 		    LOGTOD(sys_precision)));
    666 		clock_jitter = SQRT(etemp + (dtemp - etemp) /
    667 		    CLOCK_AVG);
    668 		switch (state) {
    669 
    670 		/*
    671 		 * In NSET state this is the first update received and
    672 		 * the frequency has not been initialized. Adjust the
    673 		 * phase, but do not adjust the frequency until after
    674 		 * the stepout threshold.
    675 		 */
    676 		case EVNT_NSET:
    677 			adj_systime(fp_offset);
    678 			rstclock(EVNT_FREQ, fp_offset);
    679 			break;
    680 
    681 		/*
    682 		 * In FREQ state ignore updates until the stepout
    683 		 * threshold. After that, compute the new frequency, but
    684 		 * do not adjust the frequency until the holdoff counter
    685 		 * decrements to zero.
    686 		 */
    687 		case EVNT_FREQ:
    688 			if (mu < clock_minstep)
    689 				return (0);
    690 
    691 			clock_frequency = direct_freq(fp_offset);
    692 			/* fall through */
    693 
    694 		/*
    695 		 * We get here by default in FSET, SPIK and SYNC states.
    696 		 * Here compute the frequency update due to PLL and FLL
    697 		 * contributions. Note, we avoid frequency discipline at
    698 		 * startup until the initial transient has subsided.
    699 		 */
    700 		default:
    701 			if (freq_cnt == 0) {
    702 
    703 				/*
    704 				 * The FLL and PLL frequency gain constants
    705 				 * depend on the time constant and Allan
    706 				 * intercept. The PLL is always used, but
    707 				 * becomes ineffective above the Allan intercept
    708 				 * where the FLL becomes effective.
    709 				 */
    710 				if (sys_poll >= allan_xpt)
    711 					clock_frequency +=
    712 					      (fp_offset - clock_offset)
    713 					    / ( max(ULOGTOD(sys_poll), mu)
    714 					       * CLOCK_FLL);
    715 
    716 				/*
    717 				 * The PLL frequency gain (numerator) depends on
    718 				 * the minimum of the update interval and Allan
    719 				 * intercept. This reduces the PLL gain when the
    720 				 * FLL becomes effective.
    721 				 */
    722 				etemp = min(ULOGTOD(allan_xpt), mu);
    723 				dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
    724 				clock_frequency +=
    725 				    fp_offset * etemp / (dtemp * dtemp);
    726 			}
    727 			rstclock(EVNT_SYNC, fp_offset);
    728 			if (fabs(fp_offset) < CLOCK_FLOOR)
    729 				freq_cnt = 0;
    730 			break;
    731 		}
    732 	}
    733 
    734 #ifdef KERNEL_PLL
    735 	/*
    736 	 * This code segment works when clock adjustments are made using
    737 	 * precision time kernel support and the ntp_adjtime() system
    738 	 * call. This support is available in Solaris 2.6 and later,
    739 	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
    740 	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
    741 	 * DECstation 5000/240 and Alpha AXP, additional kernel
    742 	 * modifications provide a true microsecond clock and nanosecond
    743 	 * clock, respectively.
    744 	 *
    745 	 * Important note: The kernel discipline is used only if the
    746 	 * step threshold is less than 0.5 s, as anything higher can
    747 	 * lead to overflow problems. This might occur if some misguided
    748 	 * lad set the step threshold to something ridiculous.
    749 	 */
    750 	if (pll_control && kern_enable && freq_cnt == 0) {
    751 
    752 		/*
    753 		 * We initialize the structure for the ntp_adjtime()
    754 		 * system call. We have to convert everything to
    755 		 * microseconds or nanoseconds first. Do not update the
    756 		 * system variables if the ext_enable flag is set. In
    757 		 * this case, the external clock driver will update the
    758 		 * variables, which will be read later by the local
    759 		 * clock driver. Afterwards, remember the time and
    760 		 * frequency offsets for jitter and stability values and
    761 		 * to update the frequency file.
    762 		 */
    763 		ZERO(ntv);
    764 		if (ext_enable) {
    765 			ntv.modes = MOD_STATUS;
    766 		} else {
    767 			ntv.modes = MOD_BITS;
    768 			ntv.offset = var_long_from_dbl(
    769 			    clock_offset, &ntv.modes);
    770 #ifdef STA_NANO
    771 			ntv.constant = sys_poll;
    772 #else /* STA_NANO */
    773 			ntv.constant = sys_poll - 4;
    774 #endif /* STA_NANO */
    775 			if (ntv.constant < 0)
    776 				ntv.constant = 0;
    777 
    778 			ntv.esterror = usec_long_from_dbl(
    779 				clock_jitter);
    780 			ntv.maxerror = usec_long_from_dbl(
    781 				sys_rootdelay / 2 + sys_rootdisp);
    782 			ntv.status = STA_PLL;
    783 
    784 			/*
    785 			 * Enable/disable the PPS if requested.
    786 			 */
    787 			if (hardpps_enable) {
    788 				ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
    789 				if (!(pll_status & STA_PPSTIME))
    790 					sync_status("PPS enabled",
    791 						pll_status,
    792 						ntv.status);
    793 			} else {
    794 				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
    795 				if (pll_status & STA_PPSTIME)
    796 					sync_status("PPS disabled",
    797 						pll_status,
    798 						ntv.status);
    799 			}
    800 			if (sys_leap == LEAP_ADDSECOND)
    801 				ntv.status |= STA_INS;
    802 			else if (sys_leap == LEAP_DELSECOND)
    803 				ntv.status |= STA_DEL;
    804 		}
    805 
    806 		/*
    807 		 * Pass the stuff to the kernel. If it squeals, turn off
    808 		 * the pps. In any case, fetch the kernel offset,
    809 		 * frequency and jitter.
    810 		 */
    811 		ntp_adj_ret = ntp_adjtime(&ntv);
    812 		/*
    813 		 * A squeal is a return status < 0, or a state change.
    814 		 */
    815 		if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
    816 			kernel_status = ntp_adj_ret;
    817 			ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
    818 		}
    819 		pll_status = ntv.status;
    820 		clock_offset = dbl_from_var_long(ntv.offset, ntv.status);
    821 		clock_frequency = FREQTOD(ntv.freq);
    822 
    823 		/*
    824 		 * If the kernel PPS is lit, monitor its performance.
    825 		 */
    826 		if (ntv.status & STA_PPSTIME) {
    827 			clock_jitter = dbl_from_var_long(
    828 				ntv.jitter, ntv.status);
    829 		}
    830 
    831 #if defined(STA_NANO) && NTP_API == 4
    832 		/*
    833 		 * If the TAI changes, update the kernel TAI.
    834 		 */
    835 		if (loop_tai != sys_tai) {
    836 			loop_tai = sys_tai;
    837 			ntv.modes = MOD_TAI;
    838 			ntv.constant = sys_tai;
    839 			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
    840 			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
    841 			}
    842 		}
    843 #endif /* STA_NANO */
    844 	}
    845 #endif /* KERNEL_PLL */
    846 
    847 	/*
    848 	 * Clamp the frequency within the tolerance range and calculate
    849 	 * the frequency difference since the last update.
    850 	 */
    851 	if (fabs(clock_frequency) > NTP_MAXFREQ)
    852 		msyslog(LOG_NOTICE,
    853 		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
    854 		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    855 	dtemp = SQUARE(clock_frequency - drift_comp);
    856 	if (clock_frequency > NTP_MAXFREQ)
    857 		drift_comp = NTP_MAXFREQ;
    858 	else if (clock_frequency < -NTP_MAXFREQ)
    859 		drift_comp = -NTP_MAXFREQ;
    860 	else
    861 		drift_comp = clock_frequency;
    862 
    863 	/*
    864 	 * Calculate the wander as the exponentially weighted RMS
    865 	 * frequency differences. Record the change for the frequency
    866 	 * file update.
    867 	 */
    868 	etemp = SQUARE(clock_stability);
    869 	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
    870 
    871 	/*
    872 	 * Here we adjust the time constant by comparing the current
    873 	 * offset with the clock jitter. If the offset is less than the
    874 	 * clock jitter times a constant, then the averaging interval is
    875 	 * increased, otherwise it is decreased. A bit of hysteresis
    876 	 * helps calm the dance. Works best using burst mode. Don't
    877 	 * fiddle with the poll during the startup clamp period.
    878 	 * [Bug 3615] also observe time gates to avoid eager stepping
    879 	 */
    880 	if (freq_cnt > 0) {
    881 		tc_counter = 0;
    882 		tc_twinlo  = current_time;
    883 		tc_twinhi  = current_time;
    884 	} else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
    885 		tc_counter += sys_poll;
    886 		if (tc_counter > CLOCK_LIMIT) {
    887 			tc_counter = CLOCK_LIMIT;
    888 			if (sys_poll < peer->maxpoll)
    889 				sys_poll += (current_time >= tc_twinhi);
    890 		}
    891 	} else {
    892 		tc_counter -= sys_poll << 1;
    893 		if (tc_counter < -CLOCK_LIMIT) {
    894 			tc_counter = -CLOCK_LIMIT;
    895 			if (sys_poll > peer->minpoll)
    896 				sys_poll -= (current_time >= tc_twinlo);
    897 		}
    898 	}
    899 
    900 	/*
    901 	 * If the time constant has changed, update the poll variables.
    902 	 *
    903 	 * [bug 3615] also set new time gates
    904 	 * The time limit for stepping down will be half the TC interval
    905 	 * or 60 secs from now, whatever is bigger, and the step up time
    906 	 * limit will be half the TC interval after the step down limit.
    907 	 *
    908 	 * The 'sys_poll' value affects the servo loop gain, and
    909 	 * overshooting sys_poll slows it down unnecessarily.  Stepping
    910 	 * down too fast also has bad effects.
    911 	 *
    912 	 * The 'tc_counter' dance itself is something that *should*
    913 	 * happen *once* every (1 << sys_poll) seconds, I think, but
    914 	 * that's not how it works right now, and adding time guards
    915 	 * seems the least intrusive way to handle this.
    916 	 */
    917 	if (osys_poll != sys_poll) {
    918 		u_int deadband = 1u << (sys_poll - 1);
    919 		tc_counter = 0;
    920 		tc_twinlo  = current_time + max(deadband, 60);
    921 		tc_twinhi  = tc_twinlo + deadband;
    922 		poll_update(peer, sys_poll, 0);
    923 	}
    924 
    925 	/*
    926 	 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
    927 	 */
    928 	record_loop_stats(clock_offset, drift_comp, clock_jitter,
    929 	    clock_stability, sys_poll);
    930 	DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
    931 		    clock_offset, clock_jitter, drift_comp * 1e6,
    932 		    clock_stability * 1e6, sys_poll));
    933 	return (rval);
    934 #endif /* not LOCKCLOCK */
    935 }
    936 
    937 
    938 /*
    939  * adj_host_clock - Called once every second to update the local clock.
    940  *
    941  * LOCKCLOCK: The only thing this routine does is increment the
    942  * sys_rootdisp variable.
    943  */
void
adj_host_clock(
	void
	)
{
	double	offset_adj;	/* phase correction applied this second (s) */
	double	freq_adj;	/* frequency correction component (s/s) */

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. During the startup clamp period, the
	 * time constant is clamped at 2.
	 */
	sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
	/* Discipline disabled or one-shot ntpdate mode: do not touch
	 * the clock; only the dispersion update above happens. */
	if (!ntp_enable || mode_ntpdate)
		return;
	/*
	 * Determine the phase adjustment. The gain factor (denominator)
	 * increases with poll interval, so is dominated by the FLL
	 * above the Allan intercept. Note the reduced time constant at
	 * startup.
	 */
	if (state != EVNT_SYNC) {
		offset_adj = 0.;	/* not synchronized: no phase nudge */
	} else if (freq_cnt > 0) {
		/* startup clamp: fixed short time constant, ULOGTOD(1) == 2 */
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
		freq_cnt--;
#ifdef KERNEL_PLL
	} else if (pll_control && kern_enable) {
		/* kernel discipline active: the kernel amortizes the
		 * offset passed to ntp_adjtime() itself */
		offset_adj = 0.;
#endif /* KERNEL_PLL */
	} else {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	}

	/*
	 * If the kernel discipline is enabled the frequency correction
	 * drift_comp has already been engaged via ntp_adjtime() in
	 * set_freq().  Otherwise it is a component of the adj_systime()
	 * offset.
	 */
#ifdef KERNEL_PLL
	if (pll_control && kern_enable)
		freq_adj = 0.;
	else
#endif /* KERNEL_PLL */
		freq_adj = drift_comp;

	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
	if (offset_adj + freq_adj > NTP_MAXFREQ)
		offset_adj = NTP_MAXFREQ - freq_adj;
	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
		offset_adj = -NTP_MAXFREQ - freq_adj;

	/* Amortize: remove the slice just applied from the residual offset. */
	clock_offset -= offset_adj;
	/*
	 * Windows port adj_systime() must be called each second,
	 * even if the argument is zero, to ease emulation of
	 * adjtime() using Windows' slew API which controls the rate
	 * but does not automatically stop slewing when an offset
	 * has decayed to zero.
	 */
	DEBUG_INSIST(enable_panic_check == TRUE);
	enable_panic_check = FALSE;
	adj_systime(offset_adj + freq_adj);
	enable_panic_check = TRUE;
#endif /* LOCKCLOCK */
}
   1016 
   1017 
   1018 /*
   1019  * Clock state machine. Enter new state and set state variables.
   1020  */
   1021 static void
   1022 rstclock(
   1023 	int	trans,		/* new state */
   1024 	double	offset		/* new offset */
   1025 	)
   1026 {
   1027 	DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
   1028 		    current_time - clock_epoch, trans, sys_poll,
   1029 		    tc_counter));
   1030 	if (trans != state && trans != EVNT_FSET)
   1031 		report_event(trans, NULL, NULL);
   1032 #ifdef HAVE_WORKING_FORK
   1033 	if (trans != state && EVNT_SYNC == trans) {
   1034 		/*
   1035 		 * If our parent process is waiting for the
   1036 		 * first clock sync, send them home satisfied.
   1037 		 */
   1038 		if (daemon_pipe[1] != -1) {
   1039 			if (2 != write(daemon_pipe[1], "S\n", 2)) {
   1040 				msyslog(LOG_ERR, "daemon failed to notify parent ntpd (--wait-sync)");
   1041 			}
   1042 			close(daemon_pipe[1]);
   1043 			daemon_pipe[1] = -1;
   1044 		}
   1045 	}
   1046 #endif /* HAVE_WORKING_FORK */
   1047 
   1048 	state = trans;
   1049 	last_offset = clock_offset = offset;
   1050 	clock_epoch = current_time;
   1051 }
   1052 
   1053 
   1054 /*
 * direct_freq - calculate frequency directly
 *
 * This is very carefully done. When the offset is first computed at the
 * first update, a residual frequency component results. Subsequently,
 * updates are suppressed until the end of the measurement interval while
 * the offset is amortized. At the end of the interval the frequency is
 * calculated from the current offset, residual offset, length of the
 * interval and residual frequency component. At the same time the
 * frequency file is armed for update at the next hourly stats.
   1064  */
   1065 static double
   1066 direct_freq(
   1067 	double	fp_offset
   1068 	)
   1069 {
   1070 	set_freq(fp_offset / (current_time - clock_epoch));
   1071 
   1072 	return drift_comp;
   1073 }
   1074 
   1075 
   1076 /*
   1077  * set_freq - set clock frequency correction
   1078  *
   1079  * Used to step the frequency correction at startup, possibly again once
   1080  * the frequency is measured (that is, transitioning from EVNT_NSET to
   1081  * EVNT_FSET), and finally to switch between daemon and kernel loop
   1082  * discipline at runtime.
   1083  *
   1084  * When the kernel loop discipline is available but the daemon loop is
   1085  * in use, the kernel frequency correction is disabled (set to 0) to
   1086  * ensure drift_comp is applied by only one of the loops.
   1087  */
   1088 static void
   1089 set_freq(
   1090 	double	freq		/* frequency update */
   1091 	)
   1092 {
   1093 	const char *	loop_desc;
   1094 	int ntp_adj_ret;
   1095 
   1096 	(void)ntp_adj_ret; /* not always used below... */
   1097 	drift_comp = freq;
   1098 	loop_desc = "ntpd";
   1099 #ifdef KERNEL_PLL
   1100 	if (pll_control) {
   1101 		ZERO(ntv);
   1102 		ntv.modes = MOD_FREQUENCY;
   1103 		if (kern_enable) {
   1104 			loop_desc = "kernel";
   1105 			ntv.freq = DTOFREQ(drift_comp);
   1106 		}
   1107 		if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
   1108 		    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
   1109 		}
   1110 	}
   1111 #endif /* KERNEL_PLL */
   1112 	mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
   1113 	    drift_comp * 1e6);
   1114 }
   1115 
   1116 
   1117 #ifdef KERNEL_PLL
/*
 * start_kern_loop - probe for and engage the kernel PLL discipline.
 *
 * Optimistically sets pll_control, then calls ntp_adjtime(); if the
 * syscall is missing (SIGSYS) or errors, pll_control is cleared again.
 */
static void
start_kern_loop(void)
{
	static int atexit_done;	/* register stop_kern_loop() only once */
	int ntp_adj_ret;

	pll_control = TRUE;
	ZERO(ntv);
	ntv.modes = MOD_BITS;
	ntv.status = STA_PLL | STA_UNSYNC;
	ntv.maxerror = MAXDISPERSE * 1.0e6;
	ntv.esterror = MAXDISPERSE * 1.0e6;
	ntv.constant = sys_poll;
	/*             ^^^^^^^^ why is it that here constant is
	 * unconditionally set to sys_poll, whereas elsewhere it is
	 * modified depending on nanosecond vs. microsecond kernel?
	 * NOTE(review): local_clock() uses sys_poll - 4 when STA_NANO
	 * is not defined -- confirm whether that applies here too.
	 */
	/*[bug 3699] make sure kernel PLL sees our initial drift compensation */
	if (freq_set) {
		ntv.modes |= MOD_FREQUENCY;
		ntv.freq = DTOFREQ(drift_comp);
	}
#ifdef SIGSYS
	/*
	 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
	 * it fails, then pll_trap() will set pll_control FALSE before
	 * returning control using siglongjmp().
	 */
	newsigsys.sa_handler = pll_trap;
	newsigsys.sa_flags = 0;
	if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
		msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
		pll_control = FALSE;
	} else {
		if (sigsetjmp(env, 1) == 0) {
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
			}
		}
		/* Restore the previous SIGSYS disposition regardless of
		 * whether the probe succeeded. */
		if (sigaction(SIGSYS, &sigsys, NULL)) {
			msyslog(LOG_ERR,
			    "sigaction() restore SIGSYS: %m");
			pll_control = FALSE;
		}
	}
#else /* SIGSYS */
	if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
	    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
	}
#endif /* SIGSYS */

	/*
	 * Save the result status and light up an external clock
	 * if available.
	 */
	pll_status = ntv.status;
	if (pll_control) {
		if (!atexit_done) {
			atexit_done = TRUE;
			atexit(&stop_kern_loop);
		}
#ifdef STA_NANO
		if (pll_status & STA_CLK)
			ext_enable = TRUE;
#endif /* STA_NANO */
		report_event(EVNT_KERN, NULL,
	  	    "kernel time sync enabled");
	}
}
   1187 #endif	/* KERNEL_PLL */
   1188 
   1189 
   1190 #ifdef KERNEL_PLL
   1191 static void
   1192 stop_kern_loop(void)
   1193 {
   1194 	if (pll_control && kern_enable)
   1195 		report_event(EVNT_KERN, NULL,
   1196 		    "kernel time sync disabled");
   1197 }
   1198 #endif	/* KERNEL_PLL */
   1199 
   1200 
   1201 /*
   1202  * select_loop() - choose kernel or daemon loop discipline.
   1203  */
   1204 void
   1205 select_loop(
   1206 	int	use_kern_loop
   1207 	)
   1208 {
   1209 	if (kern_enable == use_kern_loop)
   1210 		return;
   1211 #ifdef KERNEL_PLL
   1212 	if (pll_control && !use_kern_loop)
   1213 		stop_kern_loop();
   1214 #endif
   1215 	kern_enable = use_kern_loop;
   1216 #ifdef KERNEL_PLL
   1217 	if (pll_control && use_kern_loop)
   1218 		start_kern_loop();
   1219 #endif
   1220 	/*
   1221 	 * If this loop selection change occurs after initial startup,
   1222 	 * call set_freq() to switch the frequency compensation to or
   1223 	 * from the kernel loop.
   1224 	 */
   1225 #ifdef KERNEL_PLL
   1226 	if (pll_control && loop_started)
   1227 		set_freq(drift_comp);
   1228 #endif
   1229 }
   1230 
   1231 
   1232 /*
   1233  * huff-n'-puff filter
   1234  */
   1235 void
   1236 huffpuff(void)
   1237 {
   1238 	int i;
   1239 
   1240 	if (sys_huffpuff == NULL)
   1241 		return;
   1242 
   1243 	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
   1244 	sys_huffpuff[sys_huffptr] = 1e9;
   1245 	sys_mindly = 1e9;
   1246 	for (i = 0; i < sys_hufflen; i++) {
   1247 		if (sys_huffpuff[i] < sys_mindly)
   1248 			sys_mindly = sys_huffpuff[i];
   1249 	}
   1250 }
   1251 
   1252 
   1253 /*
   1254  * loop_config - configure the loop filter
   1255  *
   1256  * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
   1257  */
void
loop_config(
	int	item,		/* LOOP_* configuration item */
	double	freq		/* item value; units depend on item */
	)
{
	int	i;
	double	ftemp;

	DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
	switch (item) {

	/*
	 * We first assume the kernel supports the ntp_adjtime()
	 * syscall. If that syscall works, initialize the kernel time
	 * variables. Otherwise, continue leaving no harm behind.
	 */
	case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (mode_ntpdate)
			break;

		start_kern_loop();
#endif /* KERNEL_PLL */

		/*
		 * Initialize frequency if given; otherwise, begin frequency
		 * calibration phase.
		 */
		ftemp = init_drift_comp / 1e6;
		/* clamp the configured drift to the tolerance range */
		if (ftemp > NTP_MAXFREQ)
			ftemp = NTP_MAXFREQ;
		else if (ftemp < -NTP_MAXFREQ)
			ftemp = -NTP_MAXFREQ;
		set_freq(ftemp);
		if (freq_set)
			rstclock(EVNT_FSET, 0);
		else
			rstclock(EVNT_NSET, 0);
		loop_started = TRUE;
#endif /* LOCKCLOCK */
		break;

	case LOOP_KERN_CLEAR:
#if 0		/* XXX: needs more review, and how can we get here? */
#ifndef LOCKCLOCK
# ifdef KERNEL_PLL
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			sync_status("kernel time sync disabled",
				pll_status,
				ntv.status);
		   }
# endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
#endif
		break;

	/*
	 * Tinker command variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
		allan_xpt = (u_char)freq;
		break;

	case LOOP_CODEC:	/* audio codec frequency (codec) */
		clock_codec = freq / 1e6;
		break;

	case LOOP_PHI:		/* dispersion threshold (dispersion) */
		clock_phi = freq / 1e6;
		break;

	case LOOP_FREQ:		/* initial frequency (freq) */
		init_drift_comp = freq;
		freq_set = 1;
		break;

	case LOOP_NOFREQ:	/* remove any initial drift comp spec */
		init_drift_comp = 0;
		freq_set = 0;
		break;

	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0]));
		/* start with an empty window: all slots "infinite" delay */
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
		break;

	case LOOP_PANIC:	/* panic threshold (panic) */
		clock_panic = freq;
		break;

	case LOOP_MAX:		/* step threshold (step) */
		clock_max_fwd = clock_max_back = freq;
		if (freq == 0 || freq > 0.5)
			select_loop(FALSE);
		break;

	case LOOP_MAX_BACK:	/* step threshold (step) */
		clock_max_back = freq;
		/*
		 * Leave using the kernel discipline code unless both
		 * limits are massive.  This assumes the reason to stop
		 * using it is that it's pointless, not that it goes wrong.
		 *
		 * NOTE(review): the condition below fires when EITHER
		 * limit is 0 or > 0.5 s, while the text above says
		 * "both" -- comment and code disagree; this matches
		 * upstream, so confirm intent before changing either.
		 */
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MAX_FWD:	/* step threshold (step) */
		clock_max_fwd = freq;
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
		/* enforce the compiled-in floor on the stepout threshold */
		if (freq < CLOCK_MINSTEP)
			clock_minstep = CLOCK_MINSTEP;
		else
			clock_minstep = freq;
		break;

	case LOOP_TICK:		/* tick increment (tick) */
		set_sys_tick_precision(freq);
		break;

	case LOOP_LEAP:		/* not used, fall through */
	default:
		msyslog(LOG_NOTICE,
		    "loop_config: unsupported option %d", item);
	}
}
   1401 
   1402 
   1403 #if defined(KERNEL_PLL) && defined(SIGSYS)
   1404 /*
   1405  * _trap - trap processor for undefined syscalls
   1406  *
   1407  * This nugget is called by the kernel when the SYS_ntp_adjtime()
   1408  * syscall bombs because the silly thing has not been implemented in
   1409  * the kernel. In this case the phase-lock loop is emulated by
   1410  * the stock adjtime() syscall and a lot of indelicate abuse.
   1411  */
static RETSIGTYPE
pll_trap(
	int arg		/* signal number (unused) */
	)
{
	/*
	 * Mark the kernel loop unusable BEFORE jumping back: the
	 * sigsetjmp() caller in start_kern_loop() tests pll_control
	 * after the jump returns.
	 */
	pll_control = FALSE;
	siglongjmp(env, 1);
}
   1420 #endif /* KERNEL_PLL && SIGSYS */
   1421