/*	$NetBSD: kern_clock.c,v 1.50.2.3 2001/01/18 09:23:43 bouyer Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
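
/*
 * Illustrative example (values assumed, not from this file): with
 * hz = 100, stathz = 100 and profhz = 1000, initclocks() below
 * computes psratio = profhz / stathz = 10.  While any process is
 * being profiled, the statistics clock is run at profhz and
 * statclock() counts only every 10th tick toward the cp_time[]
 * statistics, as described above.
 */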

#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
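
/*
 * Example (illustrative values): BUMPTIME(&mono_time, 40) with
 * mono_time = { 10, 999980 } computes us = 1000020 >= 1000000, so
 * it stores tv_usec = 20 and advances tv_sec to 11.  The macro
 * handles at most one second of carry, hence ``a small number of
 * usec's'' above.
 */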

int	stathz;
int	profhz;
int	profprocs;
int	softclock_running;		/* 1 => softclock() is running */
static int psdiv;			/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words of `time' at once.
 * To succeed we need to be quadword aligned.
 * The sparc already does that, and the fact that it has worked
 * so far is a fluke.
 */
volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct	timeval mono_time;

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities", and Justin Gibbs's subsequent
 * integration into FreeBSD, modified for NetBSD by Jason R. Thorpe.
 *
 * The original work on the data structures used in this implementation
 * was published by G. Varghese and A. Lauck in the paper "Hashed and
 * Hierarchical Timing Wheels: Data Structures for the Efficient
 * Implementation of a Timer Facility" in the Proceedings of the 11th
 * ACM Annual Symposium on Operating System Principles, Austin, Texas,
 * November 1987.
 */
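
/*
 * Illustrative example (values assumed): with callwheelsize = 256
 * and callwheelmask = 0xff, a callout armed at hardclock_ticks =
 * 1000 for 30 ticks gets c_time = 1030 and hashes into bucket
 * callwheel[1030 & 0xff] = callwheel[6].  softclock() fires it only
 * when softclock_ticks reaches 1030; entries in the same bucket
 * whose c_time lies one or more wheel revolutions ahead are skipped.
 */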
struct callout_queue *callwheel;
int	callwheelsize, callwheelbits, callwheelmask;

static struct callout *nextsoftcheck;	/* next callout to be checked */

#ifdef CALLWHEEL_STATS
int	callwheel_collisions;		/* number of hash collisions */
int	callwheel_maxlength;		/* length of the longest hash chain */
int	*callwheel_sizes;		/* per-bucket length count */
u_int64_t callwheel_count;		/* # callouts currently */
u_int64_t callwheel_established;	/* # callouts established */
u_int64_t callwheel_fired;		/* # callouts that fired */
u_int64_t callwheel_disestablished;	/* # callouts disestablished */
u_int64_t callwheel_changed;		/* # callouts changed */
u_int64_t callwheel_softclocks;		/* # times softclock() called */
u_int64_t callwheel_softchecks;		/* # checks per softclock() */
u_int64_t callwheel_softempty;		/* # empty buckets seen */
#endif /* CALLWHEEL_STATS */

/*
 * This value indicates the number of consecutive callouts that
 * will be checked before we allow interrupts to have a chance
 * again.
 */
#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS	100
#endif

struct simplelock callwheel_slock;

#define	CALLWHEEL_LOCK(s)						\
do {									\
	s = splclock();							\
	simple_lock(&callwheel_slock);					\
} while (0)

#define	CALLWHEEL_UNLOCK(s)						\
do {									\
	simple_unlock(&callwheel_slock);				\
	splx(s);							\
} while (0)

static void callout_stop_locked(struct callout *);

/*
 * These are both protected by callwheel_slock.
 * XXX SHOULD BE STATIC!!
 */
u_int64_t hardclock_ticks, softclock_ticks;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
void	softclock(void *);
void	*softclock_si;
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	rrticks = hz / 10;

#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
		 */
		fixtick = (1000000 - (hz*tick));
	}
#endif
}
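
/*
 * Illustrative example (values assumed): with hz = 100, the switch
 * above picks shifthz = SHIFT_SCALE - 7, i.e. it scales as if the
 * clock ran at 128 Hz; the residual error is corrected by the
 * hz-specific fixups in hardclock() below.  With the usual
 * tick = 10000 us, hz * tick is exactly 1000000 and fixtick stays 0;
 * a machine with hz = 60 and tick = 16666 would get
 * fixtick = 1000000 - 999960 = 40, and the NTP code spreads those
 * 40 us over each second.
 */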

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
	struct cpu_info *ci = curcpu();
#ifdef NTP
	int time_update;
	int ltemp;
#endif

	p = curproc;
	if (p) {
		struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
		roundrobin(ci);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop. This will adjust the external clock phase and frequency
	 * in small amounts. The additional phase noise and frequency
	 * wander this causes should be minimal. We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock. If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual. We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated. Also, the maximum error is
	 * increased by the tolerance. If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * the maximum frequency offset is (a tad less than) +-512 ppm.
	 * On a 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *   bc -q
		 *   scale=24
		 *   obase=2
		 *   idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value.  You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *   bc -q
		 *   scale=24
		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
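		/*
		 * Worked example for the hz = 100 case below
		 * (illustrative): the ideal ratio is 128/100 = 1.28.
		 * The factor (1 + 2^-2 + 2^-5) = 1.28125, corrected
		 * by (1 - 2^-10), gives 1.28125 * 0.9990234375 =
		 * 1.2799987..., within about 1ppm of 1.28, matching
		 * the error quoted in that case.
		 */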
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	simple_lock(&callwheel_slock);	/* already at splclock() */
	hardclock_ticks++;
	if (TAILQ_FIRST(&callwheel[hardclock_ticks & callwheelmask]) != NULL) {
		simple_unlock(&callwheel_slock);
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do
			 * it now.
			 *
			 * NOTE: If we're at ``base priority'', softclock()
			 * was not already running.
			 */
			spllowersoftclock();
			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			softclock(NULL);
			KERNEL_UNLOCK();
		} else {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
			softintr_schedule(softclock_si);
#else
			setsoftclock();
#endif
		}
		return;
	} else if (softclock_running == 0 &&
		   (softclock_ticks + 1) == hardclock_ticks) {
		softclock_ticks++;
	}
	simple_unlock(&callwheel_slock);
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock(void *v)
{
	struct callout_queue *bucket;
	struct callout *c;
	void (*func)(void *);
	void *arg;
	int s, idx;
	int steps = 0;

	CALLWHEEL_LOCK(s);

	softclock_running = 1;

#ifdef CALLWHEEL_STATS
	callwheel_softclocks++;
#endif

	while (softclock_ticks != hardclock_ticks) {
		softclock_ticks++;
		idx = (int)(softclock_ticks & callwheelmask);
		bucket = &callwheel[idx];
		c = TAILQ_FIRST(bucket);
#ifdef CALLWHEEL_STATS
		if (c == NULL)
			callwheel_softempty++;
#endif
		while (c != NULL) {
#ifdef CALLWHEEL_STATS
			callwheel_softchecks++;
#endif
			if (c->c_time != softclock_ticks) {
				c = TAILQ_NEXT(c, c_link);
				if (++steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					CALLWHEEL_UNLOCK(s);
					CALLWHEEL_LOCK(s);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				nextsoftcheck = TAILQ_NEXT(c, c_link);
				TAILQ_REMOVE(bucket, c, c_link);
#ifdef CALLWHEEL_STATS
				callwheel_sizes[idx]--;
				callwheel_fired++;
				callwheel_count--;
#endif
				func = c->c_func;
				arg = c->c_arg;
				c->c_func = NULL;
				c->c_flags &= ~CALLOUT_PENDING;
				CALLWHEEL_UNLOCK(s);
				(*func)(arg);
				CALLWHEEL_LOCK(s);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	softclock_running = 0;
	CALLWHEEL_UNLOCK(s);
}

/*
 * callout_setsize:
 *
 *	Determine how many callwheels are necessary and
 *	set hash mask.  Called from allocsys().
 */
void
callout_setsize(void)
{

	for (callwheelsize = 1; callwheelsize < ncallout; callwheelsize <<= 1)
		/* loop */ ;
	callwheelmask = callwheelsize - 1;
}
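
/*
 * Example (illustrative): if ncallout is, say, 300, the loop above
 * leaves callwheelsize = 512 (the next power of two) and
 * callwheelmask = 0x1ff, so bucket selection in callout_reset()
 * and softclock() reduces to a cheap AND instead of a modulus.
 */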

/*
 * callout_startup:
 *
 *	Initialize the callwheel buckets.
 */
void
callout_startup(void)
{
	int i;

	for (i = 0; i < callwheelsize; i++)
		TAILQ_INIT(&callwheel[i]);

	simple_lock_init(&callwheel_slock);
}

/*
 * callout_init:
 *
 *	Initialize a callout structure so that it can be used
 *	by callout_reset() and callout_stop().
 */
void
callout_init(struct callout *c)
{

	memset(c, 0, sizeof(*c));
}

/*
 * callout_reset:
 *
 *	Establish or change a timeout.
 */
void
callout_reset(struct callout *c, int ticks, void (*func)(void *), void *arg)
{
	struct callout_queue *bucket;
	int s;

	if (ticks <= 0)
		ticks = 1;

	CALLWHEEL_LOCK(s);

	/*
	 * If this callout's timer is already running, cancel it
	 * before we modify it.
	 */
	if (c->c_flags & CALLOUT_PENDING) {
		callout_stop_locked(c);	/* Already locked */
#ifdef CALLWHEEL_STATS
		callwheel_changed++;
#endif
	}

	c->c_arg = arg;
	c->c_func = func;
	c->c_flags = CALLOUT_ACTIVE | CALLOUT_PENDING;
	c->c_time = hardclock_ticks + ticks;

	bucket = &callwheel[c->c_time & callwheelmask];

#ifdef CALLWHEEL_STATS
	if (TAILQ_FIRST(bucket) != NULL)
		callwheel_collisions++;
#endif

	TAILQ_INSERT_TAIL(bucket, c, c_link);

#ifdef CALLWHEEL_STATS
	callwheel_count++;
	callwheel_established++;
	if (++callwheel_sizes[c->c_time & callwheelmask] > callwheel_maxlength)
		callwheel_maxlength =
		    callwheel_sizes[c->c_time & callwheelmask];
#endif

	CALLWHEEL_UNLOCK(s);
}
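
/*
 * Typical usage (illustrative sketch; `sc', its sc_ch member and
 * foo_tick() are assumptions, not part of this file):
 *
 *	callout_init(&sc->sc_ch);
 *	callout_reset(&sc->sc_ch, hz / 2, foo_tick, sc);
 *
 * and, when the timeout is no longer wanted:
 *
 *	callout_stop(&sc->sc_ch);
 *
 * A periodic timeout is usually obtained by having foo_tick()
 * re-arm itself with another callout_reset().
 */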

/*
 * callout_stop_locked:
 *
 *	Disestablish a timeout.  Callwheel is locked.
 */
static void
callout_stop_locked(struct callout *c)
{

	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		return;
	}

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c)
		nextsoftcheck = TAILQ_NEXT(c, c_link);

	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_link);
#ifdef CALLWHEEL_STATS
	callwheel_count--;
	callwheel_disestablished++;
	callwheel_sizes[c->c_time & callwheelmask]--;
#endif

	c->c_func = NULL;
}

/*
 * callout_stop:
 *
 *	Disestablish a timeout.  Callwheel is unlocked.  This is
 *	the standard entry point.
 */
void
callout_stop(struct callout *c)
{
	int s;

	CALLWHEEL_LOCK(s);
	callout_stop_locked(c);
	CALLWHEEL_UNLOCK(s);
}

#ifdef CALLWHEEL_STATS
/*
 * callout_showstats:
 *
 *	Display callout statistics.  Call it from DDB.
 */
void
callout_showstats(void)
{
	u_int64_t curticks;
	int s;

	s = splclock();
	curticks = softclock_ticks;
	splx(s);

	printf("Callwheel statistics:\n");
	printf("\tCallouts currently queued: %llu\n", callwheel_count);
	printf("\tCallouts established: %llu\n", callwheel_established);
	printf("\tCallouts disestablished: %llu\n", callwheel_disestablished);
	if (callwheel_changed != 0)
		printf("\t\tOf those, %llu were changes\n", callwheel_changed);
	printf("\tCallouts that fired: %llu\n", callwheel_fired);
	printf("\tNumber of buckets: %d\n", callwheelsize);
	printf("\tNumber of hash collisions: %d\n", callwheel_collisions);
	printf("\tMaximum hash chain length: %d\n", callwheel_maxlength);
	printf("\tSoftclocks: %llu, Softchecks: %llu\n",
	    callwheel_softclocks, callwheel_softchecks);
	printf("\t\tEmpty buckets seen: %llu\n", callwheel_softempty);
}
#endif

/*
 * Compute number of hz until specified time.  Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
hzto(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);

	if (usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec <= 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy hzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
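
/*
 * Example (illustrative): with hz = 100 (tick = 10000 us) and a
 * target 2.5 s in the future, sec = 2 and usec = 500000, so the
 * first branch above yields
 * ticks = ((2000000 + 500000 + 9999) / 10000) + 1 = 251; the
 * extra tick allows for the partially-elapsed current tick.
 */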

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	p = curproc;
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef PROC_PC
		if (p && p->p_flag & P_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		++p->p_cpticks;
		/*
		 * If no separate schedclock is provided, call it here
		 * at about 12-25 Hz; about 16 Hz is best.
		 */
		if (schedhz == 0)
			if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(p);
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different from the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(long offset)
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}
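
/*
 * Illustrative numbers (assuming the usual timex.h values
 * SHIFT_UPDATE = 12 and SHIFT_KG = 6): in PLL mode with
 * time_constant = 0, an offset of +1000 us loads
 * time_offset = 1000 << SHIFT_UPDATE, and each seconds rollover in
 * hardclock() then slews time_offset >> SHIFT_KG, i.e. about 1/64
 * of the remaining offset, giving the exponential convergence
 * characteristic of the PLL.
 */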

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(struct timeval *tvp,		/* time at PPS */
	long usec			/* hardware counter at PPS */)
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
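	/*
	 * Example (illustrative): if the three samples in pps_tf[]
	 * are { 5, -3, 2 } us, the code above picks the median,
	 * pps_offset = 2, and the spread of the other two samples,
	 * v_usec = 5 - (-3) = 8 us, feeds the jitter average below.
	 */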
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(void *where, size_t *sizep)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}
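
/*
 * Userland sketch (illustrative; not part of this file): the
 * structure filled in above is what sysctl(3) returns for
 * kern.clockrate, e.g.:
 *
 *	struct clockinfo ci;
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *	size_t len = sizeof(ci);
 *
 *	if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d tick=%d stathz=%d\n",
 *		    ci.hz, ci.tick, ci.stathz);
 */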