kern_clock.c revision 1.106.2.3
      1 /*	$NetBSD: kern_clock.c,v 1.106.2.3 2007/02/23 12:00:29 yamt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  * This code is derived from software contributed to The NetBSD Foundation
     11  * by Charles M. Hannum.
     12  *
     13  * Redistribution and use in source and binary forms, with or without
     14  * modification, are permitted provided that the following conditions
     15  * are met:
     16  * 1. Redistributions of source code must retain the above copyright
     17  *    notice, this list of conditions and the following disclaimer.
     18  * 2. Redistributions in binary form must reproduce the above copyright
     19  *    notice, this list of conditions and the following disclaimer in the
     20  *    documentation and/or other materials provided with the distribution.
     21  * 3. All advertising materials mentioning features or use of this software
     22  *    must display the following acknowledgement:
     23  *	This product includes software developed by the NetBSD
     24  *	Foundation, Inc. and its contributors.
     25  * 4. Neither the name of The NetBSD Foundation nor the names of its
     26  *    contributors may be used to endorse or promote products derived
     27  *    from this software without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     39  * POSSIBILITY OF SUCH DAMAGE.
     40  */
     41 
     42 /*-
     43  * Copyright (c) 1982, 1986, 1991, 1993
     44  *	The Regents of the University of California.  All rights reserved.
     45  * (c) UNIX System Laboratories, Inc.
     46  * All or some portions of this file are derived from material licensed
     47  * to the University of California by American Telephone and Telegraph
     48  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     49  * the permission of UNIX System Laboratories, Inc.
     50  *
     51  * Redistribution and use in source and binary forms, with or without
     52  * modification, are permitted provided that the following conditions
     53  * are met:
     54  * 1. Redistributions of source code must retain the above copyright
     55  *    notice, this list of conditions and the following disclaimer.
     56  * 2. Redistributions in binary form must reproduce the above copyright
     57  *    notice, this list of conditions and the following disclaimer in the
     58  *    documentation and/or other materials provided with the distribution.
     59  * 3. Neither the name of the University nor the names of its contributors
     60  *    may be used to endorse or promote products derived from this software
     61  *    without specific prior written permission.
     62  *
     63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     73  * SUCH DAMAGE.
     74  *
     75  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.106.2.3 2007/02/23 12:00:29 yamt Exp $");
     80 
     81 #include "opt_ntp.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #include <sys/param.h>
     86 #include <sys/systm.h>
     87 #include <sys/callout.h>
     88 #include <sys/kernel.h>
     89 #include <sys/proc.h>
     90 #include <sys/resourcevar.h>
     91 #include <sys/signalvar.h>
     92 #include <sys/sysctl.h>
     93 #include <sys/timex.h>
     94 #include <sys/sched.h>
     95 #include <sys/time.h>
     96 #ifdef __HAVE_TIMECOUNTER
     97 #include <sys/timetc.h>
     98 #endif
     99 
    100 #include <machine/cpu.h>
    101 #include <machine/intr.h>
    102 
    103 #ifdef GPROF
    104 #include <sys/gmon.h>
    105 #endif
    106 
    107 /*
    108  * Clock handling routines.
    109  *
    110  * This code is written to operate with two timers that run independently of
    111  * each other.  The main clock, running hz times per second, is used to keep
    112  * track of real time.  The second timer handles kernel and user profiling,
    113  * and does resource use estimation.  If the second timer is programmable,
    114  * it is randomized to avoid aliasing between the two clocks.  For example,
    115  * the randomization prevents an adversary from always giving up the CPU
    116  * just before its quantum expires.  Otherwise, it would never accumulate
    117  * CPU ticks.  The mean frequency of the second timer is stathz.
    118  *
    119  * If no second timer exists, stathz will be zero; in this case we drive
    120  * profiling and statistics off the main clock.  This WILL NOT be accurate;
    121  * do not do it unless absolutely necessary.
    122  *
    123  * The statistics clock may (or may not) be run at a higher rate while
    124  * profiling.  This profile clock runs at profhz.  We require that profhz
    125  * be an integral multiple of stathz.
    126  *
    127  * If the statistics clock is running fast, it must be divided by the ratio
    128  * profhz/stathz for statistics.  (For profiling, every tick counts.)
    129  */
    130 
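        /*
         * Example with illustrative numbers: with stathz = 128 and profhz
         * bumped to 1024 while profiling, psratio = 1024 / 128 = 8, so
         * statclock() divides its statistics work by 8 (spc_pscnt counts
         * down from psdiv) whenever any process is being profiled.
         */
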
    131 #ifndef __HAVE_TIMECOUNTER
    132 #ifdef NTP	/* NTP phase-locked loop in kernel */
    133 /*
    134  * Phase/frequency-lock loop (PLL/FLL) definitions
    135  *
    136  * The following variables are read and set by the ntp_adjtime() system
    137  * call.
    138  *
    139  * time_state shows the state of the system clock, with values defined
    140  * in the timex.h header file.
    141  *
    142  * time_status shows the status of the system clock, with bits defined
    143  * in the timex.h header file.
    144  *
    145  * time_offset is used by the PLL/FLL to adjust the system time in small
    146  * increments.
    147  *
    148  * time_constant determines the bandwidth or "stiffness" of the PLL.
    149  *
    150  * time_tolerance determines maximum frequency error or tolerance of the
    151  * CPU clock oscillator and is a property of the architecture; however,
    152  * in principle it could change as result of the presence of external
    153  * discipline signals, for instance.
    154  *
    155  * time_precision is usually equal to the kernel tick variable; however,
    156  * in cases where a precision clock counter or external clock is
    157  * available, the resolution can be much less than this and depend on
    158  * whether the external clock is working or not.
    159  *
    160  * time_maxerror is initialized by a ntp_adjtime() call and increased by
    161  * the kernel once each second to reflect the maximum error bound
    162  * growth.
    163  *
    164  * time_esterror is set and read by the ntp_adjtime() call, but
    165  * otherwise not used by the kernel.
    166  */
    167 int time_state = TIME_OK;	/* clock state */
    168 int time_status = STA_UNSYNC;	/* clock status bits */
    169 long time_offset = 0;		/* time offset (us) */
    170 long time_constant = 0;		/* pll time constant */
    171 long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
    172 long time_precision = 1;	/* clock precision (us) */
    173 long time_maxerror = MAXPHASE;	/* maximum error (us) */
    174 long time_esterror = MAXPHASE;	/* estimated error (us) */
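
        #if 0	/* illustrative userland sketch, not compiled */
        /*
         * The variables above are driven through the ntp_adjtime(2)
         * syscall.  A minimal sketch, assuming the standard timex API:
         * request a 5 ms phase correction and read back the clock state.
         */
        #include <sys/timex.h>

        static int
        nudge_clock(void)
        {
        	struct timex tx = { .modes = MOD_OFFSET, .offset = 5000 /* us */ };

        	return ntp_adjtime(&tx);	/* TIME_OK, TIME_INS, ..., or -1 */
        }
        #endif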
    175 
    176 /*
    177  * The following variables establish the state of the PLL/FLL and the
    178  * residual time and frequency offset of the local clock. The scale
    179  * factors are defined in the timex.h header file.
    180  *
    181  * time_phase and time_freq are the phase increment and the frequency
    182  * increment, respectively, of the kernel time variable.
    183  *
    184  * time_freq is set via ntp_adjtime() from a value stored in a file when
    185  * the synchronization daemon is first started. Its value is retrieved
    186  * via ntp_adjtime() and written to the file about once per hour by the
    187  * daemon.
    188  *
    189  * time_adj is the adjustment added to the value of tick at each timer
    190  * interrupt and is recomputed from time_phase and time_freq at each
    191  * seconds rollover.
    192  *
    193  * time_reftime is the second's portion of the system time at the last
    194  * call to ntp_adjtime(). It is used to adjust the time_freq variable
    195  * and to increase the time_maxerror as the time since last update
    196  * increases.
    197  */
    198 long time_phase = 0;		/* phase offset (scaled us) */
    199 long time_freq = 0;		/* frequency offset (scaled ppm) */
    200 long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
    201 long time_reftime = 0;		/* time at last adjustment (s) */
    202 
    203 #ifdef PPS_SYNC
    204 /*
    205  * The following variables are used only if the kernel PPS discipline
    206  * code is configured (PPS_SYNC). The scale factors are defined in the
    207  * timex.h header file.
    208  *
    209  * pps_time contains the time at each calibration interval, as read by
    210  * microtime(). pps_count counts the seconds of the calibration
    211  * interval, the duration of which is nominally pps_shift in powers of
    212  * two.
    213  *
    214  * pps_offset is the time offset produced by the time median filter
    215  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
    216  * this filter.
    217  *
    218  * pps_freq is the frequency offset produced by the frequency median
    219  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
    220  * by this filter.
    221  *
    222  * pps_usec is latched from a high resolution counter or external clock
    223  * at pps_time. Here we want the hardware counter contents only, not the
    224  * contents plus the time_tv.usec as usual.
    225  *
    226  * pps_valid counts the number of seconds since the last PPS update. It
    227  * is used as a watchdog timer to disable the PPS discipline should the
    228  * PPS signal be lost.
    229  *
    230  * pps_glitch counts the number of seconds since the beginning of an
    231  * offset burst more than tick/2 from current nominal offset. It is used
    232  * mainly to suppress error bursts due to priority conflicts between the
    233  * PPS interrupt and timer interrupt.
    234  *
    235  * pps_intcnt counts the calibration intervals for use in the interval-
    236  * adaptation algorithm. It's just too complicated for words.
    237  *
    238  * pps_kc_hardpps_source contains an arbitrary value that uniquely
    239  * identifies the currently bound source of the PPS signal, or NULL
    240  * if no source is bound.
    241  *
    242  * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
    243  * signal should be reported.
    244  */
    245 struct timeval pps_time;	/* kernel time at last interval */
    246 long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
    247 long pps_offset = 0;		/* pps time offset (us) */
    248 long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
    249 long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
    250 long pps_freq = 0;		/* frequency offset (scaled ppm) */
    251 long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
    252 long pps_usec = 0;		/* microsec counter at last interval */
    253 long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
    254 int pps_glitch = 0;		/* pps signal glitch counter */
    255 int pps_count = 0;		/* calibration interval counter (s) */
    256 int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
    257 int pps_intcnt = 0;		/* intervals at current duration */
    258 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
    259 int pps_kc_hardpps_mode = 0;	/* interesting edges of PPS signal */
    260 
    261 /*
    262  * PPS signal quality monitors
    263  *
    264  * pps_jitcnt counts the seconds that have been discarded because the
    265  * jitter measured by the time median filter exceeds the limit MAXTIME
    266  * (100 us).
    267  *
    268  * pps_calcnt counts the frequency calibration intervals, which are
    269  * variable from 4 s to 256 s.
    270  *
    271  * pps_errcnt counts the calibration intervals which have been discarded
    272  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
    273  * calibration interval jitter exceeds two ticks.
    274  *
    275  * pps_stbcnt counts the calibration intervals that have been discarded
    276  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
    277  */
    278 long pps_jitcnt = 0;		/* jitter limit exceeded */
    279 long pps_calcnt = 0;		/* calibration intervals */
    280 long pps_errcnt = 0;		/* calibration errors */
    281 long pps_stbcnt = 0;		/* stability limit exceeded */
    282 #endif /* PPS_SYNC */
    283 
    284 #ifdef EXT_CLOCK
    285 /*
    286  * External clock definitions
    287  *
    288  * The following definitions and declarations are used only if an
    289  * external clock is configured on the system.
    290  */
    291 #define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */
    292 
    293 /*
    294  * The clock_count variable is set to CLOCK_INTERVAL at each PPS
    295  * interrupt and decremented once each second.
    296  */
    297 int clock_count = 0;		/* CPU clock counter */
    298 
    299 #ifdef HIGHBALL
    300 /*
    301  * The clock_offset and clock_cpu variables are used by the HIGHBALL
    302  * interface. The clock_offset variable defines the offset between
    303  * system time and the HIGHBALL counters. The clock_cpu variable contains
    304  * the offset between the system clock and the HIGHBALL clock for use in
    305  * disciplining the kernel time variable.
    306  */
    307 extern struct timeval clock_offset; /* Highball clock offset */
    308 long clock_cpu = 0;		/* CPU clock adjust */
    309 #endif /* HIGHBALL */
    310 #endif /* EXT_CLOCK */
    311 #endif /* NTP */
    312 
    313 /*
    314  * Bump a timeval by a small number of usec's.
    315  */
    316 #define BUMPTIME(t, usec) { \
    317 	volatile struct timeval *tp = (t); \
    318 	long us; \
    319  \
    320 	tp->tv_usec = us = tp->tv_usec + (usec); \
    321 	if (us >= 1000000) { \
    322 		tp->tv_usec = us - 1000000; \
    323 		tp->tv_sec++; \
    324 	} \
    325 }
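        /*
         * Usage sketch: BUMPTIME(&mono_time, tick) advances mono_time by
         * one tick's worth of microseconds, carrying into tv_sec when the
         * microseconds roll past one million.  Only a single carry is
         * handled, so the increment must leave tv_usec below 2000000.
         */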
    326 #endif /* !__HAVE_TIMECOUNTER */
    327 
    328 int	stathz;
    329 int	profhz;
    330 int	profsrc;
    331 int	schedhz;
    332 int	profprocs;
    333 int	hardclock_ticks;
    334 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
    335 static int psdiv;			/* prof => stat divider */
    336 int	psratio;			/* ratio: prof / stat */
    337 #ifndef __HAVE_TIMECOUNTER
    338 int	tickfix, tickfixinterval;	/* used if tick not really integral */
    339 #ifndef NTP
    340 static int tickfixcnt;			/* accumulated fractional error */
    341 #else
    342 int	fixtick;			/* used by NTP for same */
    343 int	shifthz;
    344 #endif
    345 
    346 /*
    347  * We might want ldd to load both words from time at once.
    348  * To succeed we need to be quadword aligned.
    349  * The sparc already does that, and that it has worked so far is a fluke.
    350  */
    351 volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
    352 volatile struct	timeval mono_time;
    353 #endif /* !__HAVE_TIMECOUNTER */
    354 
    355 void	*softclock_si;
    356 
    357 #ifdef __HAVE_TIMECOUNTER
    358 static u_int get_intr_timecount(struct timecounter *);
    359 
    360 static struct timecounter intr_timecounter = {
    361 	get_intr_timecount,	/* get_timecount */
    362 	0,			/* no poll_pps */
    363 	~0u,			/* counter_mask */
    364 	0,			/* frequency */
    365 	"clockinterrupt",	/* name */
    366 	0,			/* quality - minimum implementation level for a clock */
    367 	NULL,			/* prev */
    368 	NULL,			/* next */
    369 };
    370 
    371 static u_int
    372 get_intr_timecount(struct timecounter *tc)
    373 {
    374 
    375 	return (u_int)hardclock_ticks;
    376 }
    377 #endif
    378 
    379 /*
    380  * Initialize clock frequencies and start both clocks running.
    381  */
    382 void
    383 initclocks(void)
    384 {
    385 	int i;
    386 
    387 	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
    388 	if (softclock_si == NULL)
    389 		panic("initclocks: unable to register softclock intr");
    390 
    391 	/*
    392 	 * Set divisors to 1 (normal case) and let the machine-specific
    393 	 * code do its bit.
    394 	 */
    395 	psdiv = 1;
    396 #ifdef __HAVE_TIMECOUNTER
    397 	/*
    398 	 * Provide a minimum default time counter; it will only run
    399 	 * at interrupt resolution.
    400 	 */
    401 	intr_timecounter.tc_frequency = hz;
    402 	tc_init(&intr_timecounter);
    403 #endif
    404 	cpu_initclocks();
    405 
    406 	/*
    407 	 * Compute profhz and stathz, fix profhz if needed.
    408 	 */
    409 	i = stathz ? stathz : hz;
    410 	if (profhz == 0)
    411 		profhz = i;
    412 	psratio = profhz / i;
    413 	if (schedhz == 0) {
    414 		/* 16 Hz is best */
    415 		statscheddiv = i / 16;
    416 		if (statscheddiv <= 0)
    417 			panic("statscheddiv");
    418 	}
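        	/*
        	 * Example: with stathz = 128, statscheddiv = 128 / 16 = 8, so
        	 * statclock() below ends up calling sched_clock() on every 8th
        	 * stat tick, i.e. at the desired 16 Hz.
        	 */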
    419 
    420 #ifndef __HAVE_TIMECOUNTER
    421 #ifdef NTP
    422 	switch (hz) {
    423 	case 1:
    424 		shifthz = SHIFT_SCALE - 0;
    425 		break;
    426 	case 2:
    427 		shifthz = SHIFT_SCALE - 1;
    428 		break;
    429 	case 4:
    430 		shifthz = SHIFT_SCALE - 2;
    431 		break;
    432 	case 8:
    433 		shifthz = SHIFT_SCALE - 3;
    434 		break;
    435 	case 16:
    436 		shifthz = SHIFT_SCALE - 4;
    437 		break;
    438 	case 32:
    439 		shifthz = SHIFT_SCALE - 5;
    440 		break;
    441 	case 50:
    442 	case 60:
    443 	case 64:
    444 		shifthz = SHIFT_SCALE - 6;
    445 		break;
    446 	case 96:
    447 	case 100:
    448 	case 128:
    449 		shifthz = SHIFT_SCALE - 7;
    450 		break;
    451 	case 256:
    452 		shifthz = SHIFT_SCALE - 8;
    453 		break;
    454 	case 512:
    455 		shifthz = SHIFT_SCALE - 9;
    456 		break;
    457 	case 1000:
    458 	case 1024:
    459 		shifthz = SHIFT_SCALE - 10;
    460 		break;
    461 	case 1200:
    462 	case 2048:
    463 		shifthz = SHIFT_SCALE - 11;
    464 		break;
    465 	case 4096:
    466 		shifthz = SHIFT_SCALE - 12;
    467 		break;
    468 	case 8192:
    469 		shifthz = SHIFT_SCALE - 13;
    470 		break;
    471 	case 16384:
    472 		shifthz = SHIFT_SCALE - 14;
    473 		break;
    474 	case 32768:
    475 		shifthz = SHIFT_SCALE - 15;
    476 		break;
    477 	case 65536:
    478 		shifthz = SHIFT_SCALE - 16;
    479 		break;
    480 	default:
    481 		panic("weird hz");
    482 	}
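        	/*
        	 * Example: for hz = 100 the next power of two is 128 = 2^7, so
        	 * shifthz = SHIFT_SCALE - 7.  The residual error from scaling by
        	 * 128 instead of 100 is compensated by the hz-specific
        	 * shift-and-add corrections applied to time_adj in hardclock().
        	 */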
    483 	if (fixtick == 0) {
    484 		/*
    485 		 * Give MD code a chance to set this to a better
    486 		 * value; but, if it doesn't, we should.
    487 		 */
    488 		fixtick = (1000000 - (hz*tick));
    489 	}
    490 #endif /* NTP */
    491 #endif /* !__HAVE_TIMECOUNTER */
    492 }
    493 
    494 /*
    495  * The real-time timer, interrupting hz times per second.
    496  */
    497 void
    498 hardclock(struct clockframe *frame)
    499 {
    500 	struct lwp *l;
    501 	struct proc *p;
    502 	struct cpu_info *ci = curcpu();
    503 	struct ptimer *pt;
    504 #ifndef __HAVE_TIMECOUNTER
    505 	int delta;
    506 	extern int tickdelta;
    507 	extern long timedelta;
    508 #ifdef NTP
    509 	int time_update;
    510 	int ltemp;
    511 #endif /* NTP */
    512 #endif /* __HAVE_TIMECOUNTER */
    513 
    514 	l = curlwp;
    515 	if (!CURCPU_IDLE_P()) {
    516 		p = l->l_proc;
    517 		/*
    518 		 * Run current process's virtual and profile time, as needed.
    519 		 */
    520 		if (CLKF_USERMODE(frame) && p->p_timers &&
    521 		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
    522 			if (itimerdecr(pt, tick) == 0)
    523 				itimerfire(pt);
    524 		if (p->p_timers &&
    525 		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
    526 			if (itimerdecr(pt, tick) == 0)
    527 				itimerfire(pt);
    528 	}
    529 
    530 	/*
    531 	 * If no separate statistics clock is available, run it from here.
    532 	 */
    533 	if (stathz == 0)
    534 		statclock(frame);
    535 	if ((--ci->ci_schedstate.spc_ticks) <= 0)
    536 		sched_tick(ci);
    537 
    538 #if defined(MULTIPROCESSOR)
    539 	/*
    540 	 * If we are not the primary CPU, we're not allowed to do
    541 	 * any more work.
    542 	 */
    543 	if (CPU_IS_PRIMARY(ci) == 0)
    544 		return;
    545 #endif
    546 
    547 	hardclock_ticks++;
    548 
    549 #ifdef __HAVE_TIMECOUNTER
    550 	tc_ticktock();
    551 #else /* __HAVE_TIMECOUNTER */
    552 	/*
    553 	 * Increment the time-of-day.  The increment is normally just
    554 	 * ``tick''.  If the machine is one which has a clock frequency
    555 	 * such that ``hz'' does not divide the second evenly into
    556 	 * microseconds, a periodic adjustment must be applied.  Finally,
    557 	 * if we are still adjusting the time (see adjtime()),
    558 	 * ``tickdelta'' may also be added in.
    559 	 */
    560 	delta = tick;
    561 
    562 #ifndef NTP
    563 	if (tickfix) {
    564 		tickfixcnt += tickfix;
    565 		if (tickfixcnt >= tickfixinterval) {
    566 			delta++;
    567 			tickfixcnt -= tickfixinterval;
    568 		}
    569 	}
    570 #endif /* !NTP */
    571 	/* Imprecise 4bsd adjtime() handling */
    572 	if (timedelta != 0) {
    573 		delta += tickdelta;
    574 		timedelta -= tickdelta;
    575 	}
    576 
    577 #ifdef notyet
    578 	microset();
    579 #endif
    580 
    581 #ifndef NTP
    582 	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
    583 #endif
    584 	BUMPTIME(&mono_time, delta);
    585 
    586 #ifdef NTP
    587 	time_update = delta;
    588 
    589 	/*
    590 	 * Compute the phase adjustment. If the low-order bits
    591 	 * (time_phase) of the update overflow, bump the high-order bits
    592 	 * (time_update).
    593 	 */
    594 	time_phase += time_adj;
    595 	if (time_phase <= -FINEUSEC) {
    596 		ltemp = -time_phase >> SHIFT_SCALE;
    597 		time_phase += ltemp << SHIFT_SCALE;
    598 		time_update -= ltemp;
    599 	} else if (time_phase >= FINEUSEC) {
    600 		ltemp = time_phase >> SHIFT_SCALE;
    601 		time_phase -= ltemp << SHIFT_SCALE;
    602 		time_update += ltemp;
    603 	}
    604 
    605 #ifdef HIGHBALL
    606 	/*
    607 	 * If the HIGHBALL board is installed, we need to adjust the
    608 	 * external clock offset in order to close the hardware feedback
    609 	 * loop. This will adjust the external clock phase and frequency
    610 	 * in small amounts. The additional phase noise and frequency
    611 	 * wander this causes should be minimal. We also need to
    612 	 * discipline the kernel time variable, since the PLL is used to
    613 	 * discipline the external clock. If the Highball board is not
    614 	 * present, we discipline kernel time with the PLL as usual. We
    615 	 * assume that the external clock phase adjustment (time_update)
    616 	 * and kernel phase adjustment (clock_cpu) are less than the
    617 	 * value of tick.
    618 	 */
    619 	clock_offset.tv_usec += time_update;
    620 	if (clock_offset.tv_usec >= 1000000) {
    621 		clock_offset.tv_sec++;
    622 		clock_offset.tv_usec -= 1000000;
    623 	}
    624 	if (clock_offset.tv_usec < 0) {
    625 		clock_offset.tv_sec--;
    626 		clock_offset.tv_usec += 1000000;
    627 	}
    628 	time.tv_usec += clock_cpu;
    629 	clock_cpu = 0;
    630 #else
    631 	time.tv_usec += time_update;
    632 #endif /* HIGHBALL */
    633 
    634 	/*
    635 	 * On rollover of the second the phase adjustment to be used for
    636 	 * the next second is calculated. Also, the maximum error is
    637 	 * increased by the tolerance. If the PPS frequency discipline
    638 	 * code is present, the phase is increased to compensate for the
    639 	 * CPU clock oscillator frequency error.
    640 	 *
    641 	 * On a 32-bit machine and given the parameters in the timex.h
    642 	 * header file, the maximum phase adjustment is +-512 ms and the
    643 	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
    644 	 * 64-bit machine, you shouldn't need to ask.
    645 	 */
    646 	if (time.tv_usec >= 1000000) {
    647 		time.tv_usec -= 1000000;
    648 		time.tv_sec++;
    649 		time_maxerror += time_tolerance >> SHIFT_USEC;
    650 
    651 		/*
    652 		 * Leap second processing. If in leap-insert state at
    653 		 * the end of the day, the system clock is set back one
    654 		 * second; if in leap-delete state, the system clock is
    655 		 * set ahead one second. The microtime() routine or
    656 		 * external clock driver will insure that reported time
    657 		 * is always monotonic. The ugly divides should be
    658 		 * replaced.
    659 		 */
    660 		switch (time_state) {
    661 		case TIME_OK:
    662 			if (time_status & STA_INS)
    663 				time_state = TIME_INS;
    664 			else if (time_status & STA_DEL)
    665 				time_state = TIME_DEL;
    666 			break;
    667 
    668 		case TIME_INS:
    669 			if (time.tv_sec % 86400 == 0) {
    670 				time.tv_sec--;
    671 				time_state = TIME_OOP;
    672 			}
    673 			break;
    674 
    675 		case TIME_DEL:
    676 			if ((time.tv_sec + 1) % 86400 == 0) {
    677 				time.tv_sec++;
    678 				time_state = TIME_WAIT;
    679 			}
    680 			break;
    681 
    682 		case TIME_OOP:
    683 			time_state = TIME_WAIT;
    684 			break;
    685 
    686 		case TIME_WAIT:
    687 			if (!(time_status & (STA_INS | STA_DEL)))
    688 				time_state = TIME_OK;
    689 			break;
    690 		}
    691 
    692 		/*
    693 		 * Compute the phase adjustment for the next second. In
    694 		 * PLL mode, the offset is reduced by a fixed factor
    695 		 * times the time constant. In FLL mode the offset is
    696 		 * used directly. In either mode, the maximum phase
    697 		 * adjustment for each second is clamped so as to spread
    698 		 * the adjustment over not more than the number of
    699 		 * seconds between updates.
    700 		 */
    701 		if (time_offset < 0) {
    702 			ltemp = -time_offset;
    703 			if (!(time_status & STA_FLL))
    704 				ltemp >>= SHIFT_KG + time_constant;
    705 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
    706 				ltemp = (MAXPHASE / MINSEC) <<
    707 				    SHIFT_UPDATE;
    708 			time_offset += ltemp;
    709 			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
    710 		} else if (time_offset > 0) {
    711 			ltemp = time_offset;
    712 			if (!(time_status & STA_FLL))
    713 				ltemp >>= SHIFT_KG + time_constant;
    714 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
    715 				ltemp = (MAXPHASE / MINSEC) <<
    716 				    SHIFT_UPDATE;
    717 			time_offset -= ltemp;
    718 			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
    719 		} else
    720 			time_adj = 0;
    721 
    722 		/*
    723 		 * Compute the frequency estimate and additional phase
    724 		 * adjustment due to frequency error for the next
    725 		 * second. When the PPS signal is engaged, gnaw on the
    726 		 * watchdog counter and update the frequency computed by
    727 		 * the pll and the PPS signal.
    728 		 */
    729 #ifdef PPS_SYNC
    730 		pps_valid++;
    731 		if (pps_valid == PPS_VALID) {
    732 			pps_jitter = MAXTIME;
    733 			pps_stabil = MAXFREQ;
    734 			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
    735 			    STA_PPSWANDER | STA_PPSERROR);
    736 		}
    737 		ltemp = time_freq + pps_freq;
    738 #else
    739 		ltemp = time_freq;
    740 #endif /* PPS_SYNC */
    741 
    742 		if (ltemp < 0)
    743 			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
    744 		else
    745 			time_adj += ltemp >> (SHIFT_USEC - shifthz);
    746 		time_adj += (long)fixtick << shifthz;
    747 
    748 		/*
    749 		 * When the CPU clock oscillator frequency is not a
    750 		 * power of 2 in Hz, shifthz is only an approximate
    751 		 * scale factor.
    752 		 *
    753 		 * To determine the adjustment, you can do the following:
    754 		 *   bc -q
    755 		 *   scale=24
    756 		 *   obase=2
    757 		 *   idealhz/realhz
    758 		 * where `idealhz' is the next higher power of 2, and `realhz'
    759 		 * is the actual value.  You may need to factor this result
    760 		 * into a sequence of 2 multipliers to get better precision.
    761 		 *
    762 		 * Likewise, the error can be calculated with (e.g. for 100Hz):
    763 		 *   bc -q
    764 		 *   scale=24
    765 		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
    766 		 * (and then multiply by 1000000 to get ppm).
    767 		 */
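        		/*
        		 * Worked example for hz = 100: idealhz = 128, and 128/100 =
        		 * 1.28 = 1.0100011110... in binary, which the case below
        		 * approximates as (1 + 2^-2 + 2^-5) * (1 - 2^-10), i.e. add
        		 * >>2 and >>5, then subtract >>10.
        		 */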
    768 		switch (hz) {
    769 		case 60:
    770 			/* A factor of 1.000100010001 gives about 15ppm
    771 			   error. */
    772 			if (time_adj < 0) {
    773 				time_adj -= (-time_adj >> 4);
    774 				time_adj -= (-time_adj >> 8);
    775 			} else {
    776 				time_adj += (time_adj >> 4);
    777 				time_adj += (time_adj >> 8);
    778 			}
    779 			break;
    780 
    781 		case 96:
    782 			/* A factor of 1.0101010101 gives about 244ppm error. */
    783 			if (time_adj < 0) {
    784 				time_adj -= (-time_adj >> 2);
    785 				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
    786 			} else {
    787 				time_adj += (time_adj >> 2);
    788 				time_adj += (time_adj >> 4) + (time_adj >> 8);
    789 			}
    790 			break;
    791 
    792 		case 50:
    793 		case 100:
    794 			/* A factor of 1.010001111010111 gives about 1ppm
    795 			   error. */
    796 			if (time_adj < 0) {
    797 				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
    798 				time_adj += (-time_adj >> 10);
    799 			} else {
    800 				time_adj += (time_adj >> 2) + (time_adj >> 5);
    801 				time_adj -= (time_adj >> 10);
    802 			}
    803 			break;
    804 
    805 		case 1000:
    806 			/* A factor of 1.000001100010100001 gives about 50ppm
    807 			   error. */
    808 			if (time_adj < 0) {
    809 				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
    810 				time_adj -= (-time_adj >> 7);
    811 			} else {
    812 				time_adj += (time_adj >> 6) + (time_adj >> 11);
    813 				time_adj += (time_adj >> 7);
    814 			}
    815 			break;
    816 
    817 		case 1200:
    818 			/* A factor of 1.1011010011100001 gives about 64ppm
    819 			   error. */
    820 			if (time_adj < 0) {
    821 				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
    822 				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
    823 			} else {
    824 				time_adj += (time_adj >> 1) + (time_adj >> 6);
    825 				time_adj += (time_adj >> 3) + (time_adj >> 10);
    826 			}
    827 			break;
    828 		}
    829 
    830 #ifdef EXT_CLOCK
    831 		/*
    832 		 * If an external clock is present, it is necessary to
    833 		 * discipline the kernel time variable anyway, since not
    834 		 * all system components use the microtime() interface.
    835 		 * Here, the time offset between the external clock and
    836 		 * kernel time variable is computed every so often.
    837 		 */
    838 		clock_count++;
    839 		if (clock_count > CLOCK_INTERVAL) {
    840 			clock_count = 0;
    841 			microtime(&clock_ext);
    842 			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
    843 			delta.tv_usec = clock_ext.tv_usec -
    844 			    time.tv_usec;
    845 			if (delta.tv_usec < 0)
    846 				delta.tv_sec--;
    847 			if (delta.tv_usec >= 500000) {
    848 				delta.tv_usec -= 1000000;
    849 				delta.tv_sec++;
    850 			}
    851 			if (delta.tv_usec < -500000) {
    852 				delta.tv_usec += 1000000;
    853 				delta.tv_sec--;
    854 			}
    855 			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
    856 			    delta.tv_usec > MAXPHASE) ||
    857 			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
    858 			    delta.tv_usec < -MAXPHASE)) {
    859 				time = clock_ext;
    860 				delta.tv_sec = 0;
    861 				delta.tv_usec = 0;
    862 			}
    863 #ifdef HIGHBALL
    864 			clock_cpu = delta.tv_usec;
    865 #else /* HIGHBALL */
    866 			hardupdate(delta.tv_usec);
    867 #endif /* HIGHBALL */
    868 		}
    869 #endif /* EXT_CLOCK */
    870 	}
    871 
    872 #endif /* NTP */
    873 #endif /* !__HAVE_TIMECOUNTER */
    874 
    875 	/*
    876 	 * Update real-time timeout queue.  Callouts are processed at a
    877 	 * very low CPU priority, so we don't keep the relatively high
    878 	 * clock interrupt priority any longer than necessary.
    879 	 */
    880 	if (callout_hardclock())
    881 		softintr_schedule(softclock_si);
    882 }
    883 
    884 #ifdef __HAVE_TIMECOUNTER
    885 /*
    886  * Compute number of hz until specified time.  Used to compute second
    887  * argument to callout_reset() from an absolute time.
    888  */
    889 int
    890 hzto(struct timeval *tvp)
    891 {
    892 	struct timeval now, tv;
    893 
    894 	tv = *tvp;	/* Don't modify original tvp. */
    895 	getmicrotime(&now);
    896 	timersub(&tv, &now, &tv);
    897 	return tvtohz(&tv);
    898 }
    899 #endif /* __HAVE_TIMECOUNTER */
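
        #if 0	/* illustrative only, not compiled */
        /*
         * Usage sketch, assuming a callout `c' already set up with
         * callout_init() and a hypothetical handler `foo' of type
         * void (*)(void *): arm the callout for a moment five seconds
         * away by converting the absolute deadline to ticks with hzto().
         */
        static struct callout c;
        static void foo(void *);

        static void
        arm_in_five(void)
        {
        	struct timeval deadline;

        	getmicrotime(&deadline);
        	deadline.tv_sec += 5;		/* five seconds from now */
        	callout_reset(&c, hzto(&deadline), foo, NULL);
        }
        #endif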
    900 
    901 /*
    902  * Compute number of ticks in the specified amount of time.
    903  */
    904 int
    905 tvtohz(struct timeval *tv)
    906 {
    907 	unsigned long ticks;
    908 	long sec, usec;
    909 
    910 	/*
    911 	 * If the number of usecs in the whole seconds part of the time
    912 	 * difference fits in a long, then the total number of usecs will
    913 	 * fit in an unsigned long.  Compute the total and convert it to
    914 	 * ticks, rounding up and adding 1 to allow for the current tick
    915 	 * to expire.  Rounding also depends on unsigned long arithmetic
    916 	 * to avoid overflow.
    917 	 *
    918 	 * Otherwise, if the number of ticks in the whole seconds part of
    919 	 * the time difference fits in a long, then convert the parts to
    920 	 * ticks separately and add, using similar rounding methods and
    921 	 * overflow avoidance.  This method would work in the previous
    922 	 * case, but it is slightly slower and assumes that hz is integral.
    923 	 *
    924 	 * Otherwise, round the time difference down to the maximum
    925 	 * representable value.
    926 	 *
    927 	 * If ints are 32-bit, then the maximum value for any timeout in
    928 	 * 10ms ticks is 248 days.
    929 	 */
    930 	sec = tv->tv_sec;
    931 	usec = tv->tv_usec;
    932 
    933 	if (usec < 0) {
    934 		sec--;
    935 		usec += 1000000;
    936 	}
    937 
    938 	if (sec < 0 || (sec == 0 && usec <= 0)) {
    939 		/*
    940 		 * Would expire now or in the past.  Return 0 ticks.
    941 		 * This is different from the legacy hzto() interface,
    942 		 * and callers need to check for it.
    943 		 */
    944 		ticks = 0;
    945 	} else if (sec <= (LONG_MAX / 1000000))
    946 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
    947 		    / tick) + 1;
    948 	else if (sec <= (LONG_MAX / hz))
    949 		ticks = (sec * hz) +
    950 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
    951 	else
    952 		ticks = LONG_MAX;
    953 
    954 	if (ticks > INT_MAX)
    955 		ticks = INT_MAX;
    956 
    957 	return ((int)ticks);
    958 }
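        /*
         * Example: with hz = 100 (tick = 10000 us), a difference of 2.5 s
         * yields ticks = ((2 * 1000000 + 500000 + 9999) / 10000) + 1 = 251.
         */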
    959 
    960 #ifndef __HAVE_TIMECOUNTER
    961 /*
    962  * Compute number of hz until specified time.  Used to compute second
    963  * argument to callout_reset() from an absolute time.
    964  */
    965 int
    966 hzto(struct timeval *tv)
    967 {
    968 	unsigned long ticks;
    969 	long sec, usec;
    970 	int s;
    971 
    972 	/*
    973 	 * If the number of usecs in the whole seconds part of the time
    974 	 * difference fits in a long, then the total number of usecs will
    975 	 * fit in an unsigned long.  Compute the total and convert it to
    976 	 * ticks, rounding up and adding 1 to allow for the current tick
    977 	 * to expire.  Rounding also depends on unsigned long arithmetic
    978 	 * to avoid overflow.
    979 	 *
    980 	 * Otherwise, if the number of ticks in the whole seconds part of
    981 	 * the time difference fits in a long, then convert the parts to
    982 	 * ticks separately and add, using similar rounding methods and
    983 	 * overflow avoidance.  This method would work in the previous
    984 	 * case, but it is slightly slower and assumes that hz is integral.
    985 	 *
    986 	 * Otherwise, round the time difference down to the maximum
    987 	 * representable value.
    988 	 *
    989 	 * If ints are 32-bit, then the maximum value for any timeout in
    990 	 * 10ms ticks is 248 days.
    991 	 */
    992 	s = splclock();
    993 	sec = tv->tv_sec - time.tv_sec;
    994 	usec = tv->tv_usec - time.tv_usec;
    995 	splx(s);
    996 
    997 	if (usec < 0) {
    998 		sec--;
    999 		usec += 1000000;
   1000 	}
   1001 
   1002 	if (sec < 0 || (sec == 0 && usec <= 0)) {
   1003 		/*
   1004 		 * Would expire now or in the past.  Return 0 ticks.
   1005 		 * This is different from the legacy hzto() interface,
   1006 		 * and callers need to check for it.
   1007 		 */
   1008 		ticks = 0;
   1009 	} else if (sec <= (LONG_MAX / 1000000))
   1010 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
   1011 		    / tick) + 1;
   1012 	else if (sec <= (LONG_MAX / hz))
   1013 		ticks = (sec * hz) +
   1014 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
   1015 	else
   1016 		ticks = LONG_MAX;
   1017 
   1018 	if (ticks > INT_MAX)
   1019 		ticks = INT_MAX;
   1020 
   1021 	return ((int)ticks);
   1022 }
   1023 #endif /* !__HAVE_TIMECOUNTER */
   1024 
   1025 /*
   1026  * Compute number of ticks in the specified amount of time.
   1027  */
   1028 int
   1029 tstohz(struct timespec *ts)
   1030 {
   1031 	struct timeval tv;
   1032 
   1033 	/*
   1034 	 * Microsecond resolution is fine enough for hz, so convert the
   1035 	 * timespec to a timeval and use tvtohz() above.
   1036 	 */
   1037 	TIMESPEC_TO_TIMEVAL(&tv, ts);
   1038 	return tvtohz(&tv);
   1039 }
   1040 
   1041 /*
   1042  * Start profiling on a process.
   1043  *
   1044  * Kernel profiling passes proc0 which never exits and hence
   1045  * keeps the profile clock running constantly.
   1046  */
   1047 void
   1048 startprofclock(struct proc *p)
   1049 {
   1050 
   1051 	LOCK_ASSERT(mutex_owned(&p->p_stmutex));
   1052 
   1053 	if ((p->p_stflag & PST_PROFIL) == 0) {
   1054 		p->p_stflag |= PST_PROFIL;
   1055 		/*
   1056 		 * This is only necessary if using the clock as the
   1057 		 * profiling source.
   1058 		 */
   1059 		if (++profprocs == 1 && stathz != 0)
   1060 			psdiv = psratio;
   1061 	}
   1062 }
   1063 
   1064 /*
   1065  * Stop profiling on a process.
   1066  */
   1067 void
   1068 stopprofclock(struct proc *p)
   1069 {
   1070 
   1071 	LOCK_ASSERT(mutex_owned(&p->p_stmutex));
   1072 
   1073 	if (p->p_stflag & PST_PROFIL) {
   1074 		p->p_stflag &= ~PST_PROFIL;
   1075 		/*
   1076 		 * This is only necessary if using the clock as the
   1077 		 * profiling source.
   1078 		 */
   1079 		if (--profprocs == 0 && stathz != 0)
   1080 			psdiv = 1;
   1081 	}
   1082 }
   1083 
   1084 #if defined(PERFCTRS)
   1085 /*
   1086  * Independent profiling "tick" in case we're using a separate
   1087  * clock or profiling event source.  Currently, that's just
   1088  * performance counters--hence the wrapper.
   1089  */
   1090 void
   1091 proftick(struct clockframe *frame)
   1092 {
   1093 #ifdef GPROF
   1094 	struct gmonparam *g;
   1095 	intptr_t i;
   1096 #endif
   1097 	struct lwp *l;
   1098 	struct proc *p;
   1099 
   1100 	l = curlwp;
   1101 	p = (l ? l->l_proc : NULL);
   1102 	if (CLKF_USERMODE(frame)) {
   1103 		mutex_spin_enter(&p->p_stmutex);
   1104 		if (p->p_stflag & PST_PROFIL)
   1105 			addupc_intr(l, CLKF_PC(frame));
   1106 		mutex_spin_exit(&p->p_stmutex);
   1107 	} else {
   1108 #ifdef GPROF
   1109 		g = &_gmonparam;
   1110 		if (g->state == GMON_PROF_ON) {
   1111 			i = CLKF_PC(frame) - g->lowpc;
   1112 			if (i < g->textsize) {
   1113 				i /= HISTFRACTION * sizeof(*g->kcount);
   1114 				g->kcount[i]++;
   1115 			}
   1116 		}
   1117 #endif
   1118 #ifdef PROC_PC
   1119 		if (p != NULL) {
   1120 			mutex_spin_enter(&p->p_stmutex);
   1121 			if (p->p_stflag & PST_PROFIL)
   1122 				addupc_intr(l, PROC_PC(p));
   1123 			mutex_spin_exit(&p->p_stmutex);
   1124 		}
   1125 #endif
   1126 	}
   1127 }
   1128 #endif
   1129 
   1130 /*
   1131  * Statistics clock.  Grab profile sample, and if divider reaches 0,
   1132  * do process and kernel statistics.
   1133  */
   1134 void
   1135 statclock(struct clockframe *frame)
   1136 {
   1137 #ifdef GPROF
   1138 	struct gmonparam *g;
   1139 	intptr_t i;
   1140 #endif
   1141 	struct cpu_info *ci = curcpu();
   1142 	struct schedstate_percpu *spc = &ci->ci_schedstate;
   1143 	struct proc *p;
   1144 	struct lwp *l;
   1145 
   1146 	/*
   1147 	 * Notice changes in divisor frequency, and adjust clock
   1148 	 * frequency accordingly.
   1149 	 */
   1150 	if (spc->spc_psdiv != psdiv) {
   1151 		spc->spc_psdiv = psdiv;
   1152 		spc->spc_pscnt = psdiv;
   1153 		if (psdiv == 1) {
   1154 			setstatclockrate(stathz);
   1155 		} else {
   1156 			setstatclockrate(profhz);
   1157 		}
   1158 	}
   1159 	l = curlwp;
   1160 	if ((l->l_flag & L_IDLE) != 0) {
   1161 		/*
   1162 		 * don't account idle lwps as swapper.
   1163 		 */
   1164 		p = NULL;
   1165 	} else {
   1166 		p = l->l_proc;
   1167 		mutex_spin_enter(&p->p_stmutex);
   1168 	}
   1169 
   1170 	if (CLKF_USERMODE(frame)) {
   1171 		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
   1172 			addupc_intr(l, CLKF_PC(frame));
   1173 		if (--spc->spc_pscnt > 0) {
   1174 			mutex_spin_exit(&p->p_stmutex);
   1175 			return;
   1176 		}
   1177 
   1178 		/*
   1179 		 * Came from user mode; CPU was in user state.
   1180 		 * If this process is being profiled record the tick.
   1181 		 */
   1182 		p->p_uticks++;
   1183 		if (p->p_nice > NZERO)
   1184 			spc->spc_cp_time[CP_NICE]++;
   1185 		else
   1186 			spc->spc_cp_time[CP_USER]++;
   1187 	} else {
   1188 #ifdef GPROF
   1189 		/*
   1190 		 * Kernel statistics are just like addupc_intr, only easier.
   1191 		 */
   1192 		g = &_gmonparam;
   1193 		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
   1194 			i = CLKF_PC(frame) - g->lowpc;
   1195 			if (i < g->textsize) {
   1196 				i /= HISTFRACTION * sizeof(*g->kcount);
   1197 				g->kcount[i]++;
   1198 			}
   1199 		}
   1200 #endif
   1201 #ifdef LWP_PC
   1202 		if (p != NULL && profsrc == PROFSRC_CLOCK &&
   1203 		    (p->p_stflag & PST_PROFIL)) {
   1204 			addupc_intr(l, LWP_PC(l));
   1205 		}
   1206 #endif
   1207 		if (--spc->spc_pscnt > 0) {
   1208 			if (p != NULL)
   1209 				mutex_spin_exit(&p->p_stmutex);
   1210 			return;
   1211 		}
   1212 		/*
   1213 		 * Came from kernel mode, so we were:
   1214 		 * - handling an interrupt,
   1215 		 * - doing syscall or trap work on behalf of the current
   1216 		 *   user process, or
   1217 		 * - spinning in the idle loop.
   1218 		 * Whichever it is, charge the time as appropriate.
   1219 		 * Note that we charge interrupts to the current process,
   1220 		 * regardless of whether they are ``for'' that process,
   1221 		 * so that we know how much of its real time was spent
   1222 		 * in ``non-process'' (i.e., interrupt) work.
   1223 		 */
   1224 		if (CLKF_INTR(frame)) {
   1225 			if (p != NULL) {
   1226 				p->p_iticks++;
   1227 			}
   1228 			spc->spc_cp_time[CP_INTR]++;
   1229 		} else if (p != NULL) {
   1230 			p->p_sticks++;
   1231 			spc->spc_cp_time[CP_SYS]++;
   1232 		} else {
   1233 			spc->spc_cp_time[CP_IDLE]++;
   1234 		}
   1235 	}
   1236 	spc->spc_pscnt = psdiv;
   1237 
   1238 	if (p == NULL) {
   1239 		return;
   1240 	}
   1241 
   1242 	++p->p_cpticks;
   1243 	mutex_spin_exit(&p->p_stmutex);
   1244 
   1245 	/*
   1246 	 * If no separate schedclock is provided, call it here
   1247 	 * at about 16 Hz.
   1248 	 */
   1249 	if (schedhz == 0) {
   1250 		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
   1251 			sched_clock(l);
   1252 			ci->ci_schedstate.spc_schedticks = statscheddiv;
   1253 		}
   1254 	}
   1255 }
   1256 
   1257 #ifndef __HAVE_TIMECOUNTER
   1258 #ifdef NTP	/* NTP phase-locked loop in kernel */
   1259 /*
   1260  * hardupdate() - local clock update
   1261  *
   1262  * This routine is called by ntp_adjtime() to update the local clock
   1263  * phase and frequency. The implementation is of an adaptive-parameter,
   1264  * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
   1265  * time and frequency offset estimates for each call. If the kernel PPS
   1266  * discipline code is configured (PPS_SYNC), the PPS signal itself
   1267  * determines the new time offset, instead of the calling argument.
   1268  * Presumably, calls to ntp_adjtime() occur only when the caller
   1269  * believes the local clock is valid within some bound (+-128 ms with
   1270  * NTP). If the caller's time is far different than the PPS time, an
   1271  * argument will ensue, and it's not clear who will lose.
   1272  *
   1273  * For uncompensated quartz crystal oscillators and nominal update
   1274  * intervals less than 1024 s, operation should be in phase-lock mode
   1275  * (STA_FLL = 0), where the loop is disciplined to phase. For update
   1276  * intervals greater than this, operation should be in frequency-lock
   1277  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
   1278  *
   1279  * Note: splclock() is in effect.
   1280  */
   1281 void
   1282 hardupdate(long offset)
   1283 {
   1284 	long ltemp, mtemp;
   1285 
   1286 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
   1287 		return;
   1288 	ltemp = offset;
   1289 #ifdef PPS_SYNC
   1290 	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
   1291 		ltemp = pps_offset;
   1292 #endif /* PPS_SYNC */
   1293 
   1294 	/*
   1295 	 * Scale the phase adjustment and clamp to the operating range.
   1296 	 */
   1297 	if (ltemp > MAXPHASE)
   1298 		time_offset = MAXPHASE << SHIFT_UPDATE;
   1299 	else if (ltemp < -MAXPHASE)
   1300 		time_offset = -(MAXPHASE << SHIFT_UPDATE);
   1301 	else
   1302 		time_offset = ltemp << SHIFT_UPDATE;
   1303 
   1304 	/*
   1305 	 * Select whether the frequency is to be controlled and in which
   1306 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
   1307 	 * multiply/divide should be replaced someday.
   1308 	 */
   1309 	if (time_status & STA_FREQHOLD || time_reftime == 0)
   1310 		time_reftime = time.tv_sec;
   1311 	mtemp = time.tv_sec - time_reftime;
   1312 	time_reftime = time.tv_sec;
   1313 	if (time_status & STA_FLL) {
   1314 		if (mtemp >= MINSEC) {
   1315 			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
   1316 			    SHIFT_UPDATE));
   1317 			if (ltemp < 0)
   1318 				time_freq -= -ltemp >> SHIFT_KH;
   1319 			else
   1320 				time_freq += ltemp >> SHIFT_KH;
   1321 		}
   1322 	} else {
   1323 		if (mtemp < MAXSEC) {
   1324 			ltemp *= mtemp;
   1325 			if (ltemp < 0)
   1326 				time_freq -= -ltemp >> (time_constant +
   1327 				    time_constant + SHIFT_KF -
   1328 				    SHIFT_USEC);
   1329 			else
   1330 				time_freq += ltemp >> (time_constant +
   1331 				    time_constant + SHIFT_KF -
   1332 				    SHIFT_USEC);
   1333 		}
   1334 	}
   1335 	if (time_freq > time_tolerance)
   1336 		time_freq = time_tolerance;
   1337 	else if (time_freq < -time_tolerance)
   1338 		time_freq = -time_tolerance;
   1339 }
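
        /*
         * Example (assuming the reference timex.h constants): in PLL mode
         * with time_constant = 0, hardclock() feeds back time_offset >>
         * SHIFT_KG of the remaining offset once per second, so with
         * SHIFT_KG = 6 a phase step decays by roughly 1/64 each second.
         */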
   1340 
   1341 #ifdef PPS_SYNC
   1342 /*
   1343  * hardpps() - discipline CPU clock oscillator to external PPS signal
   1344  *
   1345  * This routine is called at each PPS interrupt in order to discipline
   1346  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
   1347  * and leaves it in a handy spot for the hardclock() routine. It
   1348  * integrates successive PPS phase differences and calculates the
   1349  * frequency offset. This is used in hardclock() to discipline the CPU
   1350  * clock oscillator so that intrinsic frequency error is cancelled out.
   1351  * The code requires the caller to capture the time and hardware counter
   1352  * value at the on-time PPS signal transition.
   1353  *
   1354  * Note that, on some Unix systems, this routine runs at an interrupt
   1355  * priority level higher than the timer interrupt routine hardclock().
   1356  * Therefore, the variables used are distinct from the hardclock()
   1357  * variables, except for certain exceptions: The PPS frequency pps_freq
   1358  * and phase pps_offset variables are determined by this routine and
   1359  * updated atomically. The time_tolerance variable can be considered a
   1360  * constant, since it is infrequently changed, and then only when the
   1361  * PPS signal is disabled. The watchdog counter pps_valid is updated
   1362  * once per second by hardclock() and is atomically cleared in this
   1363  * routine.
   1364  */
   1365 void
   1366 hardpps(struct timeval *tvp,		/* time at PPS */
   1367 	long usec			/* hardware counter at PPS */)
   1368 {
   1369 	long u_usec, v_usec, bigtick;
   1370 	long cal_sec, cal_usec;
   1371 
   1372 	/*
   1373 	 * An occasional glitch can be produced when the PPS interrupt
   1374 	 * occurs in the hardclock() routine before the time variable is
   1375 	 * updated. Here the offset is discarded when the difference
   1376 	 * between it and the last one is greater than tick/2, but not
   1377 	 * if the interval since the first discard exceeds 30 s.
   1378 	 */
   1379 	time_status |= STA_PPSSIGNAL;
   1380 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
   1381 	pps_valid = 0;
   1382 	u_usec = -tvp->tv_usec;
   1383 	if (u_usec < -500000)
   1384 		u_usec += 1000000;
   1385 	v_usec = pps_offset - u_usec;
   1386 	if (v_usec < 0)
   1387 		v_usec = -v_usec;
   1388 	if (v_usec > (tick >> 1)) {
   1389 		if (pps_glitch > MAXGLITCH) {
   1390 			pps_glitch = 0;
   1391 			pps_tf[2] = u_usec;
   1392 			pps_tf[1] = u_usec;
   1393 		} else {
   1394 			pps_glitch++;
   1395 			u_usec = pps_offset;
   1396 		}
   1397 	} else
   1398 		pps_glitch = 0;
   1399 
   1400 	/*
   1401 	 * A three-stage median filter is used to help deglitch the pps
   1402 	 * time. The median sample becomes the time offset estimate; the
   1403 	 * difference between the other two samples becomes the time
   1404 	 * dispersion (jitter) estimate.
   1405 	 */
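        	/*
        	 * Example: with samples pps_tf[] = {5, -3, 2} (newest first),
        	 * the code below picks the median, 2, as pps_offset, and the
        	 * spread of the other two samples, 5 - (-3) = 8, as the jitter
        	 * sample v_usec.
        	 */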
   1406 	pps_tf[2] = pps_tf[1];
   1407 	pps_tf[1] = pps_tf[0];
   1408 	pps_tf[0] = u_usec;
   1409 	if (pps_tf[0] > pps_tf[1]) {
   1410 		if (pps_tf[1] > pps_tf[2]) {
   1411 			pps_offset = pps_tf[1];		/* 0 1 2 */
   1412 			v_usec = pps_tf[0] - pps_tf[2];
   1413 		} else if (pps_tf[2] > pps_tf[0]) {
   1414 			pps_offset = pps_tf[0];		/* 2 0 1 */
   1415 			v_usec = pps_tf[2] - pps_tf[1];
   1416 		} else {
   1417 			pps_offset = pps_tf[2];		/* 0 2 1 */
   1418 			v_usec = pps_tf[0] - pps_tf[1];
   1419 		}
   1420 	} else {
   1421 		if (pps_tf[1] < pps_tf[2]) {
   1422 			pps_offset = pps_tf[1];		/* 2 1 0 */
   1423 			v_usec = pps_tf[2] - pps_tf[0];
   1424 		} else  if (pps_tf[2] < pps_tf[0]) {
   1425 			pps_offset = pps_tf[0];		/* 1 0 2 */
   1426 			v_usec = pps_tf[1] - pps_tf[2];
   1427 		} else {
   1428 			pps_offset = pps_tf[2];		/* 1 2 0 */
   1429 			v_usec = pps_tf[1] - pps_tf[0];
   1430 		}
   1431 	}
   1432 	if (v_usec > MAXTIME)
   1433 		pps_jitcnt++;
   1434 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
   1435 	if (v_usec < 0)
   1436 		pps_jitter -= -v_usec >> PPS_AVG;
   1437 	else
   1438 		pps_jitter += v_usec >> PPS_AVG;
   1439 	if (pps_jitter > (MAXTIME >> 1))
   1440 		time_status |= STA_PPSJITTER;
   1441 
   1442 	/*
   1443 	 * During the calibration interval adjust the starting time when
   1444 	 * the tick overflows. At the end of the interval compute the
   1445 	 * duration of the interval and the difference of the hardware
   1446 	 * counters at the beginning and end of the interval. This code
   1447 	 * is deliciously complicated by the fact that valid differences may
   1448 	 * exceed the value of tick when using long calibration
   1449 	 * intervals and small ticks. Note that the counter can be
   1450 	 * greater than tick if caught at just the wrong instant, but
   1451 	 * the values returned and used here are correct.
   1452 	 */
   1453 	bigtick = (long)tick << SHIFT_USEC;
   1454 	pps_usec -= pps_freq;
   1455 	if (pps_usec >= bigtick)
   1456 		pps_usec -= bigtick;
   1457 	if (pps_usec < 0)
   1458 		pps_usec += bigtick;
   1459 	pps_time.tv_sec++;
   1460 	pps_count++;
   1461 	if (pps_count < (1 << pps_shift))
   1462 		return;
   1463 	pps_count = 0;
   1464 	pps_calcnt++;
   1465 	u_usec = usec << SHIFT_USEC;
   1466 	v_usec = pps_usec - u_usec;
   1467 	if (v_usec >= bigtick >> 1)
   1468 		v_usec -= bigtick;
   1469 	if (v_usec < -(bigtick >> 1))
   1470 		v_usec += bigtick;
   1471 	if (v_usec < 0)
   1472 		v_usec = -(-v_usec >> pps_shift);
   1473 	else
   1474 		v_usec = v_usec >> pps_shift;
   1475 	pps_usec = u_usec;
   1476 	cal_sec = tvp->tv_sec;
   1477 	cal_usec = tvp->tv_usec;
   1478 	cal_sec -= pps_time.tv_sec;
   1479 	cal_usec -= pps_time.tv_usec;
   1480 	if (cal_usec < 0) {
   1481 		cal_usec += 1000000;
   1482 		cal_sec--;
   1483 	}
   1484 	pps_time = *tvp;
   1485 
   1486 	/*
   1487 	 * Check for lost interrupts, noise, excessive jitter and
   1488 	 * excessive frequency error. The number of timer ticks during
   1489 	 * the interval may vary +-1 tick. Add to this a margin of one
   1490 	 * tick for the PPS signal jitter and maximum frequency
   1491 	 * deviation. If the limits are exceeded, the calibration
   1492 	 * interval is reset to the minimum and we start over.
   1493 	 */
   1494 	u_usec = (long)tick << 1;
   1495 	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
   1496 	    || (cal_sec == 0 && cal_usec < u_usec))
   1497 	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
   1498 		pps_errcnt++;
   1499 		pps_shift = PPS_SHIFT;
   1500 		pps_intcnt = 0;
   1501 		time_status |= STA_PPSERROR;
   1502 		return;
   1503 	}
   1504 
   1505 	/*
   1506 	 * A three-stage median filter is used to help deglitch the pps
   1507 	 * frequency. The median sample becomes the frequency offset
   1508 	 * estimate; the difference between the other two samples
   1509 	 * becomes the frequency dispersion (stability) estimate.
   1510 	 */
   1511 	pps_ff[2] = pps_ff[1];
   1512 	pps_ff[1] = pps_ff[0];
   1513 	pps_ff[0] = v_usec;
   1514 	if (pps_ff[0] > pps_ff[1]) {
   1515 		if (pps_ff[1] > pps_ff[2]) {
   1516 			u_usec = pps_ff[1];		/* 0 1 2 */
   1517 			v_usec = pps_ff[0] - pps_ff[2];
   1518 		} else if (pps_ff[2] > pps_ff[0]) {
   1519 			u_usec = pps_ff[0];		/* 2 0 1 */
   1520 			v_usec = pps_ff[2] - pps_ff[1];
   1521 		} else {
   1522 			u_usec = pps_ff[2];		/* 0 2 1 */
   1523 			v_usec = pps_ff[0] - pps_ff[1];
   1524 		}
   1525 	} else {
   1526 		if (pps_ff[1] < pps_ff[2]) {
   1527 			u_usec = pps_ff[1];		/* 2 1 0 */
   1528 			v_usec = pps_ff[2] - pps_ff[0];
   1529 		} else  if (pps_ff[2] < pps_ff[0]) {
   1530 			u_usec = pps_ff[0];		/* 1 0 2 */
   1531 			v_usec = pps_ff[1] - pps_ff[2];
   1532 		} else {
   1533 			u_usec = pps_ff[2];		/* 1 2 0 */
   1534 			v_usec = pps_ff[1] - pps_ff[0];
   1535 		}
   1536 	}
   1537 
   1538 	/*
   1539 	 * Here the frequency dispersion (stability) is updated. If it
   1540 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
   1541 	 * offset is updated as well, but clamped to the tolerance. It
   1542 	 * will be processed later by the hardclock() routine.
   1543 	 */
   1544 	v_usec = (v_usec >> 1) - pps_stabil;
   1545 	if (v_usec < 0)
   1546 		pps_stabil -= -v_usec >> PPS_AVG;
   1547 	else
   1548 		pps_stabil += v_usec >> PPS_AVG;
   1549 	if (pps_stabil > MAXFREQ >> 2) {
   1550 		pps_stbcnt++;
   1551 		time_status |= STA_PPSWANDER;
   1552 		return;
   1553 	}
   1554 	if (time_status & STA_PPSFREQ) {
   1555 		if (u_usec < 0) {
   1556 			pps_freq -= -u_usec >> PPS_AVG;
   1557 			if (pps_freq < -time_tolerance)
   1558 				pps_freq = -time_tolerance;
   1559 			u_usec = -u_usec;
   1560 		} else {
   1561 			pps_freq += u_usec >> PPS_AVG;
   1562 			if (pps_freq > time_tolerance)
   1563 				pps_freq = time_tolerance;
   1564 		}
   1565 	}
   1566 
   1567 	/*
   1568 	 * Here the calibration interval is adjusted. If the maximum
   1569 	 * time difference is greater than tick / 4, reduce the interval
   1570 	 * by half. If this is not the case for four consecutive
   1571 	 * intervals, double the interval.
   1572 	 */
   1573 	if (u_usec << pps_shift > bigtick >> 2) {
   1574 		pps_intcnt = 0;
   1575 		if (pps_shift > PPS_SHIFT)
   1576 			pps_shift--;
   1577 	} else if (pps_intcnt >= 4) {
   1578 		pps_intcnt = 0;
   1579 		if (pps_shift < PPS_SHIFTMAX)
   1580 			pps_shift++;
   1581 	} else
   1582 		pps_intcnt++;
   1583 }
   1584 #endif /* PPS_SYNC */
   1585 #endif /* NTP  */
   1586 
   1587 /* timecounter compat functions */
   1588 void
   1589 nanotime(struct timespec *ts)
   1590 {
   1591 	struct timeval tv;
   1592 
   1593 	microtime(&tv);
   1594 	TIMEVAL_TO_TIMESPEC(&tv, ts);
   1595 }
   1596 
   1597 void
   1598 getbinuptime(struct bintime *bt)
   1599 {
   1600 	struct timeval tv;
   1601 
   1602 	getmicrouptime(&tv);
   1603 	timeval2bintime(&tv, bt);
   1604 }
   1605 
   1606 void
   1607 nanouptime(struct timespec *tsp)
   1608 {
   1609 	int s;
   1610 
   1611 	s = splclock();
   1612 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
   1613 	splx(s);
   1614 }
   1615 
   1616 void
   1617 getnanouptime(struct timespec *tsp)
   1618 {
   1619 	int s;
   1620 
   1621 	s = splclock();
   1622 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
   1623 	splx(s);
   1624 }
   1625 
   1626 void
   1627 getmicrouptime(struct timeval *tvp)
   1628 {
   1629 	int s;
   1630 
   1631 	s = splclock();
   1632 	*tvp = mono_time;
   1633 	splx(s);
   1634 }
   1635 
   1636 void
   1637 getnanotime(struct timespec *tsp)
   1638 {
   1639 	int s;
   1640 
   1641 	s = splclock();
   1642 	TIMEVAL_TO_TIMESPEC(&time, tsp);
   1643 	splx(s);
   1644 }
   1645 
   1646 void
   1647 getmicrotime(struct timeval *tvp)
   1648 {
   1649 	int s;
   1650 
   1651 	s = splclock();
   1652 	*tvp = time;
   1653 	splx(s);
   1654 }
   1655 #endif /* !__HAVE_TIMECOUNTER */
   1656