      1 /*	$NetBSD: kern_clock.c,v 1.106.6.4 2007/07/01 21:43:41 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  * This code is derived from software contributed to The NetBSD Foundation
     11  * by Charles M. Hannum.
     12  *
     13  * Redistribution and use in source and binary forms, with or without
     14  * modification, are permitted provided that the following conditions
     15  * are met:
     16  * 1. Redistributions of source code must retain the above copyright
     17  *    notice, this list of conditions and the following disclaimer.
     18  * 2. Redistributions in binary form must reproduce the above copyright
     19  *    notice, this list of conditions and the following disclaimer in the
     20  *    documentation and/or other materials provided with the distribution.
     21  * 3. All advertising materials mentioning features or use of this software
     22  *    must display the following acknowledgement:
     23  *	This product includes software developed by the NetBSD
     24  *	Foundation, Inc. and its contributors.
     25  * 4. Neither the name of The NetBSD Foundation nor the names of its
     26  *    contributors may be used to endorse or promote products derived
     27  *    from this software without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     39  * POSSIBILITY OF SUCH DAMAGE.
     40  */
     41 
     42 /*-
     43  * Copyright (c) 1982, 1986, 1991, 1993
     44  *	The Regents of the University of California.  All rights reserved.
     45  * (c) UNIX System Laboratories, Inc.
     46  * All or some portions of this file are derived from material licensed
     47  * to the University of California by American Telephone and Telegraph
     48  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     49  * the permission of UNIX System Laboratories, Inc.
     50  *
     51  * Redistribution and use in source and binary forms, with or without
     52  * modification, are permitted provided that the following conditions
     53  * are met:
     54  * 1. Redistributions of source code must retain the above copyright
     55  *    notice, this list of conditions and the following disclaimer.
     56  * 2. Redistributions in binary form must reproduce the above copyright
     57  *    notice, this list of conditions and the following disclaimer in the
     58  *    documentation and/or other materials provided with the distribution.
     59  * 3. Neither the name of the University nor the names of its contributors
     60  *    may be used to endorse or promote products derived from this software
     61  *    without specific prior written permission.
     62  *
     63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     73  * SUCH DAMAGE.
     74  *
     75  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.106.6.4 2007/07/01 21:43:41 ad Exp $");
     80 
     81 #include "opt_ntp.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #include <sys/param.h>
     86 #include <sys/systm.h>
     87 #include <sys/callout.h>
     88 #include <sys/kernel.h>
     89 #include <sys/proc.h>
     90 #include <sys/resourcevar.h>
     91 #include <sys/signalvar.h>
     92 #include <sys/sysctl.h>
     93 #include <sys/timex.h>
     94 #include <sys/sched.h>
     95 #include <sys/time.h>
     96 #include <sys/timetc.h>
     97 #include <sys/cpu.h>
     98 #include <sys/intr.h>
     99 
    100 #ifdef GPROF
    101 #include <sys/gmon.h>
    102 #endif
    103 
    104 /*
    105  * Clock handling routines.
    106  *
    107  * This code is written to operate with two timers that run independently of
    108  * each other.  The main clock, running hz times per second, is used to keep
    109  * track of real time.  The second timer handles kernel and user profiling,
    110  * and does resource use estimation.  If the second timer is programmable,
    111  * it is randomized to avoid aliasing between the two clocks.  For example,
    112  * the randomization prevents an adversary from always giving up the CPU
    113  * just before its quantum expires.  Otherwise, it would never accumulate
    114  * CPU ticks.  The mean frequency of the second timer is stathz.
    115  *
    116  * If no second timer exists, stathz will be zero; in this case we drive
    117  * profiling and statistics off the main clock.  This WILL NOT be accurate;
    118  * do not do it unless absolutely necessary.
    119  *
    120  * The statistics clock may (or may not) be run at a higher rate while
    121  * profiling.  This profile clock runs at profhz.  We require that profhz
    122  * be an integral multiple of stathz.
    123  *
    124  * If the statistics clock is running fast, it must be divided by the ratio
    125  * profhz/stathz for statistics.  (For profiling, every tick counts.)
    126  */
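
         /*
          * As a worked example, under assumed rates: with stathz = 128 and
          * profhz = 1024, psratio = profhz / stathz = 8.  While any process
          * is being profiled, the statistics clock runs at profhz, and only
          * every 8th tick is counted toward statistics; the other seven
          * serve profiling alone.
          */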
    127 
    128 #ifndef __HAVE_TIMECOUNTER
    129 #ifdef NTP	/* NTP phase-locked loop in kernel */
    130 /*
    131  * Phase/frequency-lock loop (PLL/FLL) definitions
    132  *
    133  * The following variables are read and set by the ntp_adjtime() system
    134  * call.
    135  *
    136  * time_state shows the state of the system clock, with values defined
    137  * in the timex.h header file.
    138  *
    139  * time_status shows the status of the system clock, with bits defined
    140  * in the timex.h header file.
    141  *
    142  * time_offset is used by the PLL/FLL to adjust the system time in small
    143  * increments.
    144  *
    145  * time_constant determines the bandwidth or "stiffness" of the PLL.
    146  *
    147  * time_tolerance determines maximum frequency error or tolerance of the
    148  * CPU clock oscillator and is a property of the architecture; however,
    149  * in principle it could change as result of the presence of external
    150  * discipline signals, for instance.
    151  *
    152  * time_precision is usually equal to the kernel tick variable; however,
    153  * in cases where a precision clock counter or external clock is
    154  * available, the resolution can be much less than this and depend on
    155  * whether the external clock is working or not.
    156  *
    157  * time_maxerror is initialized by a ntp_adjtime() call and increased by
    158  * the kernel once each second to reflect the maximum error bound
    159  * growth.
    160  *
    161  * time_esterror is set and read by the ntp_adjtime() call, but
    162  * otherwise not used by the kernel.
    163  */
    164 int time_state = TIME_OK;	/* clock state */
    165 int time_status = STA_UNSYNC;	/* clock status bits */
    166 long time_offset = 0;		/* time offset (us) */
    167 long time_constant = 0;		/* pll time constant */
    168 long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
    169 long time_precision = 1;	/* clock precision (us) */
    170 long time_maxerror = MAXPHASE;	/* maximum error (us) */
    171 long time_esterror = MAXPHASE;	/* estimated error (us) */
    172 
    173 /*
    174  * The following variables establish the state of the PLL/FLL and the
    175  * residual time and frequency offset of the local clock. The scale
    176  * factors are defined in the timex.h header file.
    177  *
    178  * time_phase and time_freq are the phase increment and the frequency
    179  * increment, respectively, of the kernel time variable.
    180  *
    181  * time_freq is set via ntp_adjtime() from a value stored in a file when
    182  * the synchronization daemon is first started. Its value is retrieved
    183  * via ntp_adjtime() and written to the file about once per hour by the
    184  * daemon.
    185  *
    186  * time_adj is the adjustment added to the value of tick at each timer
    187  * interrupt and is recomputed from time_phase and time_freq at each
    188  * seconds rollover.
    189  *
    190  * time_reftime is the second's portion of the system time at the last
    191  * call to ntp_adjtime(). It is used to adjust the time_freq variable
    192  * and to increase the time_maxerror as the time since last update
    193  * increases.
    194  */
    195 long time_phase = 0;		/* phase offset (scaled us) */
    196 long time_freq = 0;		/* frequency offset (scaled ppm) */
    197 long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
    198 long time_reftime = 0;		/* time at last adjustment (s) */
    199 
    200 #ifdef PPS_SYNC
    201 /*
    202  * The following variables are used only if the kernel PPS discipline
    203  * code is configured (PPS_SYNC). The scale factors are defined in the
    204  * timex.h header file.
    205  *
    206  * pps_time contains the time at each calibration interval, as read by
    207  * microtime(). pps_count counts the seconds of the calibration
    208  * interval, the duration of which is nominally pps_shift in powers of
    209  * two.
    210  *
    211  * pps_offset is the time offset produced by the time median filter
    212  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
    213  * this filter.
    214  *
    215  * pps_freq is the frequency offset produced by the frequency median
    216  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
    217  * by this filter.
    218  *
    219  * pps_usec is latched from a high resolution counter or external clock
    220  * at pps_time. Here we want the hardware counter contents only, not the
     221  * contents plus the time.tv_usec as usual.
    222  *
    223  * pps_valid counts the number of seconds since the last PPS update. It
    224  * is used as a watchdog timer to disable the PPS discipline should the
    225  * PPS signal be lost.
    226  *
    227  * pps_glitch counts the number of seconds since the beginning of an
    228  * offset burst more than tick/2 from current nominal offset. It is used
    229  * mainly to suppress error bursts due to priority conflicts between the
    230  * PPS interrupt and timer interrupt.
    231  *
    232  * pps_intcnt counts the calibration intervals for use in the interval-
    233  * adaptation algorithm. It's just too complicated for words.
    234  *
    235  * pps_kc_hardpps_source contains an arbitrary value that uniquely
    236  * identifies the currently bound source of the PPS signal, or NULL
    237  * if no source is bound.
    238  *
    239  * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
    240  * signal should be reported.
    241  */
    242 struct timeval pps_time;	/* kernel time at last interval */
    243 long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
    244 long pps_offset = 0;		/* pps time offset (us) */
    245 long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
    246 long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
    247 long pps_freq = 0;		/* frequency offset (scaled ppm) */
    248 long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
    249 long pps_usec = 0;		/* microsec counter at last interval */
    250 long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
    251 int pps_glitch = 0;		/* pps signal glitch counter */
    252 int pps_count = 0;		/* calibration interval counter (s) */
    253 int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
    254 int pps_intcnt = 0;		/* intervals at current duration */
    255 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
    256 int pps_kc_hardpps_mode = 0;	/* interesting edges of PPS signal */
    257 
    258 /*
    259  * PPS signal quality monitors
    260  *
    261  * pps_jitcnt counts the seconds that have been discarded because the
    262  * jitter measured by the time median filter exceeds the limit MAXTIME
    263  * (100 us).
    264  *
    265  * pps_calcnt counts the frequency calibration intervals, which are
    266  * variable from 4 s to 256 s.
    267  *
    268  * pps_errcnt counts the calibration intervals which have been discarded
    269  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
    270  * calibration interval jitter exceeds two ticks.
    271  *
    272  * pps_stbcnt counts the calibration intervals that have been discarded
     273  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
    274  */
    275 long pps_jitcnt = 0;		/* jitter limit exceeded */
    276 long pps_calcnt = 0;		/* calibration intervals */
    277 long pps_errcnt = 0;		/* calibration errors */
    278 long pps_stbcnt = 0;		/* stability limit exceeded */
    279 #endif /* PPS_SYNC */
    280 
    281 #ifdef EXT_CLOCK
    282 /*
    283  * External clock definitions
    284  *
    285  * The following definitions and declarations are used only if an
    286  * external clock is configured on the system.
    287  */
    288 #define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */
    289 
    290 /*
    291  * The clock_count variable is set to CLOCK_INTERVAL at each PPS
    292  * interrupt and decremented once each second.
    293  */
    294 int clock_count = 0;		/* CPU clock counter */
    295 
    296 #ifdef HIGHBALL
    297 /*
    298  * The clock_offset and clock_cpu variables are used by the HIGHBALL
    299  * interface. The clock_offset variable defines the offset between
     300  * system time and the HIGHBALL counters. The clock_cpu variable contains
    301  * the offset between the system clock and the HIGHBALL clock for use in
    302  * disciplining the kernel time variable.
    303  */
    304 extern struct timeval clock_offset; /* Highball clock offset */
    305 long clock_cpu = 0;		/* CPU clock adjust */
    306 #endif /* HIGHBALL */
    307 #endif /* EXT_CLOCK */
    308 #endif /* NTP */
    309 
    310 /*
    311  * Bump a timeval by a small number of usec's.
    312  */
    313 #define BUMPTIME(t, usec) { \
    314 	volatile struct timeval *tp = (t); \
    315 	long us; \
    316  \
    317 	tp->tv_usec = us = tp->tv_usec + (usec); \
    318 	if (us >= 1000000) { \
    319 		tp->tv_usec = us - 1000000; \
    320 		tp->tv_sec++; \
    321 	} \
    322 }
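
         /*
          * For example, with an increment of 20, BUMPTIME applied to a
          * timeval of { 4, 999990 } yields { 5, 10 }: the sum 1000010
          * exceeds one second, so 1000000 is subtracted and tv_sec is
          * carried.  Only a single carry is performed, which is why the
          * increment must be a small number of usec's (below 1000000).
          */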
    323 #endif /* !__HAVE_TIMECOUNTER */
    324 
    325 int	stathz;
    326 int	profhz;
    327 int	profsrc;
    328 int	schedhz;
    329 int	profprocs;
    330 int	hardclock_ticks;
    331 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
    332 static int psdiv;			/* prof => stat divider */
    333 int	psratio;			/* ratio: prof / stat */
    334 #ifndef __HAVE_TIMECOUNTER
    335 int	tickfix, tickfixinterval;	/* used if tick not really integral */
    336 #ifndef NTP
    337 static int tickfixcnt;			/* accumulated fractional error */
    338 #else
    339 int	fixtick;			/* used by NTP for same */
    340 int	shifthz;
    341 #endif
    342 
    343 /*
     344  * We might want ldd to load both words of time at once.
    345  * To succeed we need to be quadword aligned.
    346  * The sparc already does that, and that it has worked so far is a fluke.
    347  */
    348 volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
    349 volatile struct	timeval mono_time;
    350 #endif /* !__HAVE_TIMECOUNTER */
    351 
    352 #ifdef __HAVE_TIMECOUNTER
    353 static u_int get_intr_timecount(struct timecounter *);
    354 
    355 static struct timecounter intr_timecounter = {
    356 	get_intr_timecount,	/* get_timecount */
    357 	0,			/* no poll_pps */
    358 	~0u,			/* counter_mask */
    359 	0,		        /* frequency */
    360 	"clockinterrupt",	/* name */
    361 	0,			/* quality - minimum implementation level for a clock */
    362 	NULL,			/* prev */
    363 	NULL,			/* next */
    364 };
    365 
    366 static u_int
    367 get_intr_timecount(struct timecounter *tc)
    368 {
    369 
    370 	return (u_int)hardclock_ticks;
    371 }
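
         #if 0
         /*
          * A minimal sketch, for illustration only, of how machine-dependent
          * code might register a better counter with the framework.  The
          * device "mycounter", its mapped register, its 1 MHz frequency and
          * its quality value are all hypothetical.
          */
         static volatile u_int *mycounter_reg;	/* mapped by MD attach code */

         static u_int
         mycounter_get(struct timecounter *tc)
         {

         	return *mycounter_reg;	/* hardware free-running counter */
         }

         static struct timecounter mycounter_timecounter = {
         	mycounter_get,		/* get_timecount */
         	0,			/* no poll_pps */
         	~0u,			/* counter_mask */
         	1000000,		/* frequency: 1 MHz, hypothetical */
         	"mycounter",		/* name */
         	100,			/* quality: preferred over "clockinterrupt" */
         	NULL,			/* prev */
         	NULL,			/* next */
         };

         static void
         mycounter_attach(void)
         {

         	tc_init(&mycounter_timecounter);
         }
         #endif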
    372 #endif
    373 
    374 /*
    375  * Initialize clock frequencies and start both clocks running.
    376  */
    377 void
    378 initclocks(void)
    379 {
    380 	int i;
    381 
    382 	/*
    383 	 * Set divisors to 1 (normal case) and let the machine-specific
    384 	 * code do its bit.
    385 	 */
    386 	psdiv = 1;
    387 #ifdef __HAVE_TIMECOUNTER
    388 	/*
     389 	 * Provide a minimum default time counter; it will only
     390 	 * run at interrupt resolution.
    391 	 */
    392 	intr_timecounter.tc_frequency = hz;
    393 	tc_init(&intr_timecounter);
    394 #endif
    395 	cpu_initclocks();
    396 
    397 	/*
    398 	 * Compute profhz and stathz, fix profhz if needed.
    399 	 */
    400 	i = stathz ? stathz : hz;
    401 	if (profhz == 0)
    402 		profhz = i;
    403 	psratio = profhz / i;
    404 	if (schedhz == 0) {
    405 		/* 16Hz is best */
    406 		statscheddiv = i / 16;
    407 		if (statscheddiv <= 0)
    408 			panic("statscheddiv");
    409 	}
    410 
    411 #ifndef __HAVE_TIMECOUNTER
    412 #ifdef NTP
    413 	switch (hz) {
    414 	case 1:
    415 		shifthz = SHIFT_SCALE - 0;
    416 		break;
    417 	case 2:
    418 		shifthz = SHIFT_SCALE - 1;
    419 		break;
    420 	case 4:
    421 		shifthz = SHIFT_SCALE - 2;
    422 		break;
    423 	case 8:
    424 		shifthz = SHIFT_SCALE - 3;
    425 		break;
    426 	case 16:
    427 		shifthz = SHIFT_SCALE - 4;
    428 		break;
    429 	case 32:
    430 		shifthz = SHIFT_SCALE - 5;
    431 		break;
    432 	case 50:
    433 	case 60:
    434 	case 64:
    435 		shifthz = SHIFT_SCALE - 6;
    436 		break;
    437 	case 96:
    438 	case 100:
    439 	case 128:
    440 		shifthz = SHIFT_SCALE - 7;
    441 		break;
    442 	case 256:
    443 		shifthz = SHIFT_SCALE - 8;
    444 		break;
    445 	case 512:
    446 		shifthz = SHIFT_SCALE - 9;
    447 		break;
    448 	case 1000:
    449 	case 1024:
    450 		shifthz = SHIFT_SCALE - 10;
    451 		break;
    452 	case 1200:
    453 	case 2048:
    454 		shifthz = SHIFT_SCALE - 11;
    455 		break;
    456 	case 4096:
    457 		shifthz = SHIFT_SCALE - 12;
    458 		break;
    459 	case 8192:
    460 		shifthz = SHIFT_SCALE - 13;
    461 		break;
    462 	case 16384:
    463 		shifthz = SHIFT_SCALE - 14;
    464 		break;
    465 	case 32768:
    466 		shifthz = SHIFT_SCALE - 15;
    467 		break;
    468 	case 65536:
    469 		shifthz = SHIFT_SCALE - 16;
    470 		break;
    471 	default:
    472 		panic("weird hz");
    473 	}
    474 	if (fixtick == 0) {
    475 		/*
    476 		 * Give MD code a chance to set this to a better
    477 		 * value; but, if it doesn't, we should.
    478 		 */
    479 		fixtick = (1000000 - (hz*tick));
    480 	}
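
         	/*
         	 * For instance, with hz = 1024 and the usual tick of
         	 * 1000000 / hz = 976 us, hz * tick = 999424, so fixtick
         	 * becomes 576: the 576 us that the truncated tick loses
         	 * every second and that the NTP code adds back.
         	 */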
    481 #endif /* NTP */
    482 #endif /* !__HAVE_TIMECOUNTER */
    483 }
    484 
    485 /*
    486  * The real-time timer, interrupting hz times per second.
    487  */
    488 void
    489 hardclock(struct clockframe *frame)
    490 {
    491 	struct lwp *l;
    492 	struct proc *p;
    493 	struct cpu_info *ci = curcpu();
    494 	struct ptimer *pt;
    495 #ifndef __HAVE_TIMECOUNTER
    496 	int delta;
    497 	extern int tickdelta;
    498 	extern long timedelta;
    499 #ifdef NTP
    500 	int time_update;
    501 	int ltemp;
    502 #endif /* NTP */
    503 #endif /* __HAVE_TIMECOUNTER */
    504 
    505 	l = curlwp;
    506 	if (!CURCPU_IDLE_P()) {
    507 		p = l->l_proc;
    508 		/*
    509 		 * Run current process's virtual and profile time, as needed.
    510 		 */
    511 		if (CLKF_USERMODE(frame) && p->p_timers &&
    512 		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
    513 			if (itimerdecr(pt, tick) == 0)
    514 				itimerfire(pt);
    515 		if (p->p_timers &&
    516 		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
    517 			if (itimerdecr(pt, tick) == 0)
    518 				itimerfire(pt);
    519 	}
    520 
    521 	/*
    522 	 * If no separate statistics clock is available, run it from here.
    523 	 */
    524 	if (stathz == 0)
    525 		statclock(frame);
    526 	if ((--ci->ci_schedstate.spc_ticks) <= 0)
    527 		sched_tick(ci);
    528 
    529 #if defined(MULTIPROCESSOR)
    530 	/*
    531 	 * If we are not the primary CPU, we're not allowed to do
    532 	 * any more work.
    533 	 */
    534 	if (CPU_IS_PRIMARY(ci) == 0)
    535 		return;
    536 #endif
    537 
    538 	hardclock_ticks++;
    539 
    540 #ifdef __HAVE_TIMECOUNTER
    541 	tc_ticktock();
    542 #else /* __HAVE_TIMECOUNTER */
    543 	/*
    544 	 * Increment the time-of-day.  The increment is normally just
    545 	 * ``tick''.  If the machine is one which has a clock frequency
    546 	 * such that ``hz'' would not divide the second evenly into
    547 	 * milliseconds, a periodic adjustment must be applied.  Finally,
    548 	 * if we are still adjusting the time (see adjtime()),
    549 	 * ``tickdelta'' may also be added in.
    550 	 */
    551 	delta = tick;
    552 
    553 #ifndef NTP
    554 	if (tickfix) {
    555 		tickfixcnt += tickfix;
    556 		if (tickfixcnt >= tickfixinterval) {
    557 			delta++;
    558 			tickfixcnt -= tickfixinterval;
    559 		}
    560 	}
    561 #endif /* !NTP */
    562 	/* Imprecise 4bsd adjtime() handling */
    563 	if (timedelta != 0) {
    564 		delta += tickdelta;
    565 		timedelta -= tickdelta;
    566 	}
    567 
    568 #ifdef notyet
    569 	microset();
    570 #endif
    571 
    572 #ifndef NTP
    573 	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
    574 #endif
    575 	BUMPTIME(&mono_time, delta);
    576 
    577 #ifdef NTP
    578 	time_update = delta;
    579 
    580 	/*
    581 	 * Compute the phase adjustment. If the low-order bits
    582 	 * (time_phase) of the update overflow, bump the high-order bits
    583 	 * (time_update).
    584 	 */
    585 	time_phase += time_adj;
    586 	if (time_phase <= -FINEUSEC) {
    587 		ltemp = -time_phase >> SHIFT_SCALE;
    588 		time_phase += ltemp << SHIFT_SCALE;
    589 		time_update -= ltemp;
    590 	} else if (time_phase >= FINEUSEC) {
    591 		ltemp = time_phase >> SHIFT_SCALE;
    592 		time_phase -= ltemp << SHIFT_SCALE;
    593 		time_update += ltemp;
    594 	}
    595 
    596 #ifdef HIGHBALL
    597 	/*
    598 	 * If the HIGHBALL board is installed, we need to adjust the
    599 	 * external clock offset in order to close the hardware feedback
    600 	 * loop. This will adjust the external clock phase and frequency
    601 	 * in small amounts. The additional phase noise and frequency
    602 	 * wander this causes should be minimal. We also need to
    603 	 * discipline the kernel time variable, since the PLL is used to
    604 	 * discipline the external clock. If the Highball board is not
    605 	 * present, we discipline kernel time with the PLL as usual. We
    606 	 * assume that the external clock phase adjustment (time_update)
    607 	 * and kernel phase adjustment (clock_cpu) are less than the
    608 	 * value of tick.
    609 	 */
    610 	clock_offset.tv_usec += time_update;
    611 	if (clock_offset.tv_usec >= 1000000) {
    612 		clock_offset.tv_sec++;
    613 		clock_offset.tv_usec -= 1000000;
    614 	}
    615 	if (clock_offset.tv_usec < 0) {
    616 		clock_offset.tv_sec--;
    617 		clock_offset.tv_usec += 1000000;
    618 	}
    619 	time.tv_usec += clock_cpu;
    620 	clock_cpu = 0;
    621 #else
    622 	time.tv_usec += time_update;
    623 #endif /* HIGHBALL */
    624 
    625 	/*
    626 	 * On rollover of the second the phase adjustment to be used for
    627 	 * the next second is calculated. Also, the maximum error is
    628 	 * increased by the tolerance. If the PPS frequency discipline
    629 	 * code is present, the phase is increased to compensate for the
    630 	 * CPU clock oscillator frequency error.
    631 	 *
     632 	 * On a 32-bit machine and given parameters in the timex.h
     633 	 * header file, the maximum phase adjustment is +-512 ms and
     634 	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
    635 	 * 64-bit machine, you shouldn't need to ask.
    636 	 */
    637 	if (time.tv_usec >= 1000000) {
    638 		time.tv_usec -= 1000000;
    639 		time.tv_sec++;
    640 		time_maxerror += time_tolerance >> SHIFT_USEC;
    641 
    642 		/*
    643 		 * Leap second processing. If in leap-insert state at
    644 		 * the end of the day, the system clock is set back one
    645 		 * second; if in leap-delete state, the system clock is
    646 		 * set ahead one second. The microtime() routine or
     647 		 * external clock driver will ensure that reported time
    648 		 * is always monotonic. The ugly divides should be
    649 		 * replaced.
    650 		 */
    651 		switch (time_state) {
    652 		case TIME_OK:
    653 			if (time_status & STA_INS)
    654 				time_state = TIME_INS;
    655 			else if (time_status & STA_DEL)
    656 				time_state = TIME_DEL;
    657 			break;
    658 
    659 		case TIME_INS:
    660 			if (time.tv_sec % 86400 == 0) {
    661 				time.tv_sec--;
    662 				time_state = TIME_OOP;
    663 			}
    664 			break;
    665 
    666 		case TIME_DEL:
    667 			if ((time.tv_sec + 1) % 86400 == 0) {
    668 				time.tv_sec++;
    669 				time_state = TIME_WAIT;
    670 			}
    671 			break;
    672 
    673 		case TIME_OOP:
    674 			time_state = TIME_WAIT;
    675 			break;
    676 
    677 		case TIME_WAIT:
    678 			if (!(time_status & (STA_INS | STA_DEL)))
    679 				time_state = TIME_OK;
    680 			break;
    681 		}
    682 
    683 		/*
    684 		 * Compute the phase adjustment for the next second. In
    685 		 * PLL mode, the offset is reduced by a fixed factor
    686 		 * times the time constant. In FLL mode the offset is
    687 		 * used directly. In either mode, the maximum phase
    688 		 * adjustment for each second is clamped so as to spread
    689 		 * the adjustment over not more than the number of
    690 		 * seconds between updates.
    691 		 */
    692 		if (time_offset < 0) {
    693 			ltemp = -time_offset;
    694 			if (!(time_status & STA_FLL))
    695 				ltemp >>= SHIFT_KG + time_constant;
    696 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
    697 				ltemp = (MAXPHASE / MINSEC) <<
    698 				    SHIFT_UPDATE;
    699 			time_offset += ltemp;
    700 			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
    701 		} else if (time_offset > 0) {
    702 			ltemp = time_offset;
    703 			if (!(time_status & STA_FLL))
    704 				ltemp >>= SHIFT_KG + time_constant;
    705 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
    706 				ltemp = (MAXPHASE / MINSEC) <<
    707 				    SHIFT_UPDATE;
    708 			time_offset -= ltemp;
    709 			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
    710 		} else
    711 			time_adj = 0;
    712 
    713 		/*
    714 		 * Compute the frequency estimate and additional phase
    715 		 * adjustment due to frequency error for the next
    716 		 * second. When the PPS signal is engaged, gnaw on the
    717 		 * watchdog counter and update the frequency computed by
    718 		 * the pll and the PPS signal.
    719 		 */
    720 #ifdef PPS_SYNC
    721 		pps_valid++;
    722 		if (pps_valid == PPS_VALID) {
    723 			pps_jitter = MAXTIME;
    724 			pps_stabil = MAXFREQ;
    725 			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
    726 			    STA_PPSWANDER | STA_PPSERROR);
    727 		}
    728 		ltemp = time_freq + pps_freq;
    729 #else
    730 		ltemp = time_freq;
    731 #endif /* PPS_SYNC */
    732 
    733 		if (ltemp < 0)
    734 			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
    735 		else
    736 			time_adj += ltemp >> (SHIFT_USEC - shifthz);
    737 		time_adj += (long)fixtick << shifthz;
    738 
    739 		/*
    740 		 * When the CPU clock oscillator frequency is not a
    741 		 * power of 2 in Hz, shifthz is only an approximate
    742 		 * scale factor.
    743 		 *
    744 		 * To determine the adjustment, you can do the following:
    745 		 *   bc -q
    746 		 *   scale=24
    747 		 *   obase=2
    748 		 *   idealhz/realhz
    749 		 * where `idealhz' is the next higher power of 2, and `realhz'
    750 		 * is the actual value.  You may need to factor this result
    751 		 * into a sequence of 2 multipliers to get better precision.
    752 		 *
    753 		 * Likewise, the error can be calculated with (e.g. for 100Hz):
    754 		 *   bc -q
    755 		 *   scale=24
    756 		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
    757 		 * (and then multiply by 1000000 to get ppm).
    758 		 */
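         		/*
         		 * For instance, for hz = 60 the next higher power of 2
         		 * is 64, and 64/60 = 16/15 = 1.000100010001... in binary.
         		 * The sequential shifts below compute
         		 * (1 + 2^-4)(1 + 2^-8) = 1 + 2^-4 + 2^-8 + 2^-12,
         		 * exactly the truncated factor 1.000100010001; the
         		 * discarded repeating tail (2^-16 + ...) accounts for the
         		 * quoted ~15ppm error.
         		 */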
    759 		switch (hz) {
    760 		case 60:
    761 			/* A factor of 1.000100010001 gives about 15ppm
    762 			   error. */
    763 			if (time_adj < 0) {
    764 				time_adj -= (-time_adj >> 4);
    765 				time_adj -= (-time_adj >> 8);
    766 			} else {
    767 				time_adj += (time_adj >> 4);
    768 				time_adj += (time_adj >> 8);
    769 			}
    770 			break;
    771 
    772 		case 96:
    773 			/* A factor of 1.0101010101 gives about 244ppm error. */
    774 			if (time_adj < 0) {
    775 				time_adj -= (-time_adj >> 2);
    776 				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
    777 			} else {
    778 				time_adj += (time_adj >> 2);
    779 				time_adj += (time_adj >> 4) + (time_adj >> 8);
    780 			}
    781 			break;
    782 
    783 		case 50:
    784 		case 100:
    785 			/* A factor of 1.010001111010111 gives about 1ppm
    786 			   error. */
    787 			if (time_adj < 0) {
    788 				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
    789 				time_adj += (-time_adj >> 10);
    790 			} else {
    791 				time_adj += (time_adj >> 2) + (time_adj >> 5);
    792 				time_adj -= (time_adj >> 10);
    793 			}
    794 			break;
    795 
    796 		case 1000:
    797 			/* A factor of 1.000001100010100001 gives about 50ppm
    798 			   error. */
    799 			if (time_adj < 0) {
    800 				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
    801 				time_adj -= (-time_adj >> 7);
    802 			} else {
    803 				time_adj += (time_adj >> 6) + (time_adj >> 11);
    804 				time_adj += (time_adj >> 7);
    805 			}
    806 			break;
    807 
    808 		case 1200:
    809 			/* A factor of 1.1011010011100001 gives about 64ppm
    810 			   error. */
    811 			if (time_adj < 0) {
    812 				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
    813 				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
    814 			} else {
    815 				time_adj += (time_adj >> 1) + (time_adj >> 6);
    816 				time_adj += (time_adj >> 3) + (time_adj >> 10);
    817 			}
    818 			break;
    819 		}
    820 
    821 #ifdef EXT_CLOCK
    822 		/*
    823 		 * If an external clock is present, it is necessary to
    824 		 * discipline the kernel time variable anyway, since not
    825 		 * all system components use the microtime() interface.
    826 		 * Here, the time offset between the external clock and
    827 		 * kernel time variable is computed every so often.
    828 		 */
    829 		clock_count++;
    830 		if (clock_count > CLOCK_INTERVAL) {
    831 			clock_count = 0;
    832 			microtime(&clock_ext);
    833 			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
    834 			delta.tv_usec = clock_ext.tv_usec -
    835 			    time.tv_usec;
    836 			if (delta.tv_usec < 0)
    837 				delta.tv_sec--;
    838 			if (delta.tv_usec >= 500000) {
    839 				delta.tv_usec -= 1000000;
    840 				delta.tv_sec++;
    841 			}
    842 			if (delta.tv_usec < -500000) {
    843 				delta.tv_usec += 1000000;
    844 				delta.tv_sec--;
    845 			}
    846 			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
    847 			    delta.tv_usec > MAXPHASE) ||
    848 			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
    849 			    delta.tv_usec < -MAXPHASE)) {
    850 				time = clock_ext;
    851 				delta.tv_sec = 0;
    852 				delta.tv_usec = 0;
    853 			}
    854 #ifdef HIGHBALL
    855 			clock_cpu = delta.tv_usec;
    856 #else /* HIGHBALL */
    857 			hardupdate(delta.tv_usec);
    858 #endif /* HIGHBALL */
    859 		}
    860 #endif /* EXT_CLOCK */
    861 	}
    862 
    863 #endif /* NTP */
    864 #endif /* !__HAVE_TIMECOUNTER */
    865 
    866 	/*
    867 	 * Update real-time timeout queue.  Callouts are processed at a
    868 	 * very low CPU priority, so we don't keep the relatively high
    869 	 * clock interrupt priority any longer than necessary.
    870 	 */
    871 	callout_hardclock();
    872 }
    873 
    874 #ifdef __HAVE_TIMECOUNTER
    875 /*
    876  * Compute number of hz until specified time.  Used to compute second
    877  * argument to callout_reset() from an absolute time.
    878  */
    879 int
    880 hzto(struct timeval *tvp)
    881 {
    882 	struct timeval now, tv;
    883 
    884 	tv = *tvp;	/* Don't modify original tvp. */
    885 	getmicrotime(&now);
    886 	timersub(&tv, &now, &tv);
    887 	return tvtohz(&tv);
    888 }
    889 #endif /* __HAVE_TIMECOUNTER */
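
         #if 0
         /*
          * A minimal usage sketch, for illustration only: arm a callout to
          * fire at an absolute time five seconds from now.  "my_callout",
          * "my_handler" and "my_arm_timeout" are hypothetical.
          */
         static struct callout my_callout;

         static void
         my_handler(void *arg)
         {

         	/* timeout work goes here */
         }

         static void
         my_arm_timeout(void)
         {
         	struct timeval deadline;

         	getmicrotime(&deadline);
         	deadline.tv_sec += 5;
         	callout_reset(&my_callout, hzto(&deadline), my_handler, NULL);
         }
         #endif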
    890 
    891 /*
    892  * Compute number of ticks in the specified amount of time.
    893  */
    894 int
    895 tvtohz(struct timeval *tv)
    896 {
    897 	unsigned long ticks;
    898 	long sec, usec;
    899 
    900 	/*
    901 	 * If the number of usecs in the whole seconds part of the time
    902 	 * difference fits in a long, then the total number of usecs will
    903 	 * fit in an unsigned long.  Compute the total and convert it to
    904 	 * ticks, rounding up and adding 1 to allow for the current tick
    905 	 * to expire.  Rounding also depends on unsigned long arithmetic
    906 	 * to avoid overflow.
    907 	 *
    908 	 * Otherwise, if the number of ticks in the whole seconds part of
    909 	 * the time difference fits in a long, then convert the parts to
    910 	 * ticks separately and add, using similar rounding methods and
    911 	 * overflow avoidance.  This method would work in the previous
    912 	 * case, but it is slightly slower and assumes that hz is integral.
    913 	 *
    914 	 * Otherwise, round the time difference down to the maximum
    915 	 * representable value.
    916 	 *
    917 	 * If ints are 32-bit, then the maximum value for any timeout in
    918 	 * 10ms ticks is 248 days.
    919 	 */
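         	/*
         	 * For instance, with hz = 100 (tick = 10000 us), a difference
         	 * of 1.5 s yields ((1500000 + 9999) / 10000) + 1 = 151 ticks:
         	 * 150 whole ticks plus one for the tick already in progress.
         	 */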
    920 	sec = tv->tv_sec;
    921 	usec = tv->tv_usec;
    922 
    923 	if (usec < 0) {
    924 		sec--;
    925 		usec += 1000000;
    926 	}
    927 
    928 	if (sec < 0 || (sec == 0 && usec <= 0)) {
    929 		/*
    930 		 * Would expire now or in the past.  Return 0 ticks.
    931 		 * This is different from the legacy hzto() interface,
    932 		 * and callers need to check for it.
    933 		 */
    934 		ticks = 0;
    935 	} else if (sec <= (LONG_MAX / 1000000))
    936 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
    937 		    / tick) + 1;
    938 	else if (sec <= (LONG_MAX / hz))
    939 		ticks = (sec * hz) +
    940 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
    941 	else
    942 		ticks = LONG_MAX;
    943 
    944 	if (ticks > INT_MAX)
    945 		ticks = INT_MAX;
    946 
    947 	return ((int)ticks);
    948 }
    949 
    950 #ifndef __HAVE_TIMECOUNTER
    951 /*
    952  * Compute number of hz until specified time.  Used to compute second
    953  * argument to callout_reset() from an absolute time.
    954  */
    955 int
    956 hzto(struct timeval *tv)
    957 {
    958 	unsigned long ticks;
    959 	long sec, usec;
    960 	int s;
    961 
    962 	/*
    963 	 * If the number of usecs in the whole seconds part of the time
    964 	 * difference fits in a long, then the total number of usecs will
    965 	 * fit in an unsigned long.  Compute the total and convert it to
    966 	 * ticks, rounding up and adding 1 to allow for the current tick
    967 	 * to expire.  Rounding also depends on unsigned long arithmetic
    968 	 * to avoid overflow.
    969 	 *
    970 	 * Otherwise, if the number of ticks in the whole seconds part of
    971 	 * the time difference fits in a long, then convert the parts to
    972 	 * ticks separately and add, using similar rounding methods and
    973 	 * overflow avoidance.  This method would work in the previous
     974 	 * case, but it is slightly slower and assumes that hz is integral.
    975 	 *
    976 	 * Otherwise, round the time difference down to the maximum
    977 	 * representable value.
    978 	 *
    979 	 * If ints are 32-bit, then the maximum value for any timeout in
    980 	 * 10ms ticks is 248 days.
    981 	 */
    982 	s = splclock();
    983 	sec = tv->tv_sec - time.tv_sec;
    984 	usec = tv->tv_usec - time.tv_usec;
    985 	splx(s);
    986 
    987 	if (usec < 0) {
    988 		sec--;
    989 		usec += 1000000;
    990 	}
    991 
    992 	if (sec < 0 || (sec == 0 && usec <= 0)) {
    993 		/*
    994 		 * Would expire now or in the past.  Return 0 ticks.
    995 		 * This is different from the legacy hzto() interface,
    996 		 * and callers need to check for it.
    997 		 */
    998 		ticks = 0;
    999 	} else if (sec <= (LONG_MAX / 1000000))
   1000 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
   1001 		    / tick) + 1;
   1002 	else if (sec <= (LONG_MAX / hz))
   1003 		ticks = (sec * hz) +
   1004 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
   1005 	else
   1006 		ticks = LONG_MAX;
   1007 
   1008 	if (ticks > INT_MAX)
   1009 		ticks = INT_MAX;
   1010 
   1011 	return ((int)ticks);
   1012 }
   1013 #endif /* !__HAVE_TIMECOUNTER */
   1014 
   1015 /*
   1016  * Compute number of ticks in the specified amount of time.
   1017  */
   1018 int
   1019 tstohz(struct timespec *ts)
   1020 {
   1021 	struct timeval tv;
   1022 
   1023 	/*
    1024 	 * Microsecond resolution is sufficient for hz, so convert to a
   1025 	 * timeval and use tvtohz() above.
   1026 	 */
   1027 	TIMESPEC_TO_TIMEVAL(&tv, ts);
   1028 	return tvtohz(&tv);
   1029 }
   1030 
   1031 /*
   1032  * Start profiling on a process.
   1033  *
   1034  * Kernel profiling passes proc0 which never exits and hence
   1035  * keeps the profile clock running constantly.
   1036  */
   1037 void
   1038 startprofclock(struct proc *p)
   1039 {
   1040 
   1041 	KASSERT(mutex_owned(&p->p_stmutex));
   1042 
   1043 	if ((p->p_stflag & PST_PROFIL) == 0) {
   1044 		p->p_stflag |= PST_PROFIL;
   1045 		/*
   1046 		 * This is only necessary if using the clock as the
   1047 		 * profiling source.
   1048 		 */
   1049 		if (++profprocs == 1 && stathz != 0)
   1050 			psdiv = psratio;
   1051 	}
   1052 }
   1053 
   1054 /*
   1055  * Stop profiling on a process.
   1056  */
   1057 void
   1058 stopprofclock(struct proc *p)
   1059 {
   1060 
   1061 	KASSERT(mutex_owned(&p->p_stmutex));
   1062 
   1063 	if (p->p_stflag & PST_PROFIL) {
   1064 		p->p_stflag &= ~PST_PROFIL;
   1065 		/*
   1066 		 * This is only necessary if using the clock as the
   1067 		 * profiling source.
   1068 		 */
   1069 		if (--profprocs == 0 && stathz != 0)
   1070 			psdiv = 1;
   1071 	}
   1072 }
   1073 
   1074 #if defined(PERFCTRS)
   1075 /*
   1076  * Independent profiling "tick" in case we're using a separate
   1077  * clock or profiling event source.  Currently, that's just
   1078  * performance counters--hence the wrapper.
   1079  */
   1080 void
   1081 proftick(struct clockframe *frame)
   1082 {
   1083 #ifdef GPROF
   1084         struct gmonparam *g;
   1085         intptr_t i;
   1086 #endif
   1087 	struct lwp *l;
   1088 	struct proc *p;
   1089 
   1090 	l = curlwp;
   1091 	p = (l ? l->l_proc : NULL);
   1092 	if (CLKF_USERMODE(frame)) {
   1093 		mutex_spin_enter(&p->p_stmutex);
   1094 		if (p->p_stflag & PST_PROFIL)
   1095 			addupc_intr(l, CLKF_PC(frame));
   1096 		mutex_spin_exit(&p->p_stmutex);
   1097 	} else {
   1098 #ifdef GPROF
   1099 		g = &_gmonparam;
   1100 		if (g->state == GMON_PROF_ON) {
   1101 			i = CLKF_PC(frame) - g->lowpc;
   1102 			if (i < g->textsize) {
   1103 				i /= HISTFRACTION * sizeof(*g->kcount);
   1104 				g->kcount[i]++;
   1105 			}
   1106 		}
   1107 #endif
   1108 #ifdef PROC_PC
   1109 		if (p != NULL) {
   1110 			mutex_spin_enter(&p->p_stmutex);
    1111 		if (p->p_stflag & PST_PROFIL)
   1112 				addupc_intr(l, PROC_PC(p));
   1113 			mutex_spin_exit(&p->p_stmutex);
   1114 		}
   1115 #endif
   1116 	}
   1117 }
   1118 #endif
   1119 
   1120 void
   1121 schedclock(struct lwp *l)
   1122 {
   1123 
   1124 	if ((l->l_flag & LW_IDLE) != 0)
   1125 		return;
   1126 
   1127 	sched_schedclock(l);
   1128 }
   1129 
   1130 /*
   1131  * Statistics clock.  Grab profile sample, and if divider reaches 0,
   1132  * do process and kernel statistics.
   1133  */
   1134 void
   1135 statclock(struct clockframe *frame)
   1136 {
   1137 #ifdef GPROF
   1138 	struct gmonparam *g;
   1139 	intptr_t i;
   1140 #endif
   1141 	struct cpu_info *ci = curcpu();
   1142 	struct schedstate_percpu *spc = &ci->ci_schedstate;
   1143 	struct proc *p;
   1144 	struct lwp *l;
   1145 
   1146 	/*
   1147 	 * Notice changes in divisor frequency, and adjust clock
   1148 	 * frequency accordingly.
   1149 	 */
   1150 	if (spc->spc_psdiv != psdiv) {
   1151 		spc->spc_psdiv = psdiv;
   1152 		spc->spc_pscnt = psdiv;
   1153 		if (psdiv == 1) {
   1154 			setstatclockrate(stathz);
   1155 		} else {
   1156 			setstatclockrate(profhz);
   1157 		}
   1158 	}
   1159 	l = curlwp;
   1160 	if ((l->l_flag & LW_IDLE) != 0) {
   1161 		/*
   1162 		 * don't account idle lwps as swapper.
   1163 		 */
   1164 		p = NULL;
   1165 	} else {
   1166 		p = l->l_proc;
   1167 		mutex_spin_enter(&p->p_stmutex);
   1168 	}
   1169 
   1170 	if (CLKF_USERMODE(frame)) {
   1171 		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
   1172 			addupc_intr(l, CLKF_PC(frame));
   1173 		if (--spc->spc_pscnt > 0) {
   1174 			mutex_spin_exit(&p->p_stmutex);
   1175 			return;
   1176 		}
   1177 
   1178 		/*
   1179 		 * Came from user mode; CPU was in user state.
   1180 		 * If this process is being profiled record the tick.
   1181 		 */
   1182 		p->p_uticks++;
   1183 		if (p->p_nice > NZERO)
   1184 			spc->spc_cp_time[CP_NICE]++;
   1185 		else
   1186 			spc->spc_cp_time[CP_USER]++;
   1187 	} else {
   1188 #ifdef GPROF
   1189 		/*
   1190 		 * Kernel statistics are just like addupc_intr, only easier.
   1191 		 */
   1192 		g = &_gmonparam;
   1193 		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
   1194 			i = CLKF_PC(frame) - g->lowpc;
   1195 			if (i < g->textsize) {
   1196 				i /= HISTFRACTION * sizeof(*g->kcount);
   1197 				g->kcount[i]++;
   1198 			}
   1199 		}
   1200 #endif
   1201 #ifdef LWP_PC
   1202 		if (p != NULL && profsrc == PROFSRC_CLOCK &&
   1203 		    (p->p_stflag & PST_PROFIL)) {
   1204 			addupc_intr(l, LWP_PC(l));
   1205 		}
   1206 #endif
   1207 		if (--spc->spc_pscnt > 0) {
   1208 			if (p != NULL)
   1209 				mutex_spin_exit(&p->p_stmutex);
   1210 			return;
   1211 		}
   1212 		/*
   1213 		 * Came from kernel mode, so we were:
   1214 		 * - handling an interrupt,
   1215 		 * - doing syscall or trap work on behalf of the current
   1216 		 *   user process, or
   1217 		 * - spinning in the idle loop.
   1218 		 * Whichever it is, charge the time as appropriate.
   1219 		 * Note that we charge interrupts to the current process,
   1220 		 * regardless of whether they are ``for'' that process,
   1221 		 * so that we know how much of its real time was spent
   1222 		 * in ``non-process'' (i.e., interrupt) work.
   1223 		 */
   1224 		if (CLKF_INTR(frame) || (l->l_flag & LW_INTR) != 0) {
   1225 			if (p != NULL) {
   1226 				p->p_iticks++;
   1227 			}
   1228 			spc->spc_cp_time[CP_INTR]++;
   1229 		} else if (p != NULL) {
   1230 			p->p_sticks++;
   1231 			spc->spc_cp_time[CP_SYS]++;
   1232 		} else {
   1233 			spc->spc_cp_time[CP_IDLE]++;
   1234 		}
   1235 	}
   1236 	spc->spc_pscnt = psdiv;
   1237 
   1238 	if (p != NULL) {
   1239 		++l->l_cpticks;
   1240 		mutex_spin_exit(&p->p_stmutex);
   1241 	}
   1242 
   1243 	/*
   1244 	 * If no separate schedclock is provided, call it here
   1245 	 * at about 16 Hz.
   1246 	 */
   1247 	if (schedhz == 0) {
   1248 		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
   1249 			schedclock(l);
   1250 			ci->ci_schedstate.spc_schedticks = statscheddiv;
   1251 		}
   1252 	}
   1253 }
   1254 
   1255 #ifndef __HAVE_TIMECOUNTER
   1256 #ifdef NTP	/* NTP phase-locked loop in kernel */
   1257 /*
   1258  * hardupdate() - local clock update
   1259  *
   1260  * This routine is called by ntp_adjtime() to update the local clock
   1261  * phase and frequency. The implementation is of an adaptive-parameter,
   1262  * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
   1263  * time and frequency offset estimates for each call. If the kernel PPS
   1264  * discipline code is configured (PPS_SYNC), the PPS signal itself
   1265  * determines the new time offset, instead of the calling argument.
   1266  * Presumably, calls to ntp_adjtime() occur only when the caller
   1267  * believes the local clock is valid within some bound (+-128 ms with
   1268  * NTP). If the caller's time is far different than the PPS time, an
   1269  * argument will ensue, and it's not clear who will lose.
   1270  *
    1271  * For uncompensated quartz crystal oscillators and nominal update
    1272  * intervals less than 1024 s, operation should be in phase-lock mode
    1273  * (STA_FLL = 0), where the loop is disciplined to phase. For update
    1274  * intervals greater than this, operation should be in frequency-lock
   1275  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
   1276  *
   1277  * Note: splclock() is in effect.
   1278  */
   1279 void
   1280 hardupdate(long offset)
   1281 {
   1282 	long ltemp, mtemp;
   1283 
   1284 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
   1285 		return;
   1286 	ltemp = offset;
   1287 #ifdef PPS_SYNC
   1288 	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
   1289 		ltemp = pps_offset;
   1290 #endif /* PPS_SYNC */
   1291 
   1292 	/*
   1293 	 * Scale the phase adjustment and clamp to the operating range.
   1294 	 */
   1295 	if (ltemp > MAXPHASE)
   1296 		time_offset = MAXPHASE << SHIFT_UPDATE;
   1297 	else if (ltemp < -MAXPHASE)
   1298 		time_offset = -(MAXPHASE << SHIFT_UPDATE);
   1299 	else
   1300 		time_offset = ltemp << SHIFT_UPDATE;
   1301 
   1302 	/*
   1303 	 * Select whether the frequency is to be controlled and in which
   1304 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
   1305 	 * multiply/divide should be replaced someday.
   1306 	 */
   1307 	if (time_status & STA_FREQHOLD || time_reftime == 0)
   1308 		time_reftime = time.tv_sec;
   1309 	mtemp = time.tv_sec - time_reftime;
   1310 	time_reftime = time.tv_sec;
   1311 	if (time_status & STA_FLL) {
   1312 		if (mtemp >= MINSEC) {
   1313 			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
   1314 			    SHIFT_UPDATE));
   1315 			if (ltemp < 0)
   1316 				time_freq -= -ltemp >> SHIFT_KH;
   1317 			else
   1318 				time_freq += ltemp >> SHIFT_KH;
   1319 		}
   1320 	} else {
   1321 		if (mtemp < MAXSEC) {
   1322 			ltemp *= mtemp;
   1323 			if (ltemp < 0)
   1324 				time_freq -= -ltemp >> (time_constant +
   1325 				    time_constant + SHIFT_KF -
   1326 				    SHIFT_USEC);
   1327 			else
   1328 				time_freq += ltemp >> (time_constant +
   1329 				    time_constant + SHIFT_KF -
   1330 				    SHIFT_USEC);
   1331 		}
   1332 	}
   1333 	if (time_freq > time_tolerance)
   1334 		time_freq = time_tolerance;
   1335 	else if (time_freq < -time_tolerance)
   1336 		time_freq = -time_tolerance;
   1337 }
   1338 
   1339 #ifdef PPS_SYNC
   1340 /*
   1341  * hardpps() - discipline CPU clock oscillator to external PPS signal
   1342  *
   1343  * This routine is called at each PPS interrupt in order to discipline
   1344  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
   1345  * and leaves it in a handy spot for the hardclock() routine. It
   1346  * integrates successive PPS phase differences and calculates the
   1347  * frequency offset. This is used in hardclock() to discipline the CPU
   1348  * clock oscillator so that intrinsic frequency error is cancelled out.
   1349  * The code requires the caller to capture the time and hardware counter
   1350  * value at the on-time PPS signal transition.
   1351  *
   1352  * Note that, on some Unix systems, this routine runs at an interrupt
   1353  * priority level higher than the timer interrupt routine hardclock().
   1354  * Therefore, the variables used are distinct from the hardclock()
   1355  * variables, except for certain exceptions: The PPS frequency pps_freq
   1356  * and phase pps_offset variables are determined by this routine and
   1357  * updated atomically. The time_tolerance variable can be considered a
   1358  * constant, since it is infrequently changed, and then only when the
   1359  * PPS signal is disabled. The watchdog counter pps_valid is updated
   1360  * once per second by hardclock() and is atomically cleared in this
   1361  * routine.
   1362  */
   1363 void
   1364 hardpps(struct timeval *tvp,		/* time at PPS */
   1365 	long usec			/* hardware counter at PPS */)
   1366 {
   1367 	long u_usec, v_usec, bigtick;
   1368 	long cal_sec, cal_usec;
   1369 
   1370 	/*
   1371 	 * An occasional glitch can be produced when the PPS interrupt
   1372 	 * occurs in the hardclock() routine before the time variable is
   1373 	 * updated. Here the offset is discarded when the difference
   1374 	 * between it and the last one is greater than tick/2, but not
   1375 	 * if the interval since the first discard exceeds 30 s.
   1376 	 */
   1377 	time_status |= STA_PPSSIGNAL;
   1378 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
   1379 	pps_valid = 0;
   1380 	u_usec = -tvp->tv_usec;
   1381 	if (u_usec < -500000)
   1382 		u_usec += 1000000;
   1383 	v_usec = pps_offset - u_usec;
   1384 	if (v_usec < 0)
   1385 		v_usec = -v_usec;
   1386 	if (v_usec > (tick >> 1)) {
   1387 		if (pps_glitch > MAXGLITCH) {
   1388 			pps_glitch = 0;
   1389 			pps_tf[2] = u_usec;
   1390 			pps_tf[1] = u_usec;
   1391 		} else {
   1392 			pps_glitch++;
   1393 			u_usec = pps_offset;
   1394 		}
   1395 	} else
   1396 		pps_glitch = 0;
   1397 
   1398 	/*
   1399 	 * A three-stage median filter is used to help deglitch the pps
   1400 	 * time. The median sample becomes the time offset estimate; the
   1401 	 * difference between the other two samples becomes the time
   1402 	 * dispersion (jitter) estimate.
   1403 	 */
   1404 	pps_tf[2] = pps_tf[1];
   1405 	pps_tf[1] = pps_tf[0];
   1406 	pps_tf[0] = u_usec;
   1407 	if (pps_tf[0] > pps_tf[1]) {
   1408 		if (pps_tf[1] > pps_tf[2]) {
   1409 			pps_offset = pps_tf[1];		/* 0 1 2 */
   1410 			v_usec = pps_tf[0] - pps_tf[2];
   1411 		} else if (pps_tf[2] > pps_tf[0]) {
   1412 			pps_offset = pps_tf[0];		/* 2 0 1 */
   1413 			v_usec = pps_tf[2] - pps_tf[1];
   1414 		} else {
   1415 			pps_offset = pps_tf[2];		/* 0 2 1 */
   1416 			v_usec = pps_tf[0] - pps_tf[1];
   1417 		}
   1418 	} else {
   1419 		if (pps_tf[1] < pps_tf[2]) {
   1420 			pps_offset = pps_tf[1];		/* 2 1 0 */
   1421 			v_usec = pps_tf[2] - pps_tf[0];
   1422 		} else  if (pps_tf[2] < pps_tf[0]) {
   1423 			pps_offset = pps_tf[0];		/* 1 0 2 */
   1424 			v_usec = pps_tf[1] - pps_tf[2];
   1425 		} else {
   1426 			pps_offset = pps_tf[2];		/* 1 2 0 */
   1427 			v_usec = pps_tf[1] - pps_tf[0];
   1428 		}
   1429 	}
   1430 	if (v_usec > MAXTIME)
   1431 		pps_jitcnt++;
   1432 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
   1433 	if (v_usec < 0)
   1434 		pps_jitter -= -v_usec >> PPS_AVG;
   1435 	else
   1436 		pps_jitter += v_usec >> PPS_AVG;
   1437 	if (pps_jitter > (MAXTIME >> 1))
   1438 		time_status |= STA_PPSJITTER;
   1439 
   1440 	/*
   1441 	 * During the calibration interval adjust the starting time when
   1442 	 * the tick overflows. At the end of the interval compute the
   1443 	 * duration of the interval and the difference of the hardware
   1444 	 * counters at the beginning and end of the interval. This code
    1445 	 * is deliciously complicated by the fact that valid differences may
   1446 	 * exceed the value of tick when using long calibration
   1447 	 * intervals and small ticks. Note that the counter can be
   1448 	 * greater than tick if caught at just the wrong instant, but
   1449 	 * the values returned and used here are correct.
   1450 	 */
   1451 	bigtick = (long)tick << SHIFT_USEC;
   1452 	pps_usec -= pps_freq;
   1453 	if (pps_usec >= bigtick)
   1454 		pps_usec -= bigtick;
   1455 	if (pps_usec < 0)
   1456 		pps_usec += bigtick;
   1457 	pps_time.tv_sec++;
   1458 	pps_count++;
   1459 	if (pps_count < (1 << pps_shift))
   1460 		return;
   1461 	pps_count = 0;
   1462 	pps_calcnt++;
   1463 	u_usec = usec << SHIFT_USEC;
   1464 	v_usec = pps_usec - u_usec;
   1465 	if (v_usec >= bigtick >> 1)
   1466 		v_usec -= bigtick;
   1467 	if (v_usec < -(bigtick >> 1))
   1468 		v_usec += bigtick;
   1469 	if (v_usec < 0)
   1470 		v_usec = -(-v_usec >> pps_shift);
   1471 	else
   1472 		v_usec = v_usec >> pps_shift;
   1473 	pps_usec = u_usec;
   1474 	cal_sec = tvp->tv_sec;
   1475 	cal_usec = tvp->tv_usec;
   1476 	cal_sec -= pps_time.tv_sec;
   1477 	cal_usec -= pps_time.tv_usec;
   1478 	if (cal_usec < 0) {
   1479 		cal_usec += 1000000;
   1480 		cal_sec--;
   1481 	}
   1482 	pps_time = *tvp;
   1483 
   1484 	/*
   1485 	 * Check for lost interrupts, noise, excessive jitter and
   1486 	 * excessive frequency error. The number of timer ticks during
   1487 	 * the interval may vary +-1 tick. Add to this a margin of one
   1488 	 * tick for the PPS signal jitter and maximum frequency
   1489 	 * deviation. If the limits are exceeded, the calibration
   1490 	 * interval is reset to the minimum and we start over.
   1491 	 */
   1492 	u_usec = (long)tick << 1;
   1493 	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
   1494 	    || (cal_sec == 0 && cal_usec < u_usec))
   1495 	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
   1496 		pps_errcnt++;
   1497 		pps_shift = PPS_SHIFT;
   1498 		pps_intcnt = 0;
   1499 		time_status |= STA_PPSERROR;
   1500 		return;
   1501 	}
   1502 
   1503 	/*
   1504 	 * A three-stage median filter is used to help deglitch the pps
   1505 	 * frequency. The median sample becomes the frequency offset
   1506 	 * estimate; the difference between the other two samples
   1507 	 * becomes the frequency dispersion (stability) estimate.
   1508 	 */
   1509 	pps_ff[2] = pps_ff[1];
   1510 	pps_ff[1] = pps_ff[0];
   1511 	pps_ff[0] = v_usec;
   1512 	if (pps_ff[0] > pps_ff[1]) {
   1513 		if (pps_ff[1] > pps_ff[2]) {
   1514 			u_usec = pps_ff[1];		/* 0 1 2 */
   1515 			v_usec = pps_ff[0] - pps_ff[2];
   1516 		} else if (pps_ff[2] > pps_ff[0]) {
   1517 			u_usec = pps_ff[0];		/* 2 0 1 */
   1518 			v_usec = pps_ff[2] - pps_ff[1];
   1519 		} else {
   1520 			u_usec = pps_ff[2];		/* 0 2 1 */
   1521 			v_usec = pps_ff[0] - pps_ff[1];
   1522 		}
   1523 	} else {
   1524 		if (pps_ff[1] < pps_ff[2]) {
   1525 			u_usec = pps_ff[1];		/* 2 1 0 */
   1526 			v_usec = pps_ff[2] - pps_ff[0];
   1527 		} else  if (pps_ff[2] < pps_ff[0]) {
   1528 			u_usec = pps_ff[0];		/* 1 0 2 */
   1529 			v_usec = pps_ff[1] - pps_ff[2];
   1530 		} else {
   1531 			u_usec = pps_ff[2];		/* 1 2 0 */
   1532 			v_usec = pps_ff[1] - pps_ff[0];
   1533 		}
   1534 	}
   1535 
   1536 	/*
   1537 	 * Here the frequency dispersion (stability) is updated. If it
   1538 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
   1539 	 * offset is updated as well, but clamped to the tolerance. It
   1540 	 * will be processed later by the hardclock() routine.
   1541 	 */
   1542 	v_usec = (v_usec >> 1) - pps_stabil;
   1543 	if (v_usec < 0)
   1544 		pps_stabil -= -v_usec >> PPS_AVG;
   1545 	else
   1546 		pps_stabil += v_usec >> PPS_AVG;
   1547 	if (pps_stabil > MAXFREQ >> 2) {
   1548 		pps_stbcnt++;
   1549 		time_status |= STA_PPSWANDER;
   1550 		return;
   1551 	}
   1552 	if (time_status & STA_PPSFREQ) {
   1553 		if (u_usec < 0) {
   1554 			pps_freq -= -u_usec >> PPS_AVG;
   1555 			if (pps_freq < -time_tolerance)
   1556 				pps_freq = -time_tolerance;
   1557 			u_usec = -u_usec;
   1558 		} else {
   1559 			pps_freq += u_usec >> PPS_AVG;
   1560 			if (pps_freq > time_tolerance)
   1561 				pps_freq = time_tolerance;
   1562 		}
   1563 	}
   1564 
   1565 	/*
   1566 	 * Here the calibration interval is adjusted. If the maximum
   1567 	 * time difference is greater than tick / 4, reduce the interval
   1568 	 * by half. If this is not the case for four consecutive
   1569 	 * intervals, double the interval.
   1570 	 */
   1571 	if (u_usec << pps_shift > bigtick >> 2) {
   1572 		pps_intcnt = 0;
   1573 		if (pps_shift > PPS_SHIFT)
   1574 			pps_shift--;
   1575 	} else if (pps_intcnt >= 4) {
   1576 		pps_intcnt = 0;
   1577 		if (pps_shift < PPS_SHIFTMAX)
   1578 			pps_shift++;
   1579 	} else
   1580 		pps_intcnt++;
   1581 }
   1582 #endif /* PPS_SYNC */
   1583 #endif /* NTP  */
   1584 
   1585 /* timecounter compat functions */
   1586 void
   1587 nanotime(struct timespec *ts)
   1588 {
   1589 	struct timeval tv;
   1590 
   1591 	microtime(&tv);
   1592 	TIMEVAL_TO_TIMESPEC(&tv, ts);
   1593 }
   1594 
   1595 void
   1596 getbinuptime(struct bintime *bt)
   1597 {
   1598 	struct timeval tv;
   1599 
   1600 	microtime(&tv);
   1601 	timeval2bintime(&tv, bt);
   1602 }
   1603 
   1604 void
   1605 nanouptime(struct timespec *tsp)
   1606 {
   1607 	int s;
   1608 
   1609 	s = splclock();
   1610 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
   1611 	splx(s);
   1612 }
   1613 
   1614 void
   1615 getnanouptime(struct timespec *tsp)
   1616 {
   1617 	int s;
   1618 
   1619 	s = splclock();
   1620 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
   1621 	splx(s);
   1622 }
   1623 
   1624 void
   1625 getmicrouptime(struct timeval *tvp)
   1626 {
   1627 	int s;
   1628 
   1629 	s = splclock();
   1630 	*tvp = mono_time;
   1631 	splx(s);
   1632 }
   1633 
   1634 void
   1635 getnanotime(struct timespec *tsp)
   1636 {
   1637 	int s;
   1638 
   1639 	s = splclock();
   1640 	TIMEVAL_TO_TIMESPEC(&time, tsp);
   1641 	splx(s);
   1642 }
   1643 
   1644 void
   1645 getmicrotime(struct timeval *tvp)
   1646 {
   1647 	int s;
   1648 
   1649 	s = splclock();
   1650 	*tvp = time;
   1651 	splx(s);
   1652 }
   1653 
   1654 u_int64_t
   1655 tc_getfrequency(void)
   1656 {
   1657 	return hz;
   1658 }
   1659 #endif /* !__HAVE_TIMECOUNTER */
   1660