      1 /*	$NetBSD: kern_clock.c,v 1.109 2007/07/09 21:10:51 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  * This code is derived from software contributed to The NetBSD Foundation
     11  * by Charles M. Hannum.
     12  *
     13  * Redistribution and use in source and binary forms, with or without
     14  * modification, are permitted provided that the following conditions
     15  * are met:
     16  * 1. Redistributions of source code must retain the above copyright
     17  *    notice, this list of conditions and the following disclaimer.
     18  * 2. Redistributions in binary form must reproduce the above copyright
     19  *    notice, this list of conditions and the following disclaimer in the
     20  *    documentation and/or other materials provided with the distribution.
     21  * 3. All advertising materials mentioning features or use of this software
     22  *    must display the following acknowledgement:
     23  *	This product includes software developed by the NetBSD
     24  *	Foundation, Inc. and its contributors.
     25  * 4. Neither the name of The NetBSD Foundation nor the names of its
     26  *    contributors may be used to endorse or promote products derived
     27  *    from this software without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     39  * POSSIBILITY OF SUCH DAMAGE.
     40  */
     41 
     42 /*-
     43  * Copyright (c) 1982, 1986, 1991, 1993
     44  *	The Regents of the University of California.  All rights reserved.
     45  * (c) UNIX System Laboratories, Inc.
     46  * All or some portions of this file are derived from material licensed
     47  * to the University of California by American Telephone and Telegraph
     48  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     49  * the permission of UNIX System Laboratories, Inc.
     50  *
     51  * Redistribution and use in source and binary forms, with or without
     52  * modification, are permitted provided that the following conditions
     53  * are met:
     54  * 1. Redistributions of source code must retain the above copyright
     55  *    notice, this list of conditions and the following disclaimer.
     56  * 2. Redistributions in binary form must reproduce the above copyright
     57  *    notice, this list of conditions and the following disclaimer in the
     58  *    documentation and/or other materials provided with the distribution.
     59  * 3. Neither the name of the University nor the names of its contributors
     60  *    may be used to endorse or promote products derived from this software
     61  *    without specific prior written permission.
     62  *
     63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     73  * SUCH DAMAGE.
     74  *
     75  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.109 2007/07/09 21:10:51 ad Exp $");
     80 
     81 #include "opt_ntp.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #include <sys/param.h>
     86 #include <sys/systm.h>
     87 #include <sys/callout.h>
     88 #include <sys/kernel.h>
     89 #include <sys/proc.h>
     90 #include <sys/resourcevar.h>
     91 #include <sys/signalvar.h>
     92 #include <sys/sysctl.h>
     93 #include <sys/timex.h>
     94 #include <sys/sched.h>
     95 #include <sys/time.h>
     96 #include <sys/timetc.h>
     97 #include <sys/cpu.h>
     98 
     99 #ifdef GPROF
    100 #include <sys/gmon.h>
    101 #endif
    102 
    103 /*
    104  * Clock handling routines.
    105  *
    106  * This code is written to operate with two timers that run independently of
    107  * each other.  The main clock, running hz times per second, is used to keep
    108  * track of real time.  The second timer handles kernel and user profiling,
    109  * and does resource use estimation.  If the second timer is programmable,
    110  * it is randomized to avoid aliasing between the two clocks.  For example,
    111  * the randomization prevents an adversary from always giving up the CPU
    112  * just before its quantum expires.  Otherwise, it would never accumulate
    113  * CPU ticks.  The mean frequency of the second timer is stathz.
    114  *
    115  * If no second timer exists, stathz will be zero; in this case we drive
    116  * profiling and statistics off the main clock.  This WILL NOT be accurate;
    117  * do not do it unless absolutely necessary.
    118  *
    119  * The statistics clock may (or may not) be run at a higher rate while
    120  * profiling.  This profile clock runs at profhz.  We require that profhz
    121  * be an integral multiple of stathz.
    122  *
    123  * If the statistics clock is running fast, it must be divided by the ratio
    124  * profhz/stathz for statistics.  (For profiling, every tick counts.)
    125  */
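/*
 * For illustration only (worked numbers, not from the original text): with
 * stathz = 128 and profhz = 1024, psratio = profhz / stathz = 8.  While a
 * process is being profiled the statistics clock is sped up to profhz, and
 * statclock() charges statistics only on every psratio-th tick (see the
 * psdiv/spc_pscnt handling below), so the effective statistics rate is
 * still stathz.
 */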
    126 
    127 #ifndef __HAVE_TIMECOUNTER
    128 #ifdef NTP	/* NTP phase-locked loop in kernel */
    129 /*
    130  * Phase/frequency-lock loop (PLL/FLL) definitions
    131  *
    132  * The following variables are read and set by the ntp_adjtime() system
    133  * call.
    134  *
    135  * time_state shows the state of the system clock, with values defined
    136  * in the timex.h header file.
    137  *
    138  * time_status shows the status of the system clock, with bits defined
    139  * in the timex.h header file.
    140  *
    141  * time_offset is used by the PLL/FLL to adjust the system time in small
    142  * increments.
    143  *
    144  * time_constant determines the bandwidth or "stiffness" of the PLL.
    145  *
    146  * time_tolerance determines maximum frequency error or tolerance of the
    147  * CPU clock oscillator and is a property of the architecture; however,
    148  * in principle it could change as result of the presence of external
    149  * discipline signals, for instance.
    150  *
    151  * time_precision is usually equal to the kernel tick variable; however,
    152  * in cases where a precision clock counter or external clock is
    153  * available, the resolution can be much less than this and depend on
    154  * whether the external clock is working or not.
    155  *
    156  * time_maxerror is initialized by a ntp_adjtime() call and increased by
    157  * the kernel once each second to reflect the maximum error bound
    158  * growth.
    159  *
    160  * time_esterror is set and read by the ntp_adjtime() call, but
    161  * otherwise not used by the kernel.
    162  */
    163 int time_state = TIME_OK;	/* clock state */
    164 int time_status = STA_UNSYNC;	/* clock status bits */
    165 long time_offset = 0;		/* time offset (us) */
    166 long time_constant = 0;		/* pll time constant */
    167 long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
    168 long time_precision = 1;	/* clock precision (us) */
    169 long time_maxerror = MAXPHASE;	/* maximum error (us) */
    170 long time_esterror = MAXPHASE;	/* estimated error (us) */
    171 
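#if 0
/*
 * Illustrative sketch only (userland code, not part of this kernel file):
 * one way to read the variables above through the ntp_adjtime(2) interface
 * mentioned in the comment.  Assumes the standard struct timex API from
 * <sys/timex.h>; offsets and error bounds are in microseconds in this
 * (pre-timecounter) kernel model.
 */
#include <sys/timex.h>
#include <stdio.h>

int
main(void)
{
	struct timex tx;
	int state;

	tx.modes = 0;			/* no MOD_* bits set: query only */
	state = ntp_adjtime(&tx);	/* returns TIME_OK, TIME_INS, ... */
	printf("state %d offset %ld us maxerror %ld us esterror %ld us\n",
	    state, tx.offset, tx.maxerror, tx.esterror);
	return 0;
}
#endif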
    172 /*
    173  * The following variables establish the state of the PLL/FLL and the
    174  * residual time and frequency offset of the local clock. The scale
    175  * factors are defined in the timex.h header file.
    176  *
    177  * time_phase and time_freq are the phase increment and the frequency
    178  * increment, respectively, of the kernel time variable.
    179  *
    180  * time_freq is set via ntp_adjtime() from a value stored in a file when
    181  * the synchronization daemon is first started. Its value is retrieved
    182  * via ntp_adjtime() and written to the file about once per hour by the
    183  * daemon.
    184  *
    185  * time_adj is the adjustment added to the value of tick at each timer
    186  * interrupt and is recomputed from time_phase and time_freq at each
    187  * seconds rollover.
    188  *
    189  * time_reftime is the second's portion of the system time at the last
    190  * call to ntp_adjtime(). It is used to adjust the time_freq variable
    191  * and to increase the time_maxerror as the time since last update
    192  * increases.
    193  */
    194 long time_phase = 0;		/* phase offset (scaled us) */
    195 long time_freq = 0;		/* frequency offset (scaled ppm) */
    196 long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
    197 long time_reftime = 0;		/* time at last adjustment (s) */
    198 
    199 #ifdef PPS_SYNC
    200 /*
    201  * The following variables are used only if the kernel PPS discipline
    202  * code is configured (PPS_SYNC). The scale factors are defined in the
    203  * timex.h header file.
    204  *
    205  * pps_time contains the time at each calibration interval, as read by
    206  * microtime(). pps_count counts the seconds of the calibration
    207  * interval, the duration of which is nominally pps_shift in powers of
    208  * two.
    209  *
    210  * pps_offset is the time offset produced by the time median filter
    211  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
    212  * this filter.
    213  *
    214  * pps_freq is the frequency offset produced by the frequency median
    215  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
    216  * by this filter.
    217  *
    218  * pps_usec is latched from a high resolution counter or external clock
    219  * at pps_time. Here we want the hardware counter contents only, not the
    220  * contents plus the time_tv.usec as usual.
    221  *
    222  * pps_valid counts the number of seconds since the last PPS update. It
    223  * is used as a watchdog timer to disable the PPS discipline should the
    224  * PPS signal be lost.
    225  *
    226  * pps_glitch counts the number of seconds since the beginning of an
    227  * offset burst more than tick/2 from current nominal offset. It is used
    228  * mainly to suppress error bursts due to priority conflicts between the
    229  * PPS interrupt and timer interrupt.
    230  *
    231  * pps_intcnt counts the calibration intervals for use in the interval-
    232  * adaptation algorithm. It's just too complicated for words.
    233  *
    234  * pps_kc_hardpps_source contains an arbitrary value that uniquely
    235  * identifies the currently bound source of the PPS signal, or NULL
    236  * if no source is bound.
    237  *
    238  * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
    239  * signal should be reported.
    240  */
    241 struct timeval pps_time;	/* kernel time at last interval */
    242 long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
    243 long pps_offset = 0;		/* pps time offset (us) */
    244 long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
    245 long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
    246 long pps_freq = 0;		/* frequency offset (scaled ppm) */
    247 long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
    248 long pps_usec = 0;		/* microsec counter at last interval */
    249 long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
    250 int pps_glitch = 0;		/* pps signal glitch counter */
    251 int pps_count = 0;		/* calibration interval counter (s) */
    252 int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
    253 int pps_intcnt = 0;		/* intervals at current duration */
    254 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
    255 int pps_kc_hardpps_mode = 0;	/* interesting edges of PPS signal */
    256 
    257 /*
    258  * PPS signal quality monitors
    259  *
    260  * pps_jitcnt counts the seconds that have been discarded because the
    261  * jitter measured by the time median filter exceeds the limit MAXTIME
    262  * (100 us).
    263  *
    264  * pps_calcnt counts the frequency calibration intervals, which are
    265  * variable from 4 s to 256 s.
    266  *
    267  * pps_errcnt counts the calibration intervals which have been discarded
    268  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
    269  * calibration interval jitter exceeds two ticks.
    270  *
    271  * pps_stbcnt counts the calibration intervals that have been discarded
    272  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
    273  */
    274 long pps_jitcnt = 0;		/* jitter limit exceeded */
    275 long pps_calcnt = 0;		/* calibration intervals */
    276 long pps_errcnt = 0;		/* calibration errors */
    277 long pps_stbcnt = 0;		/* stability limit exceeded */
    278 #endif /* PPS_SYNC */
    279 
    280 #ifdef EXT_CLOCK
    281 /*
    282  * External clock definitions
    283  *
    284  * The following definitions and declarations are used only if an
    285  * external clock is configured on the system.
    286  */
    287 #define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */
    288 
    289 /*
    290  * The clock_count variable is set to CLOCK_INTERVAL at each PPS
    291  * interrupt and decremented once each second.
    292  */
    293 int clock_count = 0;		/* CPU clock counter */
    294 
    295 #ifdef HIGHBALL
    296 /*
    297  * The clock_offset and clock_cpu variables are used by the HIGHBALL
    298  * interface. The clock_offset variable defines the offset between
    299  * system time and the HIGHBALL counters. The clock_cpu variable contains
    300  * the offset between the system clock and the HIGHBALL clock for use in
    301  * disciplining the kernel time variable.
    302  */
    303 extern struct timeval clock_offset; /* Highball clock offset */
    304 long clock_cpu = 0;		/* CPU clock adjust */
    305 #endif /* HIGHBALL */
    306 #endif /* EXT_CLOCK */
    307 #endif /* NTP */
    308 
    309 /*
    310  * Bump a timeval by a small number of usec's.
    311  */
    312 #define BUMPTIME(t, usec) { \
    313 	volatile struct timeval *tp = (t); \
    314 	long us; \
    315  \
    316 	tp->tv_usec = us = tp->tv_usec + (usec); \
    317 	if (us >= 1000000) { \
    318 		tp->tv_usec = us - 1000000; \
    319 		tp->tv_sec++; \
    320 	} \
    321 }
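/*
 * For illustration: hardclock() below uses this as, e.g.,
 * BUMPTIME(&mono_time, delta).  The macro carries at most one second of
 * overflow, which is sufficient because the increment is always a small
 * number of microseconds (a tick plus any per-tick adjustments).
 */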
    322 #endif /* !__HAVE_TIMECOUNTER */
    323 
    324 int	stathz;
    325 int	profhz;
    326 int	profsrc;
    327 int	schedhz;
    328 int	profprocs;
    329 int	hardclock_ticks;
    330 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
    331 static int psdiv;			/* prof => stat divider */
    332 int	psratio;			/* ratio: prof / stat */
    333 #ifndef __HAVE_TIMECOUNTER
    334 int	tickfix, tickfixinterval;	/* used if tick not really integral */
    335 #ifndef NTP
    336 static int tickfixcnt;			/* accumulated fractional error */
    337 #else
    338 int	fixtick;			/* used by NTP for same */
    339 int	shifthz;
    340 #endif
    341 
    342 /*
    343  * We might want ldd to load both words from time at once.
    344  * To succeed we need to be quadword aligned.
    345  * The sparc already does that, and that it has worked so far is a fluke.
    346  */
    347 volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
    348 volatile struct	timeval mono_time;
    349 #endif /* !__HAVE_TIMECOUNTER */
    350 
    351 #ifdef __HAVE_TIMECOUNTER
    352 static u_int get_intr_timecount(struct timecounter *);
    353 
    354 static struct timecounter intr_timecounter = {
    355 	get_intr_timecount,	/* get_timecount */
    356 	0,			/* no poll_pps */
    357 	~0u,			/* counter_mask */
    358 	0,		        /* frequency */
    359 	"clockinterrupt",	/* name */
    360 	0,			/* quality - minimum implementation level for a clock */
    361 	NULL,			/* prev */
    362 	NULL,			/* next */
    363 };
    364 
    365 static u_int
    366 get_intr_timecount(struct timecounter *tc)
    367 {
    368 
    369 	return (u_int)hardclock_ticks;
    370 }
    371 #endif
    372 
    373 /*
    374  * Initialize clock frequencies and start both clocks running.
    375  */
    376 void
    377 initclocks(void)
    378 {
    379 	int i;
    380 
    381 	/*
    382 	 * Set divisors to 1 (normal case) and let the machine-specific
    383 	 * code do its bit.
    384 	 */
    385 	psdiv = 1;
    386 #ifdef __HAVE_TIMECOUNTER
    387 	/*
    388 	 * Provide a minimum default time counter; it will only run
    389 	 * at interrupt resolution.
    390 	 */
    391 	intr_timecounter.tc_frequency = hz;
    392 	tc_init(&intr_timecounter);
    393 #endif
    394 	cpu_initclocks();
    395 
    396 	/*
    397 	 * Compute profhz and stathz, fix profhz if needed.
    398 	 */
    399 	i = stathz ? stathz : hz;
    400 	if (profhz == 0)
    401 		profhz = i;
    402 	psratio = profhz / i;
    403 	if (schedhz == 0) {
    404 		/* 16Hz is best */
    405 		statscheddiv = i / 16;
    406 		if (statscheddiv <= 0)
    407 			panic("statscheddiv");
    408 	}
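	/*
	 * For illustration: with stathz = 128, statscheddiv = 128 / 16 = 8,
	 * so statclock() ends up calling schedclock() on every 8th
	 * statistics tick, i.e. at the intended ~16 Hz.
	 */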
    409 
    410 #ifndef __HAVE_TIMECOUNTER
    411 #ifdef NTP
    412 	switch (hz) {
    413 	case 1:
    414 		shifthz = SHIFT_SCALE - 0;
    415 		break;
    416 	case 2:
    417 		shifthz = SHIFT_SCALE - 1;
    418 		break;
    419 	case 4:
    420 		shifthz = SHIFT_SCALE - 2;
    421 		break;
    422 	case 8:
    423 		shifthz = SHIFT_SCALE - 3;
    424 		break;
    425 	case 16:
    426 		shifthz = SHIFT_SCALE - 4;
    427 		break;
    428 	case 32:
    429 		shifthz = SHIFT_SCALE - 5;
    430 		break;
    431 	case 50:
    432 	case 60:
    433 	case 64:
    434 		shifthz = SHIFT_SCALE - 6;
    435 		break;
    436 	case 96:
    437 	case 100:
    438 	case 128:
    439 		shifthz = SHIFT_SCALE - 7;
    440 		break;
    441 	case 256:
    442 		shifthz = SHIFT_SCALE - 8;
    443 		break;
    444 	case 512:
    445 		shifthz = SHIFT_SCALE - 9;
    446 		break;
    447 	case 1000:
    448 	case 1024:
    449 		shifthz = SHIFT_SCALE - 10;
    450 		break;
    451 	case 1200:
    452 	case 2048:
    453 		shifthz = SHIFT_SCALE - 11;
    454 		break;
    455 	case 4096:
    456 		shifthz = SHIFT_SCALE - 12;
    457 		break;
    458 	case 8192:
    459 		shifthz = SHIFT_SCALE - 13;
    460 		break;
    461 	case 16384:
    462 		shifthz = SHIFT_SCALE - 14;
    463 		break;
    464 	case 32768:
    465 		shifthz = SHIFT_SCALE - 15;
    466 		break;
    467 	case 65536:
    468 		shifthz = SHIFT_SCALE - 16;
    469 		break;
    470 	default:
    471 		panic("weird hz");
    472 	}
    473 	if (fixtick == 0) {
    474 		/*
    475 		 * Give MD code a chance to set this to a better
    476 		 * value; but, if it doesn't, we should.
    477 		 */
    478 		fixtick = (1000000 - (hz*tick));
    479 	}
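	/*
	 * For illustration: with hz = 1024, tick = 1000000 / 1024 = 976 us,
	 * so hz * tick = 999424 and fixtick = 576.  The NTP phase adjustment
	 * below (time_adj += fixtick << shifthz) spreads those 576 us back
	 * over each second.
	 */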
    480 #endif /* NTP */
    481 #endif /* !__HAVE_TIMECOUNTER */
    482 }
    483 
    484 /*
    485  * The real-time timer, interrupting hz times per second.
    486  */
    487 void
    488 hardclock(struct clockframe *frame)
    489 {
    490 	struct lwp *l;
    491 	struct proc *p;
    492 	struct cpu_info *ci = curcpu();
    493 	struct ptimer *pt;
    494 #ifndef __HAVE_TIMECOUNTER
    495 	int delta;
    496 	extern int tickdelta;
    497 	extern long timedelta;
    498 #ifdef NTP
    499 	int time_update;
    500 	int ltemp;
    501 #endif /* NTP */
    502 #endif /* __HAVE_TIMECOUNTER */
    503 
    504 	l = curlwp;
    505 	if (!CURCPU_IDLE_P()) {
    506 		p = l->l_proc;
    507 		/*
    508 		 * Run current process's virtual and profile time, as needed.
    509 		 */
    510 		if (CLKF_USERMODE(frame) && p->p_timers &&
    511 		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
    512 			if (itimerdecr(pt, tick) == 0)
    513 				itimerfire(pt);
    514 		if (p->p_timers &&
    515 		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
    516 			if (itimerdecr(pt, tick) == 0)
    517 				itimerfire(pt);
    518 	}
    519 
    520 	/*
    521 	 * If no separate statistics clock is available, run it from here.
    522 	 */
    523 	if (stathz == 0)
    524 		statclock(frame);
    525 	if ((--ci->ci_schedstate.spc_ticks) <= 0)
    526 		sched_tick(ci);
    527 
    528 #if defined(MULTIPROCESSOR)
    529 	/*
    530 	 * If we are not the primary CPU, we're not allowed to do
    531 	 * any more work.
    532 	 */
    533 	if (CPU_IS_PRIMARY(ci) == 0)
    534 		return;
    535 #endif
    536 
    537 	hardclock_ticks++;
    538 
    539 #ifdef __HAVE_TIMECOUNTER
    540 	tc_ticktock();
    541 #else /* __HAVE_TIMECOUNTER */
    542 	/*
    543 	 * Increment the time-of-day.  The increment is normally just
    544 	 * ``tick''.  If the machine is one which has a clock frequency
    545 	 * such that ``hz'' would not divide the second evenly into
    546 	 * milliseconds, a periodic adjustment must be applied.  Finally,
    547 	 * if we are still adjusting the time (see adjtime()),
    548 	 * ``tickdelta'' may also be added in.
    549 	 */
    550 	delta = tick;
    551 
    552 #ifndef NTP
    553 	if (tickfix) {
    554 		tickfixcnt += tickfix;
    555 		if (tickfixcnt >= tickfixinterval) {
    556 			delta++;
    557 			tickfixcnt -= tickfixinterval;
    558 		}
    559 	}
    560 #endif /* !NTP */
    561 	/* Imprecise 4bsd adjtime() handling */
    562 	if (timedelta != 0) {
    563 		delta += tickdelta;
    564 		timedelta -= tickdelta;
    565 	}
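	/*
	 * For illustration (hypothetical values): if adjtime(2) has left
	 * timedelta = 5000 us with tickdelta = 5 us, each tick advances the
	 * clock by tick + 5 us until the remaining 5000 us are consumed,
	 * i.e. 1000 ticks, or 10 seconds at hz = 100.
	 */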
    566 
    567 #ifdef notyet
    568 	microset();
    569 #endif
    570 
    571 #ifndef NTP
    572 	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
    573 #endif
    574 	BUMPTIME(&mono_time, delta);
    575 
    576 #ifdef NTP
    577 	time_update = delta;
    578 
    579 	/*
    580 	 * Compute the phase adjustment. If the low-order bits
    581 	 * (time_phase) of the update overflow, bump the high-order bits
    582 	 * (time_update).
    583 	 */
    584 	time_phase += time_adj;
    585 	if (time_phase <= -FINEUSEC) {
    586 		ltemp = -time_phase >> SHIFT_SCALE;
    587 		time_phase += ltemp << SHIFT_SCALE;
    588 		time_update -= ltemp;
    589 	} else if (time_phase >= FINEUSEC) {
    590 		ltemp = time_phase >> SHIFT_SCALE;
    591 		time_phase -= ltemp << SHIFT_SCALE;
    592 		time_update += ltemp;
    593 	}
    594 
    595 #ifdef HIGHBALL
    596 	/*
    597 	 * If the HIGHBALL board is installed, we need to adjust the
    598 	 * external clock offset in order to close the hardware feedback
    599 	 * loop. This will adjust the external clock phase and frequency
    600 	 * in small amounts. The additional phase noise and frequency
    601 	 * wander this causes should be minimal. We also need to
    602 	 * discipline the kernel time variable, since the PLL is used to
    603 	 * discipline the external clock. If the Highball board is not
    604 	 * present, we discipline kernel time with the PLL as usual. We
    605 	 * assume that the external clock phase adjustment (time_update)
    606 	 * and kernel phase adjustment (clock_cpu) are less than the
    607 	 * value of tick.
    608 	 */
    609 	clock_offset.tv_usec += time_update;
    610 	if (clock_offset.tv_usec >= 1000000) {
    611 		clock_offset.tv_sec++;
    612 		clock_offset.tv_usec -= 1000000;
    613 	}
    614 	if (clock_offset.tv_usec < 0) {
    615 		clock_offset.tv_sec--;
    616 		clock_offset.tv_usec += 1000000;
    617 	}
    618 	time.tv_usec += clock_cpu;
    619 	clock_cpu = 0;
    620 #else
    621 	time.tv_usec += time_update;
    622 #endif /* HIGHBALL */
    623 
    624 	/*
    625 	 * On rollover of the second the phase adjustment to be used for
    626 	 * the next second is calculated. Also, the maximum error is
    627 	 * increased by the tolerance. If the PPS frequency discipline
    628 	 * code is present, the phase is increased to compensate for the
    629 	 * CPU clock oscillator frequency error.
    630 	 *
    631 	 * On a 32-bit machine and given parameters in the timex.h
    632 	 * header file, the maximum phase adjustment is +-512 ms and
    633 	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
    634 	 * 64-bit machine, you shouldn't need to ask.
    635 	 */
    636 	if (time.tv_usec >= 1000000) {
    637 		time.tv_usec -= 1000000;
    638 		time.tv_sec++;
    639 		time_maxerror += time_tolerance >> SHIFT_USEC;
    640 
    641 		/*
    642 		 * Leap second processing. If in leap-insert state at
    643 		 * the end of the day, the system clock is set back one
    644 		 * second; if in leap-delete state, the system clock is
    645 		 * set ahead one second. The microtime() routine or
    646 		 * external clock driver will ensure that reported time
    647 		 * is always monotonic. The ugly divides should be
    648 		 * replaced.
    649 		 */
    650 		switch (time_state) {
    651 		case TIME_OK:
    652 			if (time_status & STA_INS)
    653 				time_state = TIME_INS;
    654 			else if (time_status & STA_DEL)
    655 				time_state = TIME_DEL;
    656 			break;
    657 
    658 		case TIME_INS:
    659 			if (time.tv_sec % 86400 == 0) {
    660 				time.tv_sec--;
    661 				time_state = TIME_OOP;
    662 			}
    663 			break;
    664 
    665 		case TIME_DEL:
    666 			if ((time.tv_sec + 1) % 86400 == 0) {
    667 				time.tv_sec++;
    668 				time_state = TIME_WAIT;
    669 			}
    670 			break;
    671 
    672 		case TIME_OOP:
    673 			time_state = TIME_WAIT;
    674 			break;
    675 
    676 		case TIME_WAIT:
    677 			if (!(time_status & (STA_INS | STA_DEL)))
    678 				time_state = TIME_OK;
    679 			break;
    680 		}
    681 
    682 		/*
    683 		 * Compute the phase adjustment for the next second. In
    684 		 * PLL mode, the offset is reduced by a fixed factor
    685 		 * times the time constant. In FLL mode the offset is
    686 		 * used directly. In either mode, the maximum phase
    687 		 * adjustment for each second is clamped so as to spread
    688 		 * the adjustment over not more than the number of
    689 		 * seconds between updates.
    690 		 */
    691 		if (time_offset < 0) {
    692 			ltemp = -time_offset;
    693 			if (!(time_status & STA_FLL))
    694 				ltemp >>= SHIFT_KG + time_constant;
    695 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
    696 				ltemp = (MAXPHASE / MINSEC) <<
    697 				    SHIFT_UPDATE;
    698 			time_offset += ltemp;
    699 			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
    700 		} else if (time_offset > 0) {
    701 			ltemp = time_offset;
    702 			if (!(time_status & STA_FLL))
    703 				ltemp >>= SHIFT_KG + time_constant;
    704 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
    705 				ltemp = (MAXPHASE / MINSEC) <<
    706 				    SHIFT_UPDATE;
    707 			time_offset -= ltemp;
    708 			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
    709 		} else
    710 			time_adj = 0;
    711 
    712 		/*
    713 		 * Compute the frequency estimate and additional phase
    714 		 * adjustment due to frequency error for the next
    715 		 * second. When the PPS signal is engaged, gnaw on the
    716 		 * watchdog counter and update the frequency computed by
    717 		 * the pll and the PPS signal.
    718 		 */
    719 #ifdef PPS_SYNC
    720 		pps_valid++;
    721 		if (pps_valid == PPS_VALID) {
    722 			pps_jitter = MAXTIME;
    723 			pps_stabil = MAXFREQ;
    724 			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
    725 			    STA_PPSWANDER | STA_PPSERROR);
    726 		}
    727 		ltemp = time_freq + pps_freq;
    728 #else
    729 		ltemp = time_freq;
    730 #endif /* PPS_SYNC */
    731 
    732 		if (ltemp < 0)
    733 			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
    734 		else
    735 			time_adj += ltemp >> (SHIFT_USEC - shifthz);
    736 		time_adj += (long)fixtick << shifthz;
    737 
    738 		/*
    739 		 * When the CPU clock oscillator frequency is not a
    740 		 * power of 2 in Hz, shifthz is only an approximate
    741 		 * scale factor.
    742 		 *
    743 		 * To determine the adjustment, you can do the following:
    744 		 *   bc -q
    745 		 *   scale=24
    746 		 *   obase=2
    747 		 *   idealhz/realhz
    748 		 * where `idealhz' is the next higher power of 2, and `realhz'
    749 		 * is the actual value.  You may need to factor this result
    750 		 * into a sequence of 2 multipliers to get better precision.
    751 		 *
    752 		 * Likewise, the error can be calculated with (e.g. for 100Hz):
    753 		 *   bc -q
    754 		 *   scale=24
    755 		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
    756 		 * (and then multiply by 1000000 to get ppm).
    757 		 */
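		/*
		 * For illustration: for hz = 100 the ideal rate is 128, and
		 * 128 / 100 = 1.28 = 1.010001111010111... in binary, which
		 * is the factor applied in the hz == 100 case below.
		 */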
    758 		switch (hz) {
    759 		case 60:
    760 			/* A factor of 1.000100010001 gives about 15ppm
    761 			   error. */
    762 			if (time_adj < 0) {
    763 				time_adj -= (-time_adj >> 4);
    764 				time_adj -= (-time_adj >> 8);
    765 			} else {
    766 				time_adj += (time_adj >> 4);
    767 				time_adj += (time_adj >> 8);
    768 			}
    769 			break;
    770 
    771 		case 96:
    772 			/* A factor of 1.0101010101 gives about 244ppm error. */
    773 			if (time_adj < 0) {
    774 				time_adj -= (-time_adj >> 2);
    775 				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
    776 			} else {
    777 				time_adj += (time_adj >> 2);
    778 				time_adj += (time_adj >> 4) + (time_adj >> 8);
    779 			}
    780 			break;
    781 
    782 		case 50:
    783 		case 100:
    784 			/* A factor of 1.010001111010111 gives about 1ppm
    785 			   error. */
    786 			if (time_adj < 0) {
    787 				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
    788 				time_adj += (-time_adj >> 10);
    789 			} else {
    790 				time_adj += (time_adj >> 2) + (time_adj >> 5);
    791 				time_adj -= (time_adj >> 10);
    792 			}
    793 			break;
    794 
    795 		case 1000:
    796 			/* A factor of 1.000001100010100001 gives about 50ppm
    797 			   error. */
    798 			if (time_adj < 0) {
    799 				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
    800 				time_adj -= (-time_adj >> 7);
    801 			} else {
    802 				time_adj += (time_adj >> 6) + (time_adj >> 11);
    803 				time_adj += (time_adj >> 7);
    804 			}
    805 			break;
    806 
    807 		case 1200:
    808 			/* A factor of 1.1011010011100001 gives about 64ppm
    809 			   error. */
    810 			if (time_adj < 0) {
    811 				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
    812 				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
    813 			} else {
    814 				time_adj += (time_adj >> 1) + (time_adj >> 6);
    815 				time_adj += (time_adj >> 3) + (time_adj >> 10);
    816 			}
    817 			break;
    818 		}
    819 
    820 #ifdef EXT_CLOCK
    821 		/*
    822 		 * If an external clock is present, it is necessary to
    823 		 * discipline the kernel time variable anyway, since not
    824 		 * all system components use the microtime() interface.
    825 		 * Here, the time offset between the external clock and
    826 		 * kernel time variable is computed every so often.
    827 		 */
    828 		clock_count++;
    829 		if (clock_count > CLOCK_INTERVAL) {
    830 			clock_count = 0;
    831 			microtime(&clock_ext);
    832 			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
    833 			delta.tv_usec = clock_ext.tv_usec -
    834 			    time.tv_usec;
    835 			if (delta.tv_usec < 0)
    836 				delta.tv_sec--;
    837 			if (delta.tv_usec >= 500000) {
    838 				delta.tv_usec -= 1000000;
    839 				delta.tv_sec++;
    840 			}
    841 			if (delta.tv_usec < -500000) {
    842 				delta.tv_usec += 1000000;
    843 				delta.tv_sec--;
    844 			}
    845 			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
    846 			    delta.tv_usec > MAXPHASE) ||
    847 			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
    848 			    delta.tv_usec < -MAXPHASE)) {
    849 				time = clock_ext;
    850 				delta.tv_sec = 0;
    851 				delta.tv_usec = 0;
    852 			}
    853 #ifdef HIGHBALL
    854 			clock_cpu = delta.tv_usec;
    855 #else /* HIGHBALL */
    856 			hardupdate(delta.tv_usec);
    857 #endif /* HIGHBALL */
    858 		}
    859 #endif /* EXT_CLOCK */
    860 	}
    861 
    862 #endif /* NTP */
    863 #endif /* !__HAVE_TIMECOUNTER */
    864 
    865 	/*
    866 	 * Update real-time timeout queue.  Callouts are processed at a
    867 	 * very low CPU priority, so we don't keep the relatively high
    868 	 * clock interrupt priority any longer than necessary.
    869 	 */
    870 	callout_hardclock();
    871 }
    872 
    873 #ifdef __HAVE_TIMECOUNTER
    874 /*
    875  * Compute number of hz until specified time.  Used to compute second
    876  * argument to callout_reset() from an absolute time.
    877  */
    878 int
    879 hzto(struct timeval *tvp)
    880 {
    881 	struct timeval now, tv;
    882 
    883 	tv = *tvp;	/* Don't modify original tvp. */
    884 	getmicrotime(&now);
    885 	timersub(&tv, &now, &tv);
    886 	return tvtohz(&tv);
    887 }
    888 #endif /* __HAVE_TIMECOUNTER */
    889 
    890 /*
    891  * Compute number of ticks in the specified amount of time.
    892  */
    893 int
    894 tvtohz(struct timeval *tv)
    895 {
    896 	unsigned long ticks;
    897 	long sec, usec;
    898 
    899 	/*
    900 	 * If the number of usecs in the whole seconds part of the time
    901 	 * difference fits in a long, then the total number of usecs will
    902 	 * fit in an unsigned long.  Compute the total and convert it to
    903 	 * ticks, rounding up and adding 1 to allow for the current tick
    904 	 * to expire.  Rounding also depends on unsigned long arithmetic
    905 	 * to avoid overflow.
    906 	 *
    907 	 * Otherwise, if the number of ticks in the whole seconds part of
    908 	 * the time difference fits in a long, then convert the parts to
    909 	 * ticks separately and add, using similar rounding methods and
    910 	 * overflow avoidance.  This method would work in the previous
    911 	 * case, but it is slightly slower and assumes that hz is integral.
    912 	 *
    913 	 * Otherwise, round the time difference down to the maximum
    914 	 * representable value.
    915 	 *
    916 	 * If ints are 32-bit, then the maximum value for any timeout in
    917 	 * 10ms ticks is 248 days.
    918 	 */
    919 	sec = tv->tv_sec;
    920 	usec = tv->tv_usec;
    921 
    922 	if (usec < 0) {
    923 		sec--;
    924 		usec += 1000000;
    925 	}
    926 
    927 	if (sec < 0 || (sec == 0 && usec <= 0)) {
    928 		/*
    929 		 * Would expire now or in the past.  Return 0 ticks.
    930 		 * This is different from the legacy hzto() interface,
    931 		 * and callers need to check for it.
    932 		 */
    933 		ticks = 0;
    934 	} else if (sec <= (LONG_MAX / 1000000))
    935 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
    936 		    / tick) + 1;
    937 	else if (sec <= (LONG_MAX / hz))
    938 		ticks = (sec * hz) +
    939 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
    940 	else
    941 		ticks = LONG_MAX;
    942 
    943 	if (ticks > INT_MAX)
    944 		ticks = INT_MAX;
    945 
    946 	return ((int)ticks);
    947 }
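/*
 * For illustration: with hz = 100 (tick = 10000 us), a relative interval
 * of 2.5 seconds converts as
 *	tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	ticks = ((2 * 1000000) + 500000 + 9999) / 10000 + 1 = 251
 * so tvtohz(&tv) returns 251, suitable as the ticks argument to
 * callout_reset().
 */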
    948 
    949 #ifndef __HAVE_TIMECOUNTER
    950 /*
    951  * Compute number of hz until specified time.  Used to compute second
    952  * argument to callout_reset() from an absolute time.
    953  */
    954 int
    955 hzto(struct timeval *tv)
    956 {
    957 	unsigned long ticks;
    958 	long sec, usec;
    959 	int s;
    960 
    961 	/*
    962 	 * If the number of usecs in the whole seconds part of the time
    963 	 * difference fits in a long, then the total number of usecs will
    964 	 * fit in an unsigned long.  Compute the total and convert it to
    965 	 * ticks, rounding up and adding 1 to allow for the current tick
    966 	 * to expire.  Rounding also depends on unsigned long arithmetic
    967 	 * to avoid overflow.
    968 	 *
    969 	 * Otherwise, if the number of ticks in the whole seconds part of
    970 	 * the time difference fits in a long, then convert the parts to
    971 	 * ticks separately and add, using similar rounding methods and
    972 	 * overflow avoidance.  This method would work in the previous
    973 	 * case, but it is slightly slower and assumes that hz is integral.
    974 	 *
    975 	 * Otherwise, round the time difference down to the maximum
    976 	 * representable value.
    977 	 *
    978 	 * If ints are 32-bit, then the maximum value for any timeout in
    979 	 * 10ms ticks is 248 days.
    980 	 */
    981 	s = splclock();
    982 	sec = tv->tv_sec - time.tv_sec;
    983 	usec = tv->tv_usec - time.tv_usec;
    984 	splx(s);
    985 
    986 	if (usec < 0) {
    987 		sec--;
    988 		usec += 1000000;
    989 	}
    990 
    991 	if (sec < 0 || (sec == 0 && usec <= 0)) {
    992 		/*
    993 		 * Would expire now or in the past.  Return 0 ticks.
    994 		 * This is different from the legacy hzto() interface,
    995 		 * and callers need to check for it.
    996 		 */
    997 		ticks = 0;
    998 	} else if (sec <= (LONG_MAX / 1000000))
    999 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
   1000 		    / tick) + 1;
   1001 	else if (sec <= (LONG_MAX / hz))
   1002 		ticks = (sec * hz) +
   1003 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
   1004 	else
   1005 		ticks = LONG_MAX;
   1006 
   1007 	if (ticks > INT_MAX)
   1008 		ticks = INT_MAX;
   1009 
   1010 	return ((int)ticks);
   1011 }
   1012 #endif /* !__HAVE_TIMECOUNTER */
   1013 
   1014 /*
   1015  * Compute number of ticks in the specified amount of time.
   1016  */
   1017 int
   1018 tstohz(struct timespec *ts)
   1019 {
   1020 	struct timeval tv;
   1021 
   1022 	/*
   1023 	 * usec has great enough resolution for hz, so convert to a
   1024 	 * timeval and use tvtohz() above.
   1025 	 */
   1026 	TIMESPEC_TO_TIMEVAL(&tv, ts);
   1027 	return tvtohz(&tv);
   1028 }
   1029 
   1030 /*
   1031  * Start profiling on a process.
   1032  *
   1033  * Kernel profiling passes proc0 which never exits and hence
   1034  * keeps the profile clock running constantly.
   1035  */
   1036 void
   1037 startprofclock(struct proc *p)
   1038 {
   1039 
   1040 	KASSERT(mutex_owned(&p->p_stmutex));
   1041 
   1042 	if ((p->p_stflag & PST_PROFIL) == 0) {
   1043 		p->p_stflag |= PST_PROFIL;
   1044 		/*
   1045 		 * This is only necessary if using the clock as the
   1046 		 * profiling source.
   1047 		 */
   1048 		if (++profprocs == 1 && stathz != 0)
   1049 			psdiv = psratio;
   1050 	}
   1051 }
   1052 
   1053 /*
   1054  * Stop profiling on a process.
   1055  */
   1056 void
   1057 stopprofclock(struct proc *p)
   1058 {
   1059 
   1060 	KASSERT(mutex_owned(&p->p_stmutex));
   1061 
   1062 	if (p->p_stflag & PST_PROFIL) {
   1063 		p->p_stflag &= ~PST_PROFIL;
   1064 		/*
   1065 		 * This is only necessary if using the clock as the
   1066 		 * profiling source.
   1067 		 */
   1068 		if (--profprocs == 0 && stathz != 0)
   1069 			psdiv = 1;
   1070 	}
   1071 }
   1072 
   1073 #if defined(PERFCTRS)
   1074 /*
   1075  * Independent profiling "tick" in case we're using a separate
   1076  * clock or profiling event source.  Currently, that's just
   1077  * performance counters--hence the wrapper.
   1078  */
   1079 void
   1080 proftick(struct clockframe *frame)
   1081 {
   1082 #ifdef GPROF
   1083         struct gmonparam *g;
   1084         intptr_t i;
   1085 #endif
   1086 	struct lwp *l;
   1087 	struct proc *p;
   1088 
   1089 	l = curlwp;
   1090 	p = (l ? l->l_proc : NULL);
   1091 	if (CLKF_USERMODE(frame)) {
   1092 		mutex_spin_enter(&p->p_stmutex);
   1093 		if (p->p_stflag & PST_PROFIL)
   1094 			addupc_intr(l, CLKF_PC(frame));
   1095 		mutex_spin_exit(&p->p_stmutex);
   1096 	} else {
   1097 #ifdef GPROF
   1098 		g = &_gmonparam;
   1099 		if (g->state == GMON_PROF_ON) {
   1100 			i = CLKF_PC(frame) - g->lowpc;
   1101 			if (i < g->textsize) {
   1102 				i /= HISTFRACTION * sizeof(*g->kcount);
   1103 				g->kcount[i]++;
   1104 			}
   1105 		}
   1106 #endif
   1107 #ifdef PROC_PC
   1108 		if (p != NULL) {
   1109 			mutex_spin_enter(&p->p_stmutex);
   1110 			if (p->p_stflag & PST_PROFIL)
   1111 				addupc_intr(l, PROC_PC(p));
   1112 			mutex_spin_exit(&p->p_stmutex);
   1113 		}
   1114 #endif
   1115 	}
   1116 }
   1117 #endif
   1118 
   1119 void
   1120 schedclock(struct lwp *l)
   1121 {
   1122 
   1123 	if ((l->l_flag & LW_IDLE) != 0)
   1124 		return;
   1125 
   1126 	sched_schedclock(l);
   1127 }
   1128 
   1129 /*
   1130  * Statistics clock.  Grab profile sample, and if divider reaches 0,
   1131  * do process and kernel statistics.
   1132  */
   1133 void
   1134 statclock(struct clockframe *frame)
   1135 {
   1136 #ifdef GPROF
   1137 	struct gmonparam *g;
   1138 	intptr_t i;
   1139 #endif
   1140 	struct cpu_info *ci = curcpu();
   1141 	struct schedstate_percpu *spc = &ci->ci_schedstate;
   1142 	struct proc *p;
   1143 	struct lwp *l;
   1144 
   1145 	/*
   1146 	 * Notice changes in divisor frequency, and adjust clock
   1147 	 * frequency accordingly.
   1148 	 */
   1149 	if (spc->spc_psdiv != psdiv) {
   1150 		spc->spc_psdiv = psdiv;
   1151 		spc->spc_pscnt = psdiv;
   1152 		if (psdiv == 1) {
   1153 			setstatclockrate(stathz);
   1154 		} else {
   1155 			setstatclockrate(profhz);
   1156 		}
   1157 	}
   1158 	l = curlwp;
   1159 	if ((l->l_flag & LW_IDLE) != 0) {
   1160 		/*
   1161 		 * Don't account idle LWPs as the swapper.
   1162 		 */
   1163 		p = NULL;
   1164 	} else {
   1165 		p = l->l_proc;
   1166 		mutex_spin_enter(&p->p_stmutex);
   1167 	}
   1168 
   1169 	if (CLKF_USERMODE(frame)) {
   1170 		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
   1171 			addupc_intr(l, CLKF_PC(frame));
   1172 		if (--spc->spc_pscnt > 0) {
   1173 			mutex_spin_exit(&p->p_stmutex);
   1174 			return;
   1175 		}
   1176 
   1177 		/*
   1178 		 * Came from user mode; CPU was in user state.
   1179 		 * If this process is being profiled record the tick.
   1180 		 */
   1181 		p->p_uticks++;
   1182 		if (p->p_nice > NZERO)
   1183 			spc->spc_cp_time[CP_NICE]++;
   1184 		else
   1185 			spc->spc_cp_time[CP_USER]++;
   1186 	} else {
   1187 #ifdef GPROF
   1188 		/*
   1189 		 * Kernel statistics are just like addupc_intr, only easier.
   1190 		 */
   1191 		g = &_gmonparam;
   1192 		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
   1193 			i = CLKF_PC(frame) - g->lowpc;
   1194 			if (i < g->textsize) {
   1195 				i /= HISTFRACTION * sizeof(*g->kcount);
   1196 				g->kcount[i]++;
   1197 			}
   1198 		}
   1199 #endif
   1200 #ifdef LWP_PC
   1201 		if (p != NULL && profsrc == PROFSRC_CLOCK &&
   1202 		    (p->p_stflag & PST_PROFIL)) {
   1203 			addupc_intr(l, LWP_PC(l));
   1204 		}
   1205 #endif
   1206 		if (--spc->spc_pscnt > 0) {
   1207 			if (p != NULL)
   1208 				mutex_spin_exit(&p->p_stmutex);
   1209 			return;
   1210 		}
   1211 		/*
   1212 		 * Came from kernel mode, so we were:
   1213 		 * - handling an interrupt,
   1214 		 * - doing syscall or trap work on behalf of the current
   1215 		 *   user process, or
   1216 		 * - spinning in the idle loop.
   1217 		 * Whichever it is, charge the time as appropriate.
   1218 		 * Note that we charge interrupts to the current process,
   1219 		 * regardless of whether they are ``for'' that process,
   1220 		 * so that we know how much of its real time was spent
   1221 		 * in ``non-process'' (i.e., interrupt) work.
   1222 		 */
   1223 		if (CLKF_INTR(frame) || (l->l_flag & LW_INTR) != 0) {
   1224 			if (p != NULL) {
   1225 				p->p_iticks++;
   1226 			}
   1227 			spc->spc_cp_time[CP_INTR]++;
   1228 		} else if (p != NULL) {
   1229 			p->p_sticks++;
   1230 			spc->spc_cp_time[CP_SYS]++;
   1231 		} else {
   1232 			spc->spc_cp_time[CP_IDLE]++;
   1233 		}
   1234 	}
   1235 	spc->spc_pscnt = psdiv;
   1236 
   1237 	if (p != NULL) {
   1238 		++l->l_cpticks;
   1239 		mutex_spin_exit(&p->p_stmutex);
   1240 	}
   1241 
   1242 	/*
   1243 	 * If no separate schedclock is provided, call it here
   1244 	 * at about 16 Hz.
   1245 	 */
   1246 	if (schedhz == 0) {
   1247 		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
   1248 			schedclock(l);
   1249 			ci->ci_schedstate.spc_schedticks = statscheddiv;
   1250 		}
   1251 	}
   1252 }
   1253 
   1254 #ifndef __HAVE_TIMECOUNTER
   1255 #ifdef NTP	/* NTP phase-locked loop in kernel */
   1256 /*
   1257  * hardupdate() - local clock update
   1258  *
   1259  * This routine is called by ntp_adjtime() to update the local clock
   1260  * phase and frequency. The implementation is of an adaptive-parameter,
   1261  * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
   1262  * time and frequency offset estimates for each call. If the kernel PPS
   1263  * discipline code is configured (PPS_SYNC), the PPS signal itself
   1264  * determines the new time offset, instead of the calling argument.
   1265  * Presumably, calls to ntp_adjtime() occur only when the caller
   1266  * believes the local clock is valid within some bound (+-128 ms with
   1267  * NTP). If the caller's time is far different than the PPS time, an
   1268  * argument will ensue, and it's not clear who will lose.
   1269  *
   1270  * For uncompensated quartz crystal oscillators and nominal update
   1271  * intervals less than 1024 s, operation should be in phase-lock mode
   1272  * (STA_FLL = 0), where the loop is disciplined to phase. For update
   1273  * intervals greater than this, operation should be in frequency-lock
   1274  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
   1275  *
   1276  * Note: splclock() is in effect.
   1277  */
   1278 void
   1279 hardupdate(long offset)
   1280 {
   1281 	long ltemp, mtemp;
   1282 
   1283 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
   1284 		return;
   1285 	ltemp = offset;
   1286 #ifdef PPS_SYNC
   1287 	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
   1288 		ltemp = pps_offset;
   1289 #endif /* PPS_SYNC */
   1290 
   1291 	/*
   1292 	 * Scale the phase adjustment and clamp to the operating range.
   1293 	 */
   1294 	if (ltemp > MAXPHASE)
   1295 		time_offset = MAXPHASE << SHIFT_UPDATE;
   1296 	else if (ltemp < -MAXPHASE)
   1297 		time_offset = -(MAXPHASE << SHIFT_UPDATE);
   1298 	else
   1299 		time_offset = ltemp << SHIFT_UPDATE;
   1300 
   1301 	/*
   1302 	 * Select whether the frequency is to be controlled and in which
   1303 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
   1304 	 * multiply/divide should be replaced someday.
   1305 	 */
   1306 	if (time_status & STA_FREQHOLD || time_reftime == 0)
   1307 		time_reftime = time.tv_sec;
   1308 	mtemp = time.tv_sec - time_reftime;
   1309 	time_reftime = time.tv_sec;
   1310 	if (time_status & STA_FLL) {
   1311 		if (mtemp >= MINSEC) {
   1312 			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
   1313 			    SHIFT_UPDATE));
   1314 			if (ltemp < 0)
   1315 				time_freq -= -ltemp >> SHIFT_KH;
   1316 			else
   1317 				time_freq += ltemp >> SHIFT_KH;
   1318 		}
   1319 	} else {
   1320 		if (mtemp < MAXSEC) {
   1321 			ltemp *= mtemp;
   1322 			if (ltemp < 0)
   1323 				time_freq -= -ltemp >> (time_constant +
   1324 				    time_constant + SHIFT_KF -
   1325 				    SHIFT_USEC);
   1326 			else
   1327 				time_freq += ltemp >> (time_constant +
   1328 				    time_constant + SHIFT_KF -
   1329 				    SHIFT_USEC);
   1330 		}
   1331 	}
   1332 	if (time_freq > time_tolerance)
   1333 		time_freq = time_tolerance;
   1334 	else if (time_freq < -time_tolerance)
   1335 		time_freq = -time_tolerance;
   1336 }
   1337 
   1338 #ifdef PPS_SYNC
   1339 /*
   1340  * hardpps() - discipline CPU clock oscillator to external PPS signal
   1341  *
   1342  * This routine is called at each PPS interrupt in order to discipline
   1343  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
   1344  * and leaves it in a handy spot for the hardclock() routine. It
   1345  * integrates successive PPS phase differences and calculates the
   1346  * frequency offset. This is used in hardclock() to discipline the CPU
   1347  * clock oscillator so that intrinsic frequency error is cancelled out.
   1348  * The code requires the caller to capture the time and hardware counter
   1349  * value at the on-time PPS signal transition.
   1350  *
   1351  * Note that, on some Unix systems, this routine runs at an interrupt
   1352  * priority level higher than the timer interrupt routine hardclock().
   1353  * Therefore, the variables used are distinct from the hardclock()
   1354  * variables, except for certain exceptions: The PPS frequency pps_freq
   1355  * and phase pps_offset variables are determined by this routine and
   1356  * updated atomically. The time_tolerance variable can be considered a
   1357  * constant, since it is infrequently changed, and then only when the
   1358  * PPS signal is disabled. The watchdog counter pps_valid is updated
   1359  * once per second by hardclock() and is atomically cleared in this
   1360  * routine.
   1361  */
   1362 void
   1363 hardpps(struct timeval *tvp,		/* time at PPS */
   1364 	long usec			/* hardware counter at PPS */)
   1365 {
   1366 	long u_usec, v_usec, bigtick;
   1367 	long cal_sec, cal_usec;
   1368 
   1369 	/*
   1370 	 * An occasional glitch can be produced when the PPS interrupt
   1371 	 * occurs in the hardclock() routine before the time variable is
   1372 	 * updated. Here the offset is discarded when the difference
   1373 	 * between it and the last one is greater than tick/2, but not
   1374 	 * if the interval since the first discard exceeds 30 s.
   1375 	 */
   1376 	time_status |= STA_PPSSIGNAL;
   1377 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
   1378 	pps_valid = 0;
   1379 	u_usec = -tvp->tv_usec;
   1380 	if (u_usec < -500000)
   1381 		u_usec += 1000000;
   1382 	v_usec = pps_offset - u_usec;
   1383 	if (v_usec < 0)
   1384 		v_usec = -v_usec;
   1385 	if (v_usec > (tick >> 1)) {
   1386 		if (pps_glitch > MAXGLITCH) {
   1387 			pps_glitch = 0;
   1388 			pps_tf[2] = u_usec;
   1389 			pps_tf[1] = u_usec;
   1390 		} else {
   1391 			pps_glitch++;
   1392 			u_usec = pps_offset;
   1393 		}
   1394 	} else
   1395 		pps_glitch = 0;
   1396 
   1397 	/*
   1398 	 * A three-stage median filter is used to help deglitch the pps
   1399 	 * time. The median sample becomes the time offset estimate; the
   1400 	 * difference between the other two samples becomes the time
   1401 	 * dispersion (jitter) estimate.
   1402 	 */
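	/*
	 * For illustration (hypothetical samples): with pps_tf[] =
	 * { 40, 10, 25 } in microseconds (newest first), the median 25
	 * becomes pps_offset and the spread of the other two samples,
	 * 40 - 10 = 30 us, becomes the jitter sample v_usec below.
	 */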
   1403 	pps_tf[2] = pps_tf[1];
   1404 	pps_tf[1] = pps_tf[0];
   1405 	pps_tf[0] = u_usec;
   1406 	if (pps_tf[0] > pps_tf[1]) {
   1407 		if (pps_tf[1] > pps_tf[2]) {
   1408 			pps_offset = pps_tf[1];		/* 0 1 2 */
   1409 			v_usec = pps_tf[0] - pps_tf[2];
   1410 		} else if (pps_tf[2] > pps_tf[0]) {
   1411 			pps_offset = pps_tf[0];		/* 2 0 1 */
   1412 			v_usec = pps_tf[2] - pps_tf[1];
   1413 		} else {
   1414 			pps_offset = pps_tf[2];		/* 0 2 1 */
   1415 			v_usec = pps_tf[0] - pps_tf[1];
   1416 		}
   1417 	} else {
   1418 		if (pps_tf[1] < pps_tf[2]) {
   1419 			pps_offset = pps_tf[1];		/* 2 1 0 */
   1420 			v_usec = pps_tf[2] - pps_tf[0];
   1421 		} else  if (pps_tf[2] < pps_tf[0]) {
   1422 			pps_offset = pps_tf[0];		/* 1 0 2 */
   1423 			v_usec = pps_tf[1] - pps_tf[2];
   1424 		} else {
   1425 			pps_offset = pps_tf[2];		/* 1 2 0 */
   1426 			v_usec = pps_tf[1] - pps_tf[0];
   1427 		}
   1428 	}
   1429 	if (v_usec > MAXTIME)
   1430 		pps_jitcnt++;
   1431 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
   1432 	if (v_usec < 0)
   1433 		pps_jitter -= -v_usec >> PPS_AVG;
   1434 	else
   1435 		pps_jitter += v_usec >> PPS_AVG;
   1436 	if (pps_jitter > (MAXTIME >> 1))
   1437 		time_status |= STA_PPSJITTER;
   1438 
   1439 	/*
   1440 	 * During the calibration interval adjust the starting time when
   1441 	 * the tick overflows. At the end of the interval compute the
   1442 	 * duration of the interval and the difference of the hardware
   1443 	 * counters at the beginning and end of the interval. This code
   1444 	 * is deliciously complicated by the fact that valid differences may
   1445 	 * exceed the value of tick when using long calibration
   1446 	 * intervals and small ticks. Note that the counter can be
   1447 	 * greater than tick if caught at just the wrong instant, but
   1448 	 * the values returned and used here are correct.
   1449 	 */
   1450 	bigtick = (long)tick << SHIFT_USEC;
   1451 	pps_usec -= pps_freq;
   1452 	if (pps_usec >= bigtick)
   1453 		pps_usec -= bigtick;
   1454 	if (pps_usec < 0)
   1455 		pps_usec += bigtick;
   1456 	pps_time.tv_sec++;
   1457 	pps_count++;
   1458 	if (pps_count < (1 << pps_shift))
   1459 		return;
   1460 	pps_count = 0;
   1461 	pps_calcnt++;
   1462 	u_usec = usec << SHIFT_USEC;
   1463 	v_usec = pps_usec - u_usec;
   1464 	if (v_usec >= bigtick >> 1)
   1465 		v_usec -= bigtick;
   1466 	if (v_usec < -(bigtick >> 1))
   1467 		v_usec += bigtick;
   1468 	if (v_usec < 0)
   1469 		v_usec = -(-v_usec >> pps_shift);
   1470 	else
   1471 		v_usec = v_usec >> pps_shift;
   1472 	pps_usec = u_usec;
   1473 	cal_sec = tvp->tv_sec;
   1474 	cal_usec = tvp->tv_usec;
   1475 	cal_sec -= pps_time.tv_sec;
   1476 	cal_usec -= pps_time.tv_usec;
   1477 	if (cal_usec < 0) {
   1478 		cal_usec += 1000000;
   1479 		cal_sec--;
   1480 	}
   1481 	pps_time = *tvp;
   1482 
   1483 	/*
   1484 	 * Check for lost interrupts, noise, excessive jitter and
   1485 	 * excessive frequency error. The number of timer ticks during
   1486 	 * the interval may vary +-1 tick. Add to this a margin of one
   1487 	 * tick for the PPS signal jitter and maximum frequency
   1488 	 * deviation. If the limits are exceeded, the calibration
   1489 	 * interval is reset to the minimum and we start over.
   1490 	 */
   1491 	u_usec = (long)tick << 1;
   1492 	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
   1493 	    || (cal_sec == 0 && cal_usec < u_usec))
   1494 	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
   1495 		pps_errcnt++;
   1496 		pps_shift = PPS_SHIFT;
   1497 		pps_intcnt = 0;
   1498 		time_status |= STA_PPSERROR;
   1499 		return;
   1500 	}
   1501 
   1502 	/*
   1503 	 * A three-stage median filter is used to help deglitch the pps
   1504 	 * frequency. The median sample becomes the frequency offset
   1505 	 * estimate; the difference between the other two samples
   1506 	 * becomes the frequency dispersion (stability) estimate.
   1507 	 */
   1508 	pps_ff[2] = pps_ff[1];
   1509 	pps_ff[1] = pps_ff[0];
   1510 	pps_ff[0] = v_usec;
   1511 	if (pps_ff[0] > pps_ff[1]) {
   1512 		if (pps_ff[1] > pps_ff[2]) {
   1513 			u_usec = pps_ff[1];		/* 0 1 2 */
   1514 			v_usec = pps_ff[0] - pps_ff[2];
   1515 		} else if (pps_ff[2] > pps_ff[0]) {
   1516 			u_usec = pps_ff[0];		/* 2 0 1 */
   1517 			v_usec = pps_ff[2] - pps_ff[1];
   1518 		} else {
   1519 			u_usec = pps_ff[2];		/* 0 2 1 */
   1520 			v_usec = pps_ff[0] - pps_ff[1];
   1521 		}
   1522 	} else {
   1523 		if (pps_ff[1] < pps_ff[2]) {
   1524 			u_usec = pps_ff[1];		/* 2 1 0 */
   1525 			v_usec = pps_ff[2] - pps_ff[0];
   1526 		} else  if (pps_ff[2] < pps_ff[0]) {
   1527 			u_usec = pps_ff[0];		/* 1 0 2 */
   1528 			v_usec = pps_ff[1] - pps_ff[2];
   1529 		} else {
   1530 			u_usec = pps_ff[2];		/* 1 2 0 */
   1531 			v_usec = pps_ff[1] - pps_ff[0];
   1532 		}
   1533 	}
   1534 
   1535 	/*
   1536 	 * Here the frequency dispersion (stability) is updated. If it
   1537 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
   1538 	 * offset is updated as well, but clamped to the tolerance. It
   1539 	 * will be processed later by the hardclock() routine.
   1540 	 */
   1541 	v_usec = (v_usec >> 1) - pps_stabil;
   1542 	if (v_usec < 0)
   1543 		pps_stabil -= -v_usec >> PPS_AVG;
   1544 	else
   1545 		pps_stabil += v_usec >> PPS_AVG;
   1546 	if (pps_stabil > MAXFREQ >> 2) {
   1547 		pps_stbcnt++;
   1548 		time_status |= STA_PPSWANDER;
   1549 		return;
   1550 	}
   1551 	if (time_status & STA_PPSFREQ) {
   1552 		if (u_usec < 0) {
   1553 			pps_freq -= -u_usec >> PPS_AVG;
   1554 			if (pps_freq < -time_tolerance)
   1555 				pps_freq = -time_tolerance;
   1556 			u_usec = -u_usec;
   1557 		} else {
   1558 			pps_freq += u_usec >> PPS_AVG;
   1559 			if (pps_freq > time_tolerance)
   1560 				pps_freq = time_tolerance;
   1561 		}
   1562 	}
   1563 
   1564 	/*
   1565 	 * Here the calibration interval is adjusted. If the maximum
   1566 	 * time difference is greater than tick / 4, reduce the interval
   1567 	 * by half. If this is not the case for four consecutive
   1568 	 * intervals, double the interval.
   1569 	 */
   1570 	if (u_usec << pps_shift > bigtick >> 2) {
   1571 		pps_intcnt = 0;
   1572 		if (pps_shift > PPS_SHIFT)
   1573 			pps_shift--;
   1574 	} else if (pps_intcnt >= 4) {
   1575 		pps_intcnt = 0;
   1576 		if (pps_shift < PPS_SHIFTMAX)
   1577 			pps_shift++;
   1578 	} else
   1579 		pps_intcnt++;
   1580 }
   1581 #endif /* PPS_SYNC */
   1582 #endif /* NTP  */
   1583 
   1584 /* timecounter compat functions */
   1585 void
   1586 nanotime(struct timespec *ts)
   1587 {
   1588 	struct timeval tv;
   1589 
   1590 	microtime(&tv);
   1591 	TIMEVAL_TO_TIMESPEC(&tv, ts);
   1592 }
   1593 
   1594 void
   1595 getbinuptime(struct bintime *bt)
   1596 {
   1597 	struct timeval tv;
   1598 
   1599 	microtime(&tv);
   1600 	timeval2bintime(&tv, bt);
   1601 }
   1602 
   1603 void
   1604 nanouptime(struct timespec *tsp)
   1605 {
   1606 	int s;
   1607 
   1608 	s = splclock();
   1609 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
   1610 	splx(s);
   1611 }
   1612 
   1613 void
   1614 getnanouptime(struct timespec *tsp)
   1615 {
   1616 	int s;
   1617 
   1618 	s = splclock();
   1619 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
   1620 	splx(s);
   1621 }
   1622 
   1623 void
   1624 getmicrouptime(struct timeval *tvp)
   1625 {
   1626 	int s;
   1627 
   1628 	s = splclock();
   1629 	*tvp = mono_time;
   1630 	splx(s);
   1631 }
   1632 
   1633 void
   1634 getnanotime(struct timespec *tsp)
   1635 {
   1636 	int s;
   1637 
   1638 	s = splclock();
   1639 	TIMEVAL_TO_TIMESPEC(&time, tsp);
   1640 	splx(s);
   1641 }
   1642 
   1643 void
   1644 getmicrotime(struct timeval *tvp)
   1645 {
   1646 	int s;
   1647 
   1648 	s = splclock();
   1649 	*tvp = time;
   1650 	splx(s);
   1651 }
   1652 
   1653 u_int64_t
   1654 tc_getfrequency(void)
   1655 {
   1656 	return hz;
   1657 }
   1658 #endif /* !__HAVE_TIMECOUNTER */
   1659