/*	$NetBSD: kern_clock.c,v 1.60.2.1 2000/07/13 20:12:18 thorpej Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other. The main clock, running hz times per second, is used to keep
 * track of real time. The second timer handles kernel and user profiling,
 * and does resource use estimation. If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks. For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires. Otherwise, it would never accumulate
 * cpu ticks. The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock. This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling. This profile clock runs at profhz. We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics. (For profiling, every tick counts.)
 */
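
/*
 * Worked example (illustrative, not part of the original): with
 * stathz = 128 and profhz = 1024, psratio = profhz / stathz = 8, so
 * while profiling is active statclock() counts only every 8th tick
 * toward process and kernel statistics.
 */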

#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int	time_state = TIME_OK;		/* clock state */
int	time_status = STA_UNSYNC;	/* clock status bits */
long	time_offset = 0;		/* time offset (us) */
long	time_constant = 0;		/* pll time constant */
long	time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long	time_precision = 1;		/* clock precision (us) */
long	time_maxerror = MAXPHASE;	/* maximum error (us) */
long	time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long	time_phase = 0;			/* phase offset (scaled us) */
long	time_freq = 0;			/* frequency offset (scaled ppm) */
long	time_adj = 0;			/* tick adjust (scaled 1 / hz) */
long	time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long	pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long	pps_offset = 0;		/* pps time offset (us) */
long	pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long	pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long	pps_freq = 0;		/* frequency offset (scaled ppm) */
long	pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long	pps_usec = 0;		/* microsec counter at last interval */
long	pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int	pps_glitch = 0;		/* pps signal glitch counter */
int	pps_count = 0;		/* calibration interval counter (s) */
int	pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int	pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long	pps_jitcnt = 0;		/* jitter limit exceeded */
long	pps_calcnt = 0;		/* calibration intervals */
long	pps_errcnt = 0;		/* calibration errors */
long	pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL	30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int	clock_count = 0;	/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long	clock_cpu = 0;			/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
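
/*
 * For example (illustrative): if tp->tv_usec is 999990 and usec is 20,
 * BUMPTIME() leaves tv_usec at 10 and bumps tv_sec by one. Note that a
 * single invocation can absorb at most one second of carry.
 */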

int	stathz;
int	profhz;
int	profprocs;
u_int64_t hardclock_ticks, softclock_ticks;
int	softclock_running;		/* 1 => softclock() is running */
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words from time at once.
 * To succeed we need to be quadword aligned.
 * The sparc already does that, and that it has worked so far is a fluke.
 */
volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct timeval mono_time;

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities", and Justin Gibbs's subsequent
 * integration into FreeBSD, modified for NetBSD by Jason R. Thorpe.
 *
 * The original work on the data structures used in this implementation
 * was published by G. Varghese and A. Lauck in the paper "Hashed and
 * Hierarchical Timing Wheels: Data Structures for the Efficient
 * Implementation of a Timer Facility" in the Proceedings of the 11th
 * ACM Annual Symposium on Operating System Principles, Austin, Texas,
 * November 1987.
 */
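
/*
 * Illustrative example (sizes assumed, not from the original): with
 * ncallout = 200, callout_setsize() rounds callwheelsize up to 256,
 * so callwheelmask = 0xff and a callout expiring at absolute tick
 * c_time hangs off callwheel[c_time & 0xff].
 */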
struct callout_queue *callwheel;
int	callwheelsize, callwheelbits, callwheelmask;

static struct callout *nextsoftcheck;	/* next callout to be checked */

#ifdef CALLWHEEL_STATS
int	callwheel_collisions;		/* number of hash collisions */
int	callwheel_maxlength;		/* length of the longest hash chain */
int	*callwheel_sizes;		/* per-bucket length count */
u_int64_t callwheel_count;		/* # callouts currently */
u_int64_t callwheel_established;	/* # callouts established */
u_int64_t callwheel_fired;		/* # callouts that fired */
u_int64_t callwheel_disestablished;	/* # callouts disestablished */
u_int64_t callwheel_changed;		/* # callouts changed */
u_int64_t callwheel_softclocks;		/* # times softclock() called */
u_int64_t callwheel_softchecks;		/* # checks per softclock() */
u_int64_t callwheel_softempty;		/* # empty buckets seen */
#endif /* CALLWHEEL_STATS */

/*
 * This value indicates the number of consecutive callouts that
 * will be checked before we allow interrupts to have a chance
 * again.
 */
#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS	100
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks()
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
		 */
		fixtick = (1000000 - (hz*tick));
	}
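	/*
	 * For example (illustrative): with hz = 60 and tick = 16666,
	 * hz * tick = 999960, so fixtick = 40 us must be redistributed
	 * over each second; with hz = 100 and tick = 10000, fixtick = 0.
	 */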
#endif
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	struct clockframe *frame;
{
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
#ifdef NTP
	int time_update;
	int ltemp;
#endif

	p = curproc;
	if (p) {
		struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(curcpu()) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day. The increment is normally just
	 * ``tick''. If the machine is one which has a clock frequency
	 * such that ``hz'' does not divide the second evenly into
	 * microseconds, a periodic adjustment must be applied. Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	hardclock_ticks++;
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop. This will adjust the external clock phase and frequency
	 * in small amounts. The additional phase noise and frequency
	 * wander this causes should be minimal. We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock. If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual. We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated. Also, the maximum error is
	 * increased by the tolerance. If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will insure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *	bc -q
		 *	scale=24
		 *	obase=2
		 *	idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value. You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *	bc -q
		 *	scale=24
		 *	((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
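		/*
		 * Worked example (illustrative) for hz = 100: idealhz is
		 * 128, and bc gives 128/100 = 1.0100011110101110... in
		 * binary, which factors as (1 + 2^-2 + 2^-5) * (1 - 2^-10);
		 * those are exactly the shifts applied in the hz == 100
		 * case below.
		 */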
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (TAILQ_FIRST(&callwheel[hardclock_ticks & callwheelmask]) != NULL) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do
			 * it now.
			 *
			 * NOTE: If we're at ``base priority'', softclock()
			 * was not already running.
			 */
			(void)spllowersoftclock();
			softclock();
		} else
			setsoftclock();
	} else if (softclock_running == 0 &&
		   (softclock_ticks + 1) == hardclock_ticks)
		softclock_ticks++;
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	struct callout_queue *bucket;
	struct callout *c;
	void (*func) __P((void *));
	void *arg;
	int s, idx;
	int steps = 0;

	s = splhigh();
	softclock_running = 1;

#ifdef CALLWHEEL_STATS
	callwheel_softclocks++;
#endif

	while (softclock_ticks != hardclock_ticks) {
		softclock_ticks++;
		idx = (int)(softclock_ticks & callwheelmask);
		bucket = &callwheel[idx];
		c = TAILQ_FIRST(bucket);
#ifdef CALLWHEEL_STATS
		if (c == NULL)
			callwheel_softempty++;
#endif
		while (c != NULL) {
#ifdef CALLWHEEL_STATS
			callwheel_softchecks++;
#endif
			if (c->c_time != softclock_ticks) {
				c = TAILQ_NEXT(c, c_link);
				if (++steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					splx(s);
					(void) splhigh();
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				nextsoftcheck = TAILQ_NEXT(c, c_link);
				TAILQ_REMOVE(bucket, c, c_link);
#ifdef CALLWHEEL_STATS
				callwheel_sizes[idx]--;
				callwheel_fired++;
				callwheel_count--;
#endif
				func = c->c_func;
				arg = c->c_arg;
				c->c_func = NULL;
				c->c_flags &= ~CALLOUT_PENDING;
				splx(s);
				(*func)(arg);
				(void) splhigh();
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	softclock_running = 0;
	splx(s);
}

/*
 * callout_setsize:
 *
 *	Determine how many callwheel buckets are necessary and
 *	set the hash mask. Called from allocsys().
 */
void
callout_setsize()
{

	for (callwheelsize = 1; callwheelsize < ncallout; callwheelsize <<= 1)
		/* loop */ ;
	callwheelmask = callwheelsize - 1;
}

/*
 * callout_startup:
 *
 *	Initialize the callwheel buckets.
 */
void
callout_startup()
{
	int i;

	for (i = 0; i < callwheelsize; i++)
		TAILQ_INIT(&callwheel[i]);
}

/*
 * callout_init:
 *
 *	Initialize a callout structure so that it can be used
 *	by callout_reset() and callout_stop().
 */
void
callout_init(c)
	struct callout *c;
{

	memset(c, 0, sizeof(*c));
}
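
/*
 * Usage sketch (illustrative; mydev_timeout and sc are hypothetical):
 *
 *	struct callout c;
 *
 *	callout_init(&c);
 *	callout_reset(&c, hz / 2, mydev_timeout, sc);
 *
 * arranges for mydev_timeout(sc) to be called roughly half a second
 * from now at softclock level; callout_stop(&c) cancels the timeout
 * if it has not yet fired.
 */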

/*
 * callout_reset:
 *
 *	Establish or change a timeout.
 */
void
callout_reset(c, ticks, func, arg)
	struct callout *c;
	int ticks;
	void (*func) __P((void *));
	void *arg;
{
	struct callout_queue *bucket;
	int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/*
	 * If this callout's timer is already running, cancel it
	 * before we modify it.
	 */
	if (c->c_flags & CALLOUT_PENDING) {
		callout_stop(c);
#ifdef CALLWHEEL_STATS
		callwheel_changed++;
#endif
	}

	c->c_arg = arg;
	c->c_func = func;
	c->c_flags = CALLOUT_ACTIVE | CALLOUT_PENDING;
	c->c_time = hardclock_ticks + ticks;

	bucket = &callwheel[c->c_time & callwheelmask];

#ifdef CALLWHEEL_STATS
	if (TAILQ_FIRST(bucket) != NULL)
		callwheel_collisions++;
#endif

	TAILQ_INSERT_TAIL(bucket, c, c_link);

#ifdef CALLWHEEL_STATS
	callwheel_count++;
	callwheel_established++;
	if (++callwheel_sizes[c->c_time & callwheelmask] > callwheel_maxlength)
		callwheel_maxlength =
		    callwheel_sizes[c->c_time & callwheelmask];
#endif

	splx(s);
}

/*
 * callout_stop:
 *
 *	Disestablish a timeout.
 */
void
callout_stop(c)
	struct callout *c;
{
	int s;

	/* Lock out the clock. */
	s = splhigh();

	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		splx(s);
		return;
	}

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c)
		nextsoftcheck = TAILQ_NEXT(c, c_link);

	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_link);
#ifdef CALLWHEEL_STATS
	callwheel_count--;
	callwheel_disestablished++;
	callwheel_sizes[c->c_time & callwheelmask]--;
#endif

	c->c_func = NULL;

	splx(s);
}

#ifdef CALLWHEEL_STATS
/*
 * callout_showstats:
 *
 *	Display callout statistics. Call it from DDB.
 */
void
callout_showstats()
{
	u_int64_t curticks;
	int s;

	s = splclock();
	curticks = softclock_ticks;
	splx(s);

	printf("Callwheel statistics:\n");
	printf("\tCallouts currently queued: %llu\n", callwheel_count);
	printf("\tCallouts established: %llu\n", callwheel_established);
	printf("\tCallouts disestablished: %llu\n", callwheel_disestablished);
	if (callwheel_changed != 0)
		printf("\t\tOf those, %llu were changes\n", callwheel_changed);
	printf("\tCallouts that fired: %llu\n", callwheel_fired);
	printf("\tNumber of buckets: %d\n", callwheelsize);
	printf("\tNumber of hash collisions: %d\n", callwheel_collisions);
	printf("\tMaximum hash chain length: %d\n", callwheel_maxlength);
	printf("\tSoftclocks: %llu, Softchecks: %llu\n",
	    callwheel_softclocks, callwheel_softchecks);
	printf("\t\tEmpty buckets seen: %llu\n", callwheel_softempty);
}
#endif

/*
 * Compute number of hz until specified time. Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long. Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire. Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance. This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);

	if (usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec <= 0)) {
		/*
		 * Would expire now or in the past. Return 0 ticks.
		 * This is different from the legacy hzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
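
/*
 * Usage sketch (illustrative; c, myfunc and myarg are hypothetical):
 * to arm a callout at an absolute time tv, convert it to a relative
 * tick count first:
 *
 *	callout_reset(&c, hzto(&tv), myfunc, myarg);
 *
 * keeping in mind that hzto() returns 0 for a time already past, so
 * the caller may want to check for that case first.
 */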

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock. Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	struct clockframe *frame;
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	if (p != NULL) {
		++p->p_cpticks;
		/*
		 * If no separate schedclock is provided, call it here
		 * at ~12-25 Hz; ~16 Hz is best.
		 */
		if (schedhz == 0)
			if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(p);
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, with the following exceptions: the PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
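	/*
	 * Worked example (illustrative): with samples pps_tf[] =
	 * {5, 2, 8} us (newest first), the median 5 becomes pps_offset
	 * and the spread 8 - 2 = 6 us becomes the jitter sample v_usec.
	 */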
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */


/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	void *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}