1 /*	$NetBSD: kern_clock.c,v 1.53 2000/03/23 20:51:09 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*-
41 * Copyright (c) 1982, 1986, 1991, 1993
42 * The Regents of the University of California. All rights reserved.
43 * (c) UNIX System Laboratories, Inc.
44 * All or some portions of this file are derived from material licensed
45 * to the University of California by American Telephone and Telegraph
46 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 * the permission of UNIX System Laboratories, Inc.
48 *
49 * Redistribution and use in source and binary forms, with or without
50 * modification, are permitted provided that the following conditions
51 * are met:
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 * 3. All advertising materials mentioning features or use of this software
58 * must display the following acknowledgement:
59 * This product includes software developed by the University of
60 * California, Berkeley and its contributors.
61 * 4. Neither the name of the University nor the names of its contributors
62 * may be used to endorse or promote products derived from this software
63 * without specific prior written permission.
64 *
65 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75 * SUCH DAMAGE.
76 *
77 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
78 */
79
80 #include "opt_ntp.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/dkstat.h>
85 #include <sys/callout.h>
86 #include <sys/kernel.h>
87 #include <sys/proc.h>
88 #include <sys/resourcevar.h>
89 #include <sys/signalvar.h>
90 #include <vm/vm.h>
91 #include <sys/sysctl.h>
92 #include <sys/timex.h>
93 #include <sys/sched.h>
94
95 #include <machine/cpu.h>
96
97 #ifdef GPROF
98 #include <sys/gmon.h>
99 #endif
100
101 /*
102 * Clock handling routines.
103 *
104 * This code is written to operate with two timers that run independently of
105 * each other. The main clock, running hz times per second, is used to keep
106 * track of real time. The second timer handles kernel and user profiling,
107 * and does resource use estimation. If the second timer is programmable,
108 * it is randomized to avoid aliasing between the two clocks. For example,
109 * the randomization prevents an adversary from always giving up the cpu
110 * just before its quantum expires. Otherwise, it would never accumulate
111 * cpu ticks. The mean frequency of the second timer is stathz.
112 *
113 * If no second timer exists, stathz will be zero; in this case we drive
114 * profiling and statistics off the main clock. This WILL NOT be accurate;
115 * do not do it unless absolutely necessary.
116 *
117 * The statistics clock may (or may not) be run at a higher rate while
118 * profiling. This profile clock runs at profhz. We require that profhz
119 * be an integral multiple of stathz.
120 *
121 * If the statistics clock is running fast, it must be divided by the ratio
122 * profhz/stathz for statistics. (For profiling, every tick counts.)
123 */
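
/*
 * Illustrative example (added; the numbers are assumptions, not from the
 * original comment): if stathz is 128 and the profile clock runs at
 * profhz = 1024, psratio is 8, so while profiling statclock() fires
 * 1024 times per second but only every 8th tick (pscnt reaching 0) is
 * charged to the cp_time and per-process tick statistics.
 */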
124
125 #ifdef NTP /* NTP phase-locked loop in kernel */
126 /*
127 * Phase/frequency-lock loop (PLL/FLL) definitions
128 *
129 * The following variables are read and set by the ntp_adjtime() system
130 * call.
131 *
132 * time_state shows the state of the system clock, with values defined
133 * in the timex.h header file.
134 *
135 * time_status shows the status of the system clock, with bits defined
136 * in the timex.h header file.
137 *
138 * time_offset is used by the PLL/FLL to adjust the system time in small
139 * increments.
140 *
141 * time_constant determines the bandwidth or "stiffness" of the PLL.
142 *
143 * time_tolerance determines maximum frequency error or tolerance of the
144 * CPU clock oscillator and is a property of the architecture; however,
145 * in principle it could change as a result of the presence of external
146 * discipline signals, for instance.
147 *
148 * time_precision is usually equal to the kernel tick variable; however,
149 * in cases where a precision clock counter or external clock is
150 * available, the resolution can be much less than this and depend on
151 * whether the external clock is working or not.
152 *
153 * time_maxerror is initialized by a ntp_adjtime() call and increased by
154 * the kernel once each second to reflect the maximum error bound
155 * growth.
156 *
157 * time_esterror is set and read by the ntp_adjtime() call, but
158 * otherwise not used by the kernel.
159 */
160 int time_state = TIME_OK; /* clock state */
161 int time_status = STA_UNSYNC; /* clock status bits */
162 long time_offset = 0; /* time offset (us) */
163 long time_constant = 0; /* pll time constant */
164 long time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */
165 long time_precision = 1; /* clock precision (us) */
166 long time_maxerror = MAXPHASE; /* maximum error (us) */
167 long time_esterror = MAXPHASE; /* estimated error (us) */
168
169 /*
170 * The following variables establish the state of the PLL/FLL and the
171 * residual time and frequency offset of the local clock. The scale
172 * factors are defined in the timex.h header file.
173 *
174 * time_phase and time_freq are the phase increment and the frequency
175 * increment, respectively, of the kernel time variable.
176 *
177 * time_freq is set via ntp_adjtime() from a value stored in a file when
178 * the synchronization daemon is first started. Its value is retrieved
179 * via ntp_adjtime() and written to the file about once per hour by the
180 * daemon.
181 *
182 * time_adj is the adjustment added to the value of tick at each timer
183 * interrupt and is recomputed from time_phase and time_freq at each
184 * seconds rollover.
185 *
186 * time_reftime is the second's portion of the system time at the last
187 * call to ntp_adjtime(). It is used to adjust the time_freq variable
188 * and to increase the time_maxerror as the time since last update
189 * increases.
190 */
191 long time_phase = 0; /* phase offset (scaled us) */
192 long time_freq = 0; /* frequency offset (scaled ppm) */
193 long time_adj = 0; /* tick adjust (scaled 1 / hz) */
194 long time_reftime = 0; /* time at last adjustment (s) */
195
196 #ifdef PPS_SYNC
197 /*
198 * The following variables are used only if the kernel PPS discipline
199 * code is configured (PPS_SYNC). The scale factors are defined in the
200 * timex.h header file.
201 *
202 * pps_time contains the time at each calibration interval, as read by
203 * microtime(). pps_count counts the seconds of the calibration
204 * interval, the duration of which is nominally pps_shift in powers of
205 * two.
206 *
207 * pps_offset is the time offset produced by the time median filter
208 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
209 * this filter.
210 *
211 * pps_freq is the frequency offset produced by the frequency median
212 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
213 * by this filter.
214 *
215 * pps_usec is latched from a high resolution counter or external clock
216 * at pps_time. Here we want the hardware counter contents only, not the
217 * contents plus the time_tv.usec as usual.
218 *
219 * pps_valid counts the number of seconds since the last PPS update. It
220 * is used as a watchdog timer to disable the PPS discipline should the
221 * PPS signal be lost.
222 *
223 * pps_glitch counts the number of seconds since the beginning of an
224 * offset burst more than tick/2 from current nominal offset. It is used
225 * mainly to suppress error bursts due to priority conflicts between the
226 * PPS interrupt and timer interrupt.
227 *
228 * pps_intcnt counts the calibration intervals for use in the interval-
229 * adaptation algorithm. It's just too complicated for words.
230 */
231 struct timeval pps_time; /* kernel time at last interval */
232 long pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */
233 long pps_offset = 0; /* pps time offset (us) */
234 long pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
235 long pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */
236 long pps_freq = 0; /* frequency offset (scaled ppm) */
237 long pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
238 long pps_usec = 0; /* microsec counter at last interval */
239 long pps_valid = PPS_VALID; /* pps signal watchdog counter */
240 int pps_glitch = 0; /* pps signal glitch counter */
241 int pps_count = 0; /* calibration interval counter (s) */
242 int pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
243 int pps_intcnt = 0; /* intervals at current duration */
244
245 /*
246 * PPS signal quality monitors
247 *
248 * pps_jitcnt counts the seconds that have been discarded because the
249 * jitter measured by the time median filter exceeds the limit MAXTIME
250 * (100 us).
251 *
252 * pps_calcnt counts the frequency calibration intervals, which are
253 * variable from 4 s to 256 s.
254 *
255 * pps_errcnt counts the calibration intervals which have been discarded
256 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
257 * calibration interval jitter exceeds two ticks.
258 *
259 * pps_stbcnt counts the calibration intervals that have been discarded
260 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
261 */
262 long pps_jitcnt = 0; /* jitter limit exceeded */
263 long pps_calcnt = 0; /* calibration intervals */
264 long pps_errcnt = 0; /* calibration errors */
265 long pps_stbcnt = 0; /* stability limit exceeded */
266 #endif /* PPS_SYNC */
267
268 #ifdef EXT_CLOCK
269 /*
270 * External clock definitions
271 *
272 * The following definitions and declarations are used only if an
273 * external clock is configured on the system.
274 */
275 #define CLOCK_INTERVAL 30 /* CPU clock update interval (s) */
276
277 /*
278 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
279 * interrupt and decremented once each second.
280 */
281 int clock_count = 0; /* CPU clock counter */
282
283 #ifdef HIGHBALL
284 /*
285 * The clock_offset and clock_cpu variables are used by the HIGHBALL
286 * interface. The clock_offset variable defines the offset between
287 * system time and the HIGHBALL counters. The clock_cpu variable contains
288 * the offset between the system clock and the HIGHBALL clock for use in
289 * disciplining the kernel time variable.
290 */
291 extern struct timeval clock_offset; /* Highball clock offset */
292 long clock_cpu = 0; /* CPU clock adjust */
293 #endif /* HIGHBALL */
294 #endif /* EXT_CLOCK */
295 #endif /* NTP */
296
297
298 /*
299 * Bump a timeval by a small number of usec's.
300 */
301 #define BUMPTIME(t, usec) { \
302 register volatile struct timeval *tp = (t); \
303 register long us; \
304 \
305 tp->tv_usec = us = tp->tv_usec + (usec); \
306 if (us >= 1000000) { \
307 tp->tv_usec = us - 1000000; \
308 tp->tv_sec++; \
309 } \
310 }
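
/*
 * Worked example of BUMPTIME (illustration only): BUMPTIME(&time, 10000)
 * with time = { 5, 995000 } first sets tv_usec to 1005000; since that is
 * >= 1000000, it becomes 5000 and tv_sec is bumped to 6.
 */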
311
312 int stathz;
313 int profhz;
314 int profprocs;
315 u_int64_t hardclock_ticks, softclock_ticks;
316 int softclock_running; /* 1 => softclock() is running */
317 static int psdiv, pscnt; /* prof => stat divider */
318 int psratio; /* ratio: prof / stat */
319 int tickfix, tickfixinterval; /* used if tick not really integral */
320 #ifndef NTP
321 static int tickfixcnt; /* accumulated fractional error */
322 #else
323 int fixtick; /* used by NTP for same */
324 int shifthz;
325 #endif
326
327 /*
328 * We might want ldd to load both words from time at once.
329 * To succeed we need to be quadword aligned.
330 * The sparc already does that, and that it has worked so far is a fluke.
331 */
332 volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t))));
333 volatile struct timeval mono_time;
334
335 /*
336 * The callout mechanism is based on the work of Adam M. Costello and
337 * George Varghese, published in a technical report entitled "Redesigning
338 * the BSD Callout and Timer Facilities", and Justin Gibbs's subsequent
339 * integration into FreeBSD, modified for NetBSD by Jason R. Thorpe.
340 *
341 * The original work on the data structures used in this implementation
342 * was published by G. Varghese and A. Lauck in the paper "Hashed and
343 * Hierarchical Timing Wheels: Data Structures for the Efficient
344 * Implementation of a Timer Facility" in the Proceedings of the 11th
345 * ACM Annual Symposium on Operating System Principles, Austin, Texas,
346 * November 1987.
347 */
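
/*
 * Sketch of the wheel indexing (added for illustration): a callout due
 * at absolute tick c_time is hashed into bucket c_time & callwheelmask,
 * so with e.g. callwheelsize = 256 all callouts whose expiration times
 * differ by a multiple of 256 ticks share a bucket.  That is why
 * softclock() below compares c_time against softclock_ticks before
 * firing an entry.
 */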
348 struct callout_queue *callwheel;
349 int callwheelsize, callwheelbits, callwheelmask;
350
351 static struct callout *nextsoftcheck; /* next callout to be checked */
352
353 #ifdef CALLWHEEL_STATS
354 int callwheel_collisions; /* number of hash collisions */
355 int callwheel_maxlength; /* length of the longest hash chain */
356 int *callwheel_sizes; /* per-bucket length count */
357 u_int64_t callwheel_count; /* # callouts currently */
358 u_int64_t callwheel_established; /* # callouts established */
359 u_int64_t callwheel_fired; /* # callouts that fired */
360 u_int64_t callwheel_disestablished; /* # callouts disestablished */
361 u_int64_t callwheel_changed; /* # callouts changed */
362 u_int64_t callwheel_softclocks; /* # times softclock() called */
363 u_int64_t callwheel_softchecks; /* # checks per softclock() */
364 u_int64_t callwheel_softempty; /* # empty buckets seen */
365 #endif /* CALLWHEEL_STATS */
366
367 /*
368 * This value indicates the number of consecutive callouts that
369 * will be checked before we allow interrupts to have a chance
370 * again.
371 */
372 #ifndef MAX_SOFTCLOCK_STEPS
373 #define MAX_SOFTCLOCK_STEPS 100
374 #endif
375
376 /*
377 * Initialize clock frequencies and start both clocks running.
378 */
379 void
380 initclocks()
381 {
382 register int i;
383
384 /*
385 * Set divisors to 1 (normal case) and let the machine-specific
386 * code do its bit.
387 */
388 psdiv = pscnt = 1;
389 cpu_initclocks();
390
391 /*
392 * Compute profhz/stathz, and fix profhz if needed.
393 */
394 i = stathz ? stathz : hz;
395 if (profhz == 0)
396 profhz = i;
397 psratio = profhz / i;
398
399 #ifdef NTP
400 switch (hz) {
401 case 60:
402 case 64:
403 shifthz = SHIFT_SCALE - 6;
404 break;
405 case 96:
406 case 100:
407 case 128:
408 shifthz = SHIFT_SCALE - 7;
409 break;
410 case 256:
411 shifthz = SHIFT_SCALE - 8;
412 break;
413 case 512:
414 shifthz = SHIFT_SCALE - 9;
415 break;
416 case 1000:
417 case 1024:
418 shifthz = SHIFT_SCALE - 10;
419 break;
420 default:
421 panic("weird hz");
422 }
423 if (fixtick == 0) {
424 /*
425 * Give MD code a chance to set this to a better
426 * value; but, if it doesn't, we should.
427 */
428 fixtick = (1000000 - (hz*tick));
429 }
430 #endif
431 }
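
/*
 * Worked example for the NTP setup above (hz values assumed for
 * illustration): with hz = 1024, tick is 1000000 / 1024 = 976, so
 * hz * tick = 999424 and fixtick defaults to 576; hardclock() later
 * folds those 576 "missing" microseconds per second back in through
 * time_adj.  With hz = 100 (tick = 10000) the product is exactly
 * 1000000 and fixtick stays 0.
 */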
432
433 /*
434 * The real-time timer, interrupting hz times per second.
435 */
436 void
437 hardclock(frame)
438 register struct clockframe *frame;
439 {
440 register struct proc *p;
441 register int delta;
442 extern int tickdelta;
443 extern long timedelta;
444 #ifdef NTP
445 register int time_update;
446 register int ltemp;
447 #endif
448
449 p = curproc;
450 if (p) {
451 register struct pstats *pstats;
452
453 /*
454 * Run current process's virtual and profile time, as needed.
455 */
456 pstats = p->p_stats;
457 if (CLKF_USERMODE(frame) &&
458 timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
459 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
460 psignal(p, SIGVTALRM);
461 if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
462 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
463 psignal(p, SIGPROF);
464 }
465
466 /*
467 * If no separate statistics clock is available, run it from here.
468 */
469 if (stathz == 0)
470 statclock(frame);
471
472 /*
473 * Increment the time-of-day. The increment is normally just
474 * ``tick''. If the machine is one which has a clock frequency
475 * such that ``hz'' would not divide the second evenly into
476 * microseconds, a periodic adjustment must be applied. Finally,
477 * if we are still adjusting the time (see adjtime()),
478 * ``tickdelta'' may also be added in.
479 */
480 hardclock_ticks++;
481 delta = tick;
482
483 #ifndef NTP
484 if (tickfix) {
485 tickfixcnt += tickfix;
486 if (tickfixcnt >= tickfixinterval) {
487 delta++;
488 tickfixcnt -= tickfixinterval;
489 }
490 }
491 #endif /* !NTP */
492 /* Imprecise 4bsd adjtime() handling */
493 if (timedelta != 0) {
494 delta += tickdelta;
495 timedelta -= tickdelta;
496 }
497
498 #ifdef notyet
499 microset();
500 #endif
501
502 #ifndef NTP
503 BUMPTIME(&time, delta); /* XXX Now done using NTP code below */
504 #endif
505 BUMPTIME(&mono_time, delta);
506
507 #ifdef NTP
508 time_update = delta;
509
510 /*
511 * Compute the phase adjustment. If the low-order bits
512 * (time_phase) of the update overflow, bump the high-order bits
513 * (time_update).
514 */
515 time_phase += time_adj;
516 if (time_phase <= -FINEUSEC) {
517 ltemp = -time_phase >> SHIFT_SCALE;
518 time_phase += ltemp << SHIFT_SCALE;
519 time_update -= ltemp;
520 } else if (time_phase >= FINEUSEC) {
521 ltemp = time_phase >> SHIFT_SCALE;
522 time_phase -= ltemp << SHIFT_SCALE;
523 time_update += ltemp;
524 }
525
526 #ifdef HIGHBALL
527 /*
528 * If the HIGHBALL board is installed, we need to adjust the
529 * external clock offset in order to close the hardware feedback
530 * loop. This will adjust the external clock phase and frequency
531 * in small amounts. The additional phase noise and frequency
532 * wander this causes should be minimal. We also need to
533 * discipline the kernel time variable, since the PLL is used to
534 * discipline the external clock. If the Highball board is not
535 * present, we discipline kernel time with the PLL as usual. We
536 * assume that the external clock phase adjustment (time_update)
537 * and kernel phase adjustment (clock_cpu) are less than the
538 * value of tick.
539 */
540 clock_offset.tv_usec += time_update;
541 if (clock_offset.tv_usec >= 1000000) {
542 clock_offset.tv_sec++;
543 clock_offset.tv_usec -= 1000000;
544 }
545 if (clock_offset.tv_usec < 0) {
546 clock_offset.tv_sec--;
547 clock_offset.tv_usec += 1000000;
548 }
549 time.tv_usec += clock_cpu;
550 clock_cpu = 0;
551 #else
552 time.tv_usec += time_update;
553 #endif /* HIGHBALL */
554
555 /*
556 * On rollover of the second the phase adjustment to be used for
557 * the next second is calculated. Also, the maximum error is
558 * increased by the tolerance. If the PPS frequency discipline
559 * code is present, the phase is increased to compensate for the
560 * CPU clock oscillator frequency error.
561 *
562 * On a 32-bit machine and given parameters in the timex.h
563 * header file, the maximum phase adjustment is +-512 ms and
564 * maximum frequency offset is a tad less than +-512 ppm. On a
565 * 64-bit machine, you shouldn't need to ask.
566 */
567 if (time.tv_usec >= 1000000) {
568 time.tv_usec -= 1000000;
569 time.tv_sec++;
570 time_maxerror += time_tolerance >> SHIFT_USEC;
571
572 /*
573 * Leap second processing. If in leap-insert state at
574 * the end of the day, the system clock is set back one
575 * second; if in leap-delete state, the system clock is
576 * set ahead one second. The microtime() routine or
577 * external clock driver will insure that reported time
578 * is always monotonic. The ugly divides should be
579 * replaced.
580 */
581 switch (time_state) {
582 case TIME_OK:
583 if (time_status & STA_INS)
584 time_state = TIME_INS;
585 else if (time_status & STA_DEL)
586 time_state = TIME_DEL;
587 break;
588
589 case TIME_INS:
590 if (time.tv_sec % 86400 == 0) {
591 time.tv_sec--;
592 time_state = TIME_OOP;
593 }
594 break;
595
596 case TIME_DEL:
597 if ((time.tv_sec + 1) % 86400 == 0) {
598 time.tv_sec++;
599 time_state = TIME_WAIT;
600 }
601 break;
602
603 case TIME_OOP:
604 time_state = TIME_WAIT;
605 break;
606
607 case TIME_WAIT:
608 if (!(time_status & (STA_INS | STA_DEL)))
609 time_state = TIME_OK;
610 break;
611 }
612
613 /*
614 * Compute the phase adjustment for the next second. In
615 * PLL mode, the offset is reduced by a fixed factor
616 * times the time constant. In FLL mode the offset is
617 * used directly. In either mode, the maximum phase
618 * adjustment for each second is clamped so as to spread
619 * the adjustment over not more than the number of
620 * seconds between updates.
621 */
622 if (time_offset < 0) {
623 ltemp = -time_offset;
624 if (!(time_status & STA_FLL))
625 ltemp >>= SHIFT_KG + time_constant;
626 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
627 ltemp = (MAXPHASE / MINSEC) <<
628 SHIFT_UPDATE;
629 time_offset += ltemp;
630 time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
631 } else if (time_offset > 0) {
632 ltemp = time_offset;
633 if (!(time_status & STA_FLL))
634 ltemp >>= SHIFT_KG + time_constant;
635 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
636 ltemp = (MAXPHASE / MINSEC) <<
637 SHIFT_UPDATE;
638 time_offset -= ltemp;
639 time_adj = ltemp << (shifthz - SHIFT_UPDATE);
640 } else
641 time_adj = 0;
642
643 /*
644 * Compute the frequency estimate and additional phase
645 * adjustment due to frequency error for the next
646 * second. When the PPS signal is engaged, gnaw on the
647 * watchdog counter and update the frequency computed by
648 * the pll and the PPS signal.
649 */
650 #ifdef PPS_SYNC
651 pps_valid++;
652 if (pps_valid == PPS_VALID) {
653 pps_jitter = MAXTIME;
654 pps_stabil = MAXFREQ;
655 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
656 STA_PPSWANDER | STA_PPSERROR);
657 }
658 ltemp = time_freq + pps_freq;
659 #else
660 ltemp = time_freq;
661 #endif /* PPS_SYNC */
662
663 if (ltemp < 0)
664 time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
665 else
666 time_adj += ltemp >> (SHIFT_USEC - shifthz);
667 time_adj += (long)fixtick << shifthz;
668
669 /*
670 * When the CPU clock oscillator frequency is not a
671 * power of 2 in Hz, shifthz is only an approximate
672 * scale factor.
673 *
674 * To determine the adjustment, you can do the following:
675 * bc -q
676 * scale=24
677 * obase=2
678 * idealhz/realhz
679 * where `idealhz' is the next higher power of 2, and `realhz'
680 * is the actual value.
681 *
682 * Likewise, the error can be calculated with (e.g. for 100Hz):
683 * bc -q
684 * scale=24
685 * ((1+2^-2+2^-5)*realhz-idealhz)/idealhz
686 * (and then multiply by 100 to get %).
687 */
688 switch (hz) {
689 case 96:
690 /* A factor of 1.0101010101 gives about .025% error. */
691 if (time_adj < 0) {
692 time_adj -= (-time_adj >> 2);
693 time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
694 } else {
695 time_adj += (time_adj >> 2);
696 time_adj += (time_adj >> 4) + (time_adj >> 8);
697 }
698 break;
699
700 case 100:
701 /* A factor of 1.01001 gives about .1% error. */
702 if (time_adj < 0)
703 time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
704 else
705 time_adj += (time_adj >> 2) + (time_adj >> 5);
706 break;
707
708 case 60:
709 /* A factor of 1.00010001 gives about .025% error. */
710 if (time_adj < 0)
711 time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
712 else
713 time_adj += (time_adj >> 4) + (time_adj >> 8);
714 break;
715
716 case 1000:
717 /* A factor of 1.0000011 gives about .055% error. */
718 if (time_adj < 0)
719 time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
720 else
721 time_adj += (time_adj >> 6) + (time_adj >> 7);
722 break;
723 }
724
725 #ifdef EXT_CLOCK
726 /*
727 * If an external clock is present, it is necessary to
728 * discipline the kernel time variable anyway, since not
729 * all system components use the microtime() interface.
730 * Here, the time offset between the external clock and
731 * kernel time variable is computed every so often.
732 */
733 clock_count++;
734 if (clock_count > CLOCK_INTERVAL) {
735 clock_count = 0;
736 microtime(&clock_ext);
737 delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
738 delta.tv_usec = clock_ext.tv_usec -
739 time.tv_usec;
740 if (delta.tv_usec < 0)
741 delta.tv_sec--;
742 if (delta.tv_usec >= 500000) {
743 delta.tv_usec -= 1000000;
744 delta.tv_sec++;
745 }
746 if (delta.tv_usec < -500000) {
747 delta.tv_usec += 1000000;
748 delta.tv_sec--;
749 }
750 if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
751 delta.tv_usec > MAXPHASE) ||
752 delta.tv_sec < -1 || (delta.tv_sec == -1 &&
753 delta.tv_usec < -MAXPHASE)) {
754 time = clock_ext;
755 delta.tv_sec = 0;
756 delta.tv_usec = 0;
757 }
758 #ifdef HIGHBALL
759 clock_cpu = delta.tv_usec;
760 #else /* HIGHBALL */
761 hardupdate(delta.tv_usec);
762 #endif /* HIGHBALL */
763 }
764 #endif /* EXT_CLOCK */
765 }
766
767 #endif /* NTP */
768
769 /*
770 * Process callouts at a very low cpu priority, so we don't keep the
771 * relatively high clock interrupt priority any longer than necessary.
772 */
773 if (TAILQ_FIRST(&callwheel[hardclock_ticks & callwheelmask]) != NULL) {
774 if (CLKF_BASEPRI(frame)) {
775 /*
776 * Save the overhead of a software interrupt;
777 * it will happen as soon as we return, so do
778 * it now.
779 *
780 * NOTE: If we're at ``base priority'', softclock()
781 * was not already running.
782 */
783 (void)spllowersoftclock();
784 softclock();
785 } else
786 setsoftclock();
787 } else if (softclock_running == 0 &&
788 (softclock_ticks + 1) == hardclock_ticks)
789 softclock_ticks++;
790 }
791
792 /*
793 * Software (low priority) clock interrupt.
794 * Run periodic events from timeout queue.
795 */
796 /*ARGSUSED*/
797 void
798 softclock()
799 {
800 struct callout_queue *bucket;
801 struct callout *c;
802 void (*func) __P((void *));
803 void *arg;
804 int s, idx;
805 int steps = 0;
806
807 s = splhigh();
808 softclock_running = 1;
809
810 #ifdef CALLWHEEL_STATS
811 callwheel_softclocks++;
812 #endif
813
814 while (softclock_ticks != hardclock_ticks) {
815 softclock_ticks++;
816 idx = (int)(softclock_ticks & callwheelmask);
817 bucket = &callwheel[idx];
818 c = TAILQ_FIRST(bucket);
819 #ifdef CALLWHEEL_STATS
820 if (c == NULL)
821 callwheel_softempty++;
822 #endif
823 while (c != NULL) {
824 #ifdef CALLWHEEL_STATS
825 callwheel_softchecks++;
826 #endif
827 if (c->c_time != softclock_ticks) {
828 c = TAILQ_NEXT(c, c_link);
829 if (++steps >= MAX_SOFTCLOCK_STEPS) {
830 nextsoftcheck = c;
831 /* Give interrupts a chance. */
832 splx(s);
833 (void) splhigh();
834 c = nextsoftcheck;
835 steps = 0;
836 }
837 } else {
838 nextsoftcheck = TAILQ_NEXT(c, c_link);
839 TAILQ_REMOVE(bucket, c, c_link);
840 #ifdef CALLWHEEL_STATS
841 callwheel_sizes[idx]--;
842 callwheel_fired++;
843 callwheel_count--;
844 #endif
845 func = c->c_func;
846 arg = c->c_arg;
847 c->c_func = NULL;
848 c->c_flags &= ~CALLOUT_PENDING;
849 splx(s);
850 (*func)(arg);
851 (void) splhigh();
852 steps = 0;
853 c = nextsoftcheck;
854 }
855 }
856 }
857 nextsoftcheck = NULL;
858 softclock_running = 0;
859 splx(s);
860 }
861
862 /*
863 * callout_startup:
864 *
865 * Initialize the callout subsystem. Called from main().
866 *
867 * This must be called before cpu_startup(), which calls
868 * allocsys(), which is what actually allocates the callwheel.
869 */
870 void
871 callout_startup()
872 {
873
874 for (callwheelsize = 1; callwheelsize < ncallout; callwheelsize <<= 1)
875 /* loop */ ;
876 callwheelmask = callwheelsize - 1;
877 }
878
879 /*
880 * callout_startup1:
881 *
882 * Initialize the callwheel buckets.
883 */
884 void
885 callout_startup1()
886 {
887 int i;
888
889 for (i = 0; i < callwheelsize; i++)
890 TAILQ_INIT(&callwheel[i]);
891 }
892
893 /*
894 * callout_init:
895 *
896 * Initialize a callout structure so that it can be used
897 * by callout_reset() and callout_stop().
898 */
899 void
900 callout_init(c)
901 struct callout *c;
902 {
903
904 memset(c, 0, sizeof(*c));
905 }
906
907 /*
908 * callout_reset:
909 *
910 * Establish or change a timeout.
911 */
912 void
913 callout_reset(c, ticks, func, arg)
914 struct callout *c;
915 int ticks;
916 void (*func) __P((void *));
917 void *arg;
918 {
919 struct callout_queue *bucket;
920 int s;
921
922 if (ticks <= 0)
923 ticks = 1;
924
925 /* Lock out the clock. */
926 s = splhigh();
927
928 /*
929 * If this callout's timer is already running, cancel it
930 * before we modify it.
931 */
932 if (c->c_flags & CALLOUT_PENDING) {
933 callout_stop(c);
934 #ifdef CALLWHEEL_STATS
935 callwheel_changed++;
936 #endif
937 }
938
939 c->c_arg = arg;
940 c->c_func = func;
941 c->c_flags = CALLOUT_ACTIVE | CALLOUT_PENDING;
942 c->c_time = hardclock_ticks + ticks;
943
944 bucket = &callwheel[c->c_time & callwheelmask];
945
946 #ifdef CALLWHEEL_STATS
947 if (TAILQ_FIRST(bucket) != NULL)
948 callwheel_collisions++;
949 #endif
950
951 TAILQ_INSERT_TAIL(bucket, c, c_link);
952
953 #ifdef CALLWHEEL_STATS
954 callwheel_count++;
955 callwheel_established++;
956 if (++callwheel_sizes[c->c_time & callwheelmask] > callwheel_maxlength)
957 callwheel_maxlength =
958 callwheel_sizes[c->c_time & callwheelmask];
959 #endif
960
961 splx(s);
962 }
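
/*
 * Minimal usage sketch (illustrative only; "foo_softc", "sc_ch" and
 * "foo_tick" are hypothetical names, not part of this file):
 *
 *	struct foo_softc *sc;
 *
 *	callout_init(&sc->sc_ch);
 *	callout_reset(&sc->sc_ch, hz, foo_tick, sc);
 *
 * foo_tick(sc) then runs from softclock() roughly one second later;
 * callout_stop(&sc->sc_ch) cancels it if it has not yet fired.
 */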
963
964 /*
965 * callout_stop:
966 *
967 * Disestablish a timeout.
968 */
969 void
970 callout_stop(c)
971 struct callout *c;
972 {
973 int s;
974
975 /* Lock out the clock. */
976 s = splhigh();
977
978 /*
979 * Don't attempt to delete a callout that's not on the queue.
980 */
981 if ((c->c_flags & CALLOUT_PENDING) == 0) {
982 c->c_flags &= ~CALLOUT_ACTIVE;
983 splx(s);
984 return;
985 }
986
987 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
988
989 if (nextsoftcheck == c)
990 nextsoftcheck = TAILQ_NEXT(c, c_link);
991
992 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_link);
993 #ifdef CALLWHEEL_STATS
994 callwheel_count--;
995 callwheel_disestablished++;
996 callwheel_sizes[c->c_time & callwheelmask]--;
997 #endif
998
999 c->c_func = NULL;
1000
1001 splx(s);
1002 }
1003
1004 #ifdef CALLWHEEL_STATS
1005 /*
1006 * callout_showstats:
1007 *
1008 * Display callout statistics. Call it from DDB.
1009 */
1010 void
1011 callout_showstats()
1012 {
1013 u_int64_t curticks;
1014 int s;
1015
1016 s = splclock();
1017 curticks = softclock_ticks;
1018 splx(s);
1019
1020 printf("Callwheel statistics:\n");
1021 printf("\tCallouts currently queued: %llu\n", callwheel_count);
1022 printf("\tCallouts established: %llu\n", callwheel_established);
1023 printf("\tCallouts disestablished: %llu\n", callwheel_disestablished);
1024 if (callwheel_changed != 0)
1025 printf("\t\tOf those, %llu were changes\n", callwheel_changed);
1026 printf("\tCallouts that fired: %llu\n", callwheel_fired);
1027 printf("\tNumber of buckets: %d\n", callwheelsize);
1028 printf("\tNumber of hash collisions: %d\n", callwheel_collisions);
1029 printf("\tMaximum hash chain length: %d\n", callwheel_maxlength);
1030 printf("\tSoftclocks: %llu, Softchecks: %llu\n",
1031 callwheel_softclocks, callwheel_softchecks);
1032 printf("\t\tEmpty buckets seen: %llu\n", callwheel_softempty);
1033 }
1034 #endif
1035
1036 /*
1037 * Compute the number of clock ticks until the specified time. Used to
1038 * compute the second argument to callout_reset() from an absolute time.
1039 */
1040 int
1041 hzto(tv)
1042 struct timeval *tv;
1043 {
1044 register long ticks, sec;
1045 int s;
1046
1047 /*
1048 * If number of microseconds will fit in 32 bit arithmetic,
1049 * then compute number of microseconds to time and scale to
1050 * ticks. Otherwise just compute number of hz in time, rounding
1051 * times greater than representable to maximum value. (We must
1052 * compute in microseconds, because hz can be greater than 1000,
1053 * and thus tick can be less than one millisecond).
1054 *
1055 * Delta times less than 14 hours can be computed ``exactly''.
1056 * (Note that if hz would yield a non-integral number of us per
1057 * tick, i.e. tickfix is nonzero, timeouts can be a tick longer
1058 * than they should be.) Maximum value for any timeout in 10ms
1059 * ticks is 250 days.
1060 */
1061 s = splclock();
1062 sec = tv->tv_sec - time.tv_sec;
1063 if (sec <= 0x7fffffff / 1000000 - 1)
1064 ticks = ((tv->tv_sec - time.tv_sec) * 1000000 +
1065 (tv->tv_usec - time.tv_usec)) / tick;
1066 else if (sec <= 0x7fffffff / hz)
1067 ticks = sec * hz;
1068 else
1069 ticks = 0x7fffffff;
1070 splx(s);
1071 return (ticks);
1072 }
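
/*
 * Example of the conversion (values assumed for illustration): with
 * hz = 100 (tick = 10000 us), a tv 2.5 seconds in the future yields
 * 2500000 / 10000 = 250 ticks, suitable as the second argument to
 * callout_reset().
 */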
1073
1074 /*
1075 * Start profiling on a process.
1076 *
1077 * Kernel profiling passes proc0 which never exits and hence
1078 * keeps the profile clock running constantly.
1079 */
1080 void
1081 startprofclock(p)
1082 register struct proc *p;
1083 {
1084 int s;
1085
1086 if ((p->p_flag & P_PROFIL) == 0) {
1087 p->p_flag |= P_PROFIL;
1088 if (++profprocs == 1 && stathz != 0) {
1089 s = splstatclock();
1090 psdiv = pscnt = psratio;
1091 setstatclockrate(profhz);
1092 splx(s);
1093 }
1094 }
1095 }
1096
1097 /*
1098 * Stop profiling on a process.
1099 */
1100 void
1101 stopprofclock(p)
1102 register struct proc *p;
1103 {
1104 int s;
1105
1106 if (p->p_flag & P_PROFIL) {
1107 p->p_flag &= ~P_PROFIL;
1108 if (--profprocs == 0 && stathz != 0) {
1109 s = splstatclock();
1110 psdiv = pscnt = 1;
1111 setstatclockrate(stathz);
1112 splx(s);
1113 }
1114 }
1115 }
1116
1117 /*
1118 * Statistics clock. Grab profile sample, and if divider reaches 0,
1119 * do process and kernel statistics.
1120 */
1121 void
1122 statclock(frame)
1123 register struct clockframe *frame;
1124 {
1125 #ifdef GPROF
1126 register struct gmonparam *g;
1127 register int i;
1128 #endif
1129 static int schedclk;
1130 register struct proc *p;
1131
1132 if (CLKF_USERMODE(frame)) {
1133 p = curproc;
1134 if (p->p_flag & P_PROFIL)
1135 addupc_intr(p, CLKF_PC(frame), 1);
1136 if (--pscnt > 0)
1137 return;
1138 /*
1139 * Came from user mode; CPU was in user state.
1140 * If this process is being profiled record the tick.
1141 */
1142 p->p_uticks++;
1143 if (p->p_nice > NZERO)
1144 cp_time[CP_NICE]++;
1145 else
1146 cp_time[CP_USER]++;
1147 } else {
1148 #ifdef GPROF
1149 /*
1150 * Kernel statistics are just like addupc_intr, only easier.
1151 */
1152 g = &_gmonparam;
1153 if (g->state == GMON_PROF_ON) {
1154 i = CLKF_PC(frame) - g->lowpc;
1155 if (i < g->textsize) {
1156 i /= HISTFRACTION * sizeof(*g->kcount);
1157 g->kcount[i]++;
1158 }
1159 }
1160 #endif
1161 if (--pscnt > 0)
1162 return;
1163 /*
1164 * Came from kernel mode, so we were:
1165 * - handling an interrupt,
1166 * - doing syscall or trap work on behalf of the current
1167 * user process, or
1168 * - spinning in the idle loop.
1169 * Whichever it is, charge the time as appropriate.
1170 * Note that we charge interrupts to the current process,
1171 * regardless of whether they are ``for'' that process,
1172 * so that we know how much of its real time was spent
1173 * in ``non-process'' (i.e., interrupt) work.
1174 */
1175 p = curproc;
1176 if (CLKF_INTR(frame)) {
1177 if (p != NULL)
1178 p->p_iticks++;
1179 cp_time[CP_INTR]++;
1180 } else if (p != NULL) {
1181 p->p_sticks++;
1182 cp_time[CP_SYS]++;
1183 } else
1184 cp_time[CP_IDLE]++;
1185 }
1186 pscnt = psdiv;
1187
1188 if (p != NULL) {
1189 ++p->p_cpticks;
1190 /*
1191 * If no schedclock is provided, call it here at roughly
1192 * 12-25 Hz; about 16 Hz is best.
1193 */
1194 if (schedhz == 0)
1195 if ((++schedclk & 3) == 0)
1196 schedclock(p);
1197 }
1198 }
1199
1200
1201 #ifdef NTP /* NTP phase-locked loop in kernel */
1202
1203 /*
1204 * hardupdate() - local clock update
1205 *
1206 * This routine is called by ntp_adjtime() to update the local clock
1207 * phase and frequency. The implementation is of an adaptive-parameter,
1208 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
1209 * time and frequency offset estimates for each call. If the kernel PPS
1210 * discipline code is configured (PPS_SYNC), the PPS signal itself
1211 * determines the new time offset, instead of the calling argument.
1212 * Presumably, calls to ntp_adjtime() occur only when the caller
1213 * believes the local clock is valid within some bound (+-128 ms with
1214 * NTP). If the caller's time is far different than the PPS time, an
1215 * argument will ensue, and it's not clear who will lose.
1216 *
1217 * For uncompensated quartz crystal oscillators and nominal update
1218 * intervals less than 1024 s, operation should be in phase-lock mode
1219 * (STA_FLL = 0), where the loop is disciplined to phase. For update
1220 * intervals greater than this, operation should be in frequency-lock
1221 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1222 *
1223 * Note: splclock() is in effect.
1224 */
1225 void
1226 hardupdate(offset)
1227 long offset;
1228 {
1229 long ltemp, mtemp;
1230
1231 if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1232 return;
1233 ltemp = offset;
1234 #ifdef PPS_SYNC
1235 if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
1236 ltemp = pps_offset;
1237 #endif /* PPS_SYNC */
1238
1239 /*
1240 * Scale the phase adjustment and clamp to the operating range.
1241 */
1242 if (ltemp > MAXPHASE)
1243 time_offset = MAXPHASE << SHIFT_UPDATE;
1244 else if (ltemp < -MAXPHASE)
1245 time_offset = -(MAXPHASE << SHIFT_UPDATE);
1246 else
1247 time_offset = ltemp << SHIFT_UPDATE;
1248
1249 /*
1250 * Select whether the frequency is to be controlled and in which
1251 * mode (PLL or FLL). Clamp to the operating range. Ugly
1252 * multiply/divide should be replaced someday.
1253 */
1254 if (time_status & STA_FREQHOLD || time_reftime == 0)
1255 time_reftime = time.tv_sec;
1256 mtemp = time.tv_sec - time_reftime;
1257 time_reftime = time.tv_sec;
1258 if (time_status & STA_FLL) {
1259 if (mtemp >= MINSEC) {
1260 ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
1261 SHIFT_UPDATE));
1262 if (ltemp < 0)
1263 time_freq -= -ltemp >> SHIFT_KH;
1264 else
1265 time_freq += ltemp >> SHIFT_KH;
1266 }
1267 } else {
1268 if (mtemp < MAXSEC) {
1269 ltemp *= mtemp;
1270 if (ltemp < 0)
1271 time_freq -= -ltemp >> (time_constant +
1272 time_constant + SHIFT_KF -
1273 SHIFT_USEC);
1274 else
1275 time_freq += ltemp >> (time_constant +
1276 time_constant + SHIFT_KF -
1277 SHIFT_USEC);
1278 }
1279 }
1280 if (time_freq > time_tolerance)
1281 time_freq = time_tolerance;
1282 else if (time_freq < -time_tolerance)
1283 time_freq = -time_tolerance;
1284 }
1285
1286 #ifdef PPS_SYNC
1287 /*
1288 * hardpps() - discipline CPU clock oscillator to external PPS signal
1289 *
1290 * This routine is called at each PPS interrupt in order to discipline
1291 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1292 * and leaves it in a handy spot for the hardclock() routine. It
1293 * integrates successive PPS phase differences and calculates the
1294 * frequency offset. This is used in hardclock() to discipline the CPU
1295 * clock oscillator so that intrinsic frequency error is cancelled out.
1296 * The code requires the caller to capture the time and hardware counter
1297 * value at the on-time PPS signal transition.
1298 *
1299 * Note that, on some Unix systems, this routine runs at an interrupt
1300 * priority level higher than the timer interrupt routine hardclock().
1301 * Therefore, the variables used are distinct from the hardclock()
1302 * variables, with the following exceptions: the PPS frequency pps_freq
1303 * and phase pps_offset variables are determined by this routine and
1304 * updated atomically. The time_tolerance variable can be considered a
1305 * constant, since it is infrequently changed, and then only when the
1306 * PPS signal is disabled. The watchdog counter pps_valid is updated
1307 * once per second by hardclock() and is atomically cleared in this
1308 * routine.
1309 */
1310 void
1311 hardpps(tvp, usec)
1312 struct timeval *tvp; /* time at PPS */
1313 long usec; /* hardware counter at PPS */
1314 {
1315 long u_usec, v_usec, bigtick;
1316 long cal_sec, cal_usec;
1317
1318 /*
1319 * An occasional glitch can be produced when the PPS interrupt
1320 * occurs in the hardclock() routine before the time variable is
1321 * updated. Here the offset is discarded when the difference
1322 * between it and the last one is greater than tick/2, but not
1323 * if the interval since the first discard exceeds 30 s.
1324 */
1325 time_status |= STA_PPSSIGNAL;
1326 time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1327 pps_valid = 0;
1328 u_usec = -tvp->tv_usec;
1329 if (u_usec < -500000)
1330 u_usec += 1000000;
1331 v_usec = pps_offset - u_usec;
1332 if (v_usec < 0)
1333 v_usec = -v_usec;
1334 if (v_usec > (tick >> 1)) {
1335 if (pps_glitch > MAXGLITCH) {
1336 pps_glitch = 0;
1337 pps_tf[2] = u_usec;
1338 pps_tf[1] = u_usec;
1339 } else {
1340 pps_glitch++;
1341 u_usec = pps_offset;
1342 }
1343 } else
1344 pps_glitch = 0;
1345
1346 /*
1347 * A three-stage median filter is used to help deglitch the pps
1348 * time. The median sample becomes the time offset estimate; the
1349 * difference between the other two samples becomes the time
1350 * dispersion (jitter) estimate.
1351 */
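
/*
 * For instance (sample values assumed for illustration): with filter
 * contents pps_tf[] = { 20, 30, -10 } us after the shift below, the
 * median 20 becomes pps_offset and the spread between the other two
 * samples, 30 - (-10) = 40 us, is the jitter sample folded into
 * pps_jitter.
 */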
1352 pps_tf[2] = pps_tf[1];
1353 pps_tf[1] = pps_tf[0];
1354 pps_tf[0] = u_usec;
1355 if (pps_tf[0] > pps_tf[1]) {
1356 if (pps_tf[1] > pps_tf[2]) {
1357 pps_offset = pps_tf[1]; /* 0 1 2 */
1358 v_usec = pps_tf[0] - pps_tf[2];
1359 } else if (pps_tf[2] > pps_tf[0]) {
1360 pps_offset = pps_tf[0]; /* 2 0 1 */
1361 v_usec = pps_tf[2] - pps_tf[1];
1362 } else {
1363 pps_offset = pps_tf[2]; /* 0 2 1 */
1364 v_usec = pps_tf[0] - pps_tf[1];
1365 }
1366 } else {
1367 if (pps_tf[1] < pps_tf[2]) {
1368 pps_offset = pps_tf[1]; /* 2 1 0 */
1369 v_usec = pps_tf[2] - pps_tf[0];
1370 } else if (pps_tf[2] < pps_tf[0]) {
1371 pps_offset = pps_tf[0]; /* 1 0 2 */
1372 v_usec = pps_tf[1] - pps_tf[2];
1373 } else {
1374 pps_offset = pps_tf[2]; /* 1 2 0 */
1375 v_usec = pps_tf[1] - pps_tf[0];
1376 }
1377 }
1378 if (v_usec > MAXTIME)
1379 pps_jitcnt++;
1380 v_usec = (v_usec << PPS_AVG) - pps_jitter;
1381 if (v_usec < 0)
1382 pps_jitter -= -v_usec >> PPS_AVG;
1383 else
1384 pps_jitter += v_usec >> PPS_AVG;
1385 if (pps_jitter > (MAXTIME >> 1))
1386 time_status |= STA_PPSJITTER;
1387
1388 /*
1389 * During the calibration interval adjust the starting time when
1390 * the tick overflows. At the end of the interval compute the
1391 * duration of the interval and the difference of the hardware
1392 * counters at the beginning and end of the interval. This code
1393 * is deliciously complicated by the fact that valid differences may
1394 * exceed the value of tick when using long calibration
1395 * intervals and small ticks. Note that the counter can be
1396 * greater than tick if caught at just the wrong instant, but
1397 * the values returned and used here are correct.
1398 */
1399 bigtick = (long)tick << SHIFT_USEC;
1400 pps_usec -= pps_freq;
1401 if (pps_usec >= bigtick)
1402 pps_usec -= bigtick;
1403 if (pps_usec < 0)
1404 pps_usec += bigtick;
1405 pps_time.tv_sec++;
1406 pps_count++;
1407 if (pps_count < (1 << pps_shift))
1408 return;
1409 pps_count = 0;
1410 pps_calcnt++;
1411 u_usec = usec << SHIFT_USEC;
1412 v_usec = pps_usec - u_usec;
1413 if (v_usec >= bigtick >> 1)
1414 v_usec -= bigtick;
1415 if (v_usec < -(bigtick >> 1))
1416 v_usec += bigtick;
1417 if (v_usec < 0)
1418 v_usec = -(-v_usec >> pps_shift);
1419 else
1420 v_usec = v_usec >> pps_shift;
1421 pps_usec = u_usec;
1422 cal_sec = tvp->tv_sec;
1423 cal_usec = tvp->tv_usec;
1424 cal_sec -= pps_time.tv_sec;
1425 cal_usec -= pps_time.tv_usec;
1426 if (cal_usec < 0) {
1427 cal_usec += 1000000;
1428 cal_sec--;
1429 }
1430 pps_time = *tvp;
1431
1432 /*
1433 * Check for lost interrupts, noise, excessive jitter and
1434 * excessive frequency error. The number of timer ticks during
1435 * the interval may vary +-1 tick. Add to this a margin of one
1436 * tick for the PPS signal jitter and maximum frequency
1437 * deviation. If the limits are exceeded, the calibration
1438 * interval is reset to the minimum and we start over.
1439 */
1440 u_usec = (long)tick << 1;
1441 if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
1442 || (cal_sec == 0 && cal_usec < u_usec))
1443 || v_usec > time_tolerance || v_usec < -time_tolerance) {
1444 pps_errcnt++;
1445 pps_shift = PPS_SHIFT;
1446 pps_intcnt = 0;
1447 time_status |= STA_PPSERROR;
1448 return;
1449 }
1450
1451 /*
1452 * A three-stage median filter is used to help deglitch the pps
1453 * frequency. The median sample becomes the frequency offset
1454 * estimate; the difference between the other two samples
1455 * becomes the frequency dispersion (stability) estimate.
1456 */
1457 pps_ff[2] = pps_ff[1];
1458 pps_ff[1] = pps_ff[0];
1459 pps_ff[0] = v_usec;
1460 if (pps_ff[0] > pps_ff[1]) {
1461 if (pps_ff[1] > pps_ff[2]) {
1462 u_usec = pps_ff[1]; /* 0 1 2 */
1463 v_usec = pps_ff[0] - pps_ff[2];
1464 } else if (pps_ff[2] > pps_ff[0]) {
1465 u_usec = pps_ff[0]; /* 2 0 1 */
1466 v_usec = pps_ff[2] - pps_ff[1];
1467 } else {
1468 u_usec = pps_ff[2]; /* 0 2 1 */
1469 v_usec = pps_ff[0] - pps_ff[1];
1470 }
1471 } else {
1472 if (pps_ff[1] < pps_ff[2]) {
1473 u_usec = pps_ff[1]; /* 2 1 0 */
1474 v_usec = pps_ff[2] - pps_ff[0];
1475 } else if (pps_ff[2] < pps_ff[0]) {
1476 u_usec = pps_ff[0]; /* 1 0 2 */
1477 v_usec = pps_ff[1] - pps_ff[2];
1478 } else {
1479 u_usec = pps_ff[2]; /* 1 2 0 */
1480 v_usec = pps_ff[1] - pps_ff[0];
1481 }
1482 }
1483
1484 /*
1485 * Here the frequency dispersion (stability) is updated. If it
1486 * is less than one-fourth the maximum (MAXFREQ), the frequency
1487 * offset is updated as well, but clamped to the tolerance. It
1488 * will be processed later by the hardclock() routine.
1489 */
1490 v_usec = (v_usec >> 1) - pps_stabil;
1491 if (v_usec < 0)
1492 pps_stabil -= -v_usec >> PPS_AVG;
1493 else
1494 pps_stabil += v_usec >> PPS_AVG;
1495 if (pps_stabil > MAXFREQ >> 2) {
1496 pps_stbcnt++;
1497 time_status |= STA_PPSWANDER;
1498 return;
1499 }
1500 if (time_status & STA_PPSFREQ) {
1501 if (u_usec < 0) {
1502 pps_freq -= -u_usec >> PPS_AVG;
1503 if (pps_freq < -time_tolerance)
1504 pps_freq = -time_tolerance;
1505 u_usec = -u_usec;
1506 } else {
1507 pps_freq += u_usec >> PPS_AVG;
1508 if (pps_freq > time_tolerance)
1509 pps_freq = time_tolerance;
1510 }
1511 }
1512
1513 /*
1514 * Here the calibration interval is adjusted. If the maximum
1515 * time difference is greater than tick / 4, reduce the interval
1516 * by half. If this is not the case for four consecutive
1517 * intervals, double the interval.
1518 */
1519 if (u_usec << pps_shift > bigtick >> 2) {
1520 pps_intcnt = 0;
1521 if (pps_shift > PPS_SHIFT)
1522 pps_shift--;
1523 } else if (pps_intcnt >= 4) {
1524 pps_intcnt = 0;
1525 if (pps_shift < PPS_SHIFTMAX)
1526 pps_shift++;
1527 } else
1528 pps_intcnt++;
1529 }
1530 #endif /* PPS_SYNC */
1531 #endif /* NTP */
1532
1533
1534 /*
1535 * Return information about system clocks.
1536 */
1537 int
1538 sysctl_clockrate(where, sizep)
1539 register char *where;
1540 size_t *sizep;
1541 {
1542 struct clockinfo clkinfo;
1543
1544 /*
1545 * Construct clockinfo structure.
1546 */
1547 clkinfo.tick = tick;
1548 clkinfo.tickadj = tickadj;
1549 clkinfo.hz = hz;
1550 clkinfo.profhz = profhz;
1551 clkinfo.stathz = stathz ? stathz : hz;
1552 return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
1553 }
1554