/*	$NetBSD: kern_clock.c,v 1.85 2003/06/12 14:44:36 drochner Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.85 2003/06/12 14:44:36 drochner Exp $");

#include "opt_ntp.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>
#include <sys/time.h>

#include <machine/cpu.h>
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
#include <machine/intr.h>
#endif

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other. The main clock, running hz times per second, is used to keep
 * track of real time. The second timer handles kernel and user profiling,
 * and does resource use estimation. If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks. For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires. Otherwise, it would never accumulate
 * cpu ticks. The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock. This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling. This profile clock runs at profhz. We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics. (For profiling, every tick counts.)
 */
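
/*
 * For illustration (values assumed, not mandated here): with stathz = 128
 * and profhz = 1024, psratio is 8, so while profiling only every 8th tick
 * of the fast clock is counted for statistics, keeping the mean statistics
 * rate at stathz.
 */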

#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time.tv_usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
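
/*
 * Worked example (values assumed): if tp->tv_usec is 999990 and usec is
 * 20, us becomes 1000010 >= 1000000, so tv_usec is set to 10 and tv_sec
 * is incremented. The macro carries at most one second, which suffices
 * for the small increments it is documented for.
 */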

int	stathz;
int	profhz;
int	profsrc;
int	schedhz;
int	profprocs;
int	hardclock_ticks;
static int psdiv;			/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words of time at once; for that to
 * work, time needs to be quadword aligned. The sparc already does this,
 * and that it has worked so far elsewhere is a fluke.
 */
volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct timeval mono_time;

void	*softclock_si;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	rrticks = hz / 10;

#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
		 */
		fixtick = (1000000 - (hz*tick));
	}
#endif
}
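
/*
 * Worked example (values assumed): with hz = 60, tick is 16666 and
 * hz * tick = 999960, so the default fixtick above is 40; the NTP code
 * in hardclock() then redistributes those 40 leftover microseconds
 * across each second via time_adj. With hz = 100 (tick = 10000),
 * hz * tick = 1000000 and fixtick is 0.
 */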

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
	struct cpu_info *ci = curcpu();
	struct ptimer *pt;
#ifdef NTP
	int time_update;
	int ltemp;
#endif

	l = curlwp;
	if (l) {
		p = l->l_proc;
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) && p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
		if (p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
		roundrobin(ci);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day. The increment is normally just
	 * ``tick''. If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * microseconds, a periodic adjustment must be applied. Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	hardclock_ticks++;
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}
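
	/*
	 * To make the scaling concrete (assuming FINEUSEC from timex.h is
	 * one microsecond in scaled units, i.e. 1 << SHIFT_SCALE):
	 * time_phase accumulates fractions of a microsecond per tick, and
	 * the test above carries whole microseconds (ltemp) into
	 * time_update while keeping only the fraction in time_phase.
	 */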

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop. This will adjust the external clock phase and frequency
	 * in small amounts. The additional phase noise and frequency
	 * wander this causes should be minimal. We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock. If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual. We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated. Also, the maximum error is
	 * increased by the tolerance. If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
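
		/*
		 * For example, an inserted leap second proceeds through
		 * the states below as follows: with STA_INS set, TIME_OK
		 * becomes TIME_INS; at the day rollover the clock is set
		 * back one second and the state becomes TIME_OOP; the
		 * next rollover moves it to TIME_WAIT; and once STA_INS
		 * is cleared, it returns to TIME_OK.
		 */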
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *	bc -q
		 *	scale=24
		 *	obase=2
		 *	idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value. You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *	bc -q
		 *	scale=24
		 *	((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
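
		/*
		 * Worked example for hz = 100, following the recipe
		 * above: idealhz = 128 and 128/100 = 1.28; the factor
		 * (1 + 2^-2 + 2^-5) = 1.28125, refined by (1 - 2^-10),
		 * gives 100 * 1.28125 * (1 - 2^-10) ~= 127.99988, an
		 * error of about -0.95 ppm relative to 128 -- the
		 * "about 1ppm" quoted in the hz == 100 case below.
		 */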
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			/* Locals this block needs; delta shadows the int. */
			struct timeval clock_ext, delta;

			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (callout_hardclock()) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do
			 * it now.
			 */
			spllowersoftclock();
			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			softclock(NULL);
			KERNEL_UNLOCK();
		} else {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
			softintr_schedule(softclock_si);
#else
			setsoftclock();
#endif
		}
	}
}

/*
 * Compute number of hz until specified time. Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
hzto(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long. Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire. Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance. This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);

	if (usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec <= 0)) {
		/*
		 * Would expire now or in the past. Return 0 ticks.
		 * This is different from the legacy hzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
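
/*
 * Worked example (values assumed): with hz = 100 (tick = 10000) and a
 * target 2.5 s in the future, sec = 2 and usec = 500000, so
 * ticks = (2500000 + 9999) / 10000 + 1 = 251; the extra tick allows the
 * partially-elapsed current tick to expire.
 */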

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}

#if defined(PERFCTRS)
/*
 * Independent profiling "tick" in case we're using a separate
 * clock or profiling event source. Currently, that's just
 * performance counters--hence the wrapper.
 */
void
proftick(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct proc *p;

	p = curproc;
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef PROC_PC
		if (p && p->p_flag & P_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
	}
}
#endif

/*
 * Statistics clock. Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l;
	struct proc *p;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = curlwp;
	p = (l ? l->l_proc : NULL);
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL && profsrc == PROFSRC_CLOCK)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p && profsrc == PROFSRC_CLOCK && p->p_flag & P_PROFIL)
			addupc_intr(p, LWP_PC(l));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (l != NULL) {
		++p->p_cpticks;
		/*
		 * If no separate schedclock is provided, call it here
		 * at ~12-25 Hz; ~16 Hz is best.
		 */
		if (schedhz == 0)
			if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(l);
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(long offset)
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, with certain exceptions: the PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(struct timeval *tvp,		/* time at PPS */
	long usec			/* hardware counter at PPS */)
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
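
	/*
	 * Worked example (sample values assumed): with pps_tf[] holding
	 * {3, -1, 2}, newest first, the code below selects the median 2
	 * as pps_offset and 3 - (-1) = 4 as the dispersion v_usec.
	 */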
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
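
	/*
	 * Equivalently (using the 4 s to 256 s range quoted for
	 * pps_calcnt above): the interval is 1 << pps_shift seconds, so
	 * halving decrements pps_shift and doubling increments it,
	 * bounded below by PPS_SHIFT and above by PPS_SHIFTMAX.
	 */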
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(void *where, size_t *sizep)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}
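
/*
 * For reference, a minimal userland sketch of how the structure filled
 * in above is typically read back, assuming the standard sysctl(3)
 * interface and the kern.clockrate MIB name:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct clockinfo ci;
 *		int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *		size_t len = sizeof(ci);
 *
 *		if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("hz=%d tick=%d stathz=%d profhz=%d\n",
 *		    ci.hz, ci.tick, ci.stathz, ci.profhz);
 *		return (0);
 *	}
 */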