1 /* $NetBSD: kern_clock.c,v 1.93 2005/02/26 21:34:55 perry Exp $ */
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*-
41 * Copyright (c) 1982, 1986, 1991, 1993
42 * The Regents of the University of California. All rights reserved.
43 * (c) UNIX System Laboratories, Inc.
44 * All or some portions of this file are derived from material licensed
45 * to the University of California by American Telephone and Telegraph
46 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 * the permission of UNIX System Laboratories, Inc.
48 *
49 * Redistribution and use in source and binary forms, with or without
50 * modification, are permitted provided that the following conditions
51 * are met:
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 * 3. Neither the name of the University nor the names of its contributors
58 * may be used to endorse or promote products derived from this software
59 * without specific prior written permission.
60 *
61 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * SUCH DAMAGE.
72 *
73 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.93 2005/02/26 21:34:55 perry Exp $");
78
79 #include "opt_ntp.h"
80 #include "opt_multiprocessor.h"
81 #include "opt_perfctrs.h"
82
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/kernel.h>
87 #include <sys/proc.h>
88 #include <sys/resourcevar.h>
89 #include <sys/signalvar.h>
90 #include <sys/sysctl.h>
91 #include <sys/timex.h>
92 #include <sys/sched.h>
93 #include <sys/time.h>
94
95 #include <machine/cpu.h>
96 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
97 #include <machine/intr.h>
98 #endif
99
100 #ifdef GPROF
101 #include <sys/gmon.h>
102 #endif
103
104 /*
105 * Clock handling routines.
106 *
107 * This code is written to operate with two timers that run independently of
108 * each other. The main clock, running hz times per second, is used to keep
109 * track of real time. The second timer handles kernel and user profiling,
110 * and does resource use estimation. If the second timer is programmable,
111 * it is randomized to avoid aliasing between the two clocks. For example,
112 * the randomization prevents an adversary from always giving up the CPU
113 * just before its quantum expires; without it, such a process would
114 * never accumulate CPU ticks. The mean frequency of the second timer is stathz.
115 *
116 * If no second timer exists, stathz will be zero; in this case we drive
117 * profiling and statistics off the main clock. This WILL NOT be accurate;
118 * do not do it unless absolutely necessary.
119 *
120 * The statistics clock may (or may not) be run at a higher rate while
121 * profiling. This profile clock runs at profhz. We require that profhz
122 * be an integral multiple of stathz.
123 *
124 * If the statistics clock is running fast, it must be divided by the ratio
125 * profhz/stathz for statistics. (For profiling, every tick counts.)
126 */
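/*
 * For example, with stathz = 128 and profhz = 1024 the ratio psratio is 8,
 * so while any process is being profiled the statistics code counts only
 * every 8th tick of the faster clock and the effective statistics rate
 * remains stathz.
 */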
127
128 #ifdef NTP /* NTP phase-locked loop in kernel */
129 /*
130 * Phase/frequency-lock loop (PLL/FLL) definitions
131 *
132 * The following variables are read and set by the ntp_adjtime() system
133 * call.
134 *
135 * time_state shows the state of the system clock, with values defined
136 * in the timex.h header file.
137 *
138 * time_status shows the status of the system clock, with bits defined
139 * in the timex.h header file.
140 *
141 * time_offset is used by the PLL/FLL to adjust the system time in small
142 * increments.
143 *
144 * time_constant determines the bandwidth or "stiffness" of the PLL.
145 *
146 * time_tolerance determines maximum frequency error or tolerance of the
147 * CPU clock oscillator and is a property of the architecture; however,
148 * in principle it could change as a result of the presence of external
149 * discipline signals, for instance.
150 *
151 * time_precision is usually equal to the kernel tick variable; however,
152 * in cases where a precision clock counter or external clock is
153 * available, the resolution can be much less than this and depend on
154 * whether the external clock is working or not.
155 *
156 * time_maxerror is initialized by a ntp_adjtime() call and increased by
157 * the kernel once each second to reflect the maximum error bound
158 * growth.
159 *
160 * time_esterror is set and read by the ntp_adjtime() call, but
161 * otherwise not used by the kernel.
162 */
163 int time_state = TIME_OK; /* clock state */
164 int time_status = STA_UNSYNC; /* clock status bits */
165 long time_offset = 0; /* time offset (us) */
166 long time_constant = 0; /* pll time constant */
167 long time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */
168 long time_precision = 1; /* clock precision (us) */
169 long time_maxerror = MAXPHASE; /* maximum error (us) */
170 long time_esterror = MAXPHASE; /* estimated error (us) */
171
172 /*
173 * The following variables establish the state of the PLL/FLL and the
174 * residual time and frequency offset of the local clock. The scale
175 * factors are defined in the timex.h header file.
176 *
177 * time_phase and time_freq are the phase increment and the frequency
178 * increment, respectively, of the kernel time variable.
179 *
180 * time_freq is set via ntp_adjtime() from a value stored in a file when
181 * the synchronization daemon is first started. Its value is retrieved
182 * via ntp_adjtime() and written to the file about once per hour by the
183 * daemon.
184 *
185 * time_adj is the adjustment added to the value of tick at each timer
186 * interrupt and is recomputed from time_phase and time_freq at each
187 * seconds rollover.
188 *
189 * time_reftime is the second's portion of the system time at the last
190 * call to ntp_adjtime(). It is used to adjust the time_freq variable
191 * and to increase the time_maxerror as the time since last update
192 * increases.
193 */
194 long time_phase = 0; /* phase offset (scaled us) */
195 long time_freq = 0; /* frequency offset (scaled ppm) */
196 long time_adj = 0; /* tick adjust (scaled 1 / hz) */
197 long time_reftime = 0; /* time at last adjustment (s) */
198
199 #ifdef PPS_SYNC
200 /*
201 * The following variables are used only if the kernel PPS discipline
202 * code is configured (PPS_SYNC). The scale factors are defined in the
203 * timex.h header file.
204 *
205 * pps_time contains the time at each calibration interval, as read by
206 * microtime(). pps_count counts the seconds of the calibration
207 * interval, the duration of which is nominally 2^pps_shift
208 * seconds.
209 *
210 * pps_offset is the time offset produced by the time median filter
211 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
212 * this filter.
213 *
214 * pps_freq is the frequency offset produced by the frequency median
215 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
216 * by this filter.
217 *
218 * pps_usec is latched from a high resolution counter or external clock
219 * at pps_time. Here we want the hardware counter contents only, not the
220 * contents plus the time.tv_usec as usual.
221 *
222 * pps_valid counts the number of seconds since the last PPS update. It
223 * is used as a watchdog timer to disable the PPS discipline should the
224 * PPS signal be lost.
225 *
226 * pps_glitch counts the number of seconds since the beginning of an
227 * offset burst more than tick/2 from current nominal offset. It is used
228 * mainly to suppress error bursts due to priority conflicts between the
229 * PPS interrupt and timer interrupt.
230 *
231 * pps_intcnt counts the calibration intervals for use in the interval-
232 * adaptation algorithm. It's just too complicated for words.
233 *
234 * pps_kc_hardpps_source contains an arbitrary value that uniquely
235 * identifies the currently bound source of the PPS signal, or NULL
236 * if no source is bound.
237 *
238 * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
239 * signal should be reported.
240 */
241 struct timeval pps_time; /* kernel time at last interval */
242 long pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */
243 long pps_offset = 0; /* pps time offset (us) */
244 long pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
245 long pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */
246 long pps_freq = 0; /* frequency offset (scaled ppm) */
247 long pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
248 long pps_usec = 0; /* microsec counter at last interval */
249 long pps_valid = PPS_VALID; /* pps signal watchdog counter */
250 int pps_glitch = 0; /* pps signal glitch counter */
251 int pps_count = 0; /* calibration interval counter (s) */
252 int pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
253 int pps_intcnt = 0; /* intervals at current duration */
254 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
255 int pps_kc_hardpps_mode = 0; /* interesting edges of PPS signal */
256
257 /*
258 * PPS signal quality monitors
259 *
260 * pps_jitcnt counts the seconds that have been discarded because the
261 * jitter measured by the time median filter exceeds the limit MAXTIME
262 * (100 us).
263 *
264 * pps_calcnt counts the frequency calibration intervals, which are
265 * variable from 4 s to 256 s.
266 *
267 * pps_errcnt counts the calibration intervals which have been discarded
268 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
269 * calibration interval jitter exceeds two ticks.
270 *
271 * pps_stbcnt counts the calibration intervals that have been discarded
272 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
273 */
274 long pps_jitcnt = 0; /* jitter limit exceeded */
275 long pps_calcnt = 0; /* calibration intervals */
276 long pps_errcnt = 0; /* calibration errors */
277 long pps_stbcnt = 0; /* stability limit exceeded */
278 #endif /* PPS_SYNC */
279
280 #ifdef EXT_CLOCK
281 /*
282 * External clock definitions
283 *
284 * The following definitions and declarations are used only if an
285 * external clock is configured on the system.
286 */
287 #define CLOCK_INTERVAL 30 /* CPU clock update interval (s) */
288
289 /*
290 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
291 * interrupt and decremented once each second.
292 */
293 int clock_count = 0; /* CPU clock counter */
294
295 #ifdef HIGHBALL
296 /*
297 * The clock_offset and clock_cpu variables are used by the HIGHBALL
298 * interface. The clock_offset variable defines the offset between
299 * system time and the HIGHBALL counters. The clock_cpu variable contains
300 * the offset between the system clock and the HIGHBALL clock for use in
301 * disciplining the kernel time variable.
302 */
303 extern struct timeval clock_offset; /* Highball clock offset */
304 long clock_cpu = 0; /* CPU clock adjust */
305 #endif /* HIGHBALL */
306 #endif /* EXT_CLOCK */
307 #endif /* NTP */
308
309
310 /*
311 * Bump a timeval by a small number of usec's.
312 */
313 #define BUMPTIME(t, usec) { \
314 volatile struct timeval *tp = (t); \
315 long us; \
316 \
317 tp->tv_usec = us = tp->tv_usec + (usec); \
318 if (us >= 1000000) { \
319 tp->tv_usec = us - 1000000; \
320 tp->tv_sec++; \
321 } \
322 }
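/*
 * Note that BUMPTIME() normalizes at most one second of overflow, so the
 * increment handed to it must stay below 1000000 microseconds; hardclock()
 * below only ever passes ``tick'' plus small corrections, which satisfies
 * this.
 */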
323
324 int stathz;
325 int profhz;
326 int profsrc;
327 int schedhz;
328 int profprocs;
329 int hardclock_ticks;
330 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
331 static int psdiv; /* prof => stat divider */
332 int psratio; /* ratio: prof / stat */
333 int tickfix, tickfixinterval; /* used if tick not really integral */
334 #ifndef NTP
335 static int tickfixcnt; /* accumulated fractional error */
336 #else
337 int fixtick; /* used by NTP for same */
338 int shifthz;
339 #endif
340
341 /*
342 * We might want ldd to load both words from time at once.
343 * To succeed we need to be quadword aligned.
344 * The sparc already does that, and that it has worked so far is a fluke.
345 */
346 volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t))));
347 volatile struct timeval mono_time;
348
349 void *softclock_si;
350
351 /*
352 * Initialize clock frequencies and start both clocks running.
353 */
354 void
355 initclocks(void)
356 {
357 int i;
358
359 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
360 softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
361 if (softclock_si == NULL)
362 panic("initclocks: unable to register softclock intr");
363 #endif
364
365 /*
366 * Set divisors to 1 (normal case) and let the machine-specific
367 * code do its bit.
368 */
369 psdiv = 1;
370 cpu_initclocks();
371
372 /*
373 * Compute profhz/stathz/rrticks, and fix profhz if needed.
374 */
375 i = stathz ? stathz : hz;
376 if (profhz == 0)
377 profhz = i;
378 psratio = profhz / i;
379 rrticks = hz / 10;
380 if (schedhz == 0) {
381 /* 16Hz is best */
382 statscheddiv = i / 16;
383 if (statscheddiv <= 0)
384 panic("statscheddiv");
385 }
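/*
 * For example, with stathz = 128 statscheddiv becomes 8, so statclock()
 * calls schedclock() on every 8th statistics tick, i.e. at about 16 Hz;
 * with no statistics clock and hz = 100 the divider is 6 (about 16.7 Hz).
 */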
386
387 #ifdef NTP
388 switch (hz) {
389 case 1:
390 shifthz = SHIFT_SCALE - 0;
391 break;
392 case 2:
393 shifthz = SHIFT_SCALE - 1;
394 break;
395 case 4:
396 shifthz = SHIFT_SCALE - 2;
397 break;
398 case 8:
399 shifthz = SHIFT_SCALE - 3;
400 break;
401 case 16:
402 shifthz = SHIFT_SCALE - 4;
403 break;
404 case 32:
405 shifthz = SHIFT_SCALE - 5;
406 break;
407 case 50:
408 case 60:
409 case 64:
410 shifthz = SHIFT_SCALE - 6;
411 break;
412 case 96:
413 case 100:
414 case 128:
415 shifthz = SHIFT_SCALE - 7;
416 break;
417 case 256:
418 shifthz = SHIFT_SCALE - 8;
419 break;
420 case 512:
421 shifthz = SHIFT_SCALE - 9;
422 break;
423 case 1000:
424 case 1024:
425 shifthz = SHIFT_SCALE - 10;
426 break;
427 case 1200:
428 case 2048:
429 shifthz = SHIFT_SCALE - 11;
430 break;
431 case 4096:
432 shifthz = SHIFT_SCALE - 12;
433 break;
434 case 8192:
435 shifthz = SHIFT_SCALE - 13;
436 break;
437 case 16384:
438 shifthz = SHIFT_SCALE - 14;
439 break;
440 case 32768:
441 shifthz = SHIFT_SCALE - 15;
442 break;
443 case 65536:
444 shifthz = SHIFT_SCALE - 16;
445 break;
446 default:
447 panic("weird hz");
448 }
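/*
 * In other words, shifthz is approximately SHIFT_SCALE - log2(hz), with
 * hz rounded up to the next power of two; e.g. hz = 100 uses the same
 * shift as hz = 128. The residual error of that approximation is taken
 * out by the hz-specific multipliers in hardclock() below.
 */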
449 if (fixtick == 0) {
450 /*
451 * Give MD code a chance to set this to a better
452 * value; but, if it doesn't, we should.
453 */
454 fixtick = (1000000 - (hz*tick));
455 }
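/*
 * For example, if the machine-dependent code leaves tick = 1000000 / hz
 * rounded down and hz = 1024, then tick is 976, hz * tick is 999424 and
 * fixtick becomes 576; those 576 microseconds per second are folded back
 * into time_adj by hardclock() below.
 */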
456 #endif
457 }
458
459 /*
460 * The real-time timer, interrupting hz times per second.
461 */
462 void
463 hardclock(struct clockframe *frame)
464 {
465 struct lwp *l;
466 struct proc *p;
467 int delta;
468 extern int tickdelta;
469 extern long timedelta;
470 struct cpu_info *ci = curcpu();
471 struct ptimer *pt;
472 #ifdef NTP
473 int time_update;
474 int ltemp;
475 #endif
476
477 l = curlwp;
478 if (l) {
479 p = l->l_proc;
480 /*
481 * Run current process's virtual and profile time, as needed.
482 */
483 if (CLKF_USERMODE(frame) && p->p_timers &&
484 (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
485 if (itimerdecr(pt, tick) == 0)
486 itimerfire(pt);
487 if (p->p_timers &&
488 (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
489 if (itimerdecr(pt, tick) == 0)
490 itimerfire(pt);
491 }
492
493 /*
494 * If no separate statistics clock is available, run it from here.
495 */
496 if (stathz == 0)
497 statclock(frame);
498 if ((--ci->ci_schedstate.spc_rrticks) <= 0)
499 roundrobin(ci);
500
501 #if defined(MULTIPROCESSOR)
502 /*
503 * If we are not the primary CPU, we're not allowed to do
504 * any more work.
505 */
506 if (CPU_IS_PRIMARY(ci) == 0)
507 return;
508 #endif
509
510 /*
511 * Increment the time-of-day. The increment is normally just
512 * ``tick''. If the machine is one which has a clock frequency
513 * such that ``hz'' would not divide the second evenly into
514 * milliseconds, a periodic adjustment must be applied. Finally,
515 * if we are still adjusting the time (see adjtime()),
516 * ``tickdelta'' may also be added in.
517 */
518 hardclock_ticks++;
519 delta = tick;
520
521 #ifndef NTP
522 if (tickfix) {
523 tickfixcnt += tickfix;
524 if (tickfixcnt >= tickfixinterval) {
525 delta++;
526 tickfixcnt -= tickfixinterval;
527 }
528 }
529 #endif /* !NTP */
530 /* Imprecise 4bsd adjtime() handling */
531 if (timedelta != 0) {
532 delta += tickdelta;
533 timedelta -= tickdelta;
534 }
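/*
 * For example, an adjtime() request to slew the clock forward by 1000
 * microseconds with tickdelta = 5 leaves timedelta = 1000; each of the
 * next 200 hardclock() ticks then advances the time by tick + 5 us.
 */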
535
536 #ifdef notyet
537 microset();
538 #endif
539
540 #ifndef NTP
541 BUMPTIME(&time, delta); /* XXX Now done using NTP code below */
542 #endif
543 BUMPTIME(&mono_time, delta);
544
545 #ifdef NTP
546 time_update = delta;
547
548 /*
549 * Compute the phase adjustment. If the low-order bits
550 * (time_phase) of the update overflow, bump the high-order bits
551 * (time_update).
552 */
553 time_phase += time_adj;
554 if (time_phase <= -FINEUSEC) {
555 ltemp = -time_phase >> SHIFT_SCALE;
556 time_phase += ltemp << SHIFT_SCALE;
557 time_update -= ltemp;
558 } else if (time_phase >= FINEUSEC) {
559 ltemp = time_phase >> SHIFT_SCALE;
560 time_phase -= ltemp << SHIFT_SCALE;
561 time_update += ltemp;
562 }
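/*
 * time_phase thus accumulates the sub-microsecond residue of the
 * adjustment in scaled units; once at least FINEUSEC (one microsecond
 * in those units) has built up, the whole microseconds are carried into
 * time_update above and only the fraction is kept.
 */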
563
564 #ifdef HIGHBALL
565 /*
566 * If the HIGHBALL board is installed, we need to adjust the
567 * external clock offset in order to close the hardware feedback
568 * loop. This will adjust the external clock phase and frequency
569 * in small amounts. The additional phase noise and frequency
570 * wander this causes should be minimal. We also need to
571 * discipline the kernel time variable, since the PLL is used to
572 * discipline the external clock. If the Highball board is not
573 * present, we discipline kernel time with the PLL as usual. We
574 * assume that the external clock phase adjustment (time_update)
575 * and kernel phase adjustment (clock_cpu) are less than the
576 * value of tick.
577 */
578 clock_offset.tv_usec += time_update;
579 if (clock_offset.tv_usec >= 1000000) {
580 clock_offset.tv_sec++;
581 clock_offset.tv_usec -= 1000000;
582 }
583 if (clock_offset.tv_usec < 0) {
584 clock_offset.tv_sec--;
585 clock_offset.tv_usec += 1000000;
586 }
587 time.tv_usec += clock_cpu;
588 clock_cpu = 0;
589 #else
590 time.tv_usec += time_update;
591 #endif /* HIGHBALL */
592
593 /*
594 * On rollover of the second the phase adjustment to be used for
595 * the next second is calculated. Also, the maximum error is
596 * increased by the tolerance. If the PPS frequency discipline
597 * code is present, the phase is increased to compensate for the
598 * CPU clock oscillator frequency error.
599 *
600 * On a 32-bit machine and given parameters in the timex.h
601 * header file, the maximum phase adjustment is +-512 ms and
602 * maximum frequency offset is (a tad less than) +-512 ppm. On a
603 * 64-bit machine, you shouldn't need to ask.
604 */
605 if (time.tv_usec >= 1000000) {
606 time.tv_usec -= 1000000;
607 time.tv_sec++;
608 time_maxerror += time_tolerance >> SHIFT_USEC;
609
610 /*
611 * Leap second processing. If in leap-insert state at
612 * the end of the day, the system clock is set back one
613 * second; if in leap-delete state, the system clock is
614 * set ahead one second. The microtime() routine or
615 * external clock driver will insure that reported time
616 * is always monotonic. The ugly divides should be
617 * replaced.
618 */
619 switch (time_state) {
620 case TIME_OK:
621 if (time_status & STA_INS)
622 time_state = TIME_INS;
623 else if (time_status & STA_DEL)
624 time_state = TIME_DEL;
625 break;
626
627 case TIME_INS:
628 if (time.tv_sec % 86400 == 0) {
629 time.tv_sec--;
630 time_state = TIME_OOP;
631 }
632 break;
633
634 case TIME_DEL:
635 if ((time.tv_sec + 1) % 86400 == 0) {
636 time.tv_sec++;
637 time_state = TIME_WAIT;
638 }
639 break;
640
641 case TIME_OOP:
642 time_state = TIME_WAIT;
643 break;
644
645 case TIME_WAIT:
646 if (!(time_status & (STA_INS | STA_DEL)))
647 time_state = TIME_OK;
648 break;
649 }
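/*
 * For example, with STA_INS set the last second of the UTC day is played
 * twice: when time.tv_sec first reaches a multiple of 86400 it is stepped
 * back by one and TIME_OOP marks the duplicated second, until the next
 * rollover moves the state on to TIME_WAIT.
 */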
650
651 /*
652 * Compute the phase adjustment for the next second. In
653 * PLL mode, the offset is reduced by a fixed factor
654 * times the time constant. In FLL mode the offset is
655 * used directly. In either mode, the maximum phase
656 * adjustment for each second is clamped so as to spread
657 * the adjustment over not more than the number of
658 * seconds between updates.
659 */
660 if (time_offset < 0) {
661 ltemp = -time_offset;
662 if (!(time_status & STA_FLL))
663 ltemp >>= SHIFT_KG + time_constant;
664 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
665 ltemp = (MAXPHASE / MINSEC) <<
666 SHIFT_UPDATE;
667 time_offset += ltemp;
668 time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
669 } else if (time_offset > 0) {
670 ltemp = time_offset;
671 if (!(time_status & STA_FLL))
672 ltemp >>= SHIFT_KG + time_constant;
673 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
674 ltemp = (MAXPHASE / MINSEC) <<
675 SHIFT_UPDATE;
676 time_offset -= ltemp;
677 time_adj = ltemp << (shifthz - SHIFT_UPDATE);
678 } else
679 time_adj = 0;
680
681 /*
682 * Compute the frequency estimate and additional phase
683 * adjustment due to frequency error for the next
684 * second. When the PPS signal is engaged, gnaw on the
685 * watchdog counter and update the frequency computed by
686 * the pll and the PPS signal.
687 */
688 #ifdef PPS_SYNC
689 pps_valid++;
690 if (pps_valid == PPS_VALID) {
691 pps_jitter = MAXTIME;
692 pps_stabil = MAXFREQ;
693 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
694 STA_PPSWANDER | STA_PPSERROR);
695 }
696 ltemp = time_freq + pps_freq;
697 #else
698 ltemp = time_freq;
699 #endif /* PPS_SYNC */
700
701 if (ltemp < 0)
702 time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
703 else
704 time_adj += ltemp >> (SHIFT_USEC - shifthz);
705 time_adj += (long)fixtick << shifthz;
706
707 /*
708 * When the CPU clock oscillator frequency is not a
709 * power of 2 in Hz, shifthz is only an approximate
710 * scale factor.
711 *
712 * To determine the adjustment, you can do the following:
713 * bc -q
714 * scale=24
715 * obase=2
716 * idealhz/realhz
717 * where `idealhz' is the next higher power of 2, and `realhz'
718 * is the actual value. You may need to factor this result
719 * into a sequence of 2 multipliers to get better precision.
720 *
721 * Likewise, the error can be calculated with (e.g. for 100Hz):
722 * bc -q
723 * scale=24
724 * ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
725 * (and then multiply by 1000000 to get ppm).
726 */
727 switch (hz) {
728 case 60:
729 /* A factor of 1.000100010001 gives about 15ppm
730 error. */
731 if (time_adj < 0) {
732 time_adj -= (-time_adj >> 4);
733 time_adj -= (-time_adj >> 8);
734 } else {
735 time_adj += (time_adj >> 4);
736 time_adj += (time_adj >> 8);
737 }
738 break;
739
740 case 96:
741 /* A factor of 1.0101010101 gives about 244ppm error. */
742 if (time_adj < 0) {
743 time_adj -= (-time_adj >> 2);
744 time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
745 } else {
746 time_adj += (time_adj >> 2);
747 time_adj += (time_adj >> 4) + (time_adj >> 8);
748 }
749 break;
750
751 case 50:
752 case 100:
753 /* A factor of 1.010001111010111 gives about 1ppm
754 error. */
755 if (time_adj < 0) {
756 time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
757 time_adj += (-time_adj >> 10);
758 } else {
759 time_adj += (time_adj >> 2) + (time_adj >> 5);
760 time_adj -= (time_adj >> 10);
761 }
762 break;
763
764 case 1000:
765 /* A factor of 1.000001100010100001 gives about 50ppm
766 error. */
767 if (time_adj < 0) {
768 time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
769 time_adj -= (-time_adj >> 7);
770 } else {
771 time_adj += (time_adj >> 6) + (time_adj >> 11);
772 time_adj += (time_adj >> 7);
773 }
774 break;
775
776 case 1200:
777 /* A factor of 1.1011010011100001 gives about 64ppm
778 error. */
779 if (time_adj < 0) {
780 time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
781 time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
782 } else {
783 time_adj += (time_adj >> 1) + (time_adj >> 6);
784 time_adj += (time_adj >> 3) + (time_adj >> 10);
785 }
786 break;
787 }
788
789 #ifdef EXT_CLOCK
790 /*
791 * If an external clock is present, it is necessary to
792 * discipline the kernel time variable anyway, since not
793 * all system components use the microtime() interface.
794 * Here, the time offset between the external clock and
795 * kernel time variable is computed every so often.
796 */
797 clock_count++;
798 if (clock_count > CLOCK_INTERVAL) {
799 clock_count = 0;
800 microtime(&clock_ext);
801 delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
802 delta.tv_usec = clock_ext.tv_usec -
803 time.tv_usec;
804 if (delta.tv_usec < 0)
805 delta.tv_sec--;
806 if (delta.tv_usec >= 500000) {
807 delta.tv_usec -= 1000000;
808 delta.tv_sec++;
809 }
810 if (delta.tv_usec < -500000) {
811 delta.tv_usec += 1000000;
812 delta.tv_sec--;
813 }
814 if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
815 delta.tv_usec > MAXPHASE) ||
816 delta.tv_sec < -1 || (delta.tv_sec == -1 &&
817 delta.tv_usec < -MAXPHASE)) {
818 time = clock_ext;
819 delta.tv_sec = 0;
820 delta.tv_usec = 0;
821 }
822 #ifdef HIGHBALL
823 clock_cpu = delta.tv_usec;
824 #else /* HIGHBALL */
825 hardupdate(delta.tv_usec);
826 #endif /* HIGHBALL */
827 }
828 #endif /* EXT_CLOCK */
829 }
830
831 #endif /* NTP */
832
833 /*
834 * Update real-time timeout queue.
835 * Process callouts at a very low CPU priority, so we don't keep the
836 * relatively high clock interrupt priority any longer than necessary.
837 */
838 if (callout_hardclock()) {
839 if (CLKF_BASEPRI(frame)) {
840 /*
841 * Save the overhead of a software interrupt;
842 * it will happen as soon as we return, so do
843 * it now.
844 */
845 spllowersoftclock();
846 KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
847 softclock(NULL);
848 KERNEL_UNLOCK();
849 } else {
850 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
851 softintr_schedule(softclock_si);
852 #else
853 setsoftclock();
854 #endif
855 }
856 }
857 }
858
859 /*
860 * Compute the number of ticks until the specified time. Used to compute
861 * the second argument to callout_reset() from an absolute time.
862 */
863 int
864 hzto(struct timeval *tv)
865 {
866 unsigned long ticks;
867 long sec, usec;
868 int s;
869
870 /*
871 * If the number of usecs in the whole seconds part of the time
872 * difference fits in a long, then the total number of usecs will
873 * fit in an unsigned long. Compute the total and convert it to
874 * ticks, rounding up and adding 1 to allow for the current tick
875 * to expire. Rounding also depends on unsigned long arithmetic
876 * to avoid overflow.
877 *
878 * Otherwise, if the number of ticks in the whole seconds part of
879 * the time difference fits in a long, then convert the parts to
880 * ticks separately and add, using similar rounding methods and
881 * overflow avoidance. This method would work in the previous
882 * case, but it is slightly slower and assumes that hz is integral.
883 *
884 * Otherwise, round the time difference down to the maximum
885 * representable value.
886 *
887 * If ints are 32-bit, then the maximum value for any timeout in
888 * 10ms ticks is 248 days.
889 */
890 s = splclock();
891 sec = tv->tv_sec - time.tv_sec;
892 usec = tv->tv_usec - time.tv_usec;
893 splx(s);
894
895 if (usec < 0) {
896 sec--;
897 usec += 1000000;
898 }
899
900 if (sec < 0 || (sec == 0 && usec <= 0)) {
901 /*
902 * Would expire now or in the past. Return 0 ticks.
903 * This is different from the legacy hzto() interface,
904 * and callers need to check for it.
905 */
906 ticks = 0;
907 } else if (sec <= (LONG_MAX / 1000000))
908 ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
909 / tick) + 1;
910 else if (sec <= (LONG_MAX / hz))
911 ticks = (sec * hz) +
912 (((unsigned long)usec + (tick - 1)) / tick) + 1;
913 else
914 ticks = LONG_MAX;
915
916 if (ticks > INT_MAX)
917 ticks = INT_MAX;
918
919 return ((int)ticks);
920 }
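/*
 * A sketch of the intended use: build an absolute timeval a little in
 * the future and let hzto() turn it into the tick count expected by
 * callout_reset(). The callout, handler and 25 ms delay below are
 * made-up examples, and example_ch is assumed to have been
 * callout_init()ed elsewhere.
 */
#if 0
static struct callout example_ch;

static void
example_arm(void (*fn)(void *), void *arg)
{
	struct timeval tv;
	int s;

	/* Snapshot the current time at splclock(), as hzto() itself does. */
	s = splclock();
	tv.tv_sec = time.tv_sec;
	tv.tv_usec = time.tv_usec;
	splx(s);

	/* Aim roughly 25 ms into the future. */
	tv.tv_usec += 25000;
	if (tv.tv_usec >= 1000000) {
		tv.tv_usec -= 1000000;
		tv.tv_sec++;
	}

	/* With hz = 100 (tick = 10000 us), hzto() returns about 4 ticks. */
	callout_reset(&example_ch, hzto(&tv), fn, arg);
}
#endif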
921
922 /*
923 * Start profiling on a process.
924 *
925 * Kernel profiling passes proc0 which never exits and hence
926 * keeps the profile clock running constantly.
927 */
928 void
929 startprofclock(struct proc *p)
930 {
931
932 if ((p->p_flag & P_PROFIL) == 0) {
933 p->p_flag |= P_PROFIL;
934 /*
935 * This is only necessary if using the clock as the
936 * profiling source.
937 */
938 if (++profprocs == 1 && stathz != 0)
939 psdiv = psratio;
940 }
941 }
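/*
 * The switch to the faster rate is not made here; statclock() notices the
 * changed psdiv on its next tick and calls setstatclockrate() with profhz
 * (or with stathz again once the last profiled process stops).
 */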
942
943 /*
944 * Stop profiling on a process.
945 */
946 void
947 stopprofclock(struct proc *p)
948 {
949
950 if (p->p_flag & P_PROFIL) {
951 p->p_flag &= ~P_PROFIL;
952 /*
953 * This is only necessary if using the clock as the
954 * profiling source.
955 */
956 if (--profprocs == 0 && stathz != 0)
957 psdiv = 1;
958 }
959 }
960
961 #if defined(PERFCTRS)
962 /*
963 * Independent profiling "tick" in case we're using a separate
964 * clock or profiling event source. Currently, that's just
965 * performance counters--hence the wrapper.
966 */
967 void
968 proftick(struct clockframe *frame)
969 {
970 #ifdef GPROF
971 struct gmonparam *g;
972 intptr_t i;
973 #endif
974 struct proc *p;
975
976 p = curproc;
977 if (CLKF_USERMODE(frame)) {
978 if (p->p_flag & P_PROFIL)
979 addupc_intr(p, CLKF_PC(frame));
980 } else {
981 #ifdef GPROF
982 g = &_gmonparam;
983 if (g->state == GMON_PROF_ON) {
984 i = CLKF_PC(frame) - g->lowpc;
985 if (i < g->textsize) {
986 i /= HISTFRACTION * sizeof(*g->kcount);
987 g->kcount[i]++;
988 }
989 }
990 #endif
991 #ifdef PROC_PC
992 if (p && p->p_flag & P_PROFIL)
993 addupc_intr(p, PROC_PC(p));
994 #endif
995 }
996 }
997 #endif
998
999 /*
1000 * Statistics clock. Grab profile sample, and if divider reaches 0,
1001 * do process and kernel statistics.
1002 */
1003 void
1004 statclock(struct clockframe *frame)
1005 {
1006 #ifdef GPROF
1007 struct gmonparam *g;
1008 intptr_t i;
1009 #endif
1010 struct cpu_info *ci = curcpu();
1011 struct schedstate_percpu *spc = &ci->ci_schedstate;
1012 struct lwp *l;
1013 struct proc *p;
1014
1015 /*
1016 * Notice changes in divisor frequency, and adjust clock
1017 * frequency accordingly.
1018 */
1019 if (spc->spc_psdiv != psdiv) {
1020 spc->spc_psdiv = psdiv;
1021 spc->spc_pscnt = psdiv;
1022 if (psdiv == 1) {
1023 setstatclockrate(stathz);
1024 } else {
1025 setstatclockrate(profhz);
1026 }
1027 }
1028 l = curlwp;
1029 p = (l ? l->l_proc : 0);
1030 if (CLKF_USERMODE(frame)) {
1031 if (p->p_flag & P_PROFIL && profsrc == PROFSRC_CLOCK)
1032 addupc_intr(p, CLKF_PC(frame));
1033 if (--spc->spc_pscnt > 0)
1034 return;
1035 /*
1036 * Came from user mode; CPU was in user state.
1037 * If this process is being profiled record the tick.
1038 */
1039 p->p_uticks++;
1040 if (p->p_nice > NZERO)
1041 spc->spc_cp_time[CP_NICE]++;
1042 else
1043 spc->spc_cp_time[CP_USER]++;
1044 } else {
1045 #ifdef GPROF
1046 /*
1047 * Kernel statistics are just like addupc_intr, only easier.
1048 */
1049 g = &_gmonparam;
1050 if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
1051 i = CLKF_PC(frame) - g->lowpc;
1052 if (i < g->textsize) {
1053 i /= HISTFRACTION * sizeof(*g->kcount);
1054 g->kcount[i]++;
1055 }
1056 }
1057 #endif
1058 #ifdef LWP_PC
1059 if (p && profsrc == PROFSRC_CLOCK && p->p_flag & P_PROFIL)
1060 addupc_intr(p, LWP_PC(l));
1061 #endif
1062 if (--spc->spc_pscnt > 0)
1063 return;
1064 /*
1065 * Came from kernel mode, so we were:
1066 * - handling an interrupt,
1067 * - doing syscall or trap work on behalf of the current
1068 * user process, or
1069 * - spinning in the idle loop.
1070 * Whichever it is, charge the time as appropriate.
1071 * Note that we charge interrupts to the current process,
1072 * regardless of whether they are ``for'' that process,
1073 * so that we know how much of its real time was spent
1074 * in ``non-process'' (i.e., interrupt) work.
1075 */
1076 if (CLKF_INTR(frame)) {
1077 if (p != NULL)
1078 p->p_iticks++;
1079 spc->spc_cp_time[CP_INTR]++;
1080 } else if (p != NULL) {
1081 p->p_sticks++;
1082 spc->spc_cp_time[CP_SYS]++;
1083 } else
1084 spc->spc_cp_time[CP_IDLE]++;
1085 }
1086 spc->spc_pscnt = psdiv;
1087
1088 if (l != NULL) {
1089 ++p->p_cpticks;
1090 /*
1091 * If no separate schedclock is provided, call it here
1092 * at about 16 Hz.
1093 */
1094 if (schedhz == 0)
1095 if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
1096 schedclock(l);
1097 ci->ci_schedstate.spc_schedticks = statscheddiv;
1098 }
1099 }
1100 }
1101
1102
1103 #ifdef NTP /* NTP phase-locked loop in kernel */
1104
1105 /*
1106 * hardupdate() - local clock update
1107 *
1108 * This routine is called by ntp_adjtime() to update the local clock
1109 * phase and frequency. The implementation is of an adaptive-parameter,
1110 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
1111 * time and frequency offset estimates for each call. If the kernel PPS
1112 * discipline code is configured (PPS_SYNC), the PPS signal itself
1113 * determines the new time offset, instead of the calling argument.
1114 * Presumably, calls to ntp_adjtime() occur only when the caller
1115 * believes the local clock is valid within some bound (+-128 ms with
1116 * NTP). If the caller's time is far different than the PPS time, an
1117 * argument will ensue, and it's not clear who will lose.
1118 *
1119 * For uncompensated quartz crystal oscillators and nominal update
1120 * intervals less than 1024 s, operation should be in phase-lock mode
1121 * (STA_FLL = 0), where the loop is disciplined to phase. For update
1122 * intervals greater than this, operation should be in frequency-lock
1123 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1124 *
1125 * Note: splclock() is in effect.
1126 */
1127 void
1128 hardupdate(long offset)
1129 {
1130 long ltemp, mtemp;
1131
1132 if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1133 return;
1134 ltemp = offset;
1135 #ifdef PPS_SYNC
1136 if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
1137 ltemp = pps_offset;
1138 #endif /* PPS_SYNC */
1139
1140 /*
1141 * Scale the phase adjustment and clamp to the operating range.
1142 */
1143 if (ltemp > MAXPHASE)
1144 time_offset = MAXPHASE << SHIFT_UPDATE;
1145 else if (ltemp < -MAXPHASE)
1146 time_offset = -(MAXPHASE << SHIFT_UPDATE);
1147 else
1148 time_offset = ltemp << SHIFT_UPDATE;
1149
1150 /*
1151 * Select whether the frequency is to be controlled and in which
1152 * mode (PLL or FLL). Clamp to the operating range. Ugly
1153 * multiply/divide should be replaced someday.
1154 */
1155 if (time_status & STA_FREQHOLD || time_reftime == 0)
1156 time_reftime = time.tv_sec;
1157 mtemp = time.tv_sec - time_reftime;
1158 time_reftime = time.tv_sec;
1159 if (time_status & STA_FLL) {
1160 if (mtemp >= MINSEC) {
1161 ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
1162 SHIFT_UPDATE));
1163 if (ltemp < 0)
1164 time_freq -= -ltemp >> SHIFT_KH;
1165 else
1166 time_freq += ltemp >> SHIFT_KH;
1167 }
1168 } else {
1169 if (mtemp < MAXSEC) {
1170 ltemp *= mtemp;
1171 if (ltemp < 0)
1172 time_freq -= -ltemp >> (time_constant +
1173 time_constant + SHIFT_KF -
1174 SHIFT_USEC);
1175 else
1176 time_freq += ltemp >> (time_constant +
1177 time_constant + SHIFT_KF -
1178 SHIFT_USEC);
1179 }
1180 }
1181 if (time_freq > time_tolerance)
1182 time_freq = time_tolerance;
1183 else if (time_freq < -time_tolerance)
1184 time_freq = -time_tolerance;
1185 }
1186
1187 #ifdef PPS_SYNC
1188 /*
1189 * hardpps() - discipline CPU clock oscillator to external PPS signal
1190 *
1191 * This routine is called at each PPS interrupt in order to discipline
1192 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1193 * and leaves it in a handy spot for the hardclock() routine. It
1194 * integrates successive PPS phase differences and calculates the
1195 * frequency offset. This is used in hardclock() to discipline the CPU
1196 * clock oscillator so that intrinsic frequency error is cancelled out.
1197 * The code requires the caller to capture the time and hardware counter
1198 * value at the on-time PPS signal transition.
1199 *
1200 * Note that, on some Unix systems, this routine runs at an interrupt
1201 * priority level higher than the timer interrupt routine hardclock().
1202 * Therefore, the variables used are distinct from the hardclock()
1203 * variables, except for certain exceptions: The PPS frequency pps_freq
1204 * and phase pps_offset variables are determined by this routine and
1205 * updated atomically. The time_tolerance variable can be considered a
1206 * constant, since it is infrequently changed, and then only when the
1207 * PPS signal is disabled. The watchdog counter pps_valid is updated
1208 * once per second by hardclock() and is atomically cleared in this
1209 * routine.
1210 */
1211 void
1212 hardpps(struct timeval *tvp, /* time at PPS */
1213 long usec /* hardware counter at PPS */)
1214 {
1215 long u_usec, v_usec, bigtick;
1216 long cal_sec, cal_usec;
1217
1218 /*
1219 * An occasional glitch can be produced when the PPS interrupt
1220 * occurs in the hardclock() routine before the time variable is
1221 * updated. Here the offset is discarded when the difference
1222 * between it and the last one is greater than tick/2, but not
1223 * if the interval since the first discard exceeds 30 s.
1224 */
1225 time_status |= STA_PPSSIGNAL;
1226 time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1227 pps_valid = 0;
1228 u_usec = -tvp->tv_usec;
1229 if (u_usec < -500000)
1230 u_usec += 1000000;
1231 v_usec = pps_offset - u_usec;
1232 if (v_usec < 0)
1233 v_usec = -v_usec;
1234 if (v_usec > (tick >> 1)) {
1235 if (pps_glitch > MAXGLITCH) {
1236 pps_glitch = 0;
1237 pps_tf[2] = u_usec;
1238 pps_tf[1] = u_usec;
1239 } else {
1240 pps_glitch++;
1241 u_usec = pps_offset;
1242 }
1243 } else
1244 pps_glitch = 0;
1245
1246 /*
1247 * A three-stage median filter is used to help deglitch the pps
1248 * time. The median sample becomes the time offset estimate; the
1249 * difference between the other two samples becomes the time
1250 * dispersion (jitter) estimate.
1251 */
1252 pps_tf[2] = pps_tf[1];
1253 pps_tf[1] = pps_tf[0];
1254 pps_tf[0] = u_usec;
1255 if (pps_tf[0] > pps_tf[1]) {
1256 if (pps_tf[1] > pps_tf[2]) {
1257 pps_offset = pps_tf[1]; /* 0 1 2 */
1258 v_usec = pps_tf[0] - pps_tf[2];
1259 } else if (pps_tf[2] > pps_tf[0]) {
1260 pps_offset = pps_tf[0]; /* 2 0 1 */
1261 v_usec = pps_tf[2] - pps_tf[1];
1262 } else {
1263 pps_offset = pps_tf[2]; /* 0 2 1 */
1264 v_usec = pps_tf[0] - pps_tf[1];
1265 }
1266 } else {
1267 if (pps_tf[1] < pps_tf[2]) {
1268 pps_offset = pps_tf[1]; /* 2 1 0 */
1269 v_usec = pps_tf[2] - pps_tf[0];
1270 } else if (pps_tf[2] < pps_tf[0]) {
1271 pps_offset = pps_tf[0]; /* 1 0 2 */
1272 v_usec = pps_tf[1] - pps_tf[2];
1273 } else {
1274 pps_offset = pps_tf[2]; /* 1 2 0 */
1275 v_usec = pps_tf[1] - pps_tf[0];
1276 }
1277 }
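/*
 * For example, with samples pps_tf[] = { 7, -3, 2 } (newest first) the
 * code above picks the median, 2, as pps_offset and uses the spread of
 * the other two samples, 7 - (-3) = 10 us, as the jitter sample v_usec.
 */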
1278 if (v_usec > MAXTIME)
1279 pps_jitcnt++;
1280 v_usec = (v_usec << PPS_AVG) - pps_jitter;
1281 if (v_usec < 0)
1282 pps_jitter -= -v_usec >> PPS_AVG;
1283 else
1284 pps_jitter += v_usec >> PPS_AVG;
1285 if (pps_jitter > (MAXTIME >> 1))
1286 time_status |= STA_PPSJITTER;
1287
1288 /*
1289 * During the calibration interval adjust the starting time when
1290 * the tick overflows. At the end of the interval compute the
1291 * duration of the interval and the difference of the hardware
1292 * counters at the beginning and end of the interval. This code
1293 * is deliciously complicated by the fact that valid differences may
1294 * exceed the value of tick when using long calibration
1295 * intervals and small ticks. Note that the counter can be
1296 * greater than tick if caught at just the wrong instant, but
1297 * the values returned and used here are correct.
1298 */
1299 bigtick = (long)tick << SHIFT_USEC;
1300 pps_usec -= pps_freq;
1301 if (pps_usec >= bigtick)
1302 pps_usec -= bigtick;
1303 if (pps_usec < 0)
1304 pps_usec += bigtick;
1305 pps_time.tv_sec++;
1306 pps_count++;
1307 if (pps_count < (1 << pps_shift))
1308 return;
1309 pps_count = 0;
1310 pps_calcnt++;
1311 u_usec = usec << SHIFT_USEC;
1312 v_usec = pps_usec - u_usec;
1313 if (v_usec >= bigtick >> 1)
1314 v_usec -= bigtick;
1315 if (v_usec < -(bigtick >> 1))
1316 v_usec += bigtick;
1317 if (v_usec < 0)
1318 v_usec = -(-v_usec >> pps_shift);
1319 else
1320 v_usec = v_usec >> pps_shift;
1321 pps_usec = u_usec;
1322 cal_sec = tvp->tv_sec;
1323 cal_usec = tvp->tv_usec;
1324 cal_sec -= pps_time.tv_sec;
1325 cal_usec -= pps_time.tv_usec;
1326 if (cal_usec < 0) {
1327 cal_usec += 1000000;
1328 cal_sec--;
1329 }
1330 pps_time = *tvp;
1331
1332 /*
1333 * Check for lost interrupts, noise, excessive jitter and
1334 * excessive frequency error. The number of timer ticks during
1335 * the interval may vary +-1 tick. Add to this a margin of one
1336 * tick for the PPS signal jitter and maximum frequency
1337 * deviation. If the limits are exceeded, the calibration
1338 * interval is reset to the minimum and we start over.
1339 */
1340 u_usec = (long)tick << 1;
1341 if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
1342 || (cal_sec == 0 && cal_usec < u_usec))
1343 || v_usec > time_tolerance || v_usec < -time_tolerance) {
1344 pps_errcnt++;
1345 pps_shift = PPS_SHIFT;
1346 pps_intcnt = 0;
1347 time_status |= STA_PPSERROR;
1348 return;
1349 }
1350
1351 /*
1352 * A three-stage median filter is used to help deglitch the pps
1353 * frequency. The median sample becomes the frequency offset
1354 * estimate; the difference between the other two samples
1355 * becomes the frequency dispersion (stability) estimate.
1356 */
1357 pps_ff[2] = pps_ff[1];
1358 pps_ff[1] = pps_ff[0];
1359 pps_ff[0] = v_usec;
1360 if (pps_ff[0] > pps_ff[1]) {
1361 if (pps_ff[1] > pps_ff[2]) {
1362 u_usec = pps_ff[1]; /* 0 1 2 */
1363 v_usec = pps_ff[0] - pps_ff[2];
1364 } else if (pps_ff[2] > pps_ff[0]) {
1365 u_usec = pps_ff[0]; /* 2 0 1 */
1366 v_usec = pps_ff[2] - pps_ff[1];
1367 } else {
1368 u_usec = pps_ff[2]; /* 0 2 1 */
1369 v_usec = pps_ff[0] - pps_ff[1];
1370 }
1371 } else {
1372 if (pps_ff[1] < pps_ff[2]) {
1373 u_usec = pps_ff[1]; /* 2 1 0 */
1374 v_usec = pps_ff[2] - pps_ff[0];
1375 } else if (pps_ff[2] < pps_ff[0]) {
1376 u_usec = pps_ff[0]; /* 1 0 2 */
1377 v_usec = pps_ff[1] - pps_ff[2];
1378 } else {
1379 u_usec = pps_ff[2]; /* 1 2 0 */
1380 v_usec = pps_ff[1] - pps_ff[0];
1381 }
1382 }
1383
1384 /*
1385 * Here the frequency dispersion (stability) is updated. If it
1386 * is less than one-fourth the maximum (MAXFREQ), the frequency
1387 * offset is updated as well, but clamped to the tolerance. It
1388 * will be processed later by the hardclock() routine.
1389 */
1390 v_usec = (v_usec >> 1) - pps_stabil;
1391 if (v_usec < 0)
1392 pps_stabil -= -v_usec >> PPS_AVG;
1393 else
1394 pps_stabil += v_usec >> PPS_AVG;
1395 if (pps_stabil > MAXFREQ >> 2) {
1396 pps_stbcnt++;
1397 time_status |= STA_PPSWANDER;
1398 return;
1399 }
1400 if (time_status & STA_PPSFREQ) {
1401 if (u_usec < 0) {
1402 pps_freq -= -u_usec >> PPS_AVG;
1403 if (pps_freq < -time_tolerance)
1404 pps_freq = -time_tolerance;
1405 u_usec = -u_usec;
1406 } else {
1407 pps_freq += u_usec >> PPS_AVG;
1408 if (pps_freq > time_tolerance)
1409 pps_freq = time_tolerance;
1410 }
1411 }
1412
1413 /*
1414 * Here the calibration interval is adjusted. If the maximum
1415 * time difference is greater than tick / 4, reduce the interval
1416 * by half. If this is not the case for four consecutive
1417 * intervals, double the interval.
1418 */
1419 if (u_usec << pps_shift > bigtick >> 2) {
1420 pps_intcnt = 0;
1421 if (pps_shift > PPS_SHIFT)
1422 pps_shift--;
1423 } else if (pps_intcnt >= 4) {
1424 pps_intcnt = 0;
1425 if (pps_shift < PPS_SHIFTMAX)
1426 pps_shift++;
1427 } else
1428 pps_intcnt++;
1429 }
1430 #endif /* PPS_SYNC */
1431 #endif /* NTP */
1432