1 /* $NetBSD: kern_clock.c,v 1.96.8.2 2006/06/26 12:52:56 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 * This code is derived from software contributed to The NetBSD Foundation
11 * by Charles M. Hannum.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by the NetBSD
24 * Foundation, Inc. and its contributors.
25 * 4. Neither the name of The NetBSD Foundation nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
31 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
32 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
33 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
37 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 /*-
43 * Copyright (c) 1982, 1986, 1991, 1993
44 * The Regents of the University of California. All rights reserved.
45 * (c) UNIX System Laboratories, Inc.
46 * All or some portions of this file are derived from material licensed
47 * to the University of California by American Telephone and Telegraph
48 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
49 * the permission of UNIX System Laboratories, Inc.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 * 1. Redistributions of source code must retain the above copyright
55 * notice, this list of conditions and the following disclaimer.
56 * 2. Redistributions in binary form must reproduce the above copyright
57 * notice, this list of conditions and the following disclaimer in the
58 * documentation and/or other materials provided with the distribution.
59 * 3. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.96.8.2 2006/06/26 12:52:56 yamt Exp $");
80
81 #include "opt_ntp.h"
82 #include "opt_multiprocessor.h"
83 #include "opt_perfctrs.h"
84
85 #include <sys/param.h>
86 #include <sys/systm.h>
87 #include <sys/callout.h>
88 #include <sys/kernel.h>
89 #include <sys/proc.h>
90 #include <sys/resourcevar.h>
91 #include <sys/signalvar.h>
92 #include <sys/sysctl.h>
93 #include <sys/timex.h>
94 #include <sys/sched.h>
95 #include <sys/time.h>
96 #ifdef __HAVE_TIMECOUNTER
97 #include <sys/timetc.h>
98 #endif
99
100 #include <machine/cpu.h>
101 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
102 #include <machine/intr.h>
103 #endif
104
105 #ifdef GPROF
106 #include <sys/gmon.h>
107 #endif
108
109 /*
110 * Clock handling routines.
111 *
112 * This code is written to operate with two timers that run independently of
113 * each other. The main clock, running hz times per second, is used to keep
114 * track of real time. The second timer handles kernel and user profiling,
115 * and does resource use estimation. If the second timer is programmable,
116 * it is randomized to avoid aliasing between the two clocks. For example,
117 * the randomization prevents an adversary from always giving up the CPU
118 * just before its quantum expires. Otherwise, it would never accumulate
119 * CPU ticks. The mean frequency of the second timer is stathz.
120 *
121 * If no second timer exists, stathz will be zero; in this case we drive
122 * profiling and statistics off the main clock. This WILL NOT be accurate;
123 * do not do it unless absolutely necessary.
124 *
125 * The statistics clock may (or may not) be run at a higher rate while
126 * profiling. This profile clock runs at profhz. We require that profhz
127 * be an integral multiple of stathz.
128 *
129 * If the statistics clock is running fast, it must be divided by the ratio
130 * profhz/stathz for statistics. (For profiling, every tick counts.)
131 */
132
133 #ifndef __HAVE_TIMECOUNTER
134 #ifdef NTP /* NTP phase-locked loop in kernel */
135 /*
136 * Phase/frequency-lock loop (PLL/FLL) definitions
137 *
138 * The following variables are read and set by the ntp_adjtime() system
139 * call.
140 *
141 * time_state shows the state of the system clock, with values defined
142 * in the timex.h header file.
143 *
144 * time_status shows the status of the system clock, with bits defined
145 * in the timex.h header file.
146 *
147 * time_offset is used by the PLL/FLL to adjust the system time in small
148 * increments.
149 *
150 * time_constant determines the bandwidth or "stiffness" of the PLL.
151 *
152 * time_tolerance determines maximum frequency error or tolerance of the
153 * CPU clock oscillator and is a property of the architecture; however,
154 * in principle it could change as result of the presence of external
155 * discipline signals, for instance.
156 *
157 * time_precision is usually equal to the kernel tick variable; however,
158 * in cases where a precision clock counter or external clock is
159 * available, the resolution can be much less than this and depend on
160 * whether the external clock is working or not.
161 *
162 * time_maxerror is initialized by an ntp_adjtime() call and increased by
163 * the kernel once each second to reflect the maximum error bound
164 * growth.
165 *
166 * time_esterror is set and read by the ntp_adjtime() call, but
167 * otherwise not used by the kernel.
168 */
169 int time_state = TIME_OK; /* clock state */
170 int time_status = STA_UNSYNC; /* clock status bits */
171 long time_offset = 0; /* time offset (us) */
172 long time_constant = 0; /* pll time constant */
173 long time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */
174 long time_precision = 1; /* clock precision (us) */
175 long time_maxerror = MAXPHASE; /* maximum error (us) */
176 long time_esterror = MAXPHASE; /* estimated error (us) */
177
178 /*
179 * The following variables establish the state of the PLL/FLL and the
180 * residual time and frequency offset of the local clock. The scale
181 * factors are defined in the timex.h header file.
182 *
183 * time_phase and time_freq are the phase increment and the frequency
184 * increment, respectively, of the kernel time variable.
185 *
186 * time_freq is set via ntp_adjtime() from a value stored in a file when
187 * the synchronization daemon is first started. Its value is retrieved
188 * via ntp_adjtime() and written to the file about once per hour by the
189 * daemon.
190 *
191 * time_adj is the adjustment added to the value of tick at each timer
192 * interrupt and is recomputed from time_phase and time_freq at each
193 * seconds rollover.
194 *
195 * time_reftime is the second's portion of the system time at the last
196 * call to ntp_adjtime(). It is used to adjust the time_freq variable
197 * and to increase the time_maxerror as the time since last update
198 * increases.
199 */
200 long time_phase = 0; /* phase offset (scaled us) */
201 long time_freq = 0; /* frequency offset (scaled ppm) */
202 long time_adj = 0; /* tick adjust (scaled 1 / hz) */
203 long time_reftime = 0; /* time at last adjustment (s) */
204
205 #ifdef PPS_SYNC
206 /*
207 * The following variables are used only if the kernel PPS discipline
208 * code is configured (PPS_SYNC). The scale factors are defined in the
209 * timex.h header file.
210 *
211 * pps_time contains the time at each calibration interval, as read by
212 * microtime(). pps_count counts the seconds of the calibration
213 * interval, the duration of which is nominally pps_shift in powers of
214 * two.
215 *
216 * pps_offset is the time offset produced by the time median filter
217 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
218 * this filter.
219 *
220 * pps_freq is the frequency offset produced by the frequency median
221 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
222 * by this filter.
223 *
224 * pps_usec is latched from a high resolution counter or external clock
225 * at pps_time. Here we want the hardware counter contents only, not the
226 * contents plus the time.tv_usec as usual.
227 *
228 * pps_valid counts the number of seconds since the last PPS update. It
229 * is used as a watchdog timer to disable the PPS discipline should the
230 * PPS signal be lost.
231 *
232 * pps_glitch counts the number of seconds since the beginning of an
233 * offset burst more than tick/2 from current nominal offset. It is used
234 * mainly to suppress error bursts due to priority conflicts between the
235 * PPS interrupt and timer interrupt.
236 *
237 * pps_intcnt counts the calibration intervals for use in the interval-
238 * adaptation algorithm. It's just too complicated for words.
239 *
240 * pps_kc_hardpps_source contains an arbitrary value that uniquely
241 * identifies the currently bound source of the PPS signal, or NULL
242 * if no source is bound.
243 *
244 * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
245 * signal should be reported.
246 */
247 struct timeval pps_time; /* kernel time at last interval */
248 long pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */
249 long pps_offset = 0; /* pps time offset (us) */
250 long pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
251 long pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */
252 long pps_freq = 0; /* frequency offset (scaled ppm) */
253 long pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
254 long pps_usec = 0; /* microsec counter at last interval */
255 long pps_valid = PPS_VALID; /* pps signal watchdog counter */
256 int pps_glitch = 0; /* pps signal glitch counter */
257 int pps_count = 0; /* calibration interval counter (s) */
258 int pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
259 int pps_intcnt = 0; /* intervals at current duration */
260 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
261 int pps_kc_hardpps_mode = 0; /* interesting edges of PPS signal */
262
263 /*
264 * PPS signal quality monitors
265 *
266 * pps_jitcnt counts the seconds that have been discarded because the
267 * jitter measured by the time median filter exceeds the limit MAXTIME
268 * (100 us).
269 *
270 * pps_calcnt counts the frequency calibration intervals, which are
271 * variable from 4 s to 256 s.
272 *
273 * pps_errcnt counts the calibration intervals which have been discarded
274 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
275 * calibration interval jitter exceeds two ticks.
276 *
277 * pps_stbcnt counts the calibration intervals that have been discarded
278 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
279 */
280 long pps_jitcnt = 0; /* jitter limit exceeded */
281 long pps_calcnt = 0; /* calibration intervals */
282 long pps_errcnt = 0; /* calibration errors */
283 long pps_stbcnt = 0; /* stability limit exceeded */
284 #endif /* PPS_SYNC */
285
286 #ifdef EXT_CLOCK
287 /*
288 * External clock definitions
289 *
290 * The following definitions and declarations are used only if an
291 * external clock is configured on the system.
292 */
293 #define CLOCK_INTERVAL 30 /* CPU clock update interval (s) */
294
295 /*
296 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
297 * interrupt and decremented once each second.
298 */
299 int clock_count = 0; /* CPU clock counter */
300
301 #ifdef HIGHBALL
302 /*
303 * The clock_offset and clock_cpu variables are used by the HIGHBALL
304 * interface. The clock_offset variable defines the offset between
305 * system time and the HIGHBALL counters. The clock_cpu variable contains
306 * the offset between the system clock and the HIGHBALL clock for use in
307 * disciplining the kernel time variable.
308 */
309 extern struct timeval clock_offset; /* Highball clock offset */
310 long clock_cpu = 0; /* CPU clock adjust */
311 #endif /* HIGHBALL */
312 #endif /* EXT_CLOCK */
313 #endif /* NTP */
314
315 /*
316 * Bump a timeval by a small number of usec's.
317 */
318 #define BUMPTIME(t, usec) { \
319 volatile struct timeval *tp = (t); \
320 long us; \
321 \
322 tp->tv_usec = us = tp->tv_usec + (usec); \
323 if (us >= 1000000) { \
324 tp->tv_usec = us - 1000000; \
325 tp->tv_sec++; \
326 } \
327 }
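/*
 * For illustration: a call such as BUMPTIME(&mono_time, tick) advances
 * mono_time by one tick's worth of microseconds, carrying into tv_sec when
 * tv_usec reaches 1000000.  The single carry assumes the increment never
 * pushes tv_usec past 2000000, which holds for the tick-sized deltas used
 * in hardclock().
 */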
328 #endif /* !__HAVE_TIMECOUNTER */
329
330 int stathz;
331 int profhz;
332 int profsrc;
333 int schedhz;
334 int profprocs;
335 int hardclock_ticks;
336 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
337 static int psdiv; /* prof => stat divider */
338 int psratio; /* ratio: prof / stat */
339 #ifndef __HAVE_TIMECOUNTER
340 int tickfix, tickfixinterval; /* used if tick not really integral */
341 #ifndef NTP
342 static int tickfixcnt; /* accumulated fractional error */
343 #else
344 int fixtick; /* used by NTP for same */
345 int shifthz;
346 #endif
347
348 /*
349 * We might want ldd to load both words from time at once.
350 * To succeed we need to be quadword aligned.
351 * The sparc already does that, and that it has worked so far is a fluke.
352 */
353 volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t))));
354 volatile struct timeval mono_time;
355 #endif /* !__HAVE_TIMECOUNTER */
356
357 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
358 void *softclock_si;
359 #endif
360
361 #ifdef __HAVE_TIMECOUNTER
362 static u_int get_intr_timecount(struct timecounter *);
363
364 static struct timecounter intr_timecounter = {
365 get_intr_timecount, /* get_timecount */
366 0, /* no poll_pps */
367 ~0u, /* counter_mask */
368 0, /* frequency */
369 "clockinterrupt", /* name */
370 0 /* quality - minimum implementation level for a clock */
371 };
372
373 static u_int
374 get_intr_timecount(struct timecounter *tc)
375 {
376 return (u_int)hardclock_ticks;
377 }
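/*
 * For illustration: this fallback timecounter just reports the number of
 * hardclock() interrupts, so time read through it advances only in whole
 * 1/hz steps; hence the quality of 0 ("minimum implementation level") and
 * the note in initclocks() that it runs at interrupt resolution.
 */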
378 #endif
379
380 /*
381 * Initialize clock frequencies and start both clocks running.
382 */
383 void
384 initclocks(void)
385 {
386 int i;
387
388 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
389 softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
390 if (softclock_si == NULL)
391 panic("initclocks: unable to register softclock intr");
392 #endif
393
394 /*
395 * Set divisors to 1 (normal case) and let the machine-specific
396 * code do its bit.
397 */
398 psdiv = 1;
399 #ifdef __HAVE_TIMECOUNTER
400 /*
401 * provide minimum default time counter
402 * will only run at interrupt resolution
403 */
404 intr_timecounter.tc_frequency = hz;
405 tc_init(&intr_timecounter);
406 #endif
407 cpu_initclocks();
408
409 /*
410 * Compute profhz/stathz/rrticks, and fix profhz if needed.
411 */
412 i = stathz ? stathz : hz;
413 if (profhz == 0)
414 profhz = i;
415 psratio = profhz / i;
416 rrticks = hz / 10;
417 if (schedhz == 0) {
418 /* 16Hz is best */
419 statscheddiv = i / 16;
420 if (statscheddiv <= 0)
421 panic("statscheddiv");
422 }
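	/*
	 * Worked example (illustrative figures): with stathz = 100 and
	 * profhz = 1000, psratio = 10; and with schedhz = 0,
	 * statscheddiv = 100 / 16 = 6, so schedclock() is driven from
	 * statclock() below at roughly 100 / 6 ~= 16.7 Hz.
	 */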
423
424 #ifndef __HAVE_TIMECOUNTER
425 #ifdef NTP
426 switch (hz) {
427 case 1:
428 shifthz = SHIFT_SCALE - 0;
429 break;
430 case 2:
431 shifthz = SHIFT_SCALE - 1;
432 break;
433 case 4:
434 shifthz = SHIFT_SCALE - 2;
435 break;
436 case 8:
437 shifthz = SHIFT_SCALE - 3;
438 break;
439 case 16:
440 shifthz = SHIFT_SCALE - 4;
441 break;
442 case 32:
443 shifthz = SHIFT_SCALE - 5;
444 break;
445 case 50:
446 case 60:
447 case 64:
448 shifthz = SHIFT_SCALE - 6;
449 break;
450 case 96:
451 case 100:
452 case 128:
453 shifthz = SHIFT_SCALE - 7;
454 break;
455 case 256:
456 shifthz = SHIFT_SCALE - 8;
457 break;
458 case 512:
459 shifthz = SHIFT_SCALE - 9;
460 break;
461 case 1000:
462 case 1024:
463 shifthz = SHIFT_SCALE - 10;
464 break;
465 case 1200:
466 case 2048:
467 shifthz = SHIFT_SCALE - 11;
468 break;
469 case 4096:
470 shifthz = SHIFT_SCALE - 12;
471 break;
472 case 8192:
473 shifthz = SHIFT_SCALE - 13;
474 break;
475 case 16384:
476 shifthz = SHIFT_SCALE - 14;
477 break;
478 case 32768:
479 shifthz = SHIFT_SCALE - 15;
480 break;
481 case 65536:
482 shifthz = SHIFT_SCALE - 16;
483 break;
484 default:
485 panic("weird hz");
486 }
487 if (fixtick == 0) {
488 /*
489 * Give MD code a chance to set this to a better
490 * value; but, if it doesn't, we should.
491 */
492 fixtick = (1000000 - (hz*tick));
493 }
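	/*
	 * Worked example (illustrative figures): for hz = 100 the switch
	 * above selects shifthz = SHIFT_SCALE - 7 (100 is scaled like
	 * 128), and with tick = 1000000 / hz = 10000 the default above is
	 * fixtick = 1000000 - 100 * 10000 = 0.  For hz = 1024, tick = 976
	 * and fixtick = 1000000 - 999424 = 576, redistributed once per
	 * second by the NTP code in hardclock().
	 */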
494 #endif /* NTP */
495 #endif /* !__HAVE_TIMECOUNTER */
496 }
497
498 /*
499 * The real-time timer, interrupting hz times per second.
500 */
501 void
502 hardclock(struct clockframe *frame)
503 {
504 struct lwp *l;
505 struct proc *p;
506 struct cpu_info *ci = curcpu();
507 struct ptimer *pt;
508 #ifndef __HAVE_TIMECOUNTER
509 int delta;
510 extern int tickdelta;
511 extern long timedelta;
512 #ifdef NTP
513 int time_update;
514 int ltemp;
515 #endif /* NTP */
516 #endif /* __HAVE_TIMECOUNTER */
517
518 l = curlwp;
519 if (l) {
520 p = l->l_proc;
521 /*
522 * Run current process's virtual and profile time, as needed.
523 */
524 if (CLKF_USERMODE(frame) && p->p_timers &&
525 (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
526 if (itimerdecr(pt, tick) == 0)
527 itimerfire(pt);
528 if (p->p_timers &&
529 (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
530 if (itimerdecr(pt, tick) == 0)
531 itimerfire(pt);
532 }
533
534 /*
535 * If no separate statistics clock is available, run it from here.
536 */
537 if (stathz == 0)
538 statclock(frame);
539 if ((--ci->ci_schedstate.spc_rrticks) <= 0)
540 roundrobin(ci);
541
542 #if defined(MULTIPROCESSOR)
543 /*
544 * If we are not the primary CPU, we're not allowed to do
545 * any more work.
546 */
547 if (CPU_IS_PRIMARY(ci) == 0)
548 return;
549 #endif
550
551 hardclock_ticks++;
552
553 #ifdef __HAVE_TIMECOUNTER
554 tc_ticktock();
555 #else /* __HAVE_TIMECOUNTER */
556 /*
557 * Increment the time-of-day. The increment is normally just
558 * ``tick''. If the machine is one which has a clock frequency
559 * such that ``hz'' would not divide the second evenly into
560 * milliseconds, a periodic adjustment must be applied. Finally,
561 * if we are still adjusting the time (see adjtime()),
562 * ``tickdelta'' may also be added in.
563 */
564 delta = tick;
565
566 #ifndef NTP
567 if (tickfix) {
568 tickfixcnt += tickfix;
569 if (tickfixcnt >= tickfixinterval) {
570 delta++;
571 tickfixcnt -= tickfixinterval;
572 }
573 }
574 #endif /* !NTP */
575 /* Imprecise 4bsd adjtime() handling */
576 if (timedelta != 0) {
577 delta += tickdelta;
578 timedelta -= tickdelta;
579 }
580
581 #ifdef notyet
582 microset();
583 #endif
584
585 #ifndef NTP
586 BUMPTIME(&time, delta); /* XXX Now done using NTP code below */
587 #endif
588 BUMPTIME(&mono_time, delta);
589
590 #ifdef NTP
591 time_update = delta;
592
593 /*
594 * Compute the phase adjustment. If the low-order bits
595 * (time_phase) of the update overflow, bump the high-order bits
596 * (time_update).
597 */
598 time_phase += time_adj;
599 if (time_phase <= -FINEUSEC) {
600 ltemp = -time_phase >> SHIFT_SCALE;
601 time_phase += ltemp << SHIFT_SCALE;
602 time_update -= ltemp;
603 } else if (time_phase >= FINEUSEC) {
604 ltemp = time_phase >> SHIFT_SCALE;
605 time_phase -= ltemp << SHIFT_SCALE;
606 time_update += ltemp;
607 }
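	/*
	 * For illustration: time_phase accumulates the per-tick adjustment
	 * in 2^-SHIFT_SCALE microsecond units; the branch above peels off
	 * whole microseconds (>> SHIFT_SCALE) into time_update and leaves
	 * the fractional remainder in time_phase for later ticks.
	 */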
608
609 #ifdef HIGHBALL
610 /*
611 * If the HIGHBALL board is installed, we need to adjust the
612 * external clock offset in order to close the hardware feedback
613 * loop. This will adjust the external clock phase and frequency
614 * in small amounts. The additional phase noise and frequency
615 * wander this causes should be minimal. We also need to
616 * discipline the kernel time variable, since the PLL is used to
617 * discipline the external clock. If the Highball board is not
618 * present, we discipline kernel time with the PLL as usual. We
619 * assume that the external clock phase adjustment (time_update)
620 * and kernel phase adjustment (clock_cpu) are less than the
621 * value of tick.
622 */
623 clock_offset.tv_usec += time_update;
624 if (clock_offset.tv_usec >= 1000000) {
625 clock_offset.tv_sec++;
626 clock_offset.tv_usec -= 1000000;
627 }
628 if (clock_offset.tv_usec < 0) {
629 clock_offset.tv_sec--;
630 clock_offset.tv_usec += 1000000;
631 }
632 time.tv_usec += clock_cpu;
633 clock_cpu = 0;
634 #else
635 time.tv_usec += time_update;
636 #endif /* HIGHBALL */
637
638 /*
639 * On rollover of the second the phase adjustment to be used for
640 * the next second is calculated. Also, the maximum error is
641 * increased by the tolerance. If the PPS frequency discipline
642 * code is present, the phase is increased to compensate for the
643 * CPU clock oscillator frequency error.
644 *
645 * On a 32-bit machine and given parameters in the timex.h
646 * header file, the maximum phase adjustment is +-512 ms and
647 * maximum frequency offset is a tad less than +-512 ppm. On a
648 * 64-bit machine, you shouldn't need to ask.
649 */
650 if (time.tv_usec >= 1000000) {
651 time.tv_usec -= 1000000;
652 time.tv_sec++;
653 time_maxerror += time_tolerance >> SHIFT_USEC;
654
655 /*
656 * Leap second processing. If in leap-insert state at
657 * the end of the day, the system clock is set back one
658 * second; if in leap-delete state, the system clock is
659 * set ahead one second. The microtime() routine or
660 * external clock driver will ensure that reported time
661 * is always monotonic. The ugly divides should be
662 * replaced.
663 */
664 switch (time_state) {
665 case TIME_OK:
666 if (time_status & STA_INS)
667 time_state = TIME_INS;
668 else if (time_status & STA_DEL)
669 time_state = TIME_DEL;
670 break;
671
672 case TIME_INS:
673 if (time.tv_sec % 86400 == 0) {
674 time.tv_sec--;
675 time_state = TIME_OOP;
676 }
677 break;
678
679 case TIME_DEL:
680 if ((time.tv_sec + 1) % 86400 == 0) {
681 time.tv_sec++;
682 time_state = TIME_WAIT;
683 }
684 break;
685
686 case TIME_OOP:
687 time_state = TIME_WAIT;
688 break;
689
690 case TIME_WAIT:
691 if (!(time_status & (STA_INS | STA_DEL)))
692 time_state = TIME_OK;
693 break;
694 }
695
696 /*
697 * Compute the phase adjustment for the next second. In
698 * PLL mode, the offset is reduced by a fixed factor
699 * times the time constant. In FLL mode the offset is
700 * used directly. In either mode, the maximum phase
701 * adjustment for each second is clamped so as to spread
702 * the adjustment over not more than the number of
703 * seconds between updates.
704 */
705 if (time_offset < 0) {
706 ltemp = -time_offset;
707 if (!(time_status & STA_FLL))
708 ltemp >>= SHIFT_KG + time_constant;
709 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
710 ltemp = (MAXPHASE / MINSEC) <<
711 SHIFT_UPDATE;
712 time_offset += ltemp;
713 time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
714 } else if (time_offset > 0) {
715 ltemp = time_offset;
716 if (!(time_status & STA_FLL))
717 ltemp >>= SHIFT_KG + time_constant;
718 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
719 ltemp = (MAXPHASE / MINSEC) <<
720 SHIFT_UPDATE;
721 time_offset -= ltemp;
722 time_adj = ltemp << (shifthz - SHIFT_UPDATE);
723 } else
724 time_adj = 0;
725
726 /*
727 * Compute the frequency estimate and additional phase
728 * adjustment due to frequency error for the next
729 * second. When the PPS signal is engaged, gnaw on the
730 * watchdog counter and update the frequency computed by
731 * the pll and the PPS signal.
732 */
733 #ifdef PPS_SYNC
734 pps_valid++;
735 if (pps_valid == PPS_VALID) {
736 pps_jitter = MAXTIME;
737 pps_stabil = MAXFREQ;
738 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
739 STA_PPSWANDER | STA_PPSERROR);
740 }
741 ltemp = time_freq + pps_freq;
742 #else
743 ltemp = time_freq;
744 #endif /* PPS_SYNC */
745
746 if (ltemp < 0)
747 time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
748 else
749 time_adj += ltemp >> (SHIFT_USEC - shifthz);
750 time_adj += (long)fixtick << shifthz;
751
752 /*
753 * When the CPU clock oscillator frequency is not a
754 * power of 2 in Hz, shifthz is only an approximate
755 * scale factor.
756 *
757 * To determine the adjustment, you can do the following:
758 * bc -q
759 * scale=24
760 * obase=2
761 * idealhz/realhz
762 * where `idealhz' is the next higher power of 2, and `realhz'
763 * is the actual value. You may need to factor this result
764 * into a sequence of 2 multipliers to get better precision.
765 *
766 * Likewise, the error can be calculated with (e.g. for 100Hz):
767 * bc -q
768 * scale=24
769 * ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
770 * (and then multiply by 1000000 to get ppm).
771 */
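		/*
		 * Worked check (illustrative) for hz = 100: idealhz = 128
		 * and 128/100 = 1.28 = 1.010001111010111... in binary; the
		 * 50/100 Hz case below factors this as
		 * (1 + 2^-2 + 2^-5) * (1 - 2^-10) = 1.2799988, i.e. about
		 * 1 ppm low, matching its comment.
		 */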
772 switch (hz) {
773 case 60:
774 /* A factor of 1.000100010001 gives about 15ppm
775 error. */
776 if (time_adj < 0) {
777 time_adj -= (-time_adj >> 4);
778 time_adj -= (-time_adj >> 8);
779 } else {
780 time_adj += (time_adj >> 4);
781 time_adj += (time_adj >> 8);
782 }
783 break;
784
785 case 96:
786 /* A factor of 1.0101010101 gives about 244ppm error. */
787 if (time_adj < 0) {
788 time_adj -= (-time_adj >> 2);
789 time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
790 } else {
791 time_adj += (time_adj >> 2);
792 time_adj += (time_adj >> 4) + (time_adj >> 8);
793 }
794 break;
795
796 case 50:
797 case 100:
798 /* A factor of 1.010001111010111 gives about 1ppm
799 error. */
800 if (time_adj < 0) {
801 time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
802 time_adj += (-time_adj >> 10);
803 } else {
804 time_adj += (time_adj >> 2) + (time_adj >> 5);
805 time_adj -= (time_adj >> 10);
806 }
807 break;
808
809 case 1000:
810 /* A factor of 1.000001100010100001 gives about 50ppm
811 error. */
812 if (time_adj < 0) {
813 time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
814 time_adj -= (-time_adj >> 7);
815 } else {
816 time_adj += (time_adj >> 6) + (time_adj >> 11);
817 time_adj += (time_adj >> 7);
818 }
819 break;
820
821 case 1200:
822 /* A factor of 1.1011010011100001 gives about 64ppm
823 error. */
824 if (time_adj < 0) {
825 time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
826 time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
827 } else {
828 time_adj += (time_adj >> 1) + (time_adj >> 6);
829 time_adj += (time_adj >> 3) + (time_adj >> 10);
830 }
831 break;
832 }
833
834 #ifdef EXT_CLOCK
835 /*
836 * If an external clock is present, it is necessary to
837 * discipline the kernel time variable anyway, since not
838 * all system components use the microtime() interface.
839 * Here, the time offset between the external clock and
840 * kernel time variable is computed every so often.
841 */
842 clock_count++;
843 if (clock_count > CLOCK_INTERVAL) {
844 clock_count = 0;
845 microtime(&clock_ext);
846 delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
847 delta.tv_usec = clock_ext.tv_usec -
848 time.tv_usec;
849 if (delta.tv_usec < 0)
850 delta.tv_sec--;
851 if (delta.tv_usec >= 500000) {
852 delta.tv_usec -= 1000000;
853 delta.tv_sec++;
854 }
855 if (delta.tv_usec < -500000) {
856 delta.tv_usec += 1000000;
857 delta.tv_sec--;
858 }
859 if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
860 delta.tv_usec > MAXPHASE) ||
861 delta.tv_sec < -1 || (delta.tv_sec == -1 &&
862 delta.tv_usec < -MAXPHASE)) {
863 time = clock_ext;
864 delta.tv_sec = 0;
865 delta.tv_usec = 0;
866 }
867 #ifdef HIGHBALL
868 clock_cpu = delta.tv_usec;
869 #else /* HIGHBALL */
870 hardupdate(delta.tv_usec);
871 #endif /* HIGHBALL */
872 }
873 #endif /* EXT_CLOCK */
874 }
875
876 #endif /* NTP */
877 #endif /* !__HAVE_TIMECOUNTER */
878
879 /*
880 * Update real-time timeout queue.
881 * Process callouts at a very low CPU priority, so we don't keep the
882 * relatively high clock interrupt priority any longer than necessary.
883 */
884 if (callout_hardclock()) {
885 if (CLKF_BASEPRI(frame)) {
886 /*
887 * Save the overhead of a software interrupt;
888 * it will happen as soon as we return, so do
889 * it now.
890 */
891 spllowersoftclock();
892 KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
893 softclock(NULL);
894 KERNEL_UNLOCK();
895 } else {
896 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
897 softintr_schedule(softclock_si);
898 #else
899 setsoftclock();
900 #endif
901 }
902 }
903 }
904
905 #ifdef __HAVE_TIMECOUNTER
906 /*
907 * Compute number of hz until specified time. Used to compute second
908 * argument to callout_reset() from an absolute time.
909 */
910 int
911 hzto(struct timeval *tvp)
912 {
913 struct timeval now, tv;
914
915 tv = *tvp; /* Don't modify original tvp. */
916 getmicrotime(&now);
917 timersub(&tv, &now, &tv);
918 return tvtohz(&tv);
919 }
920 #endif /* __HAVE_TIMECOUNTER */
921
922 /*
923 * Compute number of ticks in the specified amount of time.
924 */
925 int
926 tvtohz(struct timeval *tv)
927 {
928 unsigned long ticks;
929 long sec, usec;
930
931 /*
932 * If the number of usecs in the whole seconds part of the time
933 * difference fits in a long, then the total number of usecs will
934 * fit in an unsigned long. Compute the total and convert it to
935 * ticks, rounding up and adding 1 to allow for the current tick
936 * to expire. Rounding also depends on unsigned long arithmetic
937 * to avoid overflow.
938 *
939 * Otherwise, if the number of ticks in the whole seconds part of
940 * the time difference fits in a long, then convert the parts to
941 * ticks separately and add, using similar rounding methods and
942 * overflow avoidance. This method would work in the previous
943 * case, but it is slightly slower and assumes that hz is integral.
944 *
945 * Otherwise, round the time difference down to the maximum
946 * representable value.
947 *
948 * If ints are 32-bit, then the maximum value for any timeout in
949 * 10ms ticks is 248 days.
950 */
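	/*
	 * Worked example (illustrative figures): for a difference of 1.5 s
	 * with hz = 100 (tick = 10000 us), the first branch below yields
	 * ((1000000 + 500000 + 9999) / 10000) + 1 = 150 + 1 = 151 ticks.
	 */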
951 sec = tv->tv_sec;
952 usec = tv->tv_usec;
953
954 if (usec < 0) {
955 sec--;
956 usec += 1000000;
957 }
958
959 if (sec < 0 || (sec == 0 && usec <= 0)) {
960 /*
961 * Would expire now or in the past. Return 0 ticks.
962 * This is different from the legacy hzto() interface,
963 * and callers need to check for it.
964 */
965 ticks = 0;
966 } else if (sec <= (LONG_MAX / 1000000))
967 ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
968 / tick) + 1;
969 else if (sec <= (LONG_MAX / hz))
970 ticks = (sec * hz) +
971 (((unsigned long)usec + (tick - 1)) / tick) + 1;
972 else
973 ticks = LONG_MAX;
974
975 if (ticks > INT_MAX)
976 ticks = INT_MAX;
977
978 return ((int)ticks);
979 }
980
981 #ifndef __HAVE_TIMECOUNTER
982 /*
983 * Compute number of hz until specified time. Used to compute second
984 * argument to callout_reset() from an absolute time.
985 */
986 int
987 hzto(struct timeval *tv)
988 {
989 unsigned long ticks;
990 long sec, usec;
991 int s;
992
993 /*
994 * If the number of usecs in the whole seconds part of the time
995 * difference fits in a long, then the total number of usecs will
996 * fit in an unsigned long. Compute the total and convert it to
997 * ticks, rounding up and adding 1 to allow for the current tick
998 * to expire. Rounding also depends on unsigned long arithmetic
999 * to avoid overflow.
1000 *
1001 * Otherwise, if the number of ticks in the whole seconds part of
1002 * the time difference fits in a long, then convert the parts to
1003 * ticks separately and add, using similar rounding methods and
1004 * overflow avoidance. This method would work in the previous
1005 * case, but it is slightly slower and assumes that hz is integral.
1006 *
1007 * Otherwise, round the time difference down to the maximum
1008 * representable value.
1009 *
1010 * If ints are 32-bit, then the maximum value for any timeout in
1011 * 10ms ticks is 248 days.
1012 */
1013 s = splclock();
1014 sec = tv->tv_sec - time.tv_sec;
1015 usec = tv->tv_usec - time.tv_usec;
1016 splx(s);
1017
1018 if (usec < 0) {
1019 sec--;
1020 usec += 1000000;
1021 }
1022
1023 if (sec < 0 || (sec == 0 && usec <= 0)) {
1024 /*
1025 * Would expire now or in the past. Return 0 ticks.
1026 * This is different from the legacy hzto() interface,
1027 * and callers need to check for it.
1028 */
1029 ticks = 0;
1030 } else if (sec <= (LONG_MAX / 1000000))
1031 ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
1032 / tick) + 1;
1033 else if (sec <= (LONG_MAX / hz))
1034 ticks = (sec * hz) +
1035 (((unsigned long)usec + (tick - 1)) / tick) + 1;
1036 else
1037 ticks = LONG_MAX;
1038
1039 if (ticks > INT_MAX)
1040 ticks = INT_MAX;
1041
1042 return ((int)ticks);
1043 }
1044 #endif /* !__HAVE_TIMECOUNTER */
1045
1046 /*
1047 * Compute number of ticks in the specified amount of time.
1048 */
1049 int
1050 tstohz(struct timespec *ts)
1051 {
1052 struct timeval tv;
1053
1054 /*
1055 * usec has great enough resolution for hz, so convert to a
1056 * timeval and use tvtohz() above.
1057 */
1058 TIMESPEC_TO_TIMEVAL(&tv, ts);
1059 return tvtohz(&tv);
1060 }
1061
1062 /*
1063 * Start profiling on a process.
1064 *
1065 * Kernel profiling passes proc0 which never exits and hence
1066 * keeps the profile clock running constantly.
1067 */
1068 void
1069 startprofclock(struct proc *p)
1070 {
1071
1072 if ((p->p_flag & P_PROFIL) == 0) {
1073 p->p_flag |= P_PROFIL;
1074 /*
1075 * This is only necessary if using the clock as the
1076 * profiling source.
1077 */
1078 if (++profprocs == 1 && stathz != 0)
1079 psdiv = psratio;
1080 }
1081 }
1082
1083 /*
1084 * Stop profiling on a process.
1085 */
1086 void
1087 stopprofclock(struct proc *p)
1088 {
1089
1090 if (p->p_flag & P_PROFIL) {
1091 p->p_flag &= ~P_PROFIL;
1092 /*
1093 * This is only necessary if using the clock as the
1094 * profiling source.
1095 */
1096 if (--profprocs == 0 && stathz != 0)
1097 psdiv = 1;
1098 }
1099 }
1100
1101 #if defined(PERFCTRS)
1102 /*
1103 * Independent profiling "tick" in case we're using a separate
1104 * clock or profiling event source. Currently, that's just
1105 * performance counters--hence the wrapper.
1106 */
1107 void
1108 proftick(struct clockframe *frame)
1109 {
1110 #ifdef GPROF
1111 struct gmonparam *g;
1112 intptr_t i;
1113 #endif
1114 struct proc *p;
1115
1116 p = curproc;
1117 if (CLKF_USERMODE(frame)) {
1118 if (p->p_flag & P_PROFIL)
1119 addupc_intr(p, CLKF_PC(frame));
1120 } else {
1121 #ifdef GPROF
1122 g = &_gmonparam;
1123 if (g->state == GMON_PROF_ON) {
1124 i = CLKF_PC(frame) - g->lowpc;
1125 if (i < g->textsize) {
1126 i /= HISTFRACTION * sizeof(*g->kcount);
1127 g->kcount[i]++;
1128 }
1129 }
1130 #endif
1131 #ifdef PROC_PC
1132 if (p && (p->p_flag & P_PROFIL))
1133 addupc_intr(p, PROC_PC(p));
1134 #endif
1135 }
1136 }
1137 #endif
1138
1139 /*
1140 * Statistics clock. Grab profile sample, and if divider reaches 0,
1141 * do process and kernel statistics.
1142 */
1143 void
1144 statclock(struct clockframe *frame)
1145 {
1146 #ifdef GPROF
1147 struct gmonparam *g;
1148 intptr_t i;
1149 #endif
1150 struct cpu_info *ci = curcpu();
1151 struct schedstate_percpu *spc = &ci->ci_schedstate;
1152 struct proc *p;
1153 struct lwp *l;
1154
1155 /*
1156 * Notice changes in divisor frequency, and adjust clock
1157 * frequency accordingly.
1158 */
1159 if (spc->spc_psdiv != psdiv) {
1160 spc->spc_psdiv = psdiv;
1161 spc->spc_pscnt = psdiv;
1162 if (psdiv == 1) {
1163 setstatclockrate(stathz);
1164 } else {
1165 setstatclockrate(profhz);
1166 }
1167 }
1168 l = curlwp;
1169 p = (l ? l->l_proc : NULL);
1170 if (CLKF_USERMODE(frame)) {
1171 KASSERT(p != NULL);
1172
1173 if ((p->p_flag & P_PROFIL) && profsrc == PROFSRC_CLOCK)
1174 addupc_intr(p, CLKF_PC(frame));
1175 if (--spc->spc_pscnt > 0)
1176 return;
1177 /*
1178 * Came from user mode; CPU was in user state.
1179 * If this process is being profiled record the tick.
1180 */
1181 p->p_uticks++;
1182 if (p->p_nice > NZERO)
1183 spc->spc_cp_time[CP_NICE]++;
1184 else
1185 spc->spc_cp_time[CP_USER]++;
1186 } else {
1187 #ifdef GPROF
1188 /*
1189 * Kernel statistics are just like addupc_intr, only easier.
1190 */
1191 g = &_gmonparam;
1192 if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
1193 i = CLKF_PC(frame) - g->lowpc;
1194 if (i < g->textsize) {
1195 i /= HISTFRACTION * sizeof(*g->kcount);
1196 g->kcount[i]++;
1197 }
1198 }
1199 #endif
1200 #ifdef LWP_PC
1201 if (p && profsrc == PROFSRC_CLOCK && (p->p_flag & P_PROFIL))
1202 addupc_intr(p, LWP_PC(l));
1203 #endif
1204 if (--spc->spc_pscnt > 0)
1205 return;
1206 /*
1207 * Came from kernel mode, so we were:
1208 * - handling an interrupt,
1209 * - doing syscall or trap work on behalf of the current
1210 * user process, or
1211 * - spinning in the idle loop.
1212 * Whichever it is, charge the time as appropriate.
1213 * Note that we charge interrupts to the current process,
1214 * regardless of whether they are ``for'' that process,
1215 * so that we know how much of its real time was spent
1216 * in ``non-process'' (i.e., interrupt) work.
1217 */
1218 if (CLKF_INTR(frame)) {
1219 if (p != NULL)
1220 p->p_iticks++;
1221 spc->spc_cp_time[CP_INTR]++;
1222 } else if (p != NULL) {
1223 p->p_sticks++;
1224 spc->spc_cp_time[CP_SYS]++;
1225 } else
1226 spc->spc_cp_time[CP_IDLE]++;
1227 }
1228 spc->spc_pscnt = psdiv;
1229
1230 if (p != NULL) {
1231 ++p->p_cpticks;
1232 /*
1233 * If no separate schedclock is provided, call it here
1234 * at about 16 Hz.
1235 */
1236 if (schedhz == 0)
1237 if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
1238 schedclock(l);
1239 ci->ci_schedstate.spc_schedticks = statscheddiv;
1240 }
1241 }
1242 }
1243
1244 #ifndef __HAVE_TIMECOUNTER
1245 #ifdef NTP /* NTP phase-locked loop in kernel */
1246 /*
1247 * hardupdate() - local clock update
1248 *
1249 * This routine is called by ntp_adjtime() to update the local clock
1250 * phase and frequency. The implementation is of an adaptive-parameter,
1251 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
1252 * time and frequency offset estimates for each call. If the kernel PPS
1253 * discipline code is configured (PPS_SYNC), the PPS signal itself
1254 * determines the new time offset, instead of the calling argument.
1255 * Presumably, calls to ntp_adjtime() occur only when the caller
1256 * believes the local clock is valid within some bound (+-128 ms with
1257 * NTP). If the caller's time is far different than the PPS time, an
1258 * argument will ensue, and it's not clear who will lose.
1259 *
1260 * For uncompensated quartz crystal oscillators and nominal update
1261 * intervals less than 1024 s, operation should be in phase-lock mode
1262 * (STA_FLL = 0), where the loop is disciplined to phase. For update
1263 * intervals greater than this, operation should be in frequency-lock
1264 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1265 *
1266 * Note: splclock() is in effect.
1267 */
1268 void
1269 hardupdate(long offset)
1270 {
1271 long ltemp, mtemp;
1272
1273 if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1274 return;
1275 ltemp = offset;
1276 #ifdef PPS_SYNC
1277 if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
1278 ltemp = pps_offset;
1279 #endif /* PPS_SYNC */
1280
1281 /*
1282 * Scale the phase adjustment and clamp to the operating range.
1283 */
1284 if (ltemp > MAXPHASE)
1285 time_offset = MAXPHASE << SHIFT_UPDATE;
1286 else if (ltemp < -MAXPHASE)
1287 time_offset = -(MAXPHASE << SHIFT_UPDATE);
1288 else
1289 time_offset = ltemp << SHIFT_UPDATE;
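	/*
	 * For illustration: the clamp above limits time_offset to
	 * +-MAXPHASE in SHIFT_UPDATE-scaled microseconds, which is the
	 * "+-512 ms" maximum phase adjustment quoted in hardclock() above.
	 */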
1290
1291 /*
1292 * Select whether the frequency is to be controlled and in which
1293 * mode (PLL or FLL). Clamp to the operating range. Ugly
1294 * multiply/divide should be replaced someday.
1295 */
1296 if (time_status & STA_FREQHOLD || time_reftime == 0)
1297 time_reftime = time.tv_sec;
1298 mtemp = time.tv_sec - time_reftime;
1299 time_reftime = time.tv_sec;
1300 if (time_status & STA_FLL) {
1301 if (mtemp >= MINSEC) {
1302 ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
1303 SHIFT_UPDATE));
1304 if (ltemp < 0)
1305 time_freq -= -ltemp >> SHIFT_KH;
1306 else
1307 time_freq += ltemp >> SHIFT_KH;
1308 }
1309 } else {
1310 if (mtemp < MAXSEC) {
1311 ltemp *= mtemp;
1312 if (ltemp < 0)
1313 time_freq -= -ltemp >> (time_constant +
1314 time_constant + SHIFT_KF -
1315 SHIFT_USEC);
1316 else
1317 time_freq += ltemp >> (time_constant +
1318 time_constant + SHIFT_KF -
1319 SHIFT_USEC);
1320 }
1321 }
1322 if (time_freq > time_tolerance)
1323 time_freq = time_tolerance;
1324 else if (time_freq < -time_tolerance)
1325 time_freq = -time_tolerance;
1326 }
1327
1328 #ifdef PPS_SYNC
1329 /*
1330 * hardpps() - discipline CPU clock oscillator to external PPS signal
1331 *
1332 * This routine is called at each PPS interrupt in order to discipline
1333 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1334 * and leaves it in a handy spot for the hardclock() routine. It
1335 * integrates successive PPS phase differences and calculates the
1336 * frequency offset. This is used in hardclock() to discipline the CPU
1337 * clock oscillator so that intrinsic frequency error is cancelled out.
1338 * The code requires the caller to capture the time and hardware counter
1339 * value at the on-time PPS signal transition.
1340 *
1341 * Note that, on some Unix systems, this routine runs at an interrupt
1342 * priority level higher than the timer interrupt routine hardclock().
1343 * Therefore, the variables used are distinct from the hardclock()
1344 * variables, with the following exceptions: The PPS frequency pps_freq
1345 * and phase pps_offset variables are determined by this routine and
1346 * updated atomically. The time_tolerance variable can be considered a
1347 * constant, since it is infrequently changed, and then only when the
1348 * PPS signal is disabled. The watchdog counter pps_valid is updated
1349 * once per second by hardclock() and is atomically cleared in this
1350 * routine.
1351 */
1352 void
1353 hardpps(struct timeval *tvp, /* time at PPS */
1354 long usec /* hardware counter at PPS */)
1355 {
1356 long u_usec, v_usec, bigtick;
1357 long cal_sec, cal_usec;
1358
1359 /*
1360 * An occasional glitch can be produced when the PPS interrupt
1361 * occurs in the hardclock() routine before the time variable is
1362 * updated. Here the offset is discarded when the difference
1363 * between it and the last one is greater than tick/2, but not
1364 * if the interval since the first discard exceeds 30 s.
1365 */
1366 time_status |= STA_PPSSIGNAL;
1367 time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1368 pps_valid = 0;
1369 u_usec = -tvp->tv_usec;
1370 if (u_usec < -500000)
1371 u_usec += 1000000;
1372 v_usec = pps_offset - u_usec;
1373 if (v_usec < 0)
1374 v_usec = -v_usec;
1375 if (v_usec > (tick >> 1)) {
1376 if (pps_glitch > MAXGLITCH) {
1377 pps_glitch = 0;
1378 pps_tf[2] = u_usec;
1379 pps_tf[1] = u_usec;
1380 } else {
1381 pps_glitch++;
1382 u_usec = pps_offset;
1383 }
1384 } else
1385 pps_glitch = 0;
1386
1387 /*
1388 * A three-stage median filter is used to help deglitch the pps
1389 * time. The median sample becomes the time offset estimate; the
1390 * difference between the other two samples becomes the time
1391 * dispersion (jitter) estimate.
1392 */
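	/*
	 * Worked example (illustrative figures): once the new sample has
	 * been shifted in below, suppose pps_tf[0..2] = {7, 3, 5} (newest
	 * first); the comparisons then pick the median 5 as pps_offset and
	 * 7 - 3 = 4 as the jitter sample v_usec.
	 */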
1393 pps_tf[2] = pps_tf[1];
1394 pps_tf[1] = pps_tf[0];
1395 pps_tf[0] = u_usec;
1396 if (pps_tf[0] > pps_tf[1]) {
1397 if (pps_tf[1] > pps_tf[2]) {
1398 pps_offset = pps_tf[1]; /* 0 1 2 */
1399 v_usec = pps_tf[0] - pps_tf[2];
1400 } else if (pps_tf[2] > pps_tf[0]) {
1401 pps_offset = pps_tf[0]; /* 2 0 1 */
1402 v_usec = pps_tf[2] - pps_tf[1];
1403 } else {
1404 pps_offset = pps_tf[2]; /* 0 2 1 */
1405 v_usec = pps_tf[0] - pps_tf[1];
1406 }
1407 } else {
1408 if (pps_tf[1] < pps_tf[2]) {
1409 pps_offset = pps_tf[1]; /* 2 1 0 */
1410 v_usec = pps_tf[2] - pps_tf[0];
1411 } else if (pps_tf[2] < pps_tf[0]) {
1412 pps_offset = pps_tf[0]; /* 1 0 2 */
1413 v_usec = pps_tf[1] - pps_tf[2];
1414 } else {
1415 pps_offset = pps_tf[2]; /* 1 2 0 */
1416 v_usec = pps_tf[1] - pps_tf[0];
1417 }
1418 }
1419 if (v_usec > MAXTIME)
1420 pps_jitcnt++;
1421 v_usec = (v_usec << PPS_AVG) - pps_jitter;
1422 if (v_usec < 0)
1423 pps_jitter -= -v_usec >> PPS_AVG;
1424 else
1425 pps_jitter += v_usec >> PPS_AVG;
1426 if (pps_jitter > (MAXTIME >> 1))
1427 time_status |= STA_PPSJITTER;
1428
1429 /*
1430 * During the calibration interval adjust the starting time when
1431 * the tick overflows. At the end of the interval compute the
1432 * duration of the interval and the difference of the hardware
1433 * counters at the beginning and end of the interval. This code
1434 * is deliciously complicated by the fact that valid differences may
1435 * exceed the value of tick when using long calibration
1436 * intervals and small ticks. Note that the counter can be
1437 * greater than tick if caught at just the wrong instant, but
1438 * the values returned and used here are correct.
1439 */
1440 bigtick = (long)tick << SHIFT_USEC;
1441 pps_usec -= pps_freq;
1442 if (pps_usec >= bigtick)
1443 pps_usec -= bigtick;
1444 if (pps_usec < 0)
1445 pps_usec += bigtick;
1446 pps_time.tv_sec++;
1447 pps_count++;
1448 if (pps_count < (1 << pps_shift))
1449 return;
1450 pps_count = 0;
1451 pps_calcnt++;
1452 u_usec = usec << SHIFT_USEC;
1453 v_usec = pps_usec - u_usec;
1454 if (v_usec >= bigtick >> 1)
1455 v_usec -= bigtick;
1456 if (v_usec < -(bigtick >> 1))
1457 v_usec += bigtick;
1458 if (v_usec < 0)
1459 v_usec = -(-v_usec >> pps_shift);
1460 else
1461 v_usec = v_usec >> pps_shift;
1462 pps_usec = u_usec;
1463 cal_sec = tvp->tv_sec;
1464 cal_usec = tvp->tv_usec;
1465 cal_sec -= pps_time.tv_sec;
1466 cal_usec -= pps_time.tv_usec;
1467 if (cal_usec < 0) {
1468 cal_usec += 1000000;
1469 cal_sec--;
1470 }
1471 pps_time = *tvp;
1472
1473 /*
1474 * Check for lost interrupts, noise, excessive jitter and
1475 * excessive frequency error. The number of timer ticks during
1476 * the interval may vary +-1 tick. Add to this a margin of one
1477 * tick for the PPS signal jitter and maximum frequency
1478 * deviation. If the limits are exceeded, the calibration
1479 * interval is reset to the minimum and we start over.
1480 */
1481 u_usec = (long)tick << 1;
1482 if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
1483 || (cal_sec == 0 && cal_usec < u_usec))
1484 || v_usec > time_tolerance || v_usec < -time_tolerance) {
1485 pps_errcnt++;
1486 pps_shift = PPS_SHIFT;
1487 pps_intcnt = 0;
1488 time_status |= STA_PPSERROR;
1489 return;
1490 }
1491
1492 /*
1493 * A three-stage median filter is used to help deglitch the pps
1494 * frequency. The median sample becomes the frequency offset
1495 * estimate; the difference between the other two samples
1496 * becomes the frequency dispersion (stability) estimate.
1497 */
1498 pps_ff[2] = pps_ff[1];
1499 pps_ff[1] = pps_ff[0];
1500 pps_ff[0] = v_usec;
1501 if (pps_ff[0] > pps_ff[1]) {
1502 if (pps_ff[1] > pps_ff[2]) {
1503 u_usec = pps_ff[1]; /* 0 1 2 */
1504 v_usec = pps_ff[0] - pps_ff[2];
1505 } else if (pps_ff[2] > pps_ff[0]) {
1506 u_usec = pps_ff[0]; /* 2 0 1 */
1507 v_usec = pps_ff[2] - pps_ff[1];
1508 } else {
1509 u_usec = pps_ff[2]; /* 0 2 1 */
1510 v_usec = pps_ff[0] - pps_ff[1];
1511 }
1512 } else {
1513 if (pps_ff[1] < pps_ff[2]) {
1514 u_usec = pps_ff[1]; /* 2 1 0 */
1515 v_usec = pps_ff[2] - pps_ff[0];
1516 } else if (pps_ff[2] < pps_ff[0]) {
1517 u_usec = pps_ff[0]; /* 1 0 2 */
1518 v_usec = pps_ff[1] - pps_ff[2];
1519 } else {
1520 u_usec = pps_ff[2]; /* 1 2 0 */
1521 v_usec = pps_ff[1] - pps_ff[0];
1522 }
1523 }
1524
1525 /*
1526 * Here the frequency dispersion (stability) is updated. If it
1527 * is less than one-fourth the maximum (MAXFREQ), the frequency
1528 * offset is updated as well, but clamped to the tolerance. It
1529 * will be processed later by the hardclock() routine.
1530 */
1531 v_usec = (v_usec >> 1) - pps_stabil;
1532 if (v_usec < 0)
1533 pps_stabil -= -v_usec >> PPS_AVG;
1534 else
1535 pps_stabil += v_usec >> PPS_AVG;
1536 if (pps_stabil > MAXFREQ >> 2) {
1537 pps_stbcnt++;
1538 time_status |= STA_PPSWANDER;
1539 return;
1540 }
1541 if (time_status & STA_PPSFREQ) {
1542 if (u_usec < 0) {
1543 pps_freq -= -u_usec >> PPS_AVG;
1544 if (pps_freq < -time_tolerance)
1545 pps_freq = -time_tolerance;
1546 u_usec = -u_usec;
1547 } else {
1548 pps_freq += u_usec >> PPS_AVG;
1549 if (pps_freq > time_tolerance)
1550 pps_freq = time_tolerance;
1551 }
1552 }
1553
1554 /*
1555 * Here the calibration interval is adjusted. If the maximum
1556 * time difference is greater than tick / 4, reduce the interval
1557 * by half. If this is not the case for four consecutive
1558 * intervals, double the interval.
1559 */
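	/*
	 * For illustration: decrementing pps_shift below halves the
	 * (1 << pps_shift)-second interval, and it is doubled only after
	 * four consecutive quiet intervals, keeping it within the 4 s to
	 * 256 s range mentioned near pps_calcnt above.
	 */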
1560 if (u_usec << pps_shift > bigtick >> 2) {
1561 pps_intcnt = 0;
1562 if (pps_shift > PPS_SHIFT)
1563 pps_shift--;
1564 } else if (pps_intcnt >= 4) {
1565 pps_intcnt = 0;
1566 if (pps_shift < PPS_SHIFTMAX)
1567 pps_shift++;
1568 } else
1569 pps_intcnt++;
1570 }
1571 #endif /* PPS_SYNC */
1572 #endif /* NTP */
1573
1574 /* timecounter compat functions */
1575 void
1576 nanotime(struct timespec *ts)
1577 {
1578 struct timeval tv;
1579
1580 microtime(&tv);
1581 TIMEVAL_TO_TIMESPEC(&tv, ts);
1582 }
1583
1584 void
1585 getbinuptime(struct bintime *bt)
1586 {
1587 struct timeval tv;
1588
1589 microtime(&tv);
1590 timeval2bintime(&tv, bt);
1591 }
1592
1593 void
1594 nanouptime(struct timespec *tsp)
1595 {
1596 int s;
1597
1598 s = splclock();
1599 TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
1600 splx(s);
1601 }
1602
1603 void
1604 getnanouptime(struct timespec *tsp)
1605 {
1606 int s;
1607
1608 s = splclock();
1609 TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
1610 splx(s);
1611 }
1612
1613 void
1614 getmicrouptime(struct timeval *tvp)
1615 {
1616 int s;
1617
1618 s = splclock();
1619 *tvp = mono_time;
1620 splx(s);
1621 }
1622
1623 void
1624 getnanotime(struct timespec *tsp)
1625 {
1626 int s;
1627
1628 s = splclock();
1629 TIMEVAL_TO_TIMESPEC(&time, tsp);
1630 splx(s);
1631 }
1632
1633 void
1634 getmicrotime(struct timeval *tvp)
1635 {
1636 int s;
1637
1638 s = splclock();
1639 *tvp = time;
1640 splx(s);
1641 }
1642 #endif /* !__HAVE_TIMECOUNTER */
1643