/*	$NetBSD: kern_clock.c,v 1.44 1998/04/22 07:08:11 jonathan Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
#include <sys/timex.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
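
/*
 * Worked example (added for exposition): with stathz = 128 and
 * profhz = 1024, psratio is 8, so while profiling statclock() below
 * takes a profile sample on every tick but charges statistics only on
 * every 8th tick, via the pscnt/psdiv countdown.
 */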

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */


#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */
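
#if 0	/* Example (added for exposition): userland view of this state. */
/*
 * A minimal sketch, assuming the ntp_adjtime(2) interface declared in
 * <sys/timex.h>: a read-only call (modes = 0) returns the clock state
 * and fills in the variables declared above.  Build as an ordinary
 * user program, not as part of the kernel.
 */
#include <sys/timex.h>
#include <stdio.h>

int
main()
{
	struct timex tx;
	int state;

	tx.modes = 0;			/* set nothing; just read */
	state = ntp_adjtime(&tx);	/* returns TIME_OK, TIME_INS, ... */
	printf("state %d offset %ld us freq %ld maxerror %ld us\n",
	    state, tx.offset, tx.freq, tx.maxerror);
	return (0);
}
#endif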

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC).  The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL	30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface.  The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters.  The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long clock_cpu = 0;			/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
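
/*
 * Usage note (added for exposition): hardclock() below does e.g.
 * BUMPTIME(&mono_time, delta) to advance mono_time by delta
 * microseconds, carrying overflow into tv_sec.  One carry suffices
 * because delta is always far less than one second.
 */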

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

volatile struct timeval time;
volatile struct timeval mono_time;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks()
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

#ifdef NTP
	switch (hz) {
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	default:
		panic("weird hz");
	}
#endif
}
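
/*
 * Note (added for exposition): shifthz lets 1 << (SHIFT_SCALE - shifthz)
 * stand in for hz, so hz = 100 is treated as 128.  Each tick's NTP
 * adjustment then comes out a factor of 100/128 too small; hardclock()
 * compensates with the 1.25 gain applied there, leaving the residual
 * error under 3 percent, as its comment notes.
 */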

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int delta, needsoft;
	extern int tickdelta;
	extern long timedelta;
#ifdef NTP
	register int time_update;
	register int ltemp;
#endif

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
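	/*
	 * Worked example (added for exposition): events due in 2, 5 and
	 * 9 ticks are queued with c_time values 2, 3 and 4; decrementing
	 * the head's 2 effectively ages all three at once.
	 */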
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	ticks++;
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
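	/*
	 * Example (added for exposition): with hz = 1024, tick is 976 us,
	 * leaving 576 us per second unaccounted for.  Startup code
	 * elsewhere is assumed to reduce 576/1024 to tickfix = 9 per
	 * tickfixinterval = 16 ticks, which the loop above distributes
	 * one microsecond at a time.
	 */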
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
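	/*
	 * Scaling note (added for exposition): time_phase accumulates in
	 * units of 2^-SHIFT_SCALE us, so FINEUSEC (1 << SHIFT_SCALE) is
	 * exactly one microsecond; whole microseconds are carried from
	 * time_phase into time_update below.
	 */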
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop.  This will adjust the external clock phase and frequency
	 * in small amounts.  The additional phase noise and frequency
	 * wander this causes should be minimal.  We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock.  If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual.  We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated.  Also, the maximum error is
	 * increased by the tolerance.  If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is a tad less than +-512 ppm.  On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
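		/*
		 * Worked example (added for exposition): in PLL mode
		 * with time_constant = 0, and assuming the usual timex.h
		 * value SHIFT_KG = 6, a fresh 100 us offset is amortized
		 * at about 100/64 = 1.6 us over the next second, decaying
		 * geometrically thereafter.
		 */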
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 */
		switch (hz) {
		case 96:
		case 100:
			/*
			 * In the following code the overall gain is increased
			 * by a factor of 1.25, which results in a residual
			 * error less than 3 percent.
			 */
			if (time_adj < 0)
				time_adj -= -time_adj >> 2;
			else
				time_adj += time_adj >> 2;
			break;
		case 60:
			/*
			 * 60 Hz m68k and vaxes have a PLL gain factor of
			 * 60/64 (15/16) of what it should be.  In the
			 * following code the overall gain is increased by
			 * a factor of 1.0625 (17/16), which results in a
			 * residual error of just less than 0.4 percent.
			 */
			if (time_adj < 0)
				time_adj -= -time_adj >> 4;
			else
				time_adj += time_adj >> 4;
			break;
		case 1000:
			/*
			 * Do this the simple way; we're on an alpha, are
			 * the shift police going to come and get us?  We
			 * would get a residual error of only .82% with
			 * a 5 bit right shift, but we also have 64 bits
			 * in time_adj to work with for fixed point scaling.
			 */
			time_adj = (time_adj * 1024) / 1000;
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
void
timeout(ftn, arg, ticks)
	void (*ftn) __P((void *));
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	void (*ftn) __P((void *));
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register long ticks, sec;
	int s;

	/*
	 * If number of microseconds will fit in 32 bit arithmetic,
	 * then compute number of microseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value.  (We must
	 * compute in microseconds, because hz can be greater than 1000,
	 * and thus tick can be less than one millisecond).
	 *
	 * Delta times less than 14 hours can be computed ``exactly''.
	 * (Note that if hz would yield a non-integral number of us per
	 * tick, i.e. tickfix is nonzero, timeouts can be a tick longer
	 * than they should be.)  Maximum value for any timeout in 10ms
	 * ticks is 250 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000000 - 1)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000000 +
		    (tv->tv_usec - time.tv_usec)) / tick;
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}
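
/*
 * Usage note (added for exposition): to run a function at an absolute
 * time tv, convert with hzto() and hand the result to timeout(), e.g.
 *	timeout(func, arg, hzto(&tv));
 */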

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
	register int i;
#endif
	register struct proc *p;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
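	/*
	 * Example (added for exposition): with stathz = 128, a compute-
	 * bound process gains 128 p_estcpu per second, so the
	 * resetpriority() call below runs about 32 times per second
	 * for it.
	 */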
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL).  The routine computes new
 * time and frequency offset estimates for each call.  If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).  If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
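	/*
	 * Worked example (added for exposition): with samples
	 * pps_tf[] = {3, -7, 1} us, the median (the ``0 2 1'' case
	 * below) gives pps_offset = 1 us and a jitter sample of
	 * 3 - (-7) = 10 us.
	 */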
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */


/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}
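
#if 0	/* Example (added for exposition): userland consumer. */
/*
 * A minimal sketch, assuming the standard kern.clockrate sysctl: read
 * back the clockinfo structure filled in above.  Build as an ordinary
 * user program, not as part of the kernel.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main()
{
	struct clockinfo ci;
	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
	size_t len = sizeof(ci);

	if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
		return (1);
	printf("hz %d tick %d tickadj %d stathz %d profhz %d\n",
	    ci.hz, ci.tick, ci.tickadj, ci.stathz, ci.profhz);
	return (0);
}
#endif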