/*	$NetBSD: kern_tc.c,v 1.3 2006/06/09 22:47:56 kardel Exp $	*/

/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ---------------------------------------------------------------------------
 */

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.3 2006/06/09 22:47:56 kardel Exp $");

#include "opt_ntp.h"

#include <sys/param.h>
#ifdef __HAVE_TIMECOUNTER	/* XXX */
#include <sys/kernel.h>
#include <sys/reboot.h>		/* XXX just to get AB_VERBOSE */
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/evcnt.h>
#include <sys/kauth.h>

/*
 * maximum name length for TC names in sysctl interface
 */
#define MAX_TCNAMELEN 64

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200
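
/*
 * For example, if tc_windup() finds that UTC jumped forward by a million
 * seconds when the TOD clock was read at boot, its NTP second-processing
 * loop is clamped to two iterations instead of a million, while a routine
 * "timeouts stalled for 3 seconds" case still gets its three calls to
 * ntp_update_second().
 */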

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
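
/*
 * The dummy counter simply increments a static variable on every read, so
 * "time" advances by one nominal tick of its claimed 1 MHz frequency per
 * call rather than in real time.  Its quality of -1000000 marks it as a
 * counter of last resort that is never selected automatically (see the
 * quality checks in tc_init()).
 */
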
61
62 struct timehands {
63 /* These fields must be initialized by the driver. */
64 struct timecounter *th_counter;
65 int64_t th_adjustment;
66 u_int64_t th_scale;
67 u_int th_offset_count;
68 struct bintime th_offset;
69 struct timeval th_microtime;
70 struct timespec th_nanotime;
71 /* Fields not to be copied in tc_windup start with th_generation. */
72 volatile u_int th_generation;
73 struct timehands *th_next;
74 };
75
76 static struct timehands th0;
77 static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
78 static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
79 static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
80 static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
81 static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
82 static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
83 static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
84 static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
85 static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
86 static struct timehands th0 = {
87 &dummy_timecounter,
88 0,
89 (uint64_t)-1 / 1000000,
90 0,
91 {1, 0},
92 {0, 0},
93 {0, 0},
94 1,
95 &th1
96 };
97
98 static struct timehands *volatile timehands = &th0;
99 struct timecounter *timecounter = &dummy_timecounter;
100 static struct timecounter *timecounters = &dummy_timecounter;
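
/*
 * "timehands" points at the currently valid element of the ten-element ring
 * above.  tc_windup() prepares the next element and then publishes it by
 * storing a new, non-zero th_generation; readers (binuptime() and friends
 * below) copy what they need and retry if th_generation was zero or changed
 * underneath them, which keeps the read path lock-free.  "timecounter" is
 * the hardware counter that will be used from the next windup onwards, and
 * "timecounters" is the list of all registered counters.
 */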

time_t time_second = 1;
time_t time_uptime = 1;

static struct bintime boottimebin;
struct timeval boottime;
#ifdef __FreeBSD__
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
#endif /* __FreeBSD__ */

static int timestepwarnings;

#ifdef __FreeBSD__
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "");
#endif /* __FreeBSD__ */

/*
 * sysctl helper routine for kern.timecounter.hardware
 */
static int
sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error;
	char newname[MAX_TCNAMELEN];
	struct timecounter *newtc, *tc;

	tc = timecounter;

	strlcpy(newname, tc->tc_name, sizeof(newname));

	node = *rnode;
	node.sysctl_data = newname;
	node.sysctl_size = sizeof(newname);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error ||
	    newp == NULL ||
	    strncmp(newname, tc->tc_name, sizeof(newname)) == 0)
		return error;

	if (l && (error = kauth_authorize_generic(l->l_proc->p_cred,
	    KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag)) != 0)
		return (error);

	/* XXX locking */

	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/* XXX unlock */

		return (0);
	}

	/* XXX unlock */

	return (EINVAL);
}

static int
sysctl_kern_timecounter_choice(SYSCTLFN_ARGS)
{
	char buf[48];
	char *where = oldp;
	const char *spc;
	struct timecounter *tc;
	size_t needed, left, slen;
	int error;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	spc = "";
	error = 0;
	needed = 0;
	left = *oldlenp;

	/* XXX locking */

	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		if (where == NULL) {
			needed += sizeof(buf);	/* be conservative */
		} else {
			slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64
			    " Hz)", spc, tc->tc_name, tc->tc_quality,
			    tc->tc_frequency);
			if (left < slen + 1)
				break;
			/* XXX use sysctl_copyout? (from sysctl_hw_disknames) */
			error = copyout(buf, where, slen + 1);
			spc = " ";
			where += slen;
			needed += slen;
			left -= slen;
		}
	}

	/* XXX unlock */

	*oldlenp = needed;
	return (error);
}

SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup")
{
	const struct sysctlnode *node;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "timecounter",
	    SYSCTL_DESCR("time counter information"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node != NULL) {
		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRING, "choice",
		    SYSCTL_DESCR("available counters"),
		    sysctl_kern_timecounter_choice, 0, NULL, 0,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_STRING, "hardware",
		    SYSCTL_DESCR("currently active time counter"),
		    sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_INT, "timestepwarnings",
		    SYSCTL_DESCR("log time steps"),
		    NULL, 0, &timestepwarnings, 0,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}
}

#define TC_STATS(name) \
static struct evcnt n##name = \
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name); \
EVCNT_ATTACH_STATIC(n##name)

TC_STATS(binuptime); TC_STATS(nanouptime); TC_STATS(microuptime);
TC_STATS(bintime); TC_STATS(nanotime); TC_STATS(microtime);
TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime);
TC_STATS(getbintime); TC_STATS(getnanotime); TC_STATS(getmicrotime);
TC_STATS(setclock);

#undef TC_STATS

static void tc_windup(void);

#ifdef __FreeBSD__
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	int tv[2];

	if (req->flags & SCTL_MASK32) {
		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return SYSCTL_OUT(req, tv, sizeof(tv));
	} else
#endif
		return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
#endif /* __FreeBSD__ */

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) -
	    th->th_offset_count) & tc->tc_counter_mask);
}
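
/*
 * The subtraction is done modulo the counter width, so the delta is correct
 * even when the hardware counter has wrapped since the last windup.  For
 * example, with a 24 bit counter (tc_counter_mask == 0xffffff),
 * th_offset_count == 0xfffff0 and a current reading of 0x000010, the result
 * is (0x000010 - 0xfffff0) & 0xffffff == 0x20, i.e. 32 ticks, provided the
 * counter wrapped at most once.
 */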

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime.ev_count++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime.ev_count++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime.ev_count++;
	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime.ev_count++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime.ev_count++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}
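
/*
 * A sketch of the intended division of labour (illustrative only): callers
 * that can live with timestamps cached at the last tc_windup() should use
 * the get*() variants, which never touch the hardware, while callers that
 * need full counter resolution pay for a hardware read:
 *
 *	struct timeval tv;
 *	struct timespec ts;
 *
 *	getmicrotime(&tv);	cheap: copies th_microtime
 *	nanouptime(&ts);	precise: binuptime() plus a counter read
 */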

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("timecounter: Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("timecounter: Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	/* XXX locking */
	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
	tc_windup();
}
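
/*
 * Registration sketch (illustrative; "xyzzy" is a hypothetical device): a
 * clock driver describes its counter and hands it to tc_init(), which
 * switches to it if its quality beats the current selection.  The
 * initializer follows the same field order as dummy_timecounter above.
 *
 *	static u_int xyzzy_get_timecount(struct timecounter *);
 *
 *	static struct timecounter xyzzy_timecounter = {
 *		xyzzy_get_timecount,	// tc_get_timecount
 *		0,			// tc_poll_pps (none)
 *		0xffffffff,		// tc_counter_mask: 32 bit counter
 *		25000000,		// tc_frequency: 25 MHz
 *		"xyzzy",		// tc_name
 *		100,			// tc_quality
 *	};
 *
 *	tc_init(&xyzzy_timecounter);
 */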

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	nsetclock.ev_count++;
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n",
		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}
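
/*
 * In other words: the new boot time is the requested UTC time minus the
 * current uptime (boottime = ts - uptime), and the old UTC time logged
 * above is the old boot time plus that same uptime.  If the system has
 * been up for 100 seconds and the clock is set to T, boottimebin becomes
 * T - 100 and subsequent bintime() calls return T plus whatever uptime
 * accumulates from here on.
 */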

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 * If NTP is not compiled in ntp_update_second still calculates
	 * the adjustment resulting from adjtime() calls.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;

		printf("timecounter: selected timecounter \"%s\" frequency %ju Hz quality %d\n",
		    timecounter->tc_name, (uintmax_t)timecounter->tc_frequency,
		    timecounter->tc_quality);
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;
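
	/*
	 * Worked example: for a 1 MHz counter with th_adjustment == 0,
	 * th_scale ends up as 2 * (2^63 / 10^6), i.e. 2^64 / 10^6, so each
	 * counter tick adds exactly one microsecond worth of 64 bit binary
	 * fraction in bintime_addx() above.  The adjustment term uses
	 * 2199/512 = 4.294921875 as an approximation of 2^32/10^9 =
	 * 4.294967296, which is where the roughly 10PPM-of-the-adjustment
	 * undercompensation mentioned above comes from.
	 */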

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}

#ifdef __FreeBSD__
/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);

	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");

/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	char buf[32], *spc;
	struct timecounter *tc;
	int error;

	spc = "";
	error = 0;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		sprintf(buf, "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		error = SYSCTL_OUT(req, buf, strlen(buf));
		spc = " ";
	}
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "");
#endif /* __FreeBSD__ */

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	pps_info_t *pipi;
#ifdef PPS_SYNC
	int *epi;
#endif

	KASSERT(pps != NULL);	/* XXX ("NULL pps pointer in pps_ioctl") */
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		pipi = (pps_info_t *)data;
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		*pipi = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		epi = (int *)data;
		/* XXX Only root should be able to do this */
		if (*epi & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = *epi;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EPASSTHROUGH);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL);	/* XXX ("NULL pps pointer in pps_capture") */
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	KASSERT(pps != NULL);	/* XXX ("NULL pps pointer in pps_event") */
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		u_int64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (u_int64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}
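
/*
 * Usage sketch (illustrative; the driver and its "sc_pps" member are
 * hypothetical): a driver exposing a PPS source typically sets up
 * pps->ppscap and calls pps_init() once, then from its interrupt handler
 * captures the counter as early as possible and leaves the heavier
 * conversion to pps_event():
 *
 *	sc->sc_pps.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
 *	pps_init(&sc->sc_pps);
 *
 *	(in the interrupt handler, on a pulse edge:)
 *	pps_capture(&sc->sc_pps);
 *	pps_event(&sc->sc_pps, PPS_CAPTUREASSERT);
 *
 * pps_ioctl() is then called from the driver's ioctl entry point to serve
 * the RFC 2783 requests.
 */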

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
#ifdef __FreeBSD__
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");
#endif /* __FreeBSD__ */

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

void
inittimecounter(void)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
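
	/*
	 * For example, hz = 100 gives tc_tick = 1, so tc_windup() runs on
	 * every hardclock tick, i.e. every 10 ms; hz = 8192 gives
	 * tc_tick = 8, i.e. roughly every 0.976 ms.
	 */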
	p = (tc_tick * 1000000) / hz;
	printf("timecounter: Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}

#ifdef __FreeBSD__
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL)
#endif /* __FreeBSD__ */
#endif /* __HAVE_TIMECOUNTER */