/*	$NetBSD: kern_tc.c,v 1.6 2006/08/05 21:59:40 bjh21 Exp $	*/

/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ---------------------------------------------------------------------------
 */

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.6 2006/08/05 21:59:40 bjh21 Exp $");

#include "opt_ntp.h"

#include <sys/param.h>
#ifdef __HAVE_TIMECOUNTER	/* XXX */
#include <sys/kernel.h>
#include <sys/reboot.h>	/* XXX just to get AB_VERBOSE */
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/evcnt.h>
#include <sys/kauth.h>

/*
 * maximum name length for TC names in sysctl interface
 */
#define MAX_TCNAMELEN 64

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
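
/*
 * Editorial sketch, not part of the original kern_tc.c: how a hypothetical
 * clock driver would describe a free-running 32-bit, 33.33 MHz counter.
 * All names here are made up; the positional initializer mirrors
 * dummy_timecounter above (get_timecount, poll_pps, counter_mask,
 * frequency, name, quality).  The driver's attach routine would finish by
 * calling tc_init(&example_timecounter), defined later in this file.
 */
static volatile uint32_t *example_counter_reg;	/* hypothetical mapped device register */

static u_int
example_get_timecount(struct timecounter *tc)
{

	/* Return the raw hardware count; masking is done by tc_delta(). */
	return (*example_counter_reg);
}

static struct timecounter example_timecounter = {
	example_get_timecount, 0, 0xffffffffu, 33333333, "example", 100
};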

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter *th_counter;
	int64_t th_adjustment;
	u_int64_t th_scale;
	u_int th_offset_count;
	struct bintime th_offset;
	struct timeval th_microtime;
	struct timespec th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int th_generation;
	struct timehands *th_next;
};

static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;
time_t time_uptime = 1;

static struct bintime timebasebin;

static int timestepwarnings;

#ifdef __FreeBSD__
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "");
#endif /* __FreeBSD__ */

/*
 * sysctl helper routine for kern.timecounter.hardware
 */
static int
sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error;
	char newname[MAX_TCNAMELEN];
	struct timecounter *newtc, *tc;

	tc = timecounter;

	strlcpy(newname, tc->tc_name, sizeof(newname));

	node = *rnode;
	node.sysctl_data = newname;
	node.sysctl_size = sizeof(newname);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error ||
	    newp == NULL ||
	    strncmp(newname, tc->tc_name, sizeof(newname)) == 0)
		return error;

	if (l != NULL && (error = kauth_authorize_generic(l->l_cred,
	    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
		return (error);

	/* XXX locking */

	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/* XXX unlock */

		return (0);
	}

	/* XXX unlock */

	return (EINVAL);
}

static int
sysctl_kern_timecounter_choice(SYSCTLFN_ARGS)
{
	char buf[48];
	char *where = oldp;
	const char *spc;
	struct timecounter *tc;
	size_t needed, left, slen;
	int error;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	spc = "";
	error = 0;
	needed = 0;
	left = *oldlenp;

	/* XXX locking */

	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		if (where == NULL) {
			needed += sizeof(buf);	/* be conservative */
		} else {
			slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64
			    " Hz)", spc, tc->tc_name, tc->tc_quality,
			    tc->tc_frequency);
			if (left < slen + 1)
				break;
			/* XXX use sysctl_copyout? (from sysctl_hw_disknames) */
			error = copyout(buf, where, slen + 1);
			spc = " ";
			where += slen;
			needed += slen;
			left -= slen;
		}
	}

	/* XXX unlock */

	*oldlenp = needed;
	return (error);
}

SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup")
{
	const struct sysctlnode *node;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "timecounter",
	    SYSCTL_DESCR("time counter information"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node != NULL) {
		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRING, "choice",
		    SYSCTL_DESCR("available counters"),
		    sysctl_kern_timecounter_choice, 0, NULL, 0,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_STRING, "hardware",
		    SYSCTL_DESCR("currently active time counter"),
		    sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_INT, "timestepwarnings",
		    SYSCTL_DESCR("log time steps"),
		    NULL, 0, &timestepwarnings, 0,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}
}
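
/*
 * Editorial usage note, not in the original file: the nodes created above
 * appear in userland as kern.timecounter.choice (read-only list of the
 * available counters with their qualities and frequencies) and
 * kern.timecounter.hardware (read-write name of the active counter), so
 * with the usual sysctl(8) interface one would list candidates with
 * "sysctl kern.timecounter.choice" and switch counters with
 * "sysctl -w kern.timecounter.hardware=<name>".
 */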

#define TC_STATS(name) \
static struct evcnt n##name = \
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name); \
EVCNT_ATTACH_STATIC(n##name)

TC_STATS(binuptime);    TC_STATS(nanouptime);    TC_STATS(microuptime);
TC_STATS(bintime);      TC_STATS(nanotime);      TC_STATS(microtime);
TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime);
TC_STATS(getbintime);   TC_STATS(getnanotime);   TC_STATS(getmicrotime);
TC_STATS(setclock);

#undef TC_STATS

static void tc_windup(void);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) -
	    th->th_offset_count) & tc->tc_counter_mask);
}
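
/*
 * Worked example (editorial, not in the original file): with a 16-bit
 * counter (tc_counter_mask == 0xffff), th_offset_count == 0xfff0 and a
 * current hardware reading of 0x0010, the unsigned subtraction yields
 * 0xffff0020, and masking with 0xffff gives 0x20 -- the correct elapsed
 * count across the hardware wrap, provided no more than one full counter
 * period passes between windups.
 */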

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime.ev_count++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime.ev_count++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime.ev_count++;
	binuptime(bt);
	bintime_add(bt, &timebasebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime.ev_count++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime.ev_count++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &timebasebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}
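
/*
 * Editorial usage note, not in the original file: the non-"get" functions
 * above read the hardware counter and are as precise as the active
 * timecounter allows, while the "get" variants only copy the values cached
 * at the last tc_windup() and are therefore cheaper but no finer-grained
 * than the windup interval.  A hypothetical caller timing a short
 * operation would prefer the precise, monotonic form:
 *
 *	struct timespec start, end;
 *
 *	nanouptime(&start);
 *	(do the work being measured)
 *	nanouptime(&end);
 *	timespecsub(&end, &start, &end);	(elapsed uptime)
 */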

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;
	char freqstr[9];

	humanize_number(freqstr, sizeof(freqstr), tc->tc_frequency,
	    "Hz", 1000);
	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("timecounter: Timecounter \"%s\" frequency %s",
			    tc->tc_name, freqstr);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("timecounter: Timecounter \"%s\" frequency %s "
		    "quality %d\n", tc->tc_name, freqstr, tc->tc_quality);
	}

	/* XXX locking */
	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
	tc_windup();
}

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	nsetclock.ev_count++;
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &timebasebin);
	timebasebin = bt;

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n",
		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 * If NTP is not compiled in, ntp_update_second still calculates
	 * the adjustment resulting from adjtime() calls.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &timebasebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			timebasebin.sec += bt.sec - t;
	}

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		char freqstr[9];
		th->th_counter = timecounter;
		th->th_offset_count = ncount;

		humanize_number(freqstr, sizeof(freqstr),
		    timecounter->tc_frequency, "Hz", 1000);
		printf("timecounter: selected timecounter \"%s\" "
		    "frequency %s quality %d\n",
		    timecounter->tc_name, freqstr, timecounter->tc_quality);
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;
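	/*
	 * Worked example (editorial, not in the original file): with a
	 * 1 MHz counter and th_adjustment == 0, scale is 2^63 / 10^6 and
	 * the final doubling makes th_scale about 2^64 / 10^6, i.e. each
	 * counter tick adds exactly one microsecond's worth of 2^-64
	 * second fractions to th_offset.  The adjustment term is divided
	 * by 1024 and multiplied by 2199 here, then doubled with the rest
	 * of scale, so its effective factor is 2199/512 ~= 4.294922,
	 * approximating the ideal 2^32 / 10^9 ~= 4.294967 -- the ~10PPM
	 * undercompensation mentioned in the comment above.
	 */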

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}

#ifdef __FreeBSD__
/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);

	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");


/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	char buf[32], *spc;
	struct timecounter *tc;
	int error;

	spc = "";
	error = 0;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		sprintf(buf, "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		error = SYSCTL_OUT(req, buf, strlen(buf));
		spc = " ";
	}
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "");
#endif /* __FreeBSD__ */

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	pps_info_t *pipi;
#ifdef PPS_SYNC
	int *epi;
#endif

	KASSERT(pps != NULL);	/* XXX ("NULL pps pointer in pps_ioctl") */
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		pipi = (pps_info_t *)data;
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		*pipi = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		epi = (int *)data;
		/* XXX Only root should be able to do this */
		if (*epi & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = *epi;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EPASSTHROUGH);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL);	/* XXX ("NULL pps pointer in pps_capture") */
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	KASSERT(pps != NULL);	/* XXX ("NULL pps pointer in pps_event") */
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &timebasebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		u_int64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (u_int64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}
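
/*
 * Editorial sketch, not part of the original file: how a hypothetical
 * driver would typically use the hooks above from its interrupt path.
 * pps_capture() is cheap and is called as close to the signal edge as
 * possible; pps_event() does the heavier conversion and may run slightly
 * later.  PPS_CAPTUREASSERT and PPS_CAPTURECLEAR distinguish the two
 * edges of the pulse.
 */
static void
example_pps_intr(struct pps_state *pps, int asserted)
{

	pps_capture(pps);
	pps_event(pps, asserted ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR);
}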

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
#ifdef __FreeBSD__
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");
#endif /* __FreeBSD__ */

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}
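
/*
 * Editorial note, not in the original file: tc_ticktock() is meant to be
 * driven from the periodic clock interrupt path (hardclock()), so with the
 * tc_tick value computed in inittimecounter() below, tc_windup() runs
 * roughly once per millisecond, or once per tick when hz <= 1000.
 */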

void
inittimecounter(void)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	printf("timecounter: Timecounters tick every %d.%03u msec\n",
	    p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}
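
/*
 * Worked example (editorial, not in the original file): with hz == 100,
 * tc_tick is 1 and the message above reports 10.000 msec between windups;
 * with hz == 1024, tc_tick is (1024 + 500) / 1000 == 1 and the interval is
 * 1000000 / 1024 ~= 976, printed as 0.976 msec.
 */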

#ifdef __FreeBSD__
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL)
#endif /* __FreeBSD__ */
#endif /* __HAVE_TIMECOUNTER */
