/* $NetBSD: kern_tc.c,v 1.24 2007/11/15 23:16:55 ad Exp $ */

/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ---------------------------------------------------------------------------
 */

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.24 2007/11/15 23:16:55 ad Exp $");

#include "opt_ntp.h"

#include <sys/param.h>
#ifdef __HAVE_TIMECOUNTER	/* XXX */
#include <sys/kernel.h>
#include <sys/reboot.h>	/* XXX just to get AB_VERBOSE */
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/evcnt.h>
#include <sys/kauth.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000, NULL, NULL,
};
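
/*
 * The positional initializer above fills in, in the order declared in
 * <sys/timetc.h>: tc_get_timecount, tc_poll_pps (none), tc_counter_mask
 * (~0u, a full 32 bit counter), tc_frequency (a nominal 1 MHz), tc_name,
 * tc_quality (-1000000, so the dummy is never selected automatically),
 * and the remaining private/link pointers.  The counter simply increments
 * on every read, so it is monotonic but bears no relation to real time.
 */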

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	u_int64_t		th_scale;
	u_int	 		th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

static struct timehands th0;
static struct timehands th9 = { .th_next = &th0, };
static struct timehands th8 = { .th_next = &th9, };
static struct timehands th7 = { .th_next = &th8, };
static struct timehands th6 = { .th_next = &th7, };
static struct timehands th5 = { .th_next = &th6, };
static struct timehands th4 = { .th_next = &th5, };
static struct timehands th3 = { .th_next = &th4, };
static struct timehands th2 = { .th_next = &th3, };
static struct timehands th1 = { .th_next = &th2, };
static struct timehands th0 = {
	.th_counter = &dummy_timecounter,
	.th_scale = (uint64_t)-1 / 1000000,
	.th_offset = { .sec = 1, .frac = 0 },
	.th_generation = 1,
	.th_next = &th1,
};
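
/*
 * th0..th9 form a ten entry ring: tc_windup() always prepares the next
 * entry while readers keep using the current one, so a reader has roughly
 * ten windups' worth of time to finish before its timehands slot is
 * recycled.  th0 starts out on the dummy counter with
 * th_scale = (uint64_t)-1 / 1000000, i.e. approximately 2^64 / 10^6:
 * one dummy tick per microsecond, expressed as a 64 bit binary fraction
 * of a second.
 */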

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;
time_t time_uptime = 1;

static struct bintime timebasebin;

static int timestepwarnings;

#ifdef __FreeBSD__
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "");
#endif /* __FreeBSD__ */

/*
 * sysctl helper routine for kern.timecounter.hardware
 */
static int
sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error;
	char newname[MAX_TCNAMELEN];
	struct timecounter *newtc, *tc;

	tc = timecounter;

	strlcpy(newname, tc->tc_name, sizeof(newname));

	node = *rnode;
	node.sysctl_data = newname;
	node.sysctl_size = sizeof(newname);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error ||
	    newp == NULL ||
	    strncmp(newname, tc->tc_name, sizeof(newname)) == 0)
		return error;

	if (l != NULL && (error = kauth_authorize_generic(l->l_cred,
	    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
		return (error);

	if (!cold)
		mutex_enter(&time_lock);
	error = EINVAL;
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;
		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);
		timecounter = newtc;
		error = 0;
		break;
	}
	if (!cold)
		mutex_exit(&time_lock);
	return error;
}

static int
sysctl_kern_timecounter_choice(SYSCTLFN_ARGS)
{
	char buf[MAX_TCNAMELEN+48];
	char *where = oldp;
	const char *spc;
	struct timecounter *tc;
	size_t needed, left, slen;
	int error;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	spc = "";
	error = 0;
	needed = 0;
	left = *oldlenp;

	mutex_enter(&time_lock);
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		if (where == NULL) {
			needed += sizeof(buf);  /* be conservative */
		} else {
			slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64
					" Hz)", spc, tc->tc_name, tc->tc_quality,
					tc->tc_frequency);
			if (left < slen + 1)
				break;
			/* XXX use sysctl_copyout? (from sysctl_hw_disknames) */
			/* XXX copyout with held lock. */
			error = copyout(buf, where, slen + 1);
			spc = " ";
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	mutex_exit(&time_lock);

	*oldlenp = needed;
	return (error);
}

SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup")
{
	const struct sysctlnode *node;

	sysctl_createv(clog, 0, NULL, &node,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "timecounter",
		       SYSCTL_DESCR("time counter information"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node != NULL) {
		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT,
			       CTLTYPE_STRING, "choice",
			       SYSCTL_DESCR("available counters"),
			       sysctl_kern_timecounter_choice, 0, NULL, 0,
			       CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			       CTLTYPE_STRING, "hardware",
			       SYSCTL_DESCR("currently active time counter"),
			       sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN,
			       CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			       CTLTYPE_INT, "timestepwarnings",
			       SYSCTL_DESCR("log time steps"),
			       NULL, 0, &timestepwarnings, 0,
			       CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}
}
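
/*
 * The nodes above surface as kern.timecounter.choice (a read-only list of
 * "name(q=quality, f=frequency Hz)" entries), kern.timecounter.hardware
 * (read-write, the name of the counter in use) and
 * kern.timecounter.timestepwarnings.  So, for example, something along
 * the lines of
 *
 *	sysctl kern.timecounter.choice
 *	sysctl -w kern.timecounter.hardware=TSC
 *
 * (the available names depend on the machine) lists the counters and
 * switches to one of them at run time.
 */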

#define	TC_STATS(name)							\
static struct evcnt n##name =						\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name);	\
EVCNT_ATTACH_STATIC(n##name)

TC_STATS(binuptime);    TC_STATS(nanouptime);    TC_STATS(microuptime);
TC_STATS(bintime);      TC_STATS(nanotime);      TC_STATS(microtime);
TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime);
TC_STATS(getbintime);   TC_STATS(getnanotime);   TC_STATS(getmicrotime);
TC_STATS(setclock);

#undef TC_STATS

static void tc_windup(void);

/*
 * Return the difference between the timehands' counter value now and what
 * it was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) -
		 th->th_offset_count) & tc->tc_counter_mask);
}
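
/*
 * The subtraction is done modulo the counter width, so the delta comes out
 * right even if the hardware counter has wrapped since tc_windup() sampled
 * it, as long as it has wrapped at most once.  For example, with a 24 bit
 * counter (tc_counter_mask == 0xffffff), an offset_count of 0xfffff0 and a
 * current reading of 0x000010 yield (0x10 - 0xfffff0) & 0xffffff == 0x20
 * ticks.
 */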

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/timevar.h> for a description of these 12 functions.
 */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}
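
/*
 * th_scale is approximately 2^64 divided by the counter frequency, so
 * th_scale * tc_delta(th) is the time elapsed since the last windup
 * expressed in units of 2^-64 seconds, which is exactly the fractional
 * format bintime_addx() expects.  For example, at a 1 MHz counter
 * frequency a delta of 500000 ticks contributes roughly 2^63, i.e. half
 * a second.
 */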

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime.ev_count++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime.ev_count++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime.ev_count++;
	binuptime(bt);
	bintime_add(bt, &timebasebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime.ev_count++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime.ev_count++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &timebasebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime.ev_count++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;
	int s;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		aprint_verbose(
		    "timecounter: Timecounter \"%s\" frequency %ju Hz",
		    tc->tc_name, (uintmax_t)tc->tc_frequency);
		aprint_verbose(" -- Insufficient hz, needs at least %u\n", u);
	} else if (tc->tc_quality >= 0 || bootverbose) {
		aprint_verbose(
		    "timecounter: Timecounter \"%s\" frequency %ju Hz "
		    "quality %d\n", tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	mutex_enter(&time_lock);
	s = splsched();
	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality >= 0 && (tc->tc_quality > timecounter->tc_quality ||
	    (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency > timecounter->tc_frequency))) {
		(void)tc->tc_get_timecount(tc);
		(void)tc->tc_get_timecount(tc);
		timecounter = tc;
		tc_windup();
	}
	splx(s);
	mutex_exit(&time_lock);
}
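
/*
 * A minimal registration, sketched for illustration only (the names are
 * invented; a real driver reads its own hardware in the get method):
 *
 *	static u_int
 *	examplecounter_get(struct timecounter *tc)
 *	{
 *		return read_the_hardware_counter();
 *	}
 *
 *	static struct timecounter examplecounter = {
 *		examplecounter_get, NULL, 0xffffffff, 25000000,
 *		"examplecounter", 100, NULL, NULL,
 *	};
 *
 *	tc_init(&examplecounter);
 *
 * tc_init() prints the counter, links it onto the timecounters list and,
 * if its quality beats that of the counter currently in use, makes it
 * live via tc_windup().
 */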

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	nsetclock.ev_count++;
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &timebasebin);
	timebasebin = bt;

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n",
		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}
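
/*
 * In other words, timebasebin always holds "UTC at boot": the new
 * timebasebin is the requested UTC time minus the current uptime, and
 * bintime()/nanotime()/microtime() recover UTC as uptime + timebasebin.
 * For example, stepping the clock to 1000000100 with an uptime of 100
 * seconds sets timebasebin.sec to 1000000000, and a reading taken one
 * second later yields 101 + 1000000000 = 1000000101.
 */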

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i, s_update;
	time_t t;

	s_update = 0;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.  Ensure global
	 * visibility of the generation before proceeding.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	mb_write();
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 * If NTP is not compiled in, ntp_update_second still calculates
	 * the adjustment resulting from adjtime() calls.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &timebasebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		s_update = 1;
		if (bt.sec != t)
			timebasebin.sec += bt.sec - t;
	}

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
		s_update = 1;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	if (s_update) {
		scale = (u_int64_t)1 << 63;
		scale += (th->th_adjustment / 1024) * 2199;
		scale /= th->th_counter->tc_frequency;
		th->th_scale = scale * 2;
	}
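
	/*
	 * Spelled out: 2199/512 = 4.294921875 versus the exact factor
	 * 2^32/10^9 = 4.294967296, which is where the roughly 10PPM
	 * (about 10.6PPM) undercompensation quoted above comes from.
	 * The code divides by 1024 and multiplies by 2199 while scale is
	 * still in units of 2^-63; the final "scale * 2" restores the
	 * 2^-64 units and hence the effective factor of 2199/512.
	 */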
	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.  Ensure
	 * changes are globally visible before changing.
	 */
	if (++ogen == 0)
		ogen = 1;
	mb_write();
	th->th_generation = ogen;

	/*
	 * Go live with the new struct timehands.  Ensure changes are
	 * globally visible before changing.
	 */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	mb_write();
	timehands = th;

	/*
	 * Force users of the old timehand to move on.  This is
	 * necessary for MP systems; we need to ensure that the
	 * consumers will move away from the old timehand before
	 * we begin updating it again when we eventually wrap
	 * around.
	 */
	if (++tho->th_generation == 0)
		tho->th_generation = 1;
}

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, void *data, struct pps_state *pps)
{
	pps_params_t *app;
	pps_info_t *pipi;
#ifdef PPS_SYNC
	int *epi;
#endif

	KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_ioctl") */
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		pipi = (pps_info_t *)data;
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		*pipi = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		epi = (int *)data;
		/* XXX Only root should be able to do this */
		if (*epi & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = *epi;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EPASSTHROUGH);
	}
}
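
/*
 * A PPS capable driver is expected to call pps_init() once, to pass the
 * PPS-API ioctls from userland through pps_ioctl(), and, on each PPS edge
 * interrupt, to call pps_capture() as early as possible followed by
 * pps_event() with PPS_CAPTUREASSERT or PPS_CAPTURECLEAR.  A rough sketch
 * of the interrupt side (names invented for illustration):
 *
 *	void
 *	examplepps_intr(struct examplepps_softc *sc)
 *	{
 *		pps_capture(&sc->sc_pps_state);
 *		pps_event(&sc->sc_pps_state, PPS_CAPTUREASSERT);
 *	}
 */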

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_capture") */
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_event") */
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS processing until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &timebasebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		u_int64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (u_int64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

void
inittimecounter(void)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	aprint_verbose("timecounter: Timecounters tick every %d.%03u msec\n",
	    p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}
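
/*
 * For example, with hz = 100 this gives tc_tick = 1 and p = 10000, i.e.
 * "tick every 10.000 msec"; with hz = 8000 it gives tc_tick = 8 and
 * p = 1000, so tc_windup() still runs about once per millisecond.
 */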

#endif /* __HAVE_TIMECOUNTER */