kern_tc.c revision 1.3.4.9
      1  1.3.4.9  yamt /* $NetBSD: kern_tc.c,v 1.3.4.9 2008/02/11 14:59:58 yamt Exp $ */
      2  1.3.4.2  yamt 
      3  1.3.4.2  yamt /*-
      4  1.3.4.2  yamt  * ----------------------------------------------------------------------------
      5  1.3.4.2  yamt  * "THE BEER-WARE LICENSE" (Revision 42):
      6  1.3.4.2  yamt  * <phk (at) FreeBSD.ORG> wrote this file.  As long as you retain this notice you
      7  1.3.4.2  yamt  * can do whatever you want with this stuff. If we meet some day, and you think
      8  1.3.4.2  yamt  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
      9  1.3.4.2  yamt  * ---------------------------------------------------------------------------
     10  1.3.4.2  yamt  */
     11  1.3.4.2  yamt 
     12  1.3.4.2  yamt #include <sys/cdefs.h>
     13  1.3.4.2  yamt /* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
     14  1.3.4.9  yamt __KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.3.4.9 2008/02/11 14:59:58 yamt Exp $");
     15  1.3.4.2  yamt 
     16  1.3.4.2  yamt #include "opt_ntp.h"
     17  1.3.4.2  yamt 
     18  1.3.4.2  yamt #include <sys/param.h>
     19  1.3.4.2  yamt #include <sys/kernel.h>
     20  1.3.4.2  yamt #include <sys/reboot.h>	/* XXX just to get AB_VERBOSE */
     21  1.3.4.2  yamt #include <sys/sysctl.h>
     22  1.3.4.2  yamt #include <sys/syslog.h>
     23  1.3.4.2  yamt #include <sys/systm.h>
     24  1.3.4.2  yamt #include <sys/timepps.h>
     25  1.3.4.2  yamt #include <sys/timetc.h>
     26  1.3.4.2  yamt #include <sys/timex.h>
     27  1.3.4.2  yamt #include <sys/evcnt.h>
     28  1.3.4.2  yamt #include <sys/kauth.h>
     29  1.3.4.7  yamt #include <sys/mutex.h>
     30  1.3.4.7  yamt #include <sys/atomic.h>
     31  1.3.4.2  yamt 
     32  1.3.4.2  yamt /*
     33  1.3.4.2  yamt  * A large step happens on boot.  This constant detects such steps.
     34  1.3.4.2  yamt  * It is relatively small so that ntp_update_second gets called enough
     35  1.3.4.2  yamt  * in the typical 'missed a couple of seconds' case, but doesn't loop
     36  1.3.4.2  yamt  * forever when the time step is large.
     37  1.3.4.2  yamt  */
     38  1.3.4.2  yamt #define LARGE_STEP	200
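/*
 * For example: if the TOD clock steps the time forward by an hour at
 * boot (3600 seconds, far above LARGE_STEP), the loop in tc_windup()
 * is clamped to just two ntp_update_second() calls, whereas a
 * 30-second stall still gets one call per missed second.
 */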
     39  1.3.4.2  yamt 
     40  1.3.4.2  yamt /*
     41  1.3.4.2  yamt  * Implement a dummy timecounter which we can use until we get a real one
     42  1.3.4.2  yamt  * in the air.  This allows the console and other early stuff to use
     43  1.3.4.2  yamt  * time services.
     44  1.3.4.2  yamt  */
     45  1.3.4.2  yamt 
     46  1.3.4.2  yamt static u_int
     47  1.3.4.2  yamt dummy_get_timecount(struct timecounter *tc)
     48  1.3.4.2  yamt {
     49  1.3.4.2  yamt 	static u_int now;
     50  1.3.4.2  yamt 
     51  1.3.4.2  yamt 	return (++now);
     52  1.3.4.2  yamt }
     53  1.3.4.2  yamt 
     54  1.3.4.2  yamt static struct timecounter dummy_timecounter = {
     55  1.3.4.3  yamt 	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000, NULL, NULL,
     56  1.3.4.2  yamt };
     57  1.3.4.2  yamt 
     58  1.3.4.2  yamt struct timehands {
     59  1.3.4.2  yamt 	/* These fields must be initialized by the driver. */
     60  1.3.4.2  yamt 	struct timecounter	*th_counter;
     61  1.3.4.2  yamt 	int64_t			th_adjustment;
     62  1.3.4.2  yamt 	u_int64_t		th_scale;
     63  1.3.4.2  yamt 	u_int	 		th_offset_count;
     64  1.3.4.2  yamt 	struct bintime		th_offset;
     65  1.3.4.2  yamt 	struct timeval		th_microtime;
     66  1.3.4.2  yamt 	struct timespec		th_nanotime;
     67  1.3.4.2  yamt 	/* Fields not to be copied in tc_windup start with th_generation. */
     68  1.3.4.2  yamt 	volatile u_int		th_generation;
     69  1.3.4.2  yamt 	struct timehands	*th_next;
     70  1.3.4.2  yamt };
     71  1.3.4.2  yamt 
     72  1.3.4.2  yamt static struct timehands th0;
     73  1.3.4.3  yamt static struct timehands th9 = { .th_next = &th0, };
     74  1.3.4.3  yamt static struct timehands th8 = { .th_next = &th9, };
     75  1.3.4.3  yamt static struct timehands th7 = { .th_next = &th8, };
     76  1.3.4.3  yamt static struct timehands th6 = { .th_next = &th7, };
     77  1.3.4.3  yamt static struct timehands th5 = { .th_next = &th6, };
     78  1.3.4.3  yamt static struct timehands th4 = { .th_next = &th5, };
     79  1.3.4.3  yamt static struct timehands th3 = { .th_next = &th4, };
     80  1.3.4.3  yamt static struct timehands th2 = { .th_next = &th3, };
     81  1.3.4.3  yamt static struct timehands th1 = { .th_next = &th2, };
     82  1.3.4.2  yamt static struct timehands th0 = {
     83  1.3.4.3  yamt 	.th_counter = &dummy_timecounter,
     84  1.3.4.3  yamt 	.th_scale = (uint64_t)-1 / 1000000,
     85  1.3.4.3  yamt 	.th_offset = { .sec = 1, .frac = 0 },
     86  1.3.4.3  yamt 	.th_generation = 1,
     87  1.3.4.3  yamt 	.th_next = &th1,
     88  1.3.4.2  yamt };
     89  1.3.4.2  yamt 
     90  1.3.4.2  yamt static struct timehands *volatile timehands = &th0;
     91  1.3.4.2  yamt struct timecounter *timecounter = &dummy_timecounter;
     92  1.3.4.2  yamt static struct timecounter *timecounters = &dummy_timecounter;
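/*
 * th0..th9 form a fixed ring through their th_next pointers.  tc_windup()
 * always prepares the next element of the ring and only then publishes it
 * by moving the timehands pointer, so readers that follow the generation
 * protocol never observe a half-updated element.  "timecounter" is the
 * hardware counter currently in use; "timecounters" heads the list of all
 * registered counters.
 */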
     93  1.3.4.2  yamt 
     94  1.3.4.2  yamt time_t time_second = 1;
     95  1.3.4.2  yamt time_t time_uptime = 1;
     96  1.3.4.2  yamt 
     97  1.3.4.3  yamt static struct bintime timebasebin;
     98  1.3.4.2  yamt 
     99  1.3.4.2  yamt static int timestepwarnings;
    100  1.3.4.2  yamt 
    101  1.3.4.7  yamt extern kmutex_t time_lock;
    102  1.3.4.8  yamt static kmutex_t tc_windup_lock;
    103  1.3.4.7  yamt 
    104  1.3.4.2  yamt #ifdef __FreeBSD__
    105  1.3.4.2  yamt SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    106  1.3.4.2  yamt     &timestepwarnings, 0, "");
    107  1.3.4.2  yamt #endif /* __FreeBSD__ */
    108  1.3.4.2  yamt 
    109  1.3.4.2  yamt /*
     110  1.3.4.8  yamt  * sysctl helper routine for kern.timecounter.hardware
    111  1.3.4.2  yamt  */
    112  1.3.4.2  yamt static int
    113  1.3.4.2  yamt sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS)
    114  1.3.4.2  yamt {
    115  1.3.4.2  yamt 	struct sysctlnode node;
    116  1.3.4.2  yamt 	int error;
    117  1.3.4.2  yamt 	char newname[MAX_TCNAMELEN];
    118  1.3.4.2  yamt 	struct timecounter *newtc, *tc;
    119  1.3.4.2  yamt 
    120  1.3.4.2  yamt 	tc = timecounter;
    121  1.3.4.2  yamt 
    122  1.3.4.2  yamt 	strlcpy(newname, tc->tc_name, sizeof(newname));
    123  1.3.4.2  yamt 
    124  1.3.4.2  yamt 	node = *rnode;
    125  1.3.4.2  yamt 	node.sysctl_data = newname;
    126  1.3.4.2  yamt 	node.sysctl_size = sizeof(newname);
    127  1.3.4.2  yamt 
    128  1.3.4.2  yamt 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    129  1.3.4.2  yamt 
    130  1.3.4.2  yamt 	if (error ||
    131  1.3.4.2  yamt 	    newp == NULL ||
    132  1.3.4.2  yamt 	    strncmp(newname, tc->tc_name, sizeof(newname)) == 0)
    133  1.3.4.2  yamt 		return error;
    134  1.3.4.2  yamt 
    135  1.3.4.7  yamt 	if (l != NULL && (error = kauth_authorize_system(l->l_cred,
    136  1.3.4.7  yamt 	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS, newname,
    137  1.3.4.7  yamt 	    NULL, NULL)) != 0)
    138  1.3.4.2  yamt 		return (error);
    139  1.3.4.2  yamt 
    140  1.3.4.7  yamt 	if (!cold)
    141  1.3.4.7  yamt 		mutex_enter(&time_lock);
    142  1.3.4.7  yamt 	error = EINVAL;
    143  1.3.4.2  yamt 	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
    144  1.3.4.2  yamt 		if (strcmp(newname, newtc->tc_name) != 0)
    145  1.3.4.2  yamt 			continue;
    146  1.3.4.2  yamt 		/* Warm up new timecounter. */
    147  1.3.4.2  yamt 		(void)newtc->tc_get_timecount(newtc);
    148  1.3.4.2  yamt 		(void)newtc->tc_get_timecount(newtc);
    149  1.3.4.2  yamt 		timecounter = newtc;
    150  1.3.4.7  yamt 		error = 0;
    151  1.3.4.7  yamt 		break;
    152  1.3.4.2  yamt 	}
    153  1.3.4.7  yamt 	if (!cold)
    154  1.3.4.7  yamt 		mutex_exit(&time_lock);
    155  1.3.4.7  yamt 	return error;
    156  1.3.4.2  yamt }
    157  1.3.4.2  yamt 
    158  1.3.4.2  yamt static int
    159  1.3.4.2  yamt sysctl_kern_timecounter_choice(SYSCTLFN_ARGS)
    160  1.3.4.2  yamt {
    161  1.3.4.3  yamt 	char buf[MAX_TCNAMELEN+48];
    162  1.3.4.2  yamt 	char *where = oldp;
    163  1.3.4.2  yamt 	const char *spc;
    164  1.3.4.2  yamt 	struct timecounter *tc;
    165  1.3.4.2  yamt 	size_t needed, left, slen;
    166  1.3.4.2  yamt 	int error;
    167  1.3.4.2  yamt 
    168  1.3.4.2  yamt 	if (newp != NULL)
    169  1.3.4.2  yamt 		return (EPERM);
    170  1.3.4.2  yamt 	if (namelen != 0)
    171  1.3.4.2  yamt 		return (EINVAL);
    172  1.3.4.2  yamt 
    173  1.3.4.2  yamt 	spc = "";
    174  1.3.4.2  yamt 	error = 0;
    175  1.3.4.2  yamt 	needed = 0;
    176  1.3.4.2  yamt 	left = *oldlenp;
    177  1.3.4.2  yamt 
    178  1.3.4.7  yamt 	mutex_enter(&time_lock);
    179  1.3.4.2  yamt 	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
    180  1.3.4.2  yamt 		if (where == NULL) {
    181  1.3.4.2  yamt 			needed += sizeof(buf);  /* be conservative */
    182  1.3.4.2  yamt 		} else {
    183  1.3.4.2  yamt 			slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64
    184  1.3.4.2  yamt 					" Hz)", spc, tc->tc_name, tc->tc_quality,
    185  1.3.4.2  yamt 					tc->tc_frequency);
    186  1.3.4.2  yamt 			if (left < slen + 1)
    187  1.3.4.2  yamt 				break;
    188  1.3.4.2  yamt 			/* XXX use sysctl_copyout? (from sysctl_hw_disknames) */
    189  1.3.4.7  yamt 			/* XXX copyout with held lock. */
    190  1.3.4.2  yamt 			error = copyout(buf, where, slen + 1);
    191  1.3.4.2  yamt 			spc = " ";
    192  1.3.4.2  yamt 			where += slen;
    193  1.3.4.2  yamt 			needed += slen;
    194  1.3.4.2  yamt 			left -= slen;
    195  1.3.4.2  yamt 		}
    196  1.3.4.2  yamt 	}
    197  1.3.4.7  yamt 	mutex_exit(&time_lock);
    198  1.3.4.2  yamt 
    199  1.3.4.2  yamt 	*oldlenp = needed;
    200  1.3.4.2  yamt 	return (error);
    201  1.3.4.2  yamt }
    202  1.3.4.2  yamt 
    203  1.3.4.2  yamt SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup")
    204  1.3.4.2  yamt {
    205  1.3.4.2  yamt 	const struct sysctlnode *node;
    206  1.3.4.2  yamt 
    207  1.3.4.2  yamt 	sysctl_createv(clog, 0, NULL, &node,
    208  1.3.4.2  yamt 		       CTLFLAG_PERMANENT,
    209  1.3.4.2  yamt 		       CTLTYPE_NODE, "timecounter",
    210  1.3.4.2  yamt 		       SYSCTL_DESCR("time counter information"),
    211  1.3.4.2  yamt 		       NULL, 0, NULL, 0,
    212  1.3.4.2  yamt 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    213  1.3.4.2  yamt 
    214  1.3.4.2  yamt 	if (node != NULL) {
    215  1.3.4.2  yamt 		sysctl_createv(clog, 0, NULL, NULL,
    216  1.3.4.2  yamt 			       CTLFLAG_PERMANENT,
    217  1.3.4.2  yamt 			       CTLTYPE_STRING, "choice",
    218  1.3.4.2  yamt 			       SYSCTL_DESCR("available counters"),
    219  1.3.4.2  yamt 			       sysctl_kern_timecounter_choice, 0, NULL, 0,
    220  1.3.4.2  yamt 			       CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
    221  1.3.4.2  yamt 
    222  1.3.4.2  yamt 		sysctl_createv(clog, 0, NULL, NULL,
    223  1.3.4.2  yamt 			       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    224  1.3.4.2  yamt 			       CTLTYPE_STRING, "hardware",
    225  1.3.4.2  yamt 			       SYSCTL_DESCR("currently active time counter"),
    226  1.3.4.2  yamt 			       sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN,
    227  1.3.4.2  yamt 			       CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
    228  1.3.4.2  yamt 
    229  1.3.4.2  yamt 		sysctl_createv(clog, 0, NULL, NULL,
    230  1.3.4.2  yamt 			       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    231  1.3.4.2  yamt 			       CTLTYPE_INT, "timestepwarnings",
    232  1.3.4.2  yamt 			       SYSCTL_DESCR("log time steps"),
    233  1.3.4.2  yamt 			       NULL, 0, &timestepwarnings, 0,
    234  1.3.4.2  yamt 			       CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
    235  1.3.4.2  yamt 	}
    236  1.3.4.2  yamt }
    237  1.3.4.2  yamt 
    238  1.3.4.9  yamt #ifdef TC_COUNTERS
    239  1.3.4.2  yamt #define	TC_STATS(name)							\
    240  1.3.4.2  yamt static struct evcnt n##name =						\
    241  1.3.4.2  yamt     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name);	\
    242  1.3.4.2  yamt EVCNT_ATTACH_STATIC(n##name)
    243  1.3.4.2  yamt TC_STATS(binuptime);    TC_STATS(nanouptime);    TC_STATS(microuptime);
    244  1.3.4.2  yamt TC_STATS(bintime);      TC_STATS(nanotime);      TC_STATS(microtime);
    245  1.3.4.2  yamt TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime);
    246  1.3.4.2  yamt TC_STATS(getbintime);   TC_STATS(getnanotime);   TC_STATS(getmicrotime);
    247  1.3.4.2  yamt TC_STATS(setclock);
    248  1.3.4.9  yamt #define	TC_COUNT(var)	var.ev_count++
    249  1.3.4.2  yamt #undef TC_STATS
    250  1.3.4.9  yamt #else
    251  1.3.4.9  yamt #define	TC_COUNT(var)	/* nothing */
    252  1.3.4.9  yamt #endif	/* TC_COUNTERS */
    253  1.3.4.2  yamt 
    254  1.3.4.2  yamt static void tc_windup(void);
    255  1.3.4.2  yamt 
    256  1.3.4.2  yamt /*
    257  1.3.4.2  yamt  * Return the difference between the timehands' counter value now and what
    258  1.3.4.2  yamt  * was when we copied it to the timehands' offset_count.
    259  1.3.4.2  yamt  */
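/*
 * Example of the mask arithmetic, using a hypothetical 24-bit counter
 * (tc_counter_mask == 0xffffff): a current reading of 0x000010 taken after
 * th_offset_count was latched at 0xfffff0 gives
 * (0x000010 - 0xfffff0) & 0xffffff == 0x20, i.e. the 32 ticks that really
 * elapsed across the hardware wrap.
 */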
    260  1.3.4.2  yamt static __inline u_int
    261  1.3.4.2  yamt tc_delta(struct timehands *th)
    262  1.3.4.2  yamt {
    263  1.3.4.2  yamt 	struct timecounter *tc;
    264  1.3.4.2  yamt 
    265  1.3.4.2  yamt 	tc = th->th_counter;
    266  1.3.4.2  yamt 	return ((tc->tc_get_timecount(tc) -
    267  1.3.4.2  yamt 		 th->th_offset_count) & tc->tc_counter_mask);
    268  1.3.4.2  yamt }
    269  1.3.4.2  yamt 
    270  1.3.4.2  yamt /*
    271  1.3.4.2  yamt  * Functions for reading the time.  We have to loop until we are sure that
    272  1.3.4.2  yamt  * the timehands that we operated on was not updated under our feet.  See
    273  1.3.4.6  yamt  * the comment in <sys/timevar.h> for a description of these 12 functions.
    274  1.3.4.2  yamt  */
    275  1.3.4.2  yamt 
    276  1.3.4.2  yamt void
    277  1.3.4.2  yamt binuptime(struct bintime *bt)
    278  1.3.4.2  yamt {
    279  1.3.4.2  yamt 	struct timehands *th;
    280  1.3.4.2  yamt 	u_int gen;
    281  1.3.4.2  yamt 
    282  1.3.4.9  yamt 	TC_COUNT(nbinuptime);
    283  1.3.4.2  yamt 	do {
    284  1.3.4.2  yamt 		th = timehands;
    285  1.3.4.2  yamt 		gen = th->th_generation;
    286  1.3.4.2  yamt 		*bt = th->th_offset;
    287  1.3.4.2  yamt 		bintime_addx(bt, th->th_scale * tc_delta(th));
    288  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    289  1.3.4.2  yamt }
    290  1.3.4.2  yamt 
    291  1.3.4.2  yamt void
    292  1.3.4.2  yamt nanouptime(struct timespec *tsp)
    293  1.3.4.2  yamt {
    294  1.3.4.2  yamt 	struct bintime bt;
    295  1.3.4.2  yamt 
    296  1.3.4.9  yamt 	TC_COUNT(nnanouptime);
    297  1.3.4.2  yamt 	binuptime(&bt);
    298  1.3.4.2  yamt 	bintime2timespec(&bt, tsp);
    299  1.3.4.2  yamt }
    300  1.3.4.2  yamt 
    301  1.3.4.2  yamt void
    302  1.3.4.2  yamt microuptime(struct timeval *tvp)
    303  1.3.4.2  yamt {
    304  1.3.4.2  yamt 	struct bintime bt;
    305  1.3.4.2  yamt 
    306  1.3.4.9  yamt 	TC_COUNT(nmicrouptime);
    307  1.3.4.2  yamt 	binuptime(&bt);
    308  1.3.4.2  yamt 	bintime2timeval(&bt, tvp);
    309  1.3.4.2  yamt }
    310  1.3.4.2  yamt 
    311  1.3.4.2  yamt void
    312  1.3.4.2  yamt bintime(struct bintime *bt)
    313  1.3.4.2  yamt {
    314  1.3.4.2  yamt 
    315  1.3.4.9  yamt 	TC_COUNT(nbintime);
    316  1.3.4.2  yamt 	binuptime(bt);
    317  1.3.4.3  yamt 	bintime_add(bt, &timebasebin);
    318  1.3.4.2  yamt }
    319  1.3.4.2  yamt 
    320  1.3.4.2  yamt void
    321  1.3.4.2  yamt nanotime(struct timespec *tsp)
    322  1.3.4.2  yamt {
    323  1.3.4.2  yamt 	struct bintime bt;
    324  1.3.4.2  yamt 
    325  1.3.4.9  yamt 	TC_COUNT(nnanotime);
    326  1.3.4.2  yamt 	bintime(&bt);
    327  1.3.4.2  yamt 	bintime2timespec(&bt, tsp);
    328  1.3.4.2  yamt }
    329  1.3.4.2  yamt 
    330  1.3.4.2  yamt void
    331  1.3.4.2  yamt microtime(struct timeval *tvp)
    332  1.3.4.2  yamt {
    333  1.3.4.2  yamt 	struct bintime bt;
    334  1.3.4.2  yamt 
    335  1.3.4.9  yamt 	TC_COUNT(nmicrotime);
    336  1.3.4.2  yamt 	bintime(&bt);
    337  1.3.4.2  yamt 	bintime2timeval(&bt, tvp);
    338  1.3.4.2  yamt }
    339  1.3.4.2  yamt 
    340  1.3.4.2  yamt void
    341  1.3.4.2  yamt getbinuptime(struct bintime *bt)
    342  1.3.4.2  yamt {
    343  1.3.4.2  yamt 	struct timehands *th;
    344  1.3.4.2  yamt 	u_int gen;
    345  1.3.4.2  yamt 
    346  1.3.4.9  yamt 	TC_COUNT(ngetbinuptime);
    347  1.3.4.2  yamt 	do {
    348  1.3.4.2  yamt 		th = timehands;
    349  1.3.4.2  yamt 		gen = th->th_generation;
    350  1.3.4.2  yamt 		*bt = th->th_offset;
    351  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    352  1.3.4.2  yamt }
    353  1.3.4.2  yamt 
    354  1.3.4.2  yamt void
    355  1.3.4.2  yamt getnanouptime(struct timespec *tsp)
    356  1.3.4.2  yamt {
    357  1.3.4.2  yamt 	struct timehands *th;
    358  1.3.4.2  yamt 	u_int gen;
    359  1.3.4.2  yamt 
    360  1.3.4.9  yamt 	TC_COUNT(ngetnanouptime);
    361  1.3.4.2  yamt 	do {
    362  1.3.4.2  yamt 		th = timehands;
    363  1.3.4.2  yamt 		gen = th->th_generation;
    364  1.3.4.2  yamt 		bintime2timespec(&th->th_offset, tsp);
    365  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    366  1.3.4.2  yamt }
    367  1.3.4.2  yamt 
    368  1.3.4.2  yamt void
    369  1.3.4.2  yamt getmicrouptime(struct timeval *tvp)
    370  1.3.4.2  yamt {
    371  1.3.4.2  yamt 	struct timehands *th;
    372  1.3.4.2  yamt 	u_int gen;
    373  1.3.4.2  yamt 
    374  1.3.4.9  yamt 	TC_COUNT(ngetmicrouptime);
    375  1.3.4.2  yamt 	do {
    376  1.3.4.2  yamt 		th = timehands;
    377  1.3.4.2  yamt 		gen = th->th_generation;
    378  1.3.4.2  yamt 		bintime2timeval(&th->th_offset, tvp);
    379  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    380  1.3.4.2  yamt }
    381  1.3.4.2  yamt 
    382  1.3.4.2  yamt void
    383  1.3.4.2  yamt getbintime(struct bintime *bt)
    384  1.3.4.2  yamt {
    385  1.3.4.2  yamt 	struct timehands *th;
    386  1.3.4.2  yamt 	u_int gen;
    387  1.3.4.2  yamt 
    388  1.3.4.9  yamt 	TC_COUNT(ngetbintime);
    389  1.3.4.2  yamt 	do {
    390  1.3.4.2  yamt 		th = timehands;
    391  1.3.4.2  yamt 		gen = th->th_generation;
    392  1.3.4.2  yamt 		*bt = th->th_offset;
    393  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    394  1.3.4.3  yamt 	bintime_add(bt, &timebasebin);
    395  1.3.4.2  yamt }
    396  1.3.4.2  yamt 
    397  1.3.4.2  yamt void
    398  1.3.4.2  yamt getnanotime(struct timespec *tsp)
    399  1.3.4.2  yamt {
    400  1.3.4.2  yamt 	struct timehands *th;
    401  1.3.4.2  yamt 	u_int gen;
    402  1.3.4.2  yamt 
    403  1.3.4.9  yamt 	TC_COUNT(ngetnanotime);
    404  1.3.4.2  yamt 	do {
    405  1.3.4.2  yamt 		th = timehands;
    406  1.3.4.2  yamt 		gen = th->th_generation;
    407  1.3.4.2  yamt 		*tsp = th->th_nanotime;
    408  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    409  1.3.4.2  yamt }
    410  1.3.4.2  yamt 
    411  1.3.4.2  yamt void
    412  1.3.4.2  yamt getmicrotime(struct timeval *tvp)
    413  1.3.4.2  yamt {
    414  1.3.4.2  yamt 	struct timehands *th;
    415  1.3.4.2  yamt 	u_int gen;
    416  1.3.4.2  yamt 
    417  1.3.4.9  yamt 	TC_COUNT(ngetmicrotime);
    418  1.3.4.2  yamt 	do {
    419  1.3.4.2  yamt 		th = timehands;
    420  1.3.4.2  yamt 		gen = th->th_generation;
    421  1.3.4.2  yamt 		*tvp = th->th_microtime;
    422  1.3.4.2  yamt 	} while (gen == 0 || gen != th->th_generation);
    423  1.3.4.2  yamt }
    424  1.3.4.2  yamt 
    425  1.3.4.2  yamt /*
    426  1.3.4.2  yamt  * Initialize a new timecounter and possibly use it.
    427  1.3.4.2  yamt  */
    428  1.3.4.2  yamt void
    429  1.3.4.2  yamt tc_init(struct timecounter *tc)
    430  1.3.4.2  yamt {
    431  1.3.4.2  yamt 	u_int u;
    432  1.3.4.2  yamt 
    433  1.3.4.2  yamt 	u = tc->tc_frequency / tc->tc_counter_mask;
    434  1.3.4.2  yamt 	/* XXX: We need some margin here, 10% is a guess */
    435  1.3.4.2  yamt 	u *= 11;
    436  1.3.4.2  yamt 	u /= 10;
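	/*
	 * For instance, a hypothetical 16-bit counter (mask 0xffff) running
	 * at 3.58 MHz wraps about 55 times per second; with the 10% margin
	 * u comes out near 60, so unless hz is at least that high the
	 * counter is demoted to quality -2000 below.
	 */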
    437  1.3.4.2  yamt 	if (u > hz && tc->tc_quality >= 0) {
    438  1.3.4.2  yamt 		tc->tc_quality = -2000;
    439  1.3.4.4  yamt 		aprint_verbose(
    440  1.3.4.4  yamt 		    "timecounter: Timecounter \"%s\" frequency %ju Hz",
    441  1.3.4.2  yamt 			    tc->tc_name, (uintmax_t)tc->tc_frequency);
    442  1.3.4.4  yamt 		aprint_verbose(" -- Insufficient hz, needs at least %u\n", u);
    443  1.3.4.2  yamt 	} else if (tc->tc_quality >= 0 || bootverbose) {
    444  1.3.4.4  yamt 		aprint_verbose(
    445  1.3.4.4  yamt 		    "timecounter: Timecounter \"%s\" frequency %ju Hz "
    446  1.3.4.4  yamt 		    "quality %d\n", tc->tc_name, (uintmax_t)tc->tc_frequency,
    447  1.3.4.2  yamt 		    tc->tc_quality);
    448  1.3.4.2  yamt 	}
    449  1.3.4.2  yamt 
    450  1.3.4.7  yamt 	mutex_enter(&time_lock);
    451  1.3.4.8  yamt 	mutex_spin_enter(&tc_windup_lock);
    452  1.3.4.2  yamt 	tc->tc_next = timecounters;
    453  1.3.4.2  yamt 	timecounters = tc;
    454  1.3.4.2  yamt 	/*
    455  1.3.4.2  yamt 	 * Never automatically use a timecounter with negative quality.
    456  1.3.4.2  yamt 	 * Even though we run on the dummy counter, switching here may be
     457  1.3.4.2  yamt 	 * worse since this timecounter may not be monotonic.
    458  1.3.4.2  yamt 	 */
    459  1.3.4.7  yamt 	if (tc->tc_quality >= 0 && (tc->tc_quality > timecounter->tc_quality ||
    460  1.3.4.7  yamt 	    (tc->tc_quality == timecounter->tc_quality &&
    461  1.3.4.7  yamt 	    tc->tc_frequency > timecounter->tc_frequency))) {
    462  1.3.4.7  yamt 		(void)tc->tc_get_timecount(tc);
    463  1.3.4.7  yamt 		(void)tc->tc_get_timecount(tc);
    464  1.3.4.7  yamt 		timecounter = tc;
    465  1.3.4.7  yamt 		tc_windup();
    466  1.3.4.7  yamt 	}
    467  1.3.4.8  yamt 	mutex_spin_exit(&tc_windup_lock);
    468  1.3.4.7  yamt 	mutex_exit(&time_lock);
    469  1.3.4.2  yamt }
    470  1.3.4.2  yamt 
    471  1.3.4.8  yamt /*
    472  1.3.4.8  yamt  * Stop using a timecounter and remove it from the timecounters list.
    473  1.3.4.8  yamt  */
    474  1.3.4.8  yamt int
    475  1.3.4.8  yamt tc_detach(struct timecounter *target)
    476  1.3.4.8  yamt {
    477  1.3.4.8  yamt 	struct timecounter *best, *tc;
    478  1.3.4.8  yamt 	struct timecounter **tcp = NULL;
    479  1.3.4.8  yamt 	int rc = 0;
    480  1.3.4.8  yamt 
    481  1.3.4.8  yamt 	mutex_enter(&time_lock);
    482  1.3.4.8  yamt 	for (tcp = &timecounters, tc = timecounters;
    483  1.3.4.8  yamt 	     tc != NULL;
    484  1.3.4.8  yamt 	     tcp = &tc->tc_next, tc = tc->tc_next) {
    485  1.3.4.8  yamt 		if (tc == target)
    486  1.3.4.8  yamt 			break;
    487  1.3.4.8  yamt 	}
    488  1.3.4.8  yamt 	if (tc == NULL) {
    489  1.3.4.8  yamt 		rc = ESRCH;
    490  1.3.4.8  yamt 		goto out;
    491  1.3.4.8  yamt 	}
    492  1.3.4.8  yamt 	*tcp = tc->tc_next;
    493  1.3.4.8  yamt 
    494  1.3.4.8  yamt 	if (timecounter != target)
    495  1.3.4.8  yamt 		goto out;
    496  1.3.4.8  yamt 
    497  1.3.4.8  yamt 	for (best = tc = timecounters; tc != NULL; tc = tc->tc_next) {
    498  1.3.4.8  yamt 		if (tc->tc_quality > best->tc_quality)
    499  1.3.4.8  yamt 			best = tc;
    500  1.3.4.8  yamt 		else if (tc->tc_quality < best->tc_quality)
    501  1.3.4.8  yamt 			continue;
    502  1.3.4.8  yamt 		else if (tc->tc_frequency > best->tc_frequency)
    503  1.3.4.8  yamt 			best = tc;
    504  1.3.4.8  yamt 	}
    505  1.3.4.8  yamt 	mutex_spin_enter(&tc_windup_lock);
    506  1.3.4.8  yamt 	(void)best->tc_get_timecount(best);
    507  1.3.4.8  yamt 	(void)best->tc_get_timecount(best);
    508  1.3.4.8  yamt 	timecounter = best;
    509  1.3.4.8  yamt 	tc_windup();
    510  1.3.4.8  yamt 	mutex_spin_exit(&tc_windup_lock);
    511  1.3.4.8  yamt out:
    512  1.3.4.8  yamt 	mutex_exit(&time_lock);
    513  1.3.4.8  yamt 	return rc;
    514  1.3.4.8  yamt }
    515  1.3.4.8  yamt 
    516  1.3.4.2  yamt /* Report the frequency of the current timecounter. */
    517  1.3.4.2  yamt u_int64_t
    518  1.3.4.2  yamt tc_getfrequency(void)
    519  1.3.4.2  yamt {
    520  1.3.4.2  yamt 
    521  1.3.4.2  yamt 	return (timehands->th_counter->tc_frequency);
    522  1.3.4.2  yamt }
    523  1.3.4.2  yamt 
    524  1.3.4.2  yamt /*
    525  1.3.4.2  yamt  * Step our concept of UTC.  This is done by modifying our estimate of
    526  1.3.4.2  yamt  * when we booted.
    527  1.3.4.2  yamt  */
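/*
 * For example, with 100 seconds of uptime and a requested UTC time of
 * 1000000 seconds, timebasebin (our estimate of the UTC time at boot)
 * becomes roughly 999900 seconds; the monotonic uptime clocks are left
 * undisturbed.
 */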
    528  1.3.4.2  yamt void
    529  1.3.4.2  yamt tc_setclock(struct timespec *ts)
    530  1.3.4.2  yamt {
    531  1.3.4.2  yamt 	struct timespec ts2;
    532  1.3.4.2  yamt 	struct bintime bt, bt2;
    533  1.3.4.2  yamt 
    534  1.3.4.8  yamt 	mutex_spin_enter(&tc_windup_lock);
    535  1.3.4.9  yamt 	TC_COUNT(nsetclock);
    536  1.3.4.2  yamt 	binuptime(&bt2);
    537  1.3.4.2  yamt 	timespec2bintime(ts, &bt);
    538  1.3.4.2  yamt 	bintime_sub(&bt, &bt2);
    539  1.3.4.3  yamt 	bintime_add(&bt2, &timebasebin);
    540  1.3.4.3  yamt 	timebasebin = bt;
    541  1.3.4.2  yamt 	tc_windup();
    542  1.3.4.8  yamt 	mutex_spin_exit(&tc_windup_lock);
    543  1.3.4.8  yamt 
    544  1.3.4.2  yamt 	if (timestepwarnings) {
    545  1.3.4.2  yamt 		bintime2timespec(&bt2, &ts2);
    546  1.3.4.2  yamt 		log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n",
    547  1.3.4.2  yamt 		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
    548  1.3.4.2  yamt 		    (intmax_t)ts->tv_sec, ts->tv_nsec);
    549  1.3.4.2  yamt 	}
    550  1.3.4.2  yamt }
    551  1.3.4.2  yamt 
    552  1.3.4.2  yamt /*
    553  1.3.4.2  yamt  * Initialize the next struct timehands in the ring and make
    554  1.3.4.2  yamt  * it the active timehands.  Along the way we might switch to a different
    555  1.3.4.2  yamt  * timecounter and/or do seconds processing in NTP.  Slightly magic.
    556  1.3.4.2  yamt  */
    557  1.3.4.2  yamt static void
    558  1.3.4.2  yamt tc_windup(void)
    559  1.3.4.2  yamt {
    560  1.3.4.2  yamt 	struct bintime bt;
    561  1.3.4.2  yamt 	struct timehands *th, *tho;
    562  1.3.4.2  yamt 	u_int64_t scale;
    563  1.3.4.2  yamt 	u_int delta, ncount, ogen;
    564  1.3.4.3  yamt 	int i, s_update;
    565  1.3.4.2  yamt 	time_t t;
    566  1.3.4.2  yamt 
    567  1.3.4.8  yamt 	KASSERT(mutex_owned(&tc_windup_lock));
    568  1.3.4.8  yamt 
    569  1.3.4.3  yamt 	s_update = 0;
    570  1.3.4.5  yamt 
    571  1.3.4.2  yamt 	/*
    572  1.3.4.2  yamt 	 * Make the next timehands a copy of the current one, but do not
    573  1.3.4.2  yamt 	 * overwrite the generation or next pointer.  While we update
    574  1.3.4.5  yamt 	 * the contents, the generation must be zero.  Ensure global
    575  1.3.4.5  yamt 	 * visibility of the generation before proceeding.
    576  1.3.4.2  yamt 	 */
    577  1.3.4.2  yamt 	tho = timehands;
    578  1.3.4.2  yamt 	th = tho->th_next;
    579  1.3.4.2  yamt 	ogen = th->th_generation;
    580  1.3.4.2  yamt 	th->th_generation = 0;
    581  1.3.4.7  yamt 	membar_producer();
    582  1.3.4.2  yamt 	bcopy(tho, th, offsetof(struct timehands, th_generation));
    583  1.3.4.2  yamt 
    584  1.3.4.2  yamt 	/*
    585  1.3.4.2  yamt 	 * Capture a timecounter delta on the current timecounter and if
    586  1.3.4.2  yamt 	 * changing timecounters, a counter value from the new timecounter.
    587  1.3.4.2  yamt 	 * Update the offset fields accordingly.
    588  1.3.4.2  yamt 	 */
    589  1.3.4.2  yamt 	delta = tc_delta(th);
    590  1.3.4.2  yamt 	if (th->th_counter != timecounter)
    591  1.3.4.2  yamt 		ncount = timecounter->tc_get_timecount(timecounter);
    592  1.3.4.2  yamt 	else
    593  1.3.4.2  yamt 		ncount = 0;
    594  1.3.4.2  yamt 	th->th_offset_count += delta;
    595  1.3.4.2  yamt 	th->th_offset_count &= th->th_counter->tc_counter_mask;
    596  1.3.4.2  yamt 	bintime_addx(&th->th_offset, th->th_scale * delta);
    597  1.3.4.2  yamt 
    598  1.3.4.2  yamt 	/*
    599  1.3.4.2  yamt 	 * Hardware latching timecounters may not generate interrupts on
    600  1.3.4.2  yamt 	 * PPS events, so instead we poll them.  There is a finite risk that
    601  1.3.4.2  yamt 	 * the hardware might capture a count which is later than the one we
    602  1.3.4.2  yamt 	 * got above, and therefore possibly in the next NTP second which might
    603  1.3.4.2  yamt 	 * have a different rate than the current NTP second.  It doesn't
    604  1.3.4.2  yamt 	 * matter in practice.
    605  1.3.4.2  yamt 	 */
    606  1.3.4.2  yamt 	if (tho->th_counter->tc_poll_pps)
    607  1.3.4.2  yamt 		tho->th_counter->tc_poll_pps(tho->th_counter);
    608  1.3.4.2  yamt 
    609  1.3.4.2  yamt 	/*
    610  1.3.4.2  yamt 	 * Deal with NTP second processing.  The for loop normally
    611  1.3.4.2  yamt 	 * iterates at most once, but in extreme situations it might
    612  1.3.4.2  yamt 	 * keep NTP sane if timeouts are not run for several seconds.
    613  1.3.4.2  yamt 	 * At boot, the time step can be large when the TOD hardware
    614  1.3.4.2  yamt 	 * has been read, so on really large steps, we call
    615  1.3.4.2  yamt 	 * ntp_update_second only twice.  We need to call it twice in
    616  1.3.4.2  yamt 	 * case we missed a leap second.
     617  1.3.4.2  yamt 	 * If NTP is not compiled in, ntp_update_second still calculates
    618  1.3.4.2  yamt 	 * the adjustment resulting from adjtime() calls.
    619  1.3.4.2  yamt 	 */
    620  1.3.4.2  yamt 	bt = th->th_offset;
    621  1.3.4.3  yamt 	bintime_add(&bt, &timebasebin);
    622  1.3.4.2  yamt 	i = bt.sec - tho->th_microtime.tv_sec;
    623  1.3.4.2  yamt 	if (i > LARGE_STEP)
    624  1.3.4.2  yamt 		i = 2;
    625  1.3.4.2  yamt 	for (; i > 0; i--) {
    626  1.3.4.2  yamt 		t = bt.sec;
    627  1.3.4.2  yamt 		ntp_update_second(&th->th_adjustment, &bt.sec);
    628  1.3.4.3  yamt 		s_update = 1;
    629  1.3.4.2  yamt 		if (bt.sec != t)
    630  1.3.4.3  yamt 			timebasebin.sec += bt.sec - t;
    631  1.3.4.2  yamt 	}
    632  1.3.4.2  yamt 
    633  1.3.4.2  yamt 	/* Update the UTC timestamps used by the get*() functions. */
    634  1.3.4.2  yamt 	/* XXX shouldn't do this here.  Should force non-`get' versions. */
    635  1.3.4.2  yamt 	bintime2timeval(&bt, &th->th_microtime);
    636  1.3.4.2  yamt 	bintime2timespec(&bt, &th->th_nanotime);
    637  1.3.4.2  yamt 
    638  1.3.4.2  yamt 	/* Now is a good time to change timecounters. */
    639  1.3.4.2  yamt 	if (th->th_counter != timecounter) {
    640  1.3.4.2  yamt 		th->th_counter = timecounter;
    641  1.3.4.2  yamt 		th->th_offset_count = ncount;
    642  1.3.4.3  yamt 		s_update = 1;
    643  1.3.4.2  yamt 	}
    644  1.3.4.2  yamt 
    645  1.3.4.2  yamt 	/*-
    646  1.3.4.2  yamt 	 * Recalculate the scaling factor.  We want the number of 1/2^64
    647  1.3.4.2  yamt 	 * fractions of a second per period of the hardware counter, taking
    648  1.3.4.2  yamt 	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
    649  1.3.4.2  yamt 	 * processing provides us with.
    650  1.3.4.2  yamt 	 *
    651  1.3.4.2  yamt 	 * The th_adjustment is nanoseconds per second with 32 bit binary
    652  1.3.4.2  yamt 	 * fraction and we want 64 bit binary fraction of second:
    653  1.3.4.2  yamt 	 *
    654  1.3.4.2  yamt 	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
    655  1.3.4.2  yamt 	 *
    656  1.3.4.2  yamt 	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
    657  1.3.4.2  yamt 	 * we can only multiply by about 850 without overflowing, but that
    658  1.3.4.2  yamt 	 * leaves suitably precise fractions for multiply before divide.
    659  1.3.4.2  yamt 	 *
    660  1.3.4.2  yamt 	 * Divide before multiply with a fraction of 2199/512 results in a
    661  1.3.4.2  yamt 	 * systematic undercompensation of 10PPM of th_adjustment.  On a
    662  1.3.4.2  yamt 	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
    663  1.3.4.2  yamt  	 *
    664  1.3.4.2  yamt 	 * We happily sacrifice the lowest of the 64 bits of our result
    665  1.3.4.2  yamt 	 * to the goddess of code clarity.
    666  1.3.4.2  yamt 	 *
    667  1.3.4.2  yamt 	 */
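	/*
	 * Working the numbers (illustrative): 2199/512 = 4.294921875 versus
	 * the exact 2^32/10^9 = 4.294967296, a relative error of about
	 * 1.06e-5, i.e. the ~10PPM undercompensation cited above; at the
	 * 5000PPM limit that amounts to the quoted 0.05PPM.  The code below
	 * applies 2199/1024 and doubles the result at the end, which is the
	 * same 2199/512 while keeping the intermediate sum within a 64-bit
	 * integer.
	 */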
    668  1.3.4.3  yamt 	if (s_update) {
    669  1.3.4.3  yamt 		scale = (u_int64_t)1 << 63;
    670  1.3.4.3  yamt 		scale += (th->th_adjustment / 1024) * 2199;
    671  1.3.4.3  yamt 		scale /= th->th_counter->tc_frequency;
    672  1.3.4.3  yamt 		th->th_scale = scale * 2;
    673  1.3.4.3  yamt 	}
    674  1.3.4.2  yamt 	/*
    675  1.3.4.2  yamt 	 * Now that the struct timehands is again consistent, set the new
    676  1.3.4.5  yamt 	 * generation number, making sure to not make it zero.  Ensure
     677  1.3.4.5  yamt 	 * all updated fields are globally visible before the new generation is stored.
    678  1.3.4.2  yamt 	 */
    679  1.3.4.2  yamt 	if (++ogen == 0)
    680  1.3.4.2  yamt 		ogen = 1;
    681  1.3.4.7  yamt 	membar_producer();
    682  1.3.4.2  yamt 	th->th_generation = ogen;
    683  1.3.4.2  yamt 
    684  1.3.4.5  yamt 	/*
    685  1.3.4.5  yamt 	 * Go live with the new struct timehands.  Ensure changes are
     686  1.3.4.5  yamt 	 * globally visible before the timehands pointer is switched.
    687  1.3.4.5  yamt 	 */
    688  1.3.4.2  yamt 	time_second = th->th_microtime.tv_sec;
    689  1.3.4.2  yamt 	time_uptime = th->th_offset.sec;
    690  1.3.4.7  yamt 	membar_producer();
    691  1.3.4.2  yamt 	timehands = th;
    692  1.3.4.2  yamt 
    693  1.3.4.7  yamt 	/*
    694  1.3.4.7  yamt 	 * Force users of the old timehand to move on.  This is
    695  1.3.4.7  yamt 	 * necessary for MP systems; we need to ensure that the
    696  1.3.4.7  yamt 	 * consumers will move away from the old timehand before
    697  1.3.4.7  yamt 	 * we begin updating it again when we eventually wrap
    698  1.3.4.7  yamt 	 * around.
    699  1.3.4.7  yamt 	 */
    700  1.3.4.7  yamt 	if (++tho->th_generation == 0)
    701  1.3.4.7  yamt 		tho->th_generation = 1;
    702  1.3.4.2  yamt }
    703  1.3.4.2  yamt 
    704  1.3.4.2  yamt /*
    705  1.3.4.2  yamt  * RFC 2783 PPS-API implementation.
    706  1.3.4.2  yamt  */
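/*
 * Typical driver usage (an illustrative sketch; "sc" and its embedded
 * struct pps_state member are hypothetical): latch the counter as early
 * as possible in the interrupt handler, then classify the edge once the
 * urgent work is done:
 *
 *	pps_capture(&sc->sc_pps_state);
 *	...
 *	pps_event(&sc->sc_pps_state, PPS_CAPTUREASSERT);
 *
 * pps_ioctl() below is what a device's ioctl entry point would typically
 * forward PPS_IOC_* requests to.
 */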
    707  1.3.4.2  yamt 
    708  1.3.4.2  yamt int
    709  1.3.4.5  yamt pps_ioctl(u_long cmd, void *data, struct pps_state *pps)
    710  1.3.4.2  yamt {
    711  1.3.4.2  yamt 	pps_params_t *app;
    712  1.3.4.2  yamt 	pps_info_t *pipi;
    713  1.3.4.2  yamt #ifdef PPS_SYNC
    714  1.3.4.2  yamt 	int *epi;
    715  1.3.4.2  yamt #endif
    716  1.3.4.2  yamt 
    717  1.3.4.2  yamt 	KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_ioctl") */
    718  1.3.4.2  yamt 	switch (cmd) {
    719  1.3.4.2  yamt 	case PPS_IOC_CREATE:
    720  1.3.4.2  yamt 		return (0);
    721  1.3.4.2  yamt 	case PPS_IOC_DESTROY:
    722  1.3.4.2  yamt 		return (0);
    723  1.3.4.2  yamt 	case PPS_IOC_SETPARAMS:
    724  1.3.4.2  yamt 		app = (pps_params_t *)data;
    725  1.3.4.2  yamt 		if (app->mode & ~pps->ppscap)
    726  1.3.4.2  yamt 			return (EINVAL);
    727  1.3.4.2  yamt 		pps->ppsparam = *app;
    728  1.3.4.2  yamt 		return (0);
    729  1.3.4.2  yamt 	case PPS_IOC_GETPARAMS:
    730  1.3.4.2  yamt 		app = (pps_params_t *)data;
    731  1.3.4.2  yamt 		*app = pps->ppsparam;
    732  1.3.4.2  yamt 		app->api_version = PPS_API_VERS_1;
    733  1.3.4.2  yamt 		return (0);
    734  1.3.4.2  yamt 	case PPS_IOC_GETCAP:
    735  1.3.4.2  yamt 		*(int*)data = pps->ppscap;
    736  1.3.4.2  yamt 		return (0);
    737  1.3.4.2  yamt 	case PPS_IOC_FETCH:
    738  1.3.4.2  yamt 		pipi = (pps_info_t *)data;
    739  1.3.4.2  yamt 		pps->ppsinfo.current_mode = pps->ppsparam.mode;
    740  1.3.4.2  yamt 		*pipi = pps->ppsinfo;
    741  1.3.4.2  yamt 		return (0);
    742  1.3.4.2  yamt 	case PPS_IOC_KCBIND:
    743  1.3.4.2  yamt #ifdef PPS_SYNC
    744  1.3.4.2  yamt 		epi = (int *)data;
    745  1.3.4.2  yamt 		/* XXX Only root should be able to do this */
    746  1.3.4.2  yamt 		if (*epi & ~pps->ppscap)
    747  1.3.4.2  yamt 			return (EINVAL);
    748  1.3.4.2  yamt 		pps->kcmode = *epi;
    749  1.3.4.2  yamt 		return (0);
    750  1.3.4.2  yamt #else
    751  1.3.4.2  yamt 		return (EOPNOTSUPP);
    752  1.3.4.2  yamt #endif
    753  1.3.4.2  yamt 	default:
    754  1.3.4.2  yamt 		return (EPASSTHROUGH);
    755  1.3.4.2  yamt 	}
    756  1.3.4.2  yamt }
    757  1.3.4.2  yamt 
    758  1.3.4.2  yamt void
    759  1.3.4.2  yamt pps_init(struct pps_state *pps)
    760  1.3.4.2  yamt {
    761  1.3.4.2  yamt 	pps->ppscap |= PPS_TSFMT_TSPEC;
    762  1.3.4.2  yamt 	if (pps->ppscap & PPS_CAPTUREASSERT)
    763  1.3.4.2  yamt 		pps->ppscap |= PPS_OFFSETASSERT;
    764  1.3.4.2  yamt 	if (pps->ppscap & PPS_CAPTURECLEAR)
    765  1.3.4.2  yamt 		pps->ppscap |= PPS_OFFSETCLEAR;
    766  1.3.4.2  yamt }
    767  1.3.4.2  yamt 
    768  1.3.4.2  yamt void
    769  1.3.4.2  yamt pps_capture(struct pps_state *pps)
    770  1.3.4.2  yamt {
    771  1.3.4.2  yamt 	struct timehands *th;
    772  1.3.4.2  yamt 
    773  1.3.4.2  yamt 	KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_capture") */
    774  1.3.4.2  yamt 	th = timehands;
    775  1.3.4.2  yamt 	pps->capgen = th->th_generation;
    776  1.3.4.2  yamt 	pps->capth = th;
    777  1.3.4.2  yamt 	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
    778  1.3.4.2  yamt 	if (pps->capgen != th->th_generation)
    779  1.3.4.2  yamt 		pps->capgen = 0;
    780  1.3.4.2  yamt }
    781  1.3.4.2  yamt 
    782  1.3.4.2  yamt void
    783  1.3.4.2  yamt pps_event(struct pps_state *pps, int event)
    784  1.3.4.2  yamt {
    785  1.3.4.2  yamt 	struct bintime bt;
    786  1.3.4.2  yamt 	struct timespec ts, *tsp, *osp;
    787  1.3.4.2  yamt 	u_int tcount, *pcount;
    788  1.3.4.2  yamt 	int foff, fhard;
    789  1.3.4.2  yamt 	pps_seq_t *pseq;
    790  1.3.4.2  yamt 
    791  1.3.4.2  yamt 	KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_event") */
    792  1.3.4.2  yamt 	/* If the timecounter was wound up underneath us, bail out. */
    793  1.3.4.2  yamt 	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
    794  1.3.4.2  yamt 		return;
    795  1.3.4.2  yamt 
    796  1.3.4.2  yamt 	/* Things would be easier with arrays. */
    797  1.3.4.2  yamt 	if (event == PPS_CAPTUREASSERT) {
    798  1.3.4.2  yamt 		tsp = &pps->ppsinfo.assert_timestamp;
    799  1.3.4.2  yamt 		osp = &pps->ppsparam.assert_offset;
    800  1.3.4.2  yamt 		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
    801  1.3.4.2  yamt 		fhard = pps->kcmode & PPS_CAPTUREASSERT;
    802  1.3.4.2  yamt 		pcount = &pps->ppscount[0];
    803  1.3.4.2  yamt 		pseq = &pps->ppsinfo.assert_sequence;
    804  1.3.4.2  yamt 	} else {
    805  1.3.4.2  yamt 		tsp = &pps->ppsinfo.clear_timestamp;
    806  1.3.4.2  yamt 		osp = &pps->ppsparam.clear_offset;
    807  1.3.4.2  yamt 		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
    808  1.3.4.2  yamt 		fhard = pps->kcmode & PPS_CAPTURECLEAR;
    809  1.3.4.2  yamt 		pcount = &pps->ppscount[1];
    810  1.3.4.2  yamt 		pseq = &pps->ppsinfo.clear_sequence;
    811  1.3.4.2  yamt 	}
    812  1.3.4.2  yamt 
    813  1.3.4.2  yamt 	/*
    814  1.3.4.2  yamt 	 * If the timecounter changed, we cannot compare the count values, so
    815  1.3.4.2  yamt 	 * we have to drop the rest of the PPS-stuff until the next event.
    816  1.3.4.2  yamt 	 */
    817  1.3.4.2  yamt 	if (pps->ppstc != pps->capth->th_counter) {
    818  1.3.4.2  yamt 		pps->ppstc = pps->capth->th_counter;
    819  1.3.4.2  yamt 		*pcount = pps->capcount;
    820  1.3.4.2  yamt 		pps->ppscount[2] = pps->capcount;
    821  1.3.4.2  yamt 		return;
    822  1.3.4.2  yamt 	}
    823  1.3.4.2  yamt 
    824  1.3.4.2  yamt 	/* Convert the count to a timespec. */
    825  1.3.4.2  yamt 	tcount = pps->capcount - pps->capth->th_offset_count;
    826  1.3.4.2  yamt 	tcount &= pps->capth->th_counter->tc_counter_mask;
    827  1.3.4.2  yamt 	bt = pps->capth->th_offset;
    828  1.3.4.2  yamt 	bintime_addx(&bt, pps->capth->th_scale * tcount);
    829  1.3.4.3  yamt 	bintime_add(&bt, &timebasebin);
    830  1.3.4.2  yamt 	bintime2timespec(&bt, &ts);
    831  1.3.4.2  yamt 
    832  1.3.4.2  yamt 	/* If the timecounter was wound up underneath us, bail out. */
    833  1.3.4.2  yamt 	if (pps->capgen != pps->capth->th_generation)
    834  1.3.4.2  yamt 		return;
    835  1.3.4.2  yamt 
    836  1.3.4.2  yamt 	*pcount = pps->capcount;
    837  1.3.4.2  yamt 	(*pseq)++;
    838  1.3.4.2  yamt 	*tsp = ts;
    839  1.3.4.2  yamt 
    840  1.3.4.2  yamt 	if (foff) {
    841  1.3.4.2  yamt 		timespecadd(tsp, osp, tsp);
    842  1.3.4.2  yamt 		if (tsp->tv_nsec < 0) {
    843  1.3.4.2  yamt 			tsp->tv_nsec += 1000000000;
    844  1.3.4.2  yamt 			tsp->tv_sec -= 1;
    845  1.3.4.2  yamt 		}
    846  1.3.4.2  yamt 	}
    847  1.3.4.2  yamt #ifdef PPS_SYNC
    848  1.3.4.2  yamt 	if (fhard) {
    849  1.3.4.2  yamt 		u_int64_t scale;
    850  1.3.4.2  yamt 
    851  1.3.4.2  yamt 		/*
    852  1.3.4.2  yamt 		 * Feed the NTP PLL/FLL.
    853  1.3.4.2  yamt 		 * The FLL wants to know how many (hardware) nanoseconds
    854  1.3.4.2  yamt 		 * elapsed since the previous event.
    855  1.3.4.2  yamt 		 */
    856  1.3.4.2  yamt 		tcount = pps->capcount - pps->ppscount[2];
    857  1.3.4.2  yamt 		pps->ppscount[2] = pps->capcount;
    858  1.3.4.2  yamt 		tcount &= pps->capth->th_counter->tc_counter_mask;
    859  1.3.4.2  yamt 		scale = (u_int64_t)1 << 63;
    860  1.3.4.2  yamt 		scale /= pps->capth->th_counter->tc_frequency;
    861  1.3.4.2  yamt 		scale *= 2;
    862  1.3.4.2  yamt 		bt.sec = 0;
    863  1.3.4.2  yamt 		bt.frac = 0;
    864  1.3.4.2  yamt 		bintime_addx(&bt, scale * tcount);
    865  1.3.4.2  yamt 		bintime2timespec(&bt, &ts);
    866  1.3.4.2  yamt 		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
    867  1.3.4.2  yamt 	}
    868  1.3.4.2  yamt #endif
    869  1.3.4.2  yamt }
    870  1.3.4.2  yamt 
    871  1.3.4.2  yamt /*
    872  1.3.4.2  yamt  * Timecounters need to be updated every so often to prevent the hardware
    873  1.3.4.2  yamt  * counter from overflowing.  Updating also recalculates the cached values
    874  1.3.4.2  yamt  * used by the get*() family of functions, so their precision depends on
    875  1.3.4.2  yamt  * the update frequency.
    876  1.3.4.2  yamt  */
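/*
 * To put numbers on it, a hypothetical 32-bit counter at 100 MHz wraps
 * every 2^32 / 1e8 =~ 43 seconds, so updating roughly once a millisecond
 * (see tc_tick below) leaves a comfortable margin before th_offset_count
 * could be lapped.
 */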
    877  1.3.4.2  yamt 
    878  1.3.4.2  yamt static int tc_tick;
    879  1.3.4.2  yamt 
    880  1.3.4.2  yamt void
    881  1.3.4.2  yamt tc_ticktock(void)
    882  1.3.4.2  yamt {
    883  1.3.4.2  yamt 	static int count;
    884  1.3.4.2  yamt 
    885  1.3.4.2  yamt 	if (++count < tc_tick)
    886  1.3.4.2  yamt 		return;
    887  1.3.4.2  yamt 	count = 0;
    888  1.3.4.8  yamt 	mutex_spin_enter(&tc_windup_lock);
    889  1.3.4.2  yamt 	tc_windup();
    890  1.3.4.8  yamt 	mutex_spin_exit(&tc_windup_lock);
    891  1.3.4.2  yamt }
    892  1.3.4.2  yamt 
    893  1.3.4.2  yamt void
    894  1.3.4.2  yamt inittimecounter(void)
    895  1.3.4.2  yamt {
    896  1.3.4.2  yamt 	u_int p;
    897  1.3.4.2  yamt 
    898  1.3.4.8  yamt 	mutex_init(&tc_windup_lock, MUTEX_DEFAULT, IPL_SCHED);
    899  1.3.4.8  yamt 
    900  1.3.4.2  yamt 	/*
    901  1.3.4.2  yamt 	 * Set the initial timeout to
    902  1.3.4.2  yamt 	 * max(1, <approx. number of hardclock ticks in a millisecond>).
    903  1.3.4.2  yamt 	 * People should probably not use the sysctl to set the timeout
     904  1.3.4.2  yamt 	 * to smaller than its initial value, since that value is the
    905  1.3.4.2  yamt 	 * smallest reasonable one.  If they want better timestamps they
    906  1.3.4.2  yamt 	 * should use the non-"get"* functions.
    907  1.3.4.2  yamt 	 */
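	/*
	 * For example, a hypothetical hz of 8000 gives tc_tick = 8 and the
	 * message below reports "1.000 msec"; an hz of 100 (a common
	 * default) gives tc_tick = 1 and "10.000 msec".
	 */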
    908  1.3.4.2  yamt 	if (hz > 1000)
    909  1.3.4.2  yamt 		tc_tick = (hz + 500) / 1000;
    910  1.3.4.2  yamt 	else
    911  1.3.4.2  yamt 		tc_tick = 1;
    912  1.3.4.2  yamt 	p = (tc_tick * 1000000) / hz;
    913  1.3.4.4  yamt 	aprint_verbose("timecounter: Timecounters tick every %d.%03u msec\n",
    914  1.3.4.4  yamt 	    p / 1000, p % 1000);
    915  1.3.4.2  yamt 
    916  1.3.4.2  yamt 	/* warm up new timecounter (again) and get rolling. */
    917  1.3.4.2  yamt 	(void)timecounter->tc_get_timecount(timecounter);
    918  1.3.4.2  yamt 	(void)timecounter->tc_get_timecount(timecounter);
    919  1.3.4.2  yamt }
    920