/*	$NetBSD: kern_tc.c,v 1.78 2025/07/15 22:15:03 andvar Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ---------------------------------------------------------------------------
 */

/*
 * https://papers.freebsd.org/2002/phk-timecounters.files/timecounter.pdf
 */

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.78 2025/07/15 22:15:03 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_ntp.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/reboot.h>	/* XXX just to get AB_VERBOSE */
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/xcall.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return ++now;
}

static struct timecounter dummy_timecounter = {
	.tc_get_timecount	= dummy_get_timecount,
	.tc_counter_mask	= ~0u,
	.tc_frequency		= 1000000,
	.tc_name		= "dummy",
	.tc_quality		= -1000000,
	.tc_priv		= NULL,
};

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;     /* active timecounter */
	int64_t			th_adjustment;   /* frequency adjustment */
						 /* (NTP/adjtime) */
	uint64_t		th_scale;        /* scale factor (counter */
						 /* tick->time) */
	uint64_t		th_offset_count; /* offset at last time */
						 /* update (tc_windup()) */
	struct bintime		th_offset;       /* bin (up)time at windup */
	struct timeval		th_microtime;    /* cached microtime */
	struct timespec		th_nanotime;     /* cached nanotime */
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;   /* current genration */
	struct timehands	*th_next;        /* next timehand */
};
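/*
 * The timehands are read without locks.  A reader loads the current
 * timehands pointer, snapshots th_generation, copies what it needs,
 * and retries if the generation changed underneath it (or was zero,
 * meaning an update was in flight).  Roughly, as in binuptime() below:
 *
 *	do {
 *		th = atomic_load_consume(&timehands);
 *		gen = th->th_generation;
 *		membar_consumer();
 *		... copy th_offset, th_scale, ... ...
 *		membar_consumer();
 *	} while (gen == 0 || gen != th->th_generation);
 *
 * The ring of ten timehands below gives slow readers time to finish
 * before tc_windup() comes back around and reuses a slot.
 */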
static struct timehands th0;
static struct timehands th9 = { .th_next = &th0, };
static struct timehands th8 = { .th_next = &th9, };
static struct timehands th7 = { .th_next = &th8, };
static struct timehands th6 = { .th_next = &th7, };
static struct timehands th5 = { .th_next = &th6, };
static struct timehands th4 = { .th_next = &th5, };
static struct timehands th3 = { .th_next = &th4, };
static struct timehands th2 = { .th_next = &th3, };
static struct timehands th1 = { .th_next = &th2, };
static struct timehands th0 = {
	.th_counter = &dummy_timecounter,
	.th_scale = (uint64_t)-1 / 1000000,
	.th_offset = { .sec = 1, .frac = 0 },
	.th_generation = 1,
	.th_next = &th1,
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

/* used by savecore(8) */
time_t time_second_legacy asm("time_second");

#ifdef __HAVE_ATOMIC64_LOADSTORE
volatile time_t time__second __cacheline_aligned = 1;
volatile time_t time__uptime __cacheline_aligned = 1;
#else
static volatile struct {
	uint32_t lo, hi;
} time__uptime32 __cacheline_aligned = {
	.lo = 1,
}, time__second32 __cacheline_aligned = {
	.lo = 1,
};
#endif

static struct {
	struct bintime bin;
	volatile unsigned gen;	/* even when stable, odd when changing */
} timebase __cacheline_aligned;
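/*
 * Note that timebase uses an even/odd generation scheme rather than
 * the zero-means-updating convention of th_generation: the writer
 * makes timebase.gen odd before touching timebase.bin and bumps it
 * back to even afterwards, so a reader that observes the same even
 * value before and after copying timebase.bin has a consistent
 * snapshot (see getbinboottime(); tc_setclock() and tc_windup() are
 * the writers).
 */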
static int timestepwarnings;

kmutex_t timecounter_lock;
static u_int timecounter_mods;
static volatile int timecounter_removals = 1;
static u_int timecounter_bad;

#ifdef __HAVE_ATOMIC64_LOADSTORE

static inline void
setrealuptime(time_t second, time_t uptime)
{

	time_second_legacy = second;

	atomic_store_relaxed(&time__second, second);
	atomic_store_relaxed(&time__uptime, uptime);
}

#else

static inline void
setrealuptime(time_t second, time_t uptime)
{
	uint32_t seclo = second & 0xffffffff, sechi = second >> 32;
	uint32_t uplo = uptime & 0xffffffff, uphi = uptime >> 32;

	KDASSERT(mutex_owned(&timecounter_lock));

	time_second_legacy = second;

	/*
	 * Fast path -- no wraparound, just updating the low bits, so
	 * no need for seqlocked access.
	 */
	if (__predict_true(sechi == time__second32.hi) &&
	    __predict_true(uphi == time__uptime32.hi)) {
		atomic_store_relaxed(&time__second32.lo, seclo);
		atomic_store_relaxed(&time__uptime32.lo, uplo);
		return;
	}

	atomic_store_relaxed(&time__second32.hi, 0xffffffff);
	atomic_store_relaxed(&time__uptime32.hi, 0xffffffff);
	membar_producer();
	atomic_store_relaxed(&time__second32.lo, seclo);
	atomic_store_relaxed(&time__uptime32.lo, uplo);
	membar_producer();
	atomic_store_relaxed(&time__second32.hi, sechi);
	atomic_store_relaxed(&time__uptime32.hi, uphi);
}

time_t
getrealtime(void)
{
	uint32_t lo, hi;

	do {
		for (;;) {
			hi = atomic_load_relaxed(&time__second32.hi);
			if (__predict_true(hi != 0xffffffff))
				break;
			SPINLOCK_BACKOFF_HOOK;
		}
		membar_consumer();
		lo = atomic_load_relaxed(&time__second32.lo);
		membar_consumer();
	} while (hi != atomic_load_relaxed(&time__second32.hi));

	return ((time_t)hi << 32) | lo;
}

time_t
getuptime(void)
{
	uint32_t lo, hi;

	do {
		for (;;) {
			hi = atomic_load_relaxed(&time__uptime32.hi);
			if (__predict_true(hi != 0xffffffff))
				break;
			SPINLOCK_BACKOFF_HOOK;
		}
		membar_consumer();
		lo = atomic_load_relaxed(&time__uptime32.lo);
		membar_consumer();
	} while (hi != atomic_load_relaxed(&time__uptime32.hi));

	return ((time_t)hi << 32) | lo;
}

time_t
getboottime(void)
{

	return getrealtime() - getuptime();
}

uint32_t
getuptime32(void)
{

	return atomic_load_relaxed(&time__uptime32.lo);
}

#endif	/* !defined(__HAVE_ATOMIC64_LOADSTORE) */
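/*
 * To see why the all-ones marker above is needed: without it, a reader
 * could tear a time_t around a 32-bit carry, e.g. observe the step from
 * 0x1ffffffff to 0x200000000 as 0x2ffffffff (new hi word, old lo word).
 * Since 0xffffffff in the hi word cannot occur as a real value for many
 * billions of years, it doubles as a "changing" flag that makes readers
 * spin across the update window; the final re-read of hi catches the
 * remaining races.
 */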
/*
 * sysctl helper routine for kern.timecounter.hardware
 */
static int
sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error;
	char newname[MAX_TCNAMELEN];
	struct timecounter *newtc, *tc;

	tc = timecounter;

	strlcpy(newname, tc->tc_name, sizeof(newname));

	node = *rnode;
	node.sysctl_data = newname;
	node.sysctl_size = sizeof(newname);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error ||
	    newp == NULL ||
	    strncmp(newname, tc->tc_name, sizeof(newname)) == 0)
		return error;

	if (l != NULL && (error = kauth_authorize_system(l->l_cred,
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS, newname,
	    NULL, NULL)) != 0)
		return error;

	if (!cold)
		mutex_spin_enter(&timecounter_lock);
	error = EINVAL;
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;
		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);
		timecounter = newtc;
		error = 0;
		break;
	}
	if (!cold)
		mutex_spin_exit(&timecounter_lock);
	return error;
}

static int
sysctl_kern_timecounter_choice(SYSCTLFN_ARGS)
{
	char buf[MAX_TCNAMELEN+48];
	char *where;
	const char *spc;
	struct timecounter *tc;
	size_t needed, left, slen;
	int error, mods;

	if (newp != NULL)
		return EPERM;
	if (namelen != 0)
		return EINVAL;

	mutex_spin_enter(&timecounter_lock);
 retry:
	spc = "";
	error = 0;
	needed = 0;
	left = *oldlenp;
	where = oldp;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		if (where == NULL) {
			needed += sizeof(buf);  /* be conservative */
		} else {
			slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64
			    " Hz)", spc, tc->tc_name, tc->tc_quality,
			    tc->tc_frequency);
			if (left < slen + 1)
				break;
			mods = timecounter_mods;
			mutex_spin_exit(&timecounter_lock);
			error = copyout(buf, where, slen + 1);
			mutex_spin_enter(&timecounter_lock);
			if (mods != timecounter_mods) {
				goto retry;
			}
			spc = " ";
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	mutex_spin_exit(&timecounter_lock);

	*oldlenp = needed;
	return error;
}
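/*
 * From userland these appear as kern.timecounter.choice (the list of
 * registered counters) and kern.timecounter.hardware (the active one),
 * e.g.:
 *
 *	$ sysctl kern.timecounter.choice
 *	$ sysctl -w kern.timecounter.hardware=hpet0
 *
 * ("hpet0" here is only an example name; the valid names are whatever
 * the drivers have registered via tc_init().)
 */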
SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup")
{
	const struct sysctlnode *node;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "timecounter",
	    SYSCTL_DESCR("time counter information"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node != NULL) {
		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRING, "choice",
		    SYSCTL_DESCR("available counters"),
		    sysctl_kern_timecounter_choice, 0, NULL, 0,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_STRING, "hardware",
		    SYSCTL_DESCR("currently active time counter"),
		    sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);

		sysctl_createv(clog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_INT, "timestepwarnings",
		    SYSCTL_DESCR("log time steps"),
		    NULL, 0, &timestepwarnings, 0,
		    CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}
}

#ifdef TC_COUNTERS
#define TC_STATS(name)							\
static struct evcnt n##name =						\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name);	\
EVCNT_ATTACH_STATIC(n##name)
TC_STATS(binuptime);    TC_STATS(nanouptime);    TC_STATS(microuptime);
TC_STATS(bintime);      TC_STATS(nanotime);      TC_STATS(microtime);
TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime);
TC_STATS(getbintime);   TC_STATS(getnanotime);   TC_STATS(getmicrotime);
TC_STATS(setclock);
#define	TC_COUNT(var)	var.ev_count++
#undef TC_STATS
#else
#define	TC_COUNT(var)	/* nothing */
#endif	/* TC_COUNTERS */

static void tc_windup(void);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return (tc->tc_get_timecount(tc) -
	    th->th_offset_count) & tc->tc_counter_mask;
}
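/*
 * The subtraction above is done modulo the counter width, so the delta
 * is correct across a wrap as long as less than one full counter period
 * has elapsed since the last windup.  E.g. with a 16-bit counter
 * (tc_counter_mask = 0xffff), th_offset_count = 0xfff0 and a current
 * reading of 0x0010 give (0x0010 - 0xfff0) & 0xffff = 0x0020, i.e. 32
 * ticks, despite the wrap in between.
 */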
/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/timevar.h> for a description of these 12 functions.
 */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	lwp_t *l;
	u_int lgen, gen;

	TC_COUNT(nbinuptime);

	/*
	 * Provide exclusion against tc_detach().
	 *
	 * We record the number of timecounter removals before accessing
	 * timecounter state.  Note that the LWP can be using multiple
	 * "generations" at once, due to interrupts (interrupted while in
	 * this function).  Hardware interrupts will borrow the interrupted
	 * LWP's l_tcgen value for this purpose, and can themselves be
	 * interrupted by higher priority interrupts.  In this case we need
	 * to ensure that the oldest generation in use is recorded.
	 *
	 * splsched() is too expensive to use, so we take care to structure
	 * this code in such a way that it is not required.  Likewise, we
	 * do not disable preemption.
	 *
	 * Memory barriers are also too expensive to use for such a
	 * performance critical function.  The good news is that we do not
	 * need memory barriers for this type of exclusion, as the thread
	 * updating timecounter_removals will issue a broadcast cross call
	 * before inspecting our l_tcgen value (this elides memory ordering
	 * issues).
	 *
	 * XXX If the author of the above comment knows how to make it
	 * safe to avoid memory barriers around the access to
	 * th->th_generation, I'm all ears.
	 */
	l = curlwp;
	lgen = l->l_tcgen;
	if (__predict_true(lgen == 0)) {
		l->l_tcgen = timecounter_removals;
	}
	__insn_barrier();

	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);

	__insn_barrier();
	l->l_tcgen = lgen;
}
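/*
 * th_scale is the counter period expressed as a 0.64 binary fraction of
 * a second, roughly 2^64 / tc_frequency (see tc_windup()).  For example,
 * with a 1 MHz counter th_scale ~= 2^64 / 10^6 = 18446744073709, so a
 * delta of 500000 ticks contributes 500000 * th_scale ~= 2^63 to
 * bt->frac, i.e. half a second, which is exactly what bintime_addx()
 * accumulates above.
 */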
void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	TC_COUNT(nnanouptime);
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	TC_COUNT(nmicrouptime);
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
	struct bintime boottime;

	TC_COUNT(nbintime);
	binuptime(bt);
	getbinboottime(&boottime);
	bintime_add(bt, &boottime);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	TC_COUNT(nnanotime);
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	TC_COUNT(nmicrotime);
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	TC_COUNT(ngetbinuptime);
	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	TC_COUNT(ngetnanouptime);
	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		bintime2timespec(&th->th_offset, tsp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	TC_COUNT(ngetmicrouptime);
	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		bintime2timeval(&th->th_offset, tvp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	struct bintime boottime;
	u_int gen;

	TC_COUNT(ngetbintime);
	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
	getbinboottime(&boottime);
	bintime_add(bt, &boottime);
}

static inline void
dogetnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	TC_COUNT(ngetnanotime);
	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		*tsp = th->th_nanotime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanotime(struct timespec *tsp)
{

	dogetnanotime(tsp);
}

void dtrace_getnanotime(struct timespec *tsp);

void
dtrace_getnanotime(struct timespec *tsp)
{

	dogetnanotime(tsp);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	TC_COUNT(ngetmicrotime);
	do {
		th = atomic_load_consume(&timehands);
		gen = th->th_generation;
		membar_consumer();
		*tvp = th->th_microtime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}
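/*
 * The get*() variants above return values cached at the last
 * tc_windup() rather than reading the hardware counter, so they cost
 * only a generation-checked copy but may be stale by up to one tc_tick
 * period (see inittimecounter()); with hz = 100 that is up to 10 ms.
 * Callers that need full counter resolution should use the non-"get"
 * functions instead.
 */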
void
getnanoboottime(struct timespec *tsp)
{
	struct bintime bt;

	getbinboottime(&bt);
	bintime2timespec(&bt, tsp);
}

void
getmicroboottime(struct timeval *tvp)
{
	struct bintime bt;

	getbinboottime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinboottime(struct bintime *basep)
{
	struct bintime base;
	unsigned gen;

	do {
		/* Spin until the timebase isn't changing. */
		while ((gen = atomic_load_relaxed(&timebase.gen)) & 1)
			SPINLOCK_BACKOFF_HOOK;

		/* Read out a snapshot of the timebase. */
		membar_consumer();
		base = timebase.bin;
		membar_consumer();

		/* Restart if it changed while we were reading. */
	} while (gen != atomic_load_relaxed(&timebase.gen));

	*basep = base;
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;

	KASSERTMSG(tc->tc_next == NULL, "timecounter %s already initialised",
	    tc->tc_name);

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		aprint_verbose(
		    "timecounter: Timecounter \"%s\" frequency %ju Hz",
		    tc->tc_name, (uintmax_t)tc->tc_frequency);
		aprint_verbose(" -- Insufficient hz, needs at least %u\n", u);
	} else if (tc->tc_quality >= 0 || bootverbose) {
		aprint_verbose(
		    "timecounter: Timecounter \"%s\" frequency %ju Hz "
		    "quality %d\n", tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	mutex_spin_enter(&timecounter_lock);
	tc->tc_next = timecounters;
	timecounters = tc;
	timecounter_mods++;
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality >= 0 && (tc->tc_quality > timecounter->tc_quality ||
	    (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency > timecounter->tc_frequency))) {
		(void)tc->tc_get_timecount(tc);
		(void)tc->tc_get_timecount(tc);
		timecounter = tc;
		tc_windup();
	}
	mutex_spin_exit(&timecounter_lock);
}

/*
 * Pick a new timecounter due to the existing counter going bad.
 */
static void
tc_pick(void)
{
	struct timecounter *best, *tc;

	KASSERT(mutex_owned(&timecounter_lock));

	for (best = tc = timecounters; tc != NULL; tc = tc->tc_next) {
		if (tc->tc_quality > best->tc_quality)
			best = tc;
		else if (tc->tc_quality < best->tc_quality)
			continue;
		else if (tc->tc_frequency > best->tc_frequency)
			best = tc;
	}
	(void)best->tc_get_timecount(best);
	(void)best->tc_get_timecount(best);
	timecounter = best;
}
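/*
 * Selection is thus by quality first and frequency second: e.g. a
 * counter with (q=2000, f=25 MHz) beats one with (q=1000, f=1 GHz),
 * and frequency only breaks ties in quality.  tc_init() applies the
 * same ordering when deciding whether a newly registered counter
 * should take over from the current one.
 */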
/*
 * A timecounter has gone bad, arrange to pick a new one at the next
 * clock tick.
 */
void
tc_gonebad(struct timecounter *tc)
{

	tc->tc_quality = -100;
	membar_producer();
	atomic_inc_uint(&timecounter_bad);
}

/*
 * Stop using a timecounter and remove it from the timecounters list.
 */
int
tc_detach(struct timecounter *target)
{
	struct timecounter *tc;
	struct timecounter **tcp = NULL;
	int removals;
	lwp_t *l;

	/* First, find the timecounter. */
	mutex_spin_enter(&timecounter_lock);
	for (tcp = &timecounters, tc = timecounters;
	     tc != NULL;
	     tcp = &tc->tc_next, tc = tc->tc_next) {
		if (tc == target)
			break;
	}
	if (tc == NULL) {
		mutex_spin_exit(&timecounter_lock);
		return ESRCH;
	}

	/* And now, remove it. */
	*tcp = tc->tc_next;
	if (timecounter == target) {
		tc_pick();
		tc_windup();
	}
	timecounter_mods++;
	removals = timecounter_removals++;
	mutex_spin_exit(&timecounter_lock);

	/*
	 * We now have to determine if any threads in the system are still
	 * making use of this timecounter.
	 *
	 * We issue a broadcast cross call to elide memory ordering issues,
	 * then scan all LWPs in the system looking at each's timecounter
	 * generation number.  We need to see a value of zero (not actively
	 * using a timecounter) or a value greater than our removal value.
	 *
	 * We may race with threads that read `timecounter_removals' and
	 * then get preempted before updating `l_tcgen'.  This is not a
	 * problem, since it means that these threads have not yet started
	 * accessing timecounter state.  All we need is one clean snapshot
	 * of the system where every thread appears not to be using old
	 * timecounter state.
	 */
	for (;;) {
		xc_barrier(0);

		mutex_enter(&proc_lock);
		LIST_FOREACH(l, &alllwp, l_list) {
			if (l->l_tcgen == 0 || l->l_tcgen > removals) {
				/*
				 * Not using timecounter or old timecounter
				 * state at time of our xcall or later.
				 */
				continue;
			}
			break;
		}
		mutex_exit(&proc_lock);

		/*
		 * If the timecounter is still in use, wait at least 10ms
		 * before retrying.
		 */
		if (l == NULL) {
			break;
		}
		(void)kpause("tcdetach", false, mstohz(10), NULL);
	}

	tc->tc_next = NULL;
	return 0;
}
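/*
 * Roughly, the scheme above is a passive form of reclamation: after the
 * removal count has been bumped, xc_barrier() guarantees every CPU has
 * completed any binuptime() that was in flight when the counter was
 * unlinked.  From then on, an l_tcgen at or below `removals' marks an
 * LWP that may still be reading the dying counter, so we keep scanning
 * until no such LWP remains before letting the caller free it.
 */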
/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

	return atomic_load_consume(&timehands)->th_counter->tc_frequency;
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 */
void
tc_setclock(const struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	mutex_spin_enter(&timecounter_lock);
	TC_COUNT(nsetclock);
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &timebase.bin);
	timebase.gen |= 1;	/* change in progress */
	membar_producer();
	timebase.bin = bt;
	membar_producer();
	timebase.gen++;		/* commit change */
	tc_windup();
	mutex_spin_exit(&timecounter_lock);

	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO,
		    "Time stepped from %lld.%09ld to %lld.%09ld\n",
		    (long long)ts2.tv_sec, ts2.tv_nsec,
		    (long long)ts->tv_sec, ts->tv_nsec);
	}
}
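/*
 * Stepping works purely by rewriting timebase: the uptime clock
 * (th_offset) is never stepped, so binuptime() stays monotonic while
 * bintime() = binuptime() + timebase jumps to the new value.  Gradual
 * corrections from adjtime(2)/NTP instead go through th_adjustment in
 * tc_windup(), which slews the rate rather than the value.
 */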
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	u_int delta, ncount, ogen;
	int i, s_update;
	time_t t;

	KASSERT(mutex_owned(&timecounter_lock));

	s_update = 0;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.  Ensure global
	 * visibility of the generation before proceeding.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	membar_producer();
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 * If NTP is not compiled in ntp_update_second still calculates
	 * the adjustment resulting from adjtime() calls.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &timebase.bin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		s_update = 1;
		if (bt.sec != t) {
			timebase.gen |= 1;	/* change in progress */
			membar_producer();
			timebase.bin.sec += bt.sec - t;
			membar_producer();
			timebase.gen++;		/* commit change */
		}
	}

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);
	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
		s_update = 1;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	if (s_update) {
		scale = (uint64_t)1 << 63;
		scale += (th->th_adjustment / 1024) * 2199;
		scale /= th->th_counter->tc_frequency;
		th->th_scale = scale * 2;
	}

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.  Ensure
	 * changes are globally visible before changing.
	 */
	if (++ogen == 0)
		ogen = 1;
	membar_producer();
	th->th_generation = ogen;

	/*
	 * Go live with the new struct timehands.  Ensure changes are
	 * globally visible before changing.
	 */
	setrealuptime(th->th_microtime.tv_sec, th->th_offset.sec);
	atomic_store_release(&timehands, th);

	/*
	 * Force users of the old timehand to move on.  This is
	 * necessary for MP systems; we need to ensure that the
	 * consumers will move away from the old timehand before
	 * we begin updating it again when we eventually wrap
	 * around.
	 */
	if (++tho->th_generation == 0)
		tho->th_generation = 1;
}
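/*
 * To see the 2199/512 trick in tc_windup()'s scale computation with
 * numbers: the code adds (th_adjustment / 1024) * 2199 to 2^63 and
 * later doubles the result, so the effective multiplier on
 * th_adjustment is 2199 / 512 = 4.294921875 versus the exact
 * 2^32 / 10^9 = 4.294967296, i.e. about 10.6 PPM small, which is where
 * the "systematic undercompensation of 10PPM" above comes from.
 */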
/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, void *data, struct pps_state *pps)
{
	pps_params_t *app;
	pps_info_t *pipi;
#ifdef PPS_SYNC
	int *epi;
#endif

	KASSERT(mutex_owned(&timecounter_lock));

	KASSERT(pps != NULL);

	switch (cmd) {
	case PPS_IOC_CREATE:
		return 0;
	case PPS_IOC_DESTROY:
		return 0;
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return EINVAL;
		pps->ppsparam = *app;
		return 0;
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return 0;
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return 0;
	case PPS_IOC_FETCH:
		pipi = (pps_info_t *)data;
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		*pipi = pps->ppsinfo;
		return 0;
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		epi = (int *)data;
		/* XXX Only root should be able to do this */
		if (*epi & ~pps->ppscap)
			return EINVAL;
		pps->kcmode = *epi;
		return 0;
#else
		return EOPNOTSUPP;
#endif
	default:
		return EPASSTHROUGH;
	}
}
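/*
 * A driver typically wires this up behind its own ioctl entry point,
 * along the lines of this sketch (sc_pps_state here is just an
 * illustrative softc member name):
 *
 *	mutex_spin_enter(&timecounter_lock);
 *	error = pps_ioctl(cmd, data, &sc->sc_pps_state);
 *	mutex_spin_exit(&timecounter_lock);
 *
 * matching the KASSERT above that timecounter_lock is held.
 */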
void
pps_init(struct pps_state *pps)
{

	KASSERT(mutex_owned(&timecounter_lock));

	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

/*
 * capture a timestamp in the pps structure
 */
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(mutex_owned(&timecounter_lock));
	KASSERT(pps != NULL);

	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = (uint64_t)tc_delta(th) + th->th_offset_count;
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

#ifdef PPS_DEBUG
int ppsdebug = 0;
#endif

/*
 * process a pps_capture()ed event
 */
void
pps_event(struct pps_state *pps, int event)
{
	pps_ref_event(pps, event, NULL, PPS_REFEVNT_PPS|PPS_REFEVNT_CAPTURE);
}

/*
 * extended pps api / kernel pll/fll entry point
 *
 * feed reference time stamps to PPS engine
 *
 * will simulate a PPS event and feed
 * the NTP PLL/FLL if requested.
 *
 * the ref time stamps should be roughly once
 * a second but do not need to be exactly in phase
 * with the UTC second but should be close to it.
 * this relaxation of requirements allows callout
 * driven timestamping mechanisms to feed the pps
 * capture/kernel pll logic.
 *
 * calling pattern is:
 *  pps_capture() (for PPS_REFEVNT_{CAPTURE|CAPCUR})
 *  read timestamp from reference source
 *  pps_ref_event()
 *
 * supported refmodes:
 *  PPS_REFEVNT_CAPTURE
 *    use system timestamp of pps_capture()
 *  PPS_REFEVNT_CURRENT
 *    use system timestamp of this call
 *  PPS_REFEVNT_CAPCUR
 *    use average of read capture and current system time stamp
 *  PPS_REFEVNT_PPS
 *    assume timestamp on second mark - ref_ts is ignored
 *
 */

void
pps_ref_event(struct pps_state *pps,
	int event,
	struct bintime *ref_ts,
	int refmode
)
{
	struct bintime bt;	/* current time */
	struct bintime btd;	/* time difference */
	struct bintime bt_ref;	/* reference time */
	struct timespec ts, *tsp, *osp;
	struct timehands *th;
	uint64_t tcount, acount, dcount, *pcount;
	int foff, gen;
#ifdef PPS_SYNC
	int fhard;
#endif
	pps_seq_t *pseq;

	KASSERT(mutex_owned(&timecounter_lock));

	KASSERT(pps != NULL);

	/* pick up current time stamp if needed */
	if (refmode & (PPS_REFEVNT_CURRENT|PPS_REFEVNT_CAPCUR)) {
		/* pick up current time stamp */
		th = timehands;
		gen = th->th_generation;
		tcount = (uint64_t)tc_delta(th) + th->th_offset_count;
		if (gen != th->th_generation)
			gen = 0;

		/* If the timecounter was wound up underneath us, bail out. */
		if (pps->capgen == 0 ||
		    pps->capgen != pps->capth->th_generation ||
		    gen == 0 ||
		    gen != pps->capgen) {
#ifdef PPS_DEBUG
			if (ppsdebug & 0x1) {
				log(LOG_DEBUG,
				    "pps_ref_event(pps=%p, event=%d, ...): DROP (wind-up)\n",
				    pps, event);
			}
#endif
			return;
		}
	} else {
		tcount = 0;	/* keep GCC happy */
	}

#ifdef PPS_DEBUG
	if (ppsdebug & 0x1) {
		struct timespec tmsp;

		if (ref_ts == NULL) {
			tmsp.tv_sec = 0;
			tmsp.tv_nsec = 0;
		} else {
			bintime2timespec(ref_ts, &tmsp);
		}

		log(LOG_DEBUG,
		    "pps_ref_event(pps=%p, event=%d, ref_ts=%"PRIi64
		    ".%09"PRIi32", refmode=0x%1x)\n",
		    pps, event, tmsp.tv_sec, (int32_t)tmsp.tv_nsec, refmode);
	}
#endif

	/* setup correct event references */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* determine system time stamp according to refmode */
	dcount = 0;	/* keep GCC happy */
	switch (refmode & PPS_REFEVNT_RMASK) {
	case PPS_REFEVNT_CAPTURE:
		acount = pps->capcount;	/* use capture timestamp */
		break;

	case PPS_REFEVNT_CURRENT:
		acount = tcount;	/* use current timestamp */
		break;

	case PPS_REFEVNT_CAPCUR:
		/*
		 * calculate counter value between pps_capture() and
		 * pps_ref_event()
		 */
		dcount = tcount - pps->capcount;
		acount = (dcount / 2) + pps->capcount;
		break;

	default:		/* ignore call error silently */
		return;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		pps->capcount = acount;
		*pcount = acount;
		pps->ppscount[2] = acount;
#ifdef PPS_DEBUG
		if (ppsdebug & 0x1) {
			log(LOG_DEBUG,
			    "pps_ref_event(pps=%p, event=%d, ...): DROP (time-counter change)\n",
			    pps, event);
		}
#endif
		return;
	}

	pps->capcount = acount;

	/* Convert the count to a bintime. */
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * (acount - pps->capth->th_offset_count));
	bintime_add(&bt, &timebase.bin);

	if ((refmode & PPS_REFEVNT_PPS) == 0) {
		/* determine difference to reference time stamp */
		bt_ref = *ref_ts;

		btd = bt;
		bintime_sub(&btd, &bt_ref);

		/*
		 * simulate a PPS timestamp by dropping the fraction
		 * and applying the offset
		 */
		if (bt.frac >= (uint64_t)1<<63)	/* skip to nearest second */
			bt.sec++;
		bt.frac = 0;
		bintime_add(&bt, &btd);
	} else {
		/*
		 * create ref_ts from current time -
		 * we are supposed to be called on
		 * the second mark
		 */
		bt_ref = bt;
		if (bt_ref.frac >= (uint64_t)1<<63)	/* skip to nearest second */
			bt_ref.sec++;
		bt_ref.frac = 0;
	}

	/* convert bintime to timestamp */
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	/* store time stamp */
	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	/* add offset correction */
	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef PPS_DEBUG
	if (ppsdebug & 0x2) {
		struct timespec ts2;
		struct timespec ts3;

		bintime2timespec(&bt_ref, &ts2);

		bt.sec = 0;
		bt.frac = 0;

		if (refmode & PPS_REFEVNT_CAPCUR) {
			bintime_addx(&bt, pps->capth->th_scale * dcount);
		}
		bintime2timespec(&bt, &ts3);

		log(LOG_DEBUG, "ref_ts=%"PRIi64".%09"PRIi32
		    ", ts=%"PRIi64".%09"PRIi32", read latency=%"PRIi64" ns\n",
		    ts2.tv_sec, (int32_t)ts2.tv_nsec,
		    tsp->tv_sec, (int32_t)tsp->tv_nsec,
		    timespec2ns(&ts3));
	}
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;
		uint64_t div;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event (mod 1 second) thus
		 * we are actually looking at the frequency difference scaled
		 * in nsec.
		 * As the counter time stamps are not truly at 1Hz
		 * we need to scale the count by the elapsed
		 * reference time.
		 * valid sampling interval: [0.5..2[ sec
		 */

		/* calculate elapsed raw count */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;

		/* calculate elapsed ref time */
		btd = bt_ref;
		bintime_sub(&btd, &pps->ref_time);
		pps->ref_time = bt_ref;

		/* check that we stay below 2 sec */
		if (btd.sec < 0 || btd.sec > 1)
			return;

		/* we want at least 0.5 sec between samples */
		if (btd.sec == 0 && btd.frac < (uint64_t)1<<63)
			return;

		/*
		 * calculate cycles per period by multiplying
		 * the frequency with the elapsed period
		 * we pick a fraction of 30 bits
		 * ~1ns resolution for elapsed time
		 */
		div = (uint64_t)btd.sec << 30;
		div |= (btd.frac >> 34) & (((uint64_t)1 << 30) - 1);
		div *= pps->capth->th_counter->tc_frequency;
		div >>= 30;

		if (div == 0)	/* safeguard */
			return;

		scale = (uint64_t)1 << 63;
		scale /= div;
		scale *= 2;

		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);

#ifdef PPS_DEBUG
		if (ppsdebug & 0x4) {
			struct timespec ts2;
			int64_t df;

			bintime2timespec(&bt_ref, &ts2);
			df = timespec2ns(&ts);
			if (df > 500000000)
				df -= 1000000000;
			log(LOG_DEBUG, "hardpps: ref_ts=%"PRIi64
			    ".%09"PRIi32", ts=%"PRIi64".%09"PRIi32
			    ", freqdiff=%"PRIi64" ns/s\n",
			    ts2.tv_sec, (int32_t)ts2.tv_nsec,
			    tsp->tv_sec, (int32_t)tsp->tv_nsec,
			    df);
		}
#endif

		hardpps(tsp, timespec2ns(&ts));
	}
#endif
}
/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	mutex_spin_enter(&timecounter_lock);
	if (__predict_false(timecounter_bad != 0)) {
		/* An existing timecounter has gone bad, pick a new one. */
		(void)atomic_swap_uint(&timecounter_bad, 0);
		if (timecounter->tc_quality < 0) {
			tc_pick();
		}
	}
	tc_windup();
	mutex_spin_exit(&timecounter_lock);
}
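/*
 * With the initialization below, hz = 100 gives tc_tick = 1 (a windup
 * on every 10 ms hardclock tick), while hz = 8000 gives tc_tick = 8
 * (a windup every 8th tick, i.e. every 1 ms).  Windups thus run at
 * roughly hz, capped near 1 kHz: often enough that the hardware
 * counter cannot wrap between updates, as the comment above requires.
 */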
void
inittimecounter(void)
{
	u_int p;

	mutex_init(&timecounter_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	aprint_verbose("timecounter: Timecounters tick every %d.%03u msec\n",
	    p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}