/*	$NetBSD: subr_time.c,v 1.23 2020/05/04 18:23:37 riastradh Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 *	@(#)kern_time.c 8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.23 2020/05/04 18:23:37 riastradh Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/timex.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/intr.h>

#ifdef DEBUG_STICKS
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif

/*
 * Compute the number of clock ticks (hz) until the specified time.
 * Used to compute the second argument to callout_reset() from an
 * absolute time.
 */
int
tvhzto(const struct timeval *tvp)
{
	struct timeval now, tv;

	tv = *tvp;	/* Don't modify original tvp. */
	getmicrotime(&now);
	timersub(&tv, &now, &tv);
	return tvtohz(&tv);
}
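
/*
 * For example, a driver might arm a callout to fire at an absolute
 * wall-clock deadline roughly like this (the softc and handler names
 * are illustrative only):
 *
 *	callout_reset(&sc->sc_callout, tvhzto(&sc->sc_deadline),
 *	    example_handler, sc);
 */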

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(const struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
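	/*
	 * Worked example: with hz = 100 the tick length is 10000 usec,
	 * so a timeout of 1.5 seconds converts to
	 * ((1500000 + 9999) / 10000) + 1 = 151 ticks.
	 */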
	sec = tv->tv_sec;
	usec = tv->tv_usec;

	KASSERT(usec >= 0 && usec < 1000000);

	/* catch overflows in conversion time_t->int */
	if (tv->tv_sec > INT_MAX)
		return INT_MAX;
	if (tv->tv_sec < 0)
		return 0;

	if (sec < 0 || (sec == 0 && usec == 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy tvhzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}

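/*
 * Compute the number of ticks until the specified time, measured
 * against the system's wall-clock time (getnanotime).
 */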
int
tshzto(const struct timespec *tsp)
{
	struct timespec now, ts;

	ts = *tsp;	/* Don't modify original tsp. */
	getnanotime(&now);
	timespecsub(&ts, &now, &ts);
	return tstohz(&ts);
}

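/*
 * Compute the number of ticks until the specified time, measured
 * against the monotonic uptime clock (getnanouptime).
 */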
int
tshztoup(const struct timespec *tsp)
{
	struct timespec now, ts;

	ts = *tsp;	/* Don't modify original tsp. */
	getnanouptime(&now);
	timespecsub(&ts, &now, &ts);
	return tstohz(&ts);
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tstohz(const struct timespec *ts)
{
	struct timeval tv;

	/*
	 * Microseconds provide sufficient resolution for hz, so
	 * convert to a timeval and use tvtohz() above.
	 */
	TIMESPEC_TO_TIMEVAL(&tv, ts);
	return tvtohz(&tv);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).  We do not
 * round up a 0,0 value, because that means the timer or the
 * interval is to be disabled.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return EINVAL;
	if (tv->tv_sec < 0)
		return ETIMEDOUT;
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return 0;
}

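/*
 * Same as itimerfix() above, but for a struct timespec with
 * nanosecond resolution.
 */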
int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return EINVAL;
	if (ts->tv_sec < 0)
		return ETIMEDOUT;
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return 0;
}

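/*
 * Validate the timeout *ts with itimespecfix() and record the current
 * monotonic uptime in *sleepts for later use by gettimeleft().
 * Returns -1 if itimespecfix() rejects the timeout, 0 otherwise.
 */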
int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{

	if (itimespecfix(ts)) {
		return -1;
	}
	getnanouptime(sleepts);
	return 0;
}

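/*
 * Reduce the timeout *ts by the time that has elapsed since *sleepts
 * on the monotonic uptime clock, update *sleepts to the current time,
 * and return the remaining timeout converted to ticks.
 */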
int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	struct timespec sleptts;

	/*
	 * Reduce ts by elapsed time based on monotonic time scale.
	 */
	getnanouptime(&sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;

	return tstohz(ts);
}

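/*
 * Like gettimeleft(), but measure the elapsed time with the clock
 * identified by clockid instead of the monotonic uptime clock, and
 * do not convert the result to ticks.
 */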
void
clock_timeleft(clockid_t clockid, struct timespec *ts, struct timespec *sleepts)
{
	struct timespec sleptts;

	clock_gettime1(clockid, &sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
}

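/*
 * Convert a tick count (in units of 1/hz seconds) to a struct
 * timespec.  The sub-second remainder is scaled to nanoseconds, with
 * the multiplication ordered according to its magnitude to avoid
 * intermediate 64-bit overflow.
 */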
static void
ticks2ts(uint64_t ticks, struct timespec *ts)
{
	ts->tv_sec = ticks / hz;
	uint64_t sticks = ticks - ts->tv_sec * hz;
	if (sticks > BINTIME_SCALE_MS)	/* floor(2^64 / 1000) */
		ts->tv_nsec = sticks / hz * 1000000000LL;
	else if (sticks > BINTIME_SCALE_US)	/* floor(2^64 / 1000000) */
		ts->tv_nsec = sticks * 1000LL / hz * 1000000LL;
	else
		ts->tv_nsec = sticks * 1000000000LL / hz;
	DPRINTF(("%s: %ju/%ju -> %ju.%ju\n", __func__,
	    (uintmax_t)ticks, (uintmax_t)sticks,
	    (uintmax_t)ts->tv_sec, (uintmax_t)ts->tv_nsec));
}

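/*
 * clock_gettime1(clock_id, ts)
 *
 *	Read the clock named by clock_id into *ts.  CLOCK_REALTIME and
 *	CLOCK_MONOTONIC are read directly; a clock_id with
 *	CLOCK_PROCESS_CPUTIME_ID or CLOCK_THREAD_CPUTIME_ID set selects
 *	the CPU time of the process or LWP whose id is encoded in the
 *	remaining bits (0 meaning the caller).  Returns 0 on success,
 *	ESRCH if the process or LWP cannot be found, EINVAL for an
 *	unknown clock, or an error from kauth(9).
 */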
int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{
	int error;
	uint64_t ticks;
	struct proc *p;

#define CPUCLOCK_ID_MASK (~(CLOCK_THREAD_CPUTIME_ID|CLOCK_PROCESS_CPUTIME_ID))
	if (clock_id & CLOCK_PROCESS_CPUTIME_ID) {
		pid_t pid = clock_id & CPUCLOCK_ID_MASK;

		mutex_enter(proc_lock);
		p = pid == 0 ? curproc : proc_find(pid);
		if (p == NULL) {
			mutex_exit(proc_lock);
			return ESRCH;
		}
		ticks = p->p_uticks + p->p_sticks + p->p_iticks;
		DPRINTF(("%s: u=%ju, s=%ju, i=%ju\n", __func__,
		    (uintmax_t)p->p_uticks, (uintmax_t)p->p_sticks,
		    (uintmax_t)p->p_iticks));
		mutex_exit(proc_lock);

		// XXX: Perhaps create a special kauth type
		error = kauth_authorize_process(curlwp->l_cred,
		    KAUTH_PROCESS_PTRACE, p,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
		if (error)
			return error;
	} else if (clock_id & CLOCK_THREAD_CPUTIME_ID) {
		struct lwp *l;
		lwpid_t lid = clock_id & CPUCLOCK_ID_MASK;
		p = curproc;
		mutex_enter(p->p_lock);
		l = lid == 0 ? curlwp : lwp_find(p, lid);
		if (l == NULL) {
			mutex_exit(p->p_lock);
			return ESRCH;
		}
		ticks = l->l_rticksum + l->l_slpticksum;
		DPRINTF(("%s: r=%ju, s=%ju\n", __func__,
		    (uintmax_t)l->l_rticksum, (uintmax_t)l->l_slpticksum));
		mutex_exit(p->p_lock);
	} else
		ticks = (uint64_t)-1;

	if (ticks != (uint64_t)-1) {
		ticks2ts(ticks, ts);
		return 0;
	}

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/*
 * Calculate the time delta and convert it from a struct timespec to
 * ticks.
 */
int
ts2timo(clockid_t clock_id, int flags, struct timespec *ts,
    int *timo, struct timespec *start)
{
	int error;
	struct timespec tsd;

	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000L)
		return EINVAL;

	flags &= TIMER_ABSTIME;
	if (start == NULL)
		start = &tsd;

	if (flags || start != &tsd)
		if ((error = clock_gettime1(clock_id, start)) != 0)
			return error;

	if (flags)
		timespecsub(ts, start, ts);

	if ((error = itimespecfix(ts)) != 0)
		return error;

	if (ts->tv_sec == 0 && ts->tv_nsec == 0)
		return ETIMEDOUT;

	*timo = tstohz(ts);
	KASSERT(*timo > 0);

	return 0;
}

/*
 * timedwaitclock_setup(T, timeout, clockid, flags, epsilon)
 *
 *	Initialize state for a timedwaitclock, to be used subsequently
 *	with timedwaitclock_begin/end, possibly many times in a row.
 *
 *	No cleanup action required at the end; the caller-allocated
 *	(typically stack-allocated) timedwaitclock just holds
 *	parameters and a little state for timedwaitclock_begin/end.
 */
void
timedwaitclock_setup(struct timedwaitclock *T, struct timespec *timeout,
    clockid_t clockid, int flags, const struct bintime *epsilon)
{

	memset(T, 0, sizeof(*T));
	T->timeout = timeout;
	T->clockid = clockid;
	T->flags = flags;
	T->epsilon = epsilon;
	T->starttime = (struct timespec){0,0};
}

/*
 * timedwaitclock_begin(T, timo)
 *
 *	Decide how many ticks to wait for the timedwaitclock T and
 *	store it in *timo.  Keep state for timedwaitclock_end.  May
 *	fail with EINVAL if the specified timeout is invalid, or if the
 *	specified clock fails.  Fails with ETIMEDOUT if there is no
 *	time left to wait.
 */
int
timedwaitclock_begin(struct timedwaitclock *T, int *timo)
{
	struct timespec delta;
	const struct timespec *deltap;
	int error;

	/* Sanity-check timeout -- may have come from userland.  */
	if (T->timeout->tv_nsec < 0 || T->timeout->tv_nsec >= 1000000000L)
		return EINVAL;

	/*
	 * Compute the time delta.
	 */
	if ((T->flags & TIMER_ABSTIME) == TIMER_ABSTIME) {
		/* Check our watch.  */
		error = clock_gettime1(T->clockid, &T->starttime);
		if (error)
			return error;

		/* If the deadline has passed, we're done.  */
		if (timespeccmp(T->timeout, &T->starttime, <=))
			return ETIMEDOUT;

		/* Count how much time is left.  */
		timespecsub(T->timeout, &T->starttime, &delta);
		deltap = &delta;
	} else {
		/* The user specified how much time is left.  */
		deltap = T->timeout;

		/* If there's none left, we've timed out.  */
		if (deltap->tv_sec == 0 && deltap->tv_nsec == 0)
			return ETIMEDOUT;
	}

	/*
	 * Convert to ticks, but clamp to be >=1.
	 *
	 * XXX In the tickless future, use a high-resolution timer if
	 * timo would round to zero.
	 */
	*timo = tstohz(deltap);
	KASSERTMSG(*timo >= 0, "negative ticks: %d", *timo);
	if (*timo == 0)
		*timo = 1;

	/* Success!  */
	return 0;
}

/*
 * timedwaitclock_end(T)
 *
 *	If the timedwaitclock T was relative, update the caller's
 *	original timeout to reflect how much time is left, or zero if
 *	there is no time left or if the clock has gone bad, so that the
 *	next timedwaitclock_begin will immediately time out.
 */
void
timedwaitclock_end(struct timedwaitclock *T)
{
	struct timespec endtime, delta;

	/* If the timeout is absolute, nothing to do.  */
	if ((T->flags & TIMER_ABSTIME) == TIMER_ABSTIME)
		return;

	/*
	 * Check our watch.  If anything goes wrong with it, make sure
	 * that the next time we immediately time out rather than fail
	 * to deduct the time elapsed.
	 */
	if (clock_gettime1(T->clockid, &endtime)) {
		T->timeout->tv_sec = 0;
		T->timeout->tv_nsec = 0;
		return;
	}

	/* Find how much time elapsed while we waited.  */
	timespecsub(&endtime, &T->starttime, &delta);

	/*
	 * Paranoia: If the clock went backwards, treat it as if no
	 * time elapsed at all rather than adding anything.
	 */
	if (delta.tv_sec < 0 ||
	    (delta.tv_sec == 0 && delta.tv_nsec < 0)) {
		delta.tv_sec = 0;
		delta.tv_nsec = 0;
	}

	/*
	 * Set it to the time left, or zero, whichever is larger.  We
	 * do not fail with EWOULDBLOCK here because this may have been
	 * an explicit wakeup, so the caller needs to check before they
	 * give up or else cv_signal would be lost.
	 */
	if (timespeccmp(T->timeout, &delta, <=)) {
		T->timeout->tv_sec = 0;
		T->timeout->tv_nsec = 0;
	} else {
		timespecsub(T->timeout, &delta, T->timeout);
	}
}
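
/*
 * A typical caller might combine the three timedwaitclock routines
 * with cv_timedwait(9) roughly as follows (sketch only; the condition
 * variable, lock, predicate, and epsilon here are illustrative):
 *
 *	struct timedwaitclock T;
 *	int timo, error;
 *
 *	timedwaitclock_setup(&T, timeout, clockid, flags, &epsilon);
 *	mutex_enter(&lock);
 *	while (!condition) {
 *		error = timedwaitclock_begin(&T, &timo);
 *		if (error)
 *			break;
 *		error = cv_timedwait(&cv, &lock, timo);
 *		timedwaitclock_end(&T);
 *		if (error && error != EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&lock);
 *
 * On a timed-out wakeup the loop re-checks the condition first; if the
 * relative timeout has been consumed, the next timedwaitclock_begin
 * returns ETIMEDOUT.
 */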