      1 /*	$NetBSD: kern_time.c,v 1.111 2006/12/06 10:02:22 yamt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2000, 2004, 2005 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Christopher G. Demetriou.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Copyright (c) 1982, 1986, 1989, 1993
     41  *	The Regents of the University of California.  All rights reserved.
     42  *
     43  * Redistribution and use in source and binary forms, with or without
     44  * modification, are permitted provided that the following conditions
     45  * are met:
     46  * 1. Redistributions of source code must retain the above copyright
     47  *    notice, this list of conditions and the following disclaimer.
     48  * 2. Redistributions in binary form must reproduce the above copyright
     49  *    notice, this list of conditions and the following disclaimer in the
     50  *    documentation and/or other materials provided with the distribution.
     51  * 3. Neither the name of the University nor the names of its contributors
     52  *    may be used to endorse or promote products derived from this software
     53  *    without specific prior written permission.
     54  *
     55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     65  * SUCH DAMAGE.
     66  *
     67  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
     68  */
     69 
     70 #include <sys/cdefs.h>
     71 __KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.111 2006/12/06 10:02:22 yamt Exp $");
     72 
     73 #include "fs_nfs.h"
     74 #include "opt_nfs.h"
     75 #include "opt_nfsserver.h"
     76 
     77 #include <sys/param.h>
     78 #include <sys/resourcevar.h>
     79 #include <sys/kernel.h>
     80 #include <sys/systm.h>
     81 #include <sys/proc.h>
     82 #include <sys/sa.h>
     83 #include <sys/savar.h>
     84 #include <sys/vnode.h>
     85 #include <sys/signalvar.h>
     86 #include <sys/syslog.h>
     87 #ifdef __HAVE_TIMECOUNTER
     88 #include <sys/timetc.h>
     89 #else /* !__HAVE_TIMECOUNTER */
     90 #include <sys/timevar.h>
     91 #endif /* !__HAVE_TIMECOUNTER */
     92 #include <sys/kauth.h>
     93 
     94 #include <sys/mount.h>
     95 #include <sys/syscallargs.h>
     96 
     97 #include <uvm/uvm_extern.h>
     98 
     99 #if defined(NFS) || defined(NFSSERVER)
    100 #include <nfs/rpcv2.h>
    101 #include <nfs/nfsproto.h>
    102 #include <nfs/nfs.h>
    103 #include <nfs/nfs_var.h>
    104 #endif
    105 
    106 #include <machine/cpu.h>
    107 
    108 POOL_INIT(ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
    109     &pool_allocator_nointr);
    110 POOL_INIT(ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
    111     &pool_allocator_nointr);
    112 
    113 static void timerupcall(struct lwp *, void *);
    114 #ifdef __HAVE_TIMECOUNTER
    115 static int itimespecfix(struct timespec *);		/* XXX move itimerfix to timespecs */
    116 #endif /* __HAVE_TIMECOUNTER */
    117 
    118 /* Time of day and interval timer support.
    119  *
    120  * These routines provide the kernel entry points to get and set
    121  * the time-of-day and per-process interval timers.  Subroutines
    122  * here provide support for adding and subtracting timeval structures
    123  * and decrementing interval timers, optionally reloading the interval
    124  * timers when they expire.
    125  */
    126 
    127 /* This function is used by clock_settime and settimeofday */
    128 int
    129 settime(struct proc *p, struct timespec *ts)
    130 {
    131 	struct timeval delta, tv;
    132 #ifdef __HAVE_TIMECOUNTER
    133 	struct timeval now;
    134 	struct timespec ts1;
     135 #endif /* __HAVE_TIMECOUNTER */
    136 	struct cpu_info *ci;
    137 	int s;
    138 
    139 	/*
    140 	 * Don't allow the time to be set forward so far it will wrap
    141 	 * and become negative, thus allowing an attacker to bypass
    142 	 * the next check below.  The cutoff is 1 year before rollover
    143 	 * occurs, so even if the attacker uses adjtime(2) to move
    144 	 * the time past the cutoff, it will take a very long time
    145 	 * to get to the wrap point.
    146 	 *
    147 	 * XXX: we check against INT_MAX since on 64-bit
    148 	 *	platforms, sizeof(int) != sizeof(long) and
    149 	 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
    150 	 */
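         	/*
         	 * Worked example (illustrative figures, assuming a 32-bit
         	 * time_t): INT_MAX is 2147483647, about 2038-01-19 UTC, and
         	 * 365*24*60*60 is 31536000, so any tv_sec greater than
         	 * 2115947647 (roughly January 2037) is rejected below.
         	 */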
    151 	if (ts->tv_sec > INT_MAX - 365*24*60*60) {
    152 		struct proc *pp = p->p_pptr;
    153 		log(LOG_WARNING, "pid %d (%s) "
    154 		    "invoked by uid %d ppid %d (%s) "
    155 		    "tried to set clock forward to %ld\n",
    156 		    p->p_pid, p->p_comm, kauth_cred_geteuid(pp->p_cred),
    157 		    pp->p_pid, pp->p_comm, (long)ts->tv_sec);
    158 		return (EPERM);
    159 	}
    160 	TIMESPEC_TO_TIMEVAL(&tv, ts);
    161 
    162 	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
    163 	s = splclock();
    164 #ifdef __HAVE_TIMECOUNTER
    165 	microtime(&now);
    166 	timersub(&tv, &now, &delta);
    167 #else /* !__HAVE_TIMECOUNTER */
    168 	timersub(&tv, &time, &delta);
    169 #endif /* !__HAVE_TIMECOUNTER */
    170 	if ((delta.tv_sec < 0 || delta.tv_usec < 0) &&
    171 	    kauth_authorize_system(p->p_cred, KAUTH_SYSTEM_TIME,
    172 	    KAUTH_REQ_SYSTEM_TIME_BACKWARDS, NULL, NULL, NULL)) {
    173 		splx(s);
    174 		return (EPERM);
    175 	}
    176 #ifdef notyet
    177 	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
    178 		splx(s);
    179 		return (EPERM);
    180 	}
    181 #endif
    182 
    183 #ifdef __HAVE_TIMECOUNTER
    184 	TIMEVAL_TO_TIMESPEC(&tv, &ts1);
    185 	tc_setclock(&ts1);
    186 #else /* !__HAVE_TIMECOUNTER */
    187 	time = tv;
    188 #endif /* !__HAVE_TIMECOUNTER */
    189 
    190 	(void) spllowersoftclock();
    191 
    192 	timeradd(&boottime, &delta, &boottime);
    193 
    194 	/*
    195 	 * XXXSMP
    196 	 * This is wrong.  We should traverse a list of all
    197 	 * CPUs and add the delta to the runtime of those
    198 	 * CPUs which have a process on them.
    199 	 */
    200 	ci = curcpu();
    201 	timeradd(&ci->ci_schedstate.spc_runtime, &delta,
    202 	    &ci->ci_schedstate.spc_runtime);
    203 #if (defined(NFS) && !defined (NFS_V2_ONLY)) || defined(NFSSERVER)
    204 	nqnfs_lease_updatetime(delta.tv_sec);
    205 #endif
    206 	splx(s);
    207 	resettodr();
    208 	return (0);
    209 }
    210 
    211 /* ARGSUSED */
    212 int
    213 sys_clock_gettime(struct lwp *l, void *v, register_t *retval)
    214 {
    215 	struct sys_clock_gettime_args /* {
    216 		syscallarg(clockid_t) clock_id;
    217 		syscallarg(struct timespec *) tp;
    218 	} */ *uap = v;
    219 	clockid_t clock_id;
    220 	struct timespec ats;
    221 
    222 	clock_id = SCARG(uap, clock_id);
    223 	switch (clock_id) {
    224 	case CLOCK_REALTIME:
    225 		nanotime(&ats);
    226 		break;
    227 	case CLOCK_MONOTONIC:
    228 #ifdef __HAVE_TIMECOUNTER
    229 		nanouptime(&ats);
    230 #else /* !__HAVE_TIMECOUNTER */
    231 		{
    232 		int s;
    233 
    234 		/* XXX "hz" granularity */
    235 		s = splclock();
    236 		TIMEVAL_TO_TIMESPEC(&mono_time,&ats);
    237 		splx(s);
    238 		}
    239 #endif /* !__HAVE_TIMECOUNTER */
    240 		break;
    241 	default:
    242 		return (EINVAL);
    243 	}
    244 
    245 	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
    246 }
    247 
    248 /* ARGSUSED */
    249 int
    250 sys_clock_settime(struct lwp *l, void *v, register_t *retval)
    251 {
    252 	struct sys_clock_settime_args /* {
    253 		syscallarg(clockid_t) clock_id;
    254 		syscallarg(const struct timespec *) tp;
    255 	} */ *uap = v;
    256 	int error;
    257 
    258 	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
    259 	    KAUTH_REQ_SYSTEM_TIME_SYSTEM, NULL, NULL, NULL)) != 0)
    260 		return (error);
    261 
    262 	return clock_settime1(l->l_proc, SCARG(uap, clock_id), SCARG(uap, tp));
    263 }
    264 
    265 
    266 int
    267 clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp)
    268 {
    269 	struct timespec ats;
    270 	int error;
    271 
    272 	if ((error = copyin(tp, &ats, sizeof(ats))) != 0)
    273 		return (error);
    274 
    275 	switch (clock_id) {
    276 	case CLOCK_REALTIME:
    277 		if ((error = settime(p, &ats)) != 0)
    278 			return (error);
    279 		break;
    280 	case CLOCK_MONOTONIC:
    281 		return (EINVAL);	/* read-only clock */
    282 	default:
    283 		return (EINVAL);
    284 	}
    285 
    286 	return 0;
    287 }
    288 
    289 int
    290 sys_clock_getres(struct lwp *l, void *v, register_t *retval)
    291 {
    292 	struct sys_clock_getres_args /* {
    293 		syscallarg(clockid_t) clock_id;
    294 		syscallarg(struct timespec *) tp;
    295 	} */ *uap = v;
    296 	clockid_t clock_id;
    297 	struct timespec ts;
    298 	int error = 0;
    299 
    300 	clock_id = SCARG(uap, clock_id);
    301 	switch (clock_id) {
    302 	case CLOCK_REALTIME:
    303 	case CLOCK_MONOTONIC:
    304 		ts.tv_sec = 0;
    305 #ifdef __HAVE_TIMECOUNTER
    306 		if (tc_getfrequency() > 1000000000)
    307 			ts.tv_nsec = 1;
    308 		else
    309 			ts.tv_nsec = 1000000000 / tc_getfrequency();
    310 #else /* !__HAVE_TIMECOUNTER */
    311 		ts.tv_nsec = 1000000000 / hz;
    312 #endif /* !__HAVE_TIMECOUNTER */
    313 		break;
    314 	default:
    315 		return (EINVAL);
    316 	}
    317 
    318 	if (SCARG(uap, tp))
    319 		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
    320 
    321 	return error;
    322 }
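         /*
          * Resolution example (illustrative): with a hypothetical timecounter
          * running at 3579545 Hz (an ACPI-PM-timer-like rate), the resolution
          * reported above is 1000000000 / 3579545 = 279 ns; on a kernel
          * without timecounters and hz = 100 it is 1000000000 / 100 = 10 ms.
          */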
    323 
    324 /* ARGSUSED */
    325 int
    326 sys_nanosleep(struct lwp *l, void *v, register_t *retval)
    327 {
    328 #ifdef __HAVE_TIMECOUNTER
    329 	static int nanowait;
    330 	struct sys_nanosleep_args/* {
    331 		syscallarg(struct timespec *) rqtp;
    332 		syscallarg(struct timespec *) rmtp;
    333 	} */ *uap = v;
    334 	struct timespec rmt, rqt;
    335 	int error, timo;
    336 
    337 	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
    338 	if (error)
    339 		return (error);
    340 
    341 	if (itimespecfix(&rqt))
    342 		return (EINVAL);
    343 
    344 	timo = tstohz(&rqt);
    345 	/*
     346 	 * Avoid inadvertently sleeping forever
    347 	 */
    348 	if (timo == 0)
    349 		timo = 1;
    350 
    351 	getnanouptime(&rmt);
    352 
    353 	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
    354 	if (error == ERESTART)
    355 		error = EINTR;
    356 	if (error == EWOULDBLOCK)
    357 		error = 0;
    358 
    359 	if (SCARG(uap, rmtp)) {
    360 		int error1;
    361 		struct timespec rmtend;
    362 
    363 		getnanouptime(&rmtend);
    364 
    365 		timespecsub(&rmtend, &rmt, &rmt);
    366 		timespecsub(&rqt, &rmt, &rmt);
    367 		if (rmt.tv_sec < 0)
    368 			timespecclear(&rmt);
    369 
    370 		error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
    371 			sizeof(rmt));
    372 		if (error1)
    373 			return (error1);
    374 	}
    375 
    376 	return error;
    377 #else /* !__HAVE_TIMECOUNTER */
    378 	static int nanowait;
    379 	struct sys_nanosleep_args/* {
    380 		syscallarg(struct timespec *) rqtp;
    381 		syscallarg(struct timespec *) rmtp;
    382 	} */ *uap = v;
    383 	struct timespec rqt;
    384 	struct timespec rmt;
    385 	struct timeval atv, utv;
    386 	int error, s, timo;
    387 
    388 	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
    389 	if (error)
    390 		return (error);
    391 
    392 	TIMESPEC_TO_TIMEVAL(&atv,&rqt);
    393 	if (itimerfix(&atv))
    394 		return (EINVAL);
    395 
    396 	s = splclock();
    397 	timeradd(&atv,&time,&atv);
    398 	timo = hzto(&atv);
    399 	/*
     400 	 * Avoid inadvertently sleeping forever
    401 	 */
    402 	if (timo == 0)
    403 		timo = 1;
    404 	splx(s);
    405 
    406 	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
    407 	if (error == ERESTART)
    408 		error = EINTR;
    409 	if (error == EWOULDBLOCK)
    410 		error = 0;
    411 
    412 	if (SCARG(uap, rmtp)) {
    413 		int error1;
    414 
    415 		s = splclock();
    416 		utv = time;
    417 		splx(s);
    418 
    419 		timersub(&atv, &utv, &utv);
    420 		if (utv.tv_sec < 0)
    421 			timerclear(&utv);
    422 
    423 		TIMEVAL_TO_TIMESPEC(&utv,&rmt);
    424 		error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
    425 			sizeof(rmt));
    426 		if (error1)
    427 			return (error1);
    428 	}
    429 
    430 	return error;
    431 #endif /* !__HAVE_TIMECOUNTER */
    432 }
    433 
    434 /* ARGSUSED */
    435 int
    436 sys_gettimeofday(struct lwp *l, void *v, register_t *retval)
    437 {
    438 	struct sys_gettimeofday_args /* {
    439 		syscallarg(struct timeval *) tp;
    440 		syscallarg(void *) tzp;		really "struct timezone *"
    441 	} */ *uap = v;
    442 	struct timeval atv;
    443 	int error = 0;
    444 	struct timezone tzfake;
    445 
    446 	if (SCARG(uap, tp)) {
    447 		microtime(&atv);
    448 		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
    449 		if (error)
    450 			return (error);
    451 	}
    452 	if (SCARG(uap, tzp)) {
    453 		/*
    454 		 * NetBSD has no kernel notion of time zone, so we just
    455 		 * fake up a timezone struct and return it if demanded.
    456 		 */
    457 		tzfake.tz_minuteswest = 0;
    458 		tzfake.tz_dsttime = 0;
    459 		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
    460 	}
    461 	return (error);
    462 }
    463 
    464 /* ARGSUSED */
    465 int
    466 sys_settimeofday(struct lwp *l, void *v, register_t *retval)
    467 {
    468 	struct sys_settimeofday_args /* {
    469 		syscallarg(const struct timeval *) tv;
    470 		syscallarg(const void *) tzp;	really "const struct timezone *"
    471 	} */ *uap = v;
    472 	int error;
    473 
    474 	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
    475 	    KAUTH_REQ_SYSTEM_TIME_SYSTEM, NULL, NULL, NULL)) != 0)
    476 		return (error);
    477 
    478 	return settimeofday1(SCARG(uap, tv), SCARG(uap, tzp), l->l_proc);
    479 }
    480 
    481 int
    482 settimeofday1(const struct timeval *utv, const struct timezone *utzp,
    483     struct proc *p)
    484 {
    485 	struct timeval atv;
    486 	struct timespec ts;
    487 	int error;
    488 
    489 	/* Verify all parameters before changing time. */
    490 	/*
    491 	 * NetBSD has no kernel notion of time zone, and only an
    492 	 * obsolete program would try to set it, so we log a warning.
    493 	 */
    494 	if (utzp)
    495 		log(LOG_WARNING, "pid %d attempted to set the "
    496 		    "(obsolete) kernel time zone\n", p->p_pid);
    497 
    498 	if (utv == NULL)
    499 		return 0;
    500 
    501 	if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
    502 		return error;
    503 	TIMEVAL_TO_TIMESPEC(&atv, &ts);
    504 	return settime(p, &ts);
    505 }
    506 
    507 #ifndef __HAVE_TIMECOUNTER
    508 int	tickdelta;			/* current clock skew, us. per tick */
    509 long	timedelta;			/* unapplied time correction, us. */
    510 long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
    511 #endif
    512 
    513 int	time_adjusted;			/* set if an adjustment is made */
    514 
    515 /* ARGSUSED */
    516 int
    517 sys_adjtime(struct lwp *l, void *v, register_t *retval)
    518 {
    519 	struct sys_adjtime_args /* {
    520 		syscallarg(const struct timeval *) delta;
    521 		syscallarg(struct timeval *) olddelta;
    522 	} */ *uap = v;
    523 	int error;
    524 
    525 	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
    526 	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
    527 		return (error);
    528 
    529 	return adjtime1(SCARG(uap, delta), SCARG(uap, olddelta), l->l_proc);
    530 }
    531 
    532 int
    533 adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
    534 {
    535 	struct timeval atv;
    536 	int error = 0;
    537 
    538 #ifdef __HAVE_TIMECOUNTER
    539 	extern int64_t time_adjtime;  /* in kern_ntptime.c */
    540 #else /* !__HAVE_TIMECOUNTER */
    541 	long ndelta, ntickdelta, odelta;
    542 	int s;
    543 #endif /* !__HAVE_TIMECOUNTER */
    544 
    545 #ifdef __HAVE_TIMECOUNTER
    546 	if (olddelta) {
    547 		atv.tv_sec = time_adjtime / 1000000;
    548 		atv.tv_usec = time_adjtime % 1000000;
    549 		if (atv.tv_usec < 0) {
    550 			atv.tv_usec += 1000000;
    551 			atv.tv_sec--;
    552 		}
    553 		error = copyout(&atv, olddelta, sizeof(struct timeval));
    554 		if (error)
    555 			return (error);
    556 	}
    557 
    558 	if (delta) {
    559 		error = copyin(delta, &atv, sizeof(struct timeval));
    560 		if (error)
    561 			return (error);
    562 
    563 		time_adjtime = (int64_t)atv.tv_sec * 1000000 +
    564 			atv.tv_usec;
    565 
    566 		if (time_adjtime)
    567 			/* We need to save the system time during shutdown */
    568 			time_adjusted |= 1;
    569 	}
    570 #else /* !__HAVE_TIMECOUNTER */
    571 	error = copyin(delta, &atv, sizeof(struct timeval));
    572 	if (error)
    573 		return (error);
    574 
    575 	/*
    576 	 * Compute the total correction and the rate at which to apply it.
    577 	 * Round the adjustment down to a whole multiple of the per-tick
    578 	 * delta, so that after some number of incremental changes in
    579 	 * hardclock(), tickdelta will become zero, lest the correction
    580 	 * overshoot and start taking us away from the desired final time.
    581 	 */
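         	/*
         	 * Worked example (with a hypothetical tickadj of 40 us): a
         	 * request of +1000130 us exceeds bigadj, so ntickdelta becomes
         	 * 400 us and ndelta is rounded down to 1000000 us; hardclock()
         	 * then applies 400 us per tick until timedelta reaches zero,
         	 * 2500 ticks later.
         	 */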
    582 	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
    583 	if (ndelta > bigadj || ndelta < -bigadj)
    584 		ntickdelta = 10 * tickadj;
    585 	else
    586 		ntickdelta = tickadj;
    587 	if (ndelta % ntickdelta)
    588 		ndelta = ndelta / ntickdelta * ntickdelta;
    589 
    590 	/*
    591 	 * To make hardclock()'s job easier, make the per-tick delta negative
    592 	 * if we want time to run slower; then hardclock can simply compute
    593 	 * tick + tickdelta, and subtract tickdelta from timedelta.
    594 	 */
    595 	if (ndelta < 0)
    596 		ntickdelta = -ntickdelta;
    597 	if (ndelta != 0)
    598 		/* We need to save the system clock time during shutdown */
    599 		time_adjusted |= 1;
    600 	s = splclock();
    601 	odelta = timedelta;
    602 	timedelta = ndelta;
    603 	tickdelta = ntickdelta;
    604 	splx(s);
    605 
    606 	if (olddelta) {
    607 		atv.tv_sec = odelta / 1000000;
    608 		atv.tv_usec = odelta % 1000000;
    609 		error = copyout(&atv, olddelta, sizeof(struct timeval));
    610 	}
    611 #endif /* __HAVE_TIMECOUNTER */
    612 
    613 	return error;
    614 }
    615 
    616 /*
    617  * Interval timer support. Both the BSD getitimer() family and the POSIX
    618  * timer_*() family of routines are supported.
    619  *
    620  * All timers are kept in an array pointed to by p_timers, which is
    621  * allocated on demand - many processes don't use timers at all. The
    622  * first three elements in this array are reserved for the BSD timers:
    623  * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
    624  * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
    625  * syscall.
    626  *
    627  * Realtime timers are kept in the ptimer structure as an absolute
    628  * time; virtual time timers are kept as a linked list of deltas.
    629  * Virtual time timers are processed in the hardclock() routine of
    630  * kern_clock.c.  The real time timer is processed by a callout
    631  * routine, called from the softclock() routine.  Since a callout may
    632  * be delayed in real time due to interrupt processing in the system,
     633  * it is possible for the real time timeout routine (realtimerexpire,
    634  * given below), to be delayed in real time past when it is supposed
    635  * to occur.  It does not suffice, therefore, to reload the real timer
     636  * .it_value from the real time timer's .it_interval.  Rather, we
    637  * compute the next time in absolute time the timer should go off.  */
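         /*
          * Illustrative example of the delta list (hypothetical values): if a
          * process has virtual timers due in 10 ms, 25 ms and 70 ms of virtual
          * time, pts_virtual holds them as deltas of 10 ms, 15 ms and 45 ms;
          * each entry stores its expiry relative to the entry before it, so
          * advancing virtual time only requires touching the head of the list.
          */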
    638 
    639 /* Allocate a POSIX realtime timer. */
    640 int
    641 sys_timer_create(struct lwp *l, void *v, register_t *retval)
    642 {
    643 	struct sys_timer_create_args /* {
    644 		syscallarg(clockid_t) clock_id;
    645 		syscallarg(struct sigevent *) evp;
    646 		syscallarg(timer_t *) timerid;
    647 	} */ *uap = v;
    648 
    649 	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
    650 	    SCARG(uap, evp), copyin, l);
    651 }
    652 
    653 int
    654 timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    655     copyin_t fetch_event, struct lwp *l)
    656 {
    657 	int error;
    658 	timer_t timerid;
    659 	struct ptimer *pt;
    660 	struct proc *p;
    661 
    662 	p = l->l_proc;
    663 
    664 	if (id < CLOCK_REALTIME ||
    665 	    id > CLOCK_PROF)
    666 		return (EINVAL);
    667 
    668 	if (p->p_timers == NULL)
    669 		timers_alloc(p);
    670 
    671 	/* Find a free timer slot, skipping those reserved for setitimer(). */
    672 	for (timerid = 3; timerid < TIMER_MAX; timerid++)
    673 		if (p->p_timers->pts_timers[timerid] == NULL)
    674 			break;
    675 
    676 	if (timerid == TIMER_MAX)
    677 		return EAGAIN;
    678 
    679 	pt = pool_get(&ptimer_pool, PR_WAITOK);
    680 	if (evp) {
    681 		if (((error =
    682 		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
    683 		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
    684 			(pt->pt_ev.sigev_notify > SIGEV_SA))) {
    685 			pool_put(&ptimer_pool, pt);
    686 			return (error ? error : EINVAL);
    687 		}
    688 	} else {
    689 		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
    690 		switch (id) {
    691 		case CLOCK_REALTIME:
    692 			pt->pt_ev.sigev_signo = SIGALRM;
    693 			break;
    694 		case CLOCK_VIRTUAL:
    695 			pt->pt_ev.sigev_signo = SIGVTALRM;
    696 			break;
    697 		case CLOCK_PROF:
    698 			pt->pt_ev.sigev_signo = SIGPROF;
    699 			break;
    700 		}
    701 		pt->pt_ev.sigev_value.sival_int = timerid;
    702 	}
    703 	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
    704 	pt->pt_info.ksi_errno = 0;
    705 	pt->pt_info.ksi_code = 0;
    706 	pt->pt_info.ksi_pid = p->p_pid;
    707 	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
    708 	pt->pt_info.ksi_sigval = pt->pt_ev.sigev_value;
    709 
    710 	pt->pt_type = id;
    711 	pt->pt_proc = p;
    712 	pt->pt_overruns = 0;
    713 	pt->pt_poverruns = 0;
    714 	pt->pt_entry = timerid;
    715 	timerclear(&pt->pt_time.it_value);
    716 	if (id == CLOCK_REALTIME)
    717 		callout_init(&pt->pt_ch);
    718 	else
    719 		pt->pt_active = 0;
    720 
    721 	p->p_timers->pts_timers[timerid] = pt;
    722 
    723 	return copyout(&timerid, tid, sizeof(timerid));
    724 }
    725 
    726 /* Delete a POSIX realtime timer */
    727 int
    728 sys_timer_delete(struct lwp *l, void *v, register_t *retval)
    729 {
    730 	struct sys_timer_delete_args /*  {
    731 		syscallarg(timer_t) timerid;
    732 	} */ *uap = v;
    733 	struct proc *p = l->l_proc;
    734 	timer_t timerid;
    735 	struct ptimer *pt, *ptn;
    736 	int s;
    737 
    738 	timerid = SCARG(uap, timerid);
    739 
    740 	if ((p->p_timers == NULL) ||
    741 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
    742 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
    743 		return (EINVAL);
    744 
    745 	if (pt->pt_type == CLOCK_REALTIME)
    746 		callout_stop(&pt->pt_ch);
    747 	else if (pt->pt_active) {
    748 		s = splclock();
    749 		ptn = LIST_NEXT(pt, pt_list);
    750 		LIST_REMOVE(pt, pt_list);
    751 		for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
    752 			timeradd(&pt->pt_time.it_value, &ptn->pt_time.it_value,
    753 			    &ptn->pt_time.it_value);
    754 		splx(s);
    755 	}
    756 
    757 	p->p_timers->pts_timers[timerid] = NULL;
    758 	pool_put(&ptimer_pool, pt);
    759 
    760 	return (0);
    761 }
    762 
    763 /*
    764  * Set up the given timer. The value in pt->pt_time.it_value is taken
    765  * to be an absolute time for CLOCK_REALTIME timers and a relative
    766  * time for virtual timers.
    767  * Must be called at splclock().
    768  */
    769 void
    770 timer_settime(struct ptimer *pt)
    771 {
    772 	struct ptimer *ptn, *pptn;
    773 	struct ptlist *ptl;
    774 
    775 	if (pt->pt_type == CLOCK_REALTIME) {
    776 		callout_stop(&pt->pt_ch);
    777 		if (timerisset(&pt->pt_time.it_value)) {
    778 			/*
     779 			 * Don't need to check hzto()'s return value here;
    780 			 * callout_reset() does it for us.
    781 			 */
    782 			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
    783 			    realtimerexpire, pt);
    784 		}
    785 	} else {
    786 		if (pt->pt_active) {
    787 			ptn = LIST_NEXT(pt, pt_list);
    788 			LIST_REMOVE(pt, pt_list);
    789 			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
    790 				timeradd(&pt->pt_time.it_value,
    791 				    &ptn->pt_time.it_value,
    792 				    &ptn->pt_time.it_value);
    793 		}
    794 		if (timerisset(&pt->pt_time.it_value)) {
    795 			if (pt->pt_type == CLOCK_VIRTUAL)
    796 				ptl = &pt->pt_proc->p_timers->pts_virtual;
    797 			else
    798 				ptl = &pt->pt_proc->p_timers->pts_prof;
    799 
    800 			for (ptn = LIST_FIRST(ptl), pptn = NULL;
    801 			     ptn && timercmp(&pt->pt_time.it_value,
    802 				 &ptn->pt_time.it_value, >);
    803 			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
    804 				timersub(&pt->pt_time.it_value,
    805 				    &ptn->pt_time.it_value,
    806 				    &pt->pt_time.it_value);
    807 
    808 			if (pptn)
    809 				LIST_INSERT_AFTER(pptn, pt, pt_list);
    810 			else
    811 				LIST_INSERT_HEAD(ptl, pt, pt_list);
    812 
    813 			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
    814 				timersub(&ptn->pt_time.it_value,
    815 				    &pt->pt_time.it_value,
    816 				    &ptn->pt_time.it_value);
    817 
    818 			pt->pt_active = 1;
    819 		} else
    820 			pt->pt_active = 0;
    821 	}
    822 }
    823 
    824 void
    825 timer_gettime(struct ptimer *pt, struct itimerval *aitv)
    826 {
    827 #ifdef __HAVE_TIMECOUNTER
    828 	struct timeval now;
    829 #endif
    830 	struct ptimer *ptn;
    831 
    832 	*aitv = pt->pt_time;
    833 	if (pt->pt_type == CLOCK_REALTIME) {
     834 		/*
     835 		 * Convert the .it_value of a real time timer from
     836 		 * absolute to relative time.  If the timer's expiry
     837 		 * has already passed, report zero time remaining;
     838 		 * otherwise report the difference between the current
     839 		 * time and the time the timer is due to go off.
     840 		 */
    841 		if (timerisset(&aitv->it_value)) {
    842 #ifdef __HAVE_TIMECOUNTER
    843 			getmicrotime(&now);
    844 			if (timercmp(&aitv->it_value, &now, <))
    845 				timerclear(&aitv->it_value);
    846 			else
    847 				timersub(&aitv->it_value, &now,
    848 				    &aitv->it_value);
    849 #else /* !__HAVE_TIMECOUNTER */
    850 			if (timercmp(&aitv->it_value, &time, <))
    851 				timerclear(&aitv->it_value);
    852 			else
    853 				timersub(&aitv->it_value, &time,
    854 				    &aitv->it_value);
    855 #endif /* !__HAVE_TIMECOUNTER */
    856 		}
    857 	} else if (pt->pt_active) {
    858 		if (pt->pt_type == CLOCK_VIRTUAL)
    859 			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
    860 		else
    861 			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
    862 		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
    863 			timeradd(&aitv->it_value,
    864 			    &ptn->pt_time.it_value, &aitv->it_value);
    865 		KASSERT(ptn != NULL); /* pt should be findable on the list */
    866 	} else
    867 		timerclear(&aitv->it_value);
    868 }
    869 
    870 
    871 
    872 /* Set and arm a POSIX realtime timer */
    873 int
    874 sys_timer_settime(struct lwp *l, void *v, register_t *retval)
    875 {
    876 	struct sys_timer_settime_args /* {
    877 		syscallarg(timer_t) timerid;
    878 		syscallarg(int) flags;
    879 		syscallarg(const struct itimerspec *) value;
    880 		syscallarg(struct itimerspec *) ovalue;
    881 	} */ *uap = v;
    882 	int error;
    883 	struct itimerspec value, ovalue, *ovp = NULL;
    884 
    885 	if ((error = copyin(SCARG(uap, value), &value,
    886 	    sizeof(struct itimerspec))) != 0)
    887 		return (error);
    888 
    889 	if (SCARG(uap, ovalue))
    890 		ovp = &ovalue;
    891 
    892 	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
    893 	    SCARG(uap, flags), l->l_proc)) != 0)
    894 		return error;
    895 
    896 	if (ovp)
    897 		return copyout(&ovalue, SCARG(uap, ovalue),
    898 		    sizeof(struct itimerspec));
    899 	return 0;
    900 }
    901 
    902 int
    903 dotimer_settime(int timerid, struct itimerspec *value,
    904     struct itimerspec *ovalue, int flags, struct proc *p)
    905 {
    906 #ifdef __HAVE_TIMECOUNTER
    907 	struct timeval now;
    908 #endif
    909 	struct itimerval val, oval;
    910 	struct ptimer *pt;
    911 	int s;
    912 
    913 	if ((p->p_timers == NULL) ||
    914 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
    915 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
    916 		return (EINVAL);
    917 
    918 	TIMESPEC_TO_TIMEVAL(&val.it_value, &value->it_value);
    919 	TIMESPEC_TO_TIMEVAL(&val.it_interval, &value->it_interval);
    920 	if (itimerfix(&val.it_value) || itimerfix(&val.it_interval))
    921 		return (EINVAL);
    922 
    923 	oval = pt->pt_time;
    924 	pt->pt_time = val;
    925 
    926 	s = splclock();
    927 	/*
    928 	 * If we've been passed a relative time for a realtime timer,
    929 	 * convert it to absolute; if an absolute time for a virtual
    930 	 * timer, convert it to relative and make sure we don't set it
    931 	 * to zero, which would cancel the timer, or let it go
    932 	 * negative, which would confuse the comparison tests.
    933 	 */
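         	/*
         	 * Illustrative example (hypothetical values): with the clock at
         	 * t = 1000 s, a relative CLOCK_REALTIME value of 5 s is stored
         	 * as the absolute time 1005 s, while an absolute virtual-timer
         	 * value that already lies in the past is clamped to 1 us so the
         	 * timer still fires instead of being silently cancelled.
         	 */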
    934 	if (timerisset(&pt->pt_time.it_value)) {
    935 		if (pt->pt_type == CLOCK_REALTIME) {
    936 #ifdef __HAVE_TIMECOUNTER
    937 			if ((flags & TIMER_ABSTIME) == 0) {
    938 				getmicrotime(&now);
    939 				timeradd(&pt->pt_time.it_value, &now,
    940 				    &pt->pt_time.it_value);
    941 			}
    942 #else /* !__HAVE_TIMECOUNTER */
    943 			if ((flags & TIMER_ABSTIME) == 0)
    944 				timeradd(&pt->pt_time.it_value, &time,
    945 				    &pt->pt_time.it_value);
    946 #endif /* !__HAVE_TIMECOUNTER */
    947 		} else {
    948 			if ((flags & TIMER_ABSTIME) != 0) {
    949 #ifdef __HAVE_TIMECOUNTER
    950 				getmicrotime(&now);
    951 				timersub(&pt->pt_time.it_value, &now,
    952 				    &pt->pt_time.it_value);
    953 #else /* !__HAVE_TIMECOUNTER */
    954 				timersub(&pt->pt_time.it_value, &time,
    955 				    &pt->pt_time.it_value);
    956 #endif /* !__HAVE_TIMECOUNTER */
    957 				if (!timerisset(&pt->pt_time.it_value) ||
    958 				    pt->pt_time.it_value.tv_sec < 0) {
    959 					pt->pt_time.it_value.tv_sec = 0;
    960 					pt->pt_time.it_value.tv_usec = 1;
    961 				}
    962 			}
    963 		}
    964 	}
    965 
    966 	timer_settime(pt);
    967 	splx(s);
    968 
    969 	if (ovalue) {
    970 		TIMEVAL_TO_TIMESPEC(&oval.it_value, &ovalue->it_value);
    971 		TIMEVAL_TO_TIMESPEC(&oval.it_interval, &ovalue->it_interval);
    972 	}
    973 
    974 	return (0);
    975 }
    976 
    977 /* Return the time remaining until a POSIX timer fires. */
    978 int
    979 sys_timer_gettime(struct lwp *l, void *v, register_t *retval)
    980 {
    981 	struct sys_timer_gettime_args /* {
    982 		syscallarg(timer_t) timerid;
    983 		syscallarg(struct itimerspec *) value;
    984 	} */ *uap = v;
    985 	struct itimerspec its;
    986 	int error;
    987 
    988 	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
    989 	    &its)) != 0)
    990 		return error;
    991 
    992 	return copyout(&its, SCARG(uap, value), sizeof(its));
    993 }
    994 
    995 int
    996 dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
    997 {
    998 	int s;
    999 	struct ptimer *pt;
   1000 	struct itimerval aitv;
   1001 
   1002 	if ((p->p_timers == NULL) ||
   1003 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
   1004 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
   1005 		return (EINVAL);
   1006 
   1007 	s = splclock();
   1008 	timer_gettime(pt, &aitv);
   1009 	splx(s);
   1010 
   1011 	TIMEVAL_TO_TIMESPEC(&aitv.it_interval, &its->it_interval);
   1012 	TIMEVAL_TO_TIMESPEC(&aitv.it_value, &its->it_value);
   1013 
   1014 	return 0;
   1015 }
   1016 
   1017 /*
   1018  * Return the count of the number of times a periodic timer expired
   1019  * while a notification was already pending. The counter is reset when
   1020  * a timer expires and a notification can be posted.
   1021  */
   1022 int
   1023 sys_timer_getoverrun(struct lwp *l, void *v, register_t *retval)
   1024 {
   1025 	struct sys_timer_getoverrun_args /* {
   1026 		syscallarg(timer_t) timerid;
   1027 	} */ *uap = v;
   1028 	struct proc *p = l->l_proc;
   1029 	int timerid;
   1030 	struct ptimer *pt;
   1031 
   1032 	timerid = SCARG(uap, timerid);
   1033 
   1034 	if ((p->p_timers == NULL) ||
   1035 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
   1036 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
   1037 		return (EINVAL);
   1038 
   1039 	*retval = pt->pt_poverruns;
   1040 
   1041 	return (0);
   1042 }
   1043 
   1044 /* Glue function that triggers an upcall; called from userret(). */
   1045 static void
   1046 timerupcall(struct lwp *l, void *arg)
   1047 {
   1048 	struct ptimers *pt = (struct ptimers *)arg;
   1049 	unsigned int i, fired, done;
   1050 
   1051 	KDASSERT(l->l_proc->p_sa);
   1052 	/* Bail out if we do not own the virtual processor */
   1053 	if (l->l_savp->savp_lwp != l)
   1054 		return ;
   1055 
   1056 	KERNEL_PROC_LOCK(l);
   1057 
   1058 	fired = pt->pts_fired;
   1059 	done = 0;
   1060 	while ((i = ffs(fired)) != 0) {
   1061 		siginfo_t *si;
   1062 		int mask = 1 << --i;
   1063 		int f;
   1064 
   1065 		f = l->l_flag & L_SA;
   1066 		l->l_flag &= ~L_SA;
   1067 		si = siginfo_alloc(PR_WAITOK);
   1068 		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
   1069 		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
   1070 		    sizeof(*si), si, siginfo_free) != 0) {
   1071 			siginfo_free(si);
   1072 			/* XXX What do we do here?? */
   1073 		} else
   1074 			done |= mask;
   1075 		fired &= ~mask;
   1076 		l->l_flag |= f;
   1077 	}
   1078 	pt->pts_fired &= ~done;
   1079 	if (pt->pts_fired == 0)
   1080 		l->l_proc->p_userret = NULL;
   1081 
   1082 	KERNEL_PROC_UNLOCK(l);
   1083 }
   1084 
   1085 /*
   1086  * Real interval timer expired:
   1087  * send process whose timer expired an alarm signal.
   1088  * If time is not set up to reload, then just return.
   1089  * Else compute next time timer should go off which is > current time.
   1090  * This is where delay in processing this timeout causes multiple
   1091  * SIGALRM calls to be compressed into one.
   1092  */
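         /*
          * Illustrative example (hypothetical values): a 10 ms interval timer
          * whose callout is serviced 35 ms late fires once via itimerfire(),
          * steps .it_value forward by the interval until it lies in the future
          * again, records the three skipped expirations in pt_overruns, and
          * re-arms the callout for the next future expiry.
          */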
   1093 void
   1094 realtimerexpire(void *arg)
   1095 {
   1096 #ifdef __HAVE_TIMECOUNTER
   1097 	struct timeval now;
   1098 #endif
   1099 	struct ptimer *pt;
   1100 	int s;
   1101 
   1102 	pt = (struct ptimer *)arg;
   1103 
   1104 	itimerfire(pt);
   1105 
   1106 	if (!timerisset(&pt->pt_time.it_interval)) {
   1107 		timerclear(&pt->pt_time.it_value);
   1108 		return;
   1109 	}
   1110 #ifdef __HAVE_TIMECOUNTER
   1111 	for (;;) {
   1112 		s = splclock();	/* XXX need spl now? */
   1113 		timeradd(&pt->pt_time.it_value,
   1114 		    &pt->pt_time.it_interval, &pt->pt_time.it_value);
   1115 		getmicrotime(&now);
   1116 		if (timercmp(&pt->pt_time.it_value, &now, >)) {
   1117 			/*
    1118 			 * Don't need to check hzto()'s return value here;
   1119 			 * callout_reset() does it for us.
   1120 			 */
   1121 			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
   1122 			    realtimerexpire, pt);
   1123 			splx(s);
   1124 			return;
   1125 		}
   1126 		splx(s);
   1127 		pt->pt_overruns++;
   1128 	}
   1129 #else /* !__HAVE_TIMECOUNTER */
   1130 	for (;;) {
   1131 		s = splclock();
   1132 		timeradd(&pt->pt_time.it_value,
   1133 		    &pt->pt_time.it_interval, &pt->pt_time.it_value);
   1134 		if (timercmp(&pt->pt_time.it_value, &time, >)) {
   1135 			/*
    1136 			 * Don't need to check hzto()'s return value here;
   1137 			 * callout_reset() does it for us.
   1138 			 */
   1139 			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
   1140 			    realtimerexpire, pt);
   1141 			splx(s);
   1142 			return;
   1143 		}
   1144 		splx(s);
   1145 		pt->pt_overruns++;
   1146 	}
   1147 #endif /* !__HAVE_TIMECOUNTER */
   1148 }
   1149 
   1150 /* BSD routine to get the value of an interval timer. */
   1151 /* ARGSUSED */
   1152 int
   1153 sys_getitimer(struct lwp *l, void *v, register_t *retval)
   1154 {
   1155 	struct sys_getitimer_args /* {
   1156 		syscallarg(int) which;
   1157 		syscallarg(struct itimerval *) itv;
   1158 	} */ *uap = v;
   1159 	struct proc *p = l->l_proc;
   1160 	struct itimerval aitv;
   1161 	int error;
   1162 
   1163 	error = dogetitimer(p, SCARG(uap, which), &aitv);
   1164 	if (error)
   1165 		return error;
   1166 	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
   1167 }
   1168 
   1169 int
   1170 dogetitimer(struct proc *p, int which, struct itimerval *itvp)
   1171 {
   1172 	int s;
   1173 
   1174 	if ((u_int)which > ITIMER_PROF)
   1175 		return (EINVAL);
   1176 
   1177 	if ((p->p_timers == NULL) || (p->p_timers->pts_timers[which] == NULL)){
   1178 		timerclear(&itvp->it_value);
   1179 		timerclear(&itvp->it_interval);
   1180 	} else {
   1181 		s = splclock();
   1182 		timer_gettime(p->p_timers->pts_timers[which], itvp);
   1183 		splx(s);
   1184 	}
   1185 
   1186 	return 0;
   1187 }
   1188 
   1189 /* BSD routine to set/arm an interval timer. */
   1190 /* ARGSUSED */
   1191 int
   1192 sys_setitimer(struct lwp *l, void *v, register_t *retval)
   1193 {
   1194 	struct sys_setitimer_args /* {
   1195 		syscallarg(int) which;
   1196 		syscallarg(const struct itimerval *) itv;
   1197 		syscallarg(struct itimerval *) oitv;
   1198 	} */ *uap = v;
   1199 	struct proc *p = l->l_proc;
   1200 	int which = SCARG(uap, which);
   1201 	struct sys_getitimer_args getargs;
   1202 	const struct itimerval *itvp;
   1203 	struct itimerval aitv;
   1204 	int error;
   1205 
   1206 	if ((u_int)which > ITIMER_PROF)
   1207 		return (EINVAL);
   1208 	itvp = SCARG(uap, itv);
   1209 	if (itvp &&
    1210 	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
   1211 		return (error);
   1212 	if (SCARG(uap, oitv) != NULL) {
   1213 		SCARG(&getargs, which) = which;
   1214 		SCARG(&getargs, itv) = SCARG(uap, oitv);
   1215 		if ((error = sys_getitimer(l, &getargs, retval)) != 0)
   1216 			return (error);
   1217 	}
   1218 	if (itvp == 0)
   1219 		return (0);
   1220 
   1221 	return dosetitimer(p, which, &aitv);
   1222 }
   1223 
   1224 int
   1225 dosetitimer(struct proc *p, int which, struct itimerval *itvp)
   1226 {
   1227 #ifdef __HAVE_TIMECOUNTER
   1228 	struct timeval now;
   1229 #endif
   1230 	struct ptimer *pt;
   1231 	int s;
   1232 
   1233 	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
   1234 		return (EINVAL);
   1235 
   1236 	/*
   1237 	 * Don't bother allocating data structures if the process just
   1238 	 * wants to clear the timer.
   1239 	 */
   1240 	if (!timerisset(&itvp->it_value) &&
   1241 	    ((p->p_timers == NULL) ||(p->p_timers->pts_timers[which] == NULL)))
   1242 		return (0);
   1243 
   1244 	if (p->p_timers == NULL)
   1245 		timers_alloc(p);
   1246 	if (p->p_timers->pts_timers[which] == NULL) {
   1247 		pt = pool_get(&ptimer_pool, PR_WAITOK);
   1248 		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
   1249 		pt->pt_ev.sigev_value.sival_int = which;
   1250 		pt->pt_overruns = 0;
   1251 		pt->pt_proc = p;
   1252 		pt->pt_type = which;
   1253 		pt->pt_entry = which;
   1254 		switch (which) {
   1255 		case ITIMER_REAL:
   1256 			callout_init(&pt->pt_ch);
   1257 			pt->pt_ev.sigev_signo = SIGALRM;
   1258 			break;
   1259 		case ITIMER_VIRTUAL:
   1260 			pt->pt_active = 0;
   1261 			pt->pt_ev.sigev_signo = SIGVTALRM;
   1262 			break;
   1263 		case ITIMER_PROF:
   1264 			pt->pt_active = 0;
   1265 			pt->pt_ev.sigev_signo = SIGPROF;
   1266 			break;
   1267 		}
   1268 	} else
   1269 		pt = p->p_timers->pts_timers[which];
   1270 
   1271 	pt->pt_time = *itvp;
   1272 	p->p_timers->pts_timers[which] = pt;
   1273 
   1274 	s = splclock();
   1275 	if ((which == ITIMER_REAL) && timerisset(&pt->pt_time.it_value)) {
   1276 		/* Convert to absolute time */
   1277 #ifdef __HAVE_TIMECOUNTER
   1278 		/* XXX need to wrap in splclock for timecounters case? */
   1279 		getmicrotime(&now);
   1280 		timeradd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
   1281 #else /* !__HAVE_TIMECOUNTER */
   1282 		timeradd(&pt->pt_time.it_value, &time, &pt->pt_time.it_value);
   1283 #endif /* !__HAVE_TIMECOUNTER */
   1284 	}
   1285 	timer_settime(pt);
   1286 	splx(s);
   1287 
   1288 	return (0);
   1289 }
   1290 
   1291 /* Utility routines to manage the array of pointers to timers. */
   1292 void
   1293 timers_alloc(struct proc *p)
   1294 {
   1295 	int i;
   1296 	struct ptimers *pts;
   1297 
   1298 	pts = pool_get(&ptimers_pool, PR_WAITOK);
   1299 	LIST_INIT(&pts->pts_virtual);
   1300 	LIST_INIT(&pts->pts_prof);
   1301 	for (i = 0; i < TIMER_MAX; i++)
   1302 		pts->pts_timers[i] = NULL;
   1303 	pts->pts_fired = 0;
   1304 	p->p_timers = pts;
   1305 }
   1306 
   1307 /*
   1308  * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
   1309  * then clean up all timers and free all the data structures. If
   1310  * "which" is set to TIMERS_POSIX, only clean up the timers allocated
   1311  * by timer_create(), not the BSD setitimer() timers, and only free the
   1312  * structure if none of those remain.
   1313  */
   1314 void
   1315 timers_free(struct proc *p, int which)
   1316 {
   1317 	int i, s;
   1318 	struct ptimers *pts;
   1319 	struct ptimer *pt, *ptn;
   1320 	struct timeval tv;
   1321 
   1322 	if (p->p_timers) {
   1323 		pts = p->p_timers;
   1324 		if (which == TIMERS_ALL)
   1325 			i = 0;
   1326 		else {
   1327 			s = splclock();
   1328 			timerclear(&tv);
   1329 			for (ptn = LIST_FIRST(&p->p_timers->pts_virtual);
   1330 			     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
   1331 			     ptn = LIST_NEXT(ptn, pt_list))
   1332 				timeradd(&tv, &ptn->pt_time.it_value, &tv);
   1333 			LIST_FIRST(&p->p_timers->pts_virtual) = NULL;
   1334 			if (ptn) {
   1335 				timeradd(&tv, &ptn->pt_time.it_value,
   1336 				    &ptn->pt_time.it_value);
   1337 				LIST_INSERT_HEAD(&p->p_timers->pts_virtual,
   1338 				    ptn, pt_list);
   1339 			}
   1340 
   1341 			timerclear(&tv);
   1342 			for (ptn = LIST_FIRST(&p->p_timers->pts_prof);
   1343 			     ptn && ptn != pts->pts_timers[ITIMER_PROF];
   1344 			     ptn = LIST_NEXT(ptn, pt_list))
   1345 				timeradd(&tv, &ptn->pt_time.it_value, &tv);
   1346 			LIST_FIRST(&p->p_timers->pts_prof) = NULL;
   1347 			if (ptn) {
   1348 				timeradd(&tv, &ptn->pt_time.it_value,
   1349 				    &ptn->pt_time.it_value);
   1350 				LIST_INSERT_HEAD(&p->p_timers->pts_prof, ptn,
   1351 				    pt_list);
   1352 			}
   1353 			splx(s);
   1354 			i = 3;
   1355 		}
   1356 		for ( ; i < TIMER_MAX; i++)
   1357 			if ((pt = pts->pts_timers[i]) != NULL) {
   1358 				if (pt->pt_type == CLOCK_REALTIME)
   1359 					callout_stop(&pt->pt_ch);
   1360 				pts->pts_timers[i] = NULL;
   1361 				pool_put(&ptimer_pool, pt);
   1362 			}
   1363 		if ((pts->pts_timers[0] == NULL) &&
   1364 		    (pts->pts_timers[1] == NULL) &&
   1365 		    (pts->pts_timers[2] == NULL)) {
   1366 			p->p_timers = NULL;
   1367 			pool_put(&ptimers_pool, pts);
   1368 		}
   1369 	}
   1370 }
   1371 
   1372 /*
   1373  * Check that a proposed value to load into the .it_value or
   1374  * .it_interval part of an interval timer is acceptable, and
   1375  * fix it to have at least minimal value (i.e. if it is less
   1376  * than the resolution of the clock, round it up.)
   1377  */
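         /*
          * Example (illustrative, assuming hz = 100 and hence tick = 10000 us):
          * a requested value of 500 us is below the clock resolution and is
          * rounded up to 10000 us, while a tv_usec of 1500000 is rejected with
          * EINVAL because it must be below 1000000.
          */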
   1378 int
   1379 itimerfix(struct timeval *tv)
   1380 {
   1381 
   1382 	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
   1383 		return (EINVAL);
   1384 	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
   1385 		tv->tv_usec = tick;
   1386 	return (0);
   1387 }
   1388 
   1389 #ifdef __HAVE_TIMECOUNTER
   1390 int
   1391 itimespecfix(struct timespec *ts)
   1392 {
   1393 
   1394 	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
   1395 		return (EINVAL);
   1396 	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
   1397 		ts->tv_nsec = tick * 1000;
   1398 	return (0);
   1399 }
   1400 #endif /* __HAVE_TIMECOUNTER */
   1401 
   1402 /*
   1403  * Decrement an interval timer by a specified number
   1404  * of microseconds, which must be less than a second,
   1405  * i.e. < 1000000.  If the timer expires, then reload
   1406  * it.  In this case, carry over (usec - old value) to
   1407  * reduce the value reloaded into the timer so that
   1408  * the timer does not drift.  This routine assumes
   1409  * that it is called in a context where the timers
   1410  * on which it is operating cannot change in value.
   1411  */
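         /*
          * Worked example (hypothetical values): with 300 us left in .it_value,
          * a 10000 us .it_interval and a decrement of 1000 us, the timer
          * expires 700 us into the next period and is reloaded with
          * 10000 - 700 = 9300 us, so the overall period does not drift.
          */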
   1412 int
   1413 itimerdecr(struct ptimer *pt, int usec)
   1414 {
   1415 	struct itimerval *itp;
   1416 
   1417 	itp = &pt->pt_time;
   1418 	if (itp->it_value.tv_usec < usec) {
   1419 		if (itp->it_value.tv_sec == 0) {
   1420 			/* expired, and already in next interval */
   1421 			usec -= itp->it_value.tv_usec;
   1422 			goto expire;
   1423 		}
   1424 		itp->it_value.tv_usec += 1000000;
   1425 		itp->it_value.tv_sec--;
   1426 	}
   1427 	itp->it_value.tv_usec -= usec;
   1428 	usec = 0;
   1429 	if (timerisset(&itp->it_value))
   1430 		return (1);
   1431 	/* expired, exactly at end of interval */
   1432 expire:
   1433 	if (timerisset(&itp->it_interval)) {
   1434 		itp->it_value = itp->it_interval;
   1435 		itp->it_value.tv_usec -= usec;
   1436 		if (itp->it_value.tv_usec < 0) {
   1437 			itp->it_value.tv_usec += 1000000;
   1438 			itp->it_value.tv_sec--;
   1439 		}
   1440 		timer_settime(pt);
   1441 	} else
   1442 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
   1443 	return (0);
   1444 }
   1445 
   1446 void
   1447 itimerfire(struct ptimer *pt)
   1448 {
   1449 	struct proc *p = pt->pt_proc;
   1450 	struct sadata_vp *vp;
   1451 	int s;
   1452 	unsigned int i;
   1453 
   1454 	if (pt->pt_ev.sigev_notify == SIGEV_SIGNAL) {
   1455 		/*
   1456 		 * No RT signal infrastructure exists at this time;
   1457 		 * just post the signal number and throw away the
   1458 		 * value.
   1459 		 */
   1460 		if (sigismember(&p->p_sigctx.ps_siglist, pt->pt_ev.sigev_signo))
   1461 			pt->pt_overruns++;
   1462 		else {
   1463 			ksiginfo_t ksi;
   1464 			KSI_INIT(&ksi);
   1465 			ksi.ksi_signo = pt->pt_ev.sigev_signo;
   1466 			ksi.ksi_code = SI_TIMER;
   1467 			ksi.ksi_sigval = pt->pt_ev.sigev_value;
   1468 			pt->pt_poverruns = pt->pt_overruns;
   1469 			pt->pt_overruns = 0;
   1470 			kpsignal(p, &ksi, NULL);
   1471 		}
   1472 	} else if (pt->pt_ev.sigev_notify == SIGEV_SA && (p->p_flag & P_SA)) {
   1473 		/* Cause the process to generate an upcall when it returns. */
   1474 		signotify(p);
   1475 		if (p->p_userret == NULL) {
   1476 			/*
   1477 			 * XXX stop signals can be processed inside tsleep,
   1478 			 * which can be inside sa_yield's inner loop, which
    1479 			 * makes testing for sa_idle alone insufficient to
   1480 			 * determine if we really should call setrunnable.
   1481 			 */
   1482 			pt->pt_poverruns = pt->pt_overruns;
   1483 			pt->pt_overruns = 0;
   1484 			i = 1 << pt->pt_entry;
   1485 			p->p_timers->pts_fired = i;
   1486 			p->p_userret = timerupcall;
   1487 			p->p_userret_arg = p->p_timers;
   1488 
   1489 			SCHED_LOCK(s);
   1490 			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
   1491 				if (vp->savp_lwp->l_flag & L_SA_IDLE) {
   1492 					vp->savp_lwp->l_flag &= ~L_SA_IDLE;
   1493 					sched_wakeup(vp->savp_lwp);
   1494 					break;
   1495 				}
   1496 			}
   1497 			SCHED_UNLOCK(s);
   1498 		} else if (p->p_userret == timerupcall) {
   1499 			i = 1 << pt->pt_entry;
   1500 			if ((p->p_timers->pts_fired & i) == 0) {
   1501 				pt->pt_poverruns = pt->pt_overruns;
   1502 				pt->pt_overruns = 0;
   1503 				p->p_timers->pts_fired |= i;
   1504 			} else
   1505 				pt->pt_overruns++;
   1506 		} else {
   1507 			pt->pt_overruns++;
   1508 			if ((p->p_flag & P_WEXIT) == 0)
   1509 				printf("itimerfire(%d): overrun %d on timer %x (userret is %p)\n",
   1510 				    p->p_pid, pt->pt_overruns,
   1511 				    pt->pt_ev.sigev_value.sival_int,
   1512 				    p->p_userret);
   1513 		}
   1514 	}
   1515 
   1516 }
   1517 
   1518 /*
   1519  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
   1520  * for usage and rationale.
   1521  */
   1522 int
   1523 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
   1524 {
   1525 	struct timeval tv, delta;
   1526 	int rv = 0;
   1527 #ifndef __HAVE_TIMECOUNTER
   1528 	int s;
   1529 #endif
   1530 
   1531 #ifdef __HAVE_TIMECOUNTER
   1532 	getmicrouptime(&tv);
   1533 #else /* !__HAVE_TIMECOUNTER */
   1534 	s = splclock();
   1535 	tv = mono_time;
   1536 	splx(s);
   1537 #endif /* !__HAVE_TIMECOUNTER */
   1538 	timersub(&tv, lasttime, &delta);
   1539 
   1540 	/*
    1541 	 * The check for 0,0 ensures the message will be seen at least once,
    1542 	 * even if the interval is huge.
   1543 	 */
   1544 	if (timercmp(&delta, mininterval, >=) ||
   1545 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
   1546 		*lasttime = tv;
   1547 		rv = 1;
   1548 	}
   1549 
   1550 	return (rv);
   1551 }
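         /*
          * Example usage (an illustrative sketch, not taken from this file):
          * limit a driver's error message to at most once every 10 seconds.
          *
          *	static struct timeval lasterr;
          *	static const struct timeval errinterval = { 10, 0 };
          *
          *	if (ratecheck(&lasterr, &errinterval))
          *		log(LOG_ERR, "xyz0: device timeout\n");
          */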
   1552 
   1553 /*
   1554  * ppsratecheck(): packets (or events) per second limitation.
   1555  */
   1556 int
   1557 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
   1558 {
   1559 	struct timeval tv, delta;
   1560 	int rv;
   1561 #ifndef __HAVE_TIMECOUNTER
   1562 	int s;
   1563 #endif
   1564 
   1565 #ifdef __HAVE_TIMECOUNTER
   1566 	getmicrouptime(&tv);
   1567 #else /* !__HAVE_TIMECOUNTER */
   1568 	s = splclock();
   1569 	tv = mono_time;
   1570 	splx(s);
   1571 #endif /* !__HAVE_TIMECOUNTER */
   1572 	timersub(&tv, lasttime, &delta);
   1573 
   1574 	/*
    1575 	 * The check for 0,0 ensures the message will be seen at least once.
    1576 	 * If more than one second has passed since the last update of
    1577 	 * lasttime, reset the counter.
    1578 	 *
    1579 	 * We increment *curpps even in the *curpps < maxpps case, as some
    1580 	 * callers may want to use *curpps for statistics as well.
   1581 	 */
   1582 	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
   1583 	    delta.tv_sec >= 1) {
   1584 		*lasttime = tv;
   1585 		*curpps = 0;
   1586 	}
   1587 	if (maxpps < 0)
   1588 		rv = 1;
   1589 	else if (*curpps < maxpps)
   1590 		rv = 1;
   1591 	else
   1592 		rv = 0;
   1593 
   1594 #if 1 /*DIAGNOSTIC?*/
   1595 	/* be careful about wrap-around */
   1596 	if (*curpps + 1 > *curpps)
   1597 		*curpps = *curpps + 1;
   1598 #else
   1599 	/*
    1600 	 * Assume that there aren't too many calls to this function.
    1601 	 * It is not clear that the assumption holds, as it depends on the
    1602 	 * *caller's* behavior, not the behavior of this function.
    1603 	 * In my view it is wrong to make assumptions about the caller's
    1604 	 * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
   1605 	 */
   1606 	*curpps = *curpps + 1;
   1607 #endif
   1608 
   1609 	return (rv);
   1610 }
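         /*
          * Example usage (an illustrative sketch, not taken from this file):
          * drop, but still count, replies beyond roughly 100 per second.
          *
          *	static struct timeval lasttime;
          *	static int curpps;
          *
          *	if (!ppsratecheck(&lasttime, &curpps, 100))
          *		goto drop;
          */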
   1611