      1 /*	$NetBSD: kern_time.c,v 1.101 2006/06/07 22:33:40 kardel Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2000, 2004, 2005 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Christopher G. Demetriou.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Copyright (c) 1982, 1986, 1989, 1993
     41  *	The Regents of the University of California.  All rights reserved.
     42  *
     43  * Redistribution and use in source and binary forms, with or without
     44  * modification, are permitted provided that the following conditions
     45  * are met:
     46  * 1. Redistributions of source code must retain the above copyright
     47  *    notice, this list of conditions and the following disclaimer.
     48  * 2. Redistributions in binary form must reproduce the above copyright
     49  *    notice, this list of conditions and the following disclaimer in the
     50  *    documentation and/or other materials provided with the distribution.
     51  * 3. Neither the name of the University nor the names of its contributors
     52  *    may be used to endorse or promote products derived from this software
     53  *    without specific prior written permission.
     54  *
     55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     65  * SUCH DAMAGE.
     66  *
     67  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
     68  */
     69 
     70 #include <sys/cdefs.h>
     71 __KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.101 2006/06/07 22:33:40 kardel Exp $");
     72 
     73 #include "fs_nfs.h"
     74 #include "opt_nfs.h"
     75 #include "opt_nfsserver.h"
     76 
     77 #include <sys/param.h>
     78 #include <sys/resourcevar.h>
     79 #include <sys/kernel.h>
     80 #include <sys/systm.h>
     81 #include <sys/proc.h>
     82 #include <sys/sa.h>
     83 #include <sys/savar.h>
     84 #include <sys/vnode.h>
     85 #include <sys/signalvar.h>
     86 #include <sys/syslog.h>
     87 #ifdef __HAVE_TIMECOUNTER
     88 #include <sys/timetc.h>
     89 #else /* !__HAVE_TIMECOUNTER */
     90 #include <sys/timevar.h>
     91 #endif /* !__HAVE_TIMECOUNTER */
     92 #include <sys/kauth.h>
     93 
     94 #include <sys/mount.h>
     95 #include <sys/syscallargs.h>
     96 
     97 #include <uvm/uvm_extern.h>
     98 
     99 #if defined(NFS) || defined(NFSSERVER)
    100 #include <nfs/rpcv2.h>
    101 #include <nfs/nfsproto.h>
    102 #include <nfs/nfs.h>
    103 #include <nfs/nfs_var.h>
    104 #endif
    105 
    106 #include <machine/cpu.h>
    107 
    108 POOL_INIT(ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
    109     &pool_allocator_nointr);
    110 POOL_INIT(ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
    111     &pool_allocator_nointr);
    112 
    113 static void timerupcall(struct lwp *, void *);
    114 #ifdef __HAVE_TIMECOUNTER
    115 static int itimespecfix(struct timespec *);		/* XXX move itimerfix to timespecs */
    116 #endif /* __HAVE_TIMECOUNTER */
    117 
    118 /* Time of day and interval timer support.
    119  *
    120  * These routines provide the kernel entry points to get and set
    121  * the time-of-day and per-process interval timers.  Subroutines
    122  * here provide support for adding and subtracting timeval structures
    123  * and decrementing interval timers, optionally reloading the interval
    124  * timers when they expire.
    125  */
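/*
 * A minimal sketch (kept out of compilation) of the <sys/time.h> timeval
 * macros that the routines below rely on for the adding and subtracting
 * described above; the values and the function name are only illustrative.
 */
#if 0
static void
timeval_macro_sketch(void)
{
	struct timeval a = { 2, 500000 };	/* 2.50 s */
	struct timeval b = { 1, 750000 };	/* 1.75 s */
	struct timeval sum, diff;

	timeradd(&a, &b, &sum);		/* sum  = { 4, 250000 }, carry handled */
	timersub(&a, &b, &diff);	/* diff = { 0, 750000 }, borrow handled */
	if (timercmp(&diff, &b, <))	/* third argument is the comparison op */
		timerclear(&diff);	/* now !timerisset(&diff) */
}
#endif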
    126 
    127 /* This function is used by clock_settime and settimeofday */
    128 int
    129 settime(struct proc *p, struct timespec *ts)
    130 {
    131 	struct timeval delta, tv;
    132 #ifdef __HAVE_TIMECOUNTER
    133 	struct timeval now;
    134 	struct timespec ts1;
     135 #endif /* __HAVE_TIMECOUNTER */
    136 	struct cpu_info *ci;
    137 	int s;
    138 
    139 	/*
    140 	 * Don't allow the time to be set forward so far it will wrap
    141 	 * and become negative, thus allowing an attacker to bypass
    142 	 * the next check below.  The cutoff is 1 year before rollover
    143 	 * occurs, so even if the attacker uses adjtime(2) to move
    144 	 * the time past the cutoff, it will take a very long time
    145 	 * to get to the wrap point.
    146 	 *
    147 	 * XXX: we check against INT_MAX since on 64-bit
    148 	 *	platforms, sizeof(int) != sizeof(long) and
    149 	 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
    150 	 */
    151 	if (ts->tv_sec > INT_MAX - 365*24*60*60) {
    152 		struct proc *pp = p->p_pptr;
    153 		log(LOG_WARNING, "pid %d (%s) "
    154 		    "invoked by uid %d ppid %d (%s) "
    155 		    "tried to set clock forward to %ld\n",
    156 		    p->p_pid, p->p_comm, kauth_cred_geteuid(pp->p_cred),
    157 		    pp->p_pid, pp->p_comm, (long)ts->tv_sec);
    158 		return (EPERM);
    159 	}
    160 	TIMESPEC_TO_TIMEVAL(&tv, ts);
    161 
    162 	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
    163 	s = splclock();
    164 #ifdef __HAVE_TIMECOUNTER
    165 	microtime(&now);
    166 	timersub(&tv, &now, &delta);
    167 #else /* !__HAVE_TIMECOUNTER */
    168 	timersub(&tv, &time, &delta);
    169 #endif /* !__HAVE_TIMECOUNTER */
    170 	if ((delta.tv_sec < 0 || delta.tv_usec < 0) && securelevel > 1) {
    171 		splx(s);
    172 		return (EPERM);
    173 	}
    174 #ifdef notyet
    175 	if ((delta.tv_sec < 86400) && securelevel > 0) {
    176 		splx(s);
    177 		return (EPERM);
    178 	}
    179 #endif
    180 #ifdef __HAVE_TIMECOUNTER
    181 	ts1.tv_sec = tv.tv_sec;
    182 	ts1.tv_nsec = tv.tv_usec * 1000;
    183 	tc_setclock(&ts1);
    184 	(void) spllowersoftclock();
    185 #else /* !__HAVE_TIMECOUNTER */
    186 	time = tv;
    187 	(void) spllowersoftclock();
    188 	timeradd(&boottime, &delta, &boottime);
    189 #endif /* !__HAVE_TIMECOUNTER */
    190 	/*
    191 	 * XXXSMP
    192 	 * This is wrong.  We should traverse a list of all
    193 	 * CPUs and add the delta to the runtime of those
    194 	 * CPUs which have a process on them.
    195 	 */
    196 	ci = curcpu();
    197 	timeradd(&ci->ci_schedstate.spc_runtime, &delta,
    198 	    &ci->ci_schedstate.spc_runtime);
    199 #if (defined(NFS) && !defined (NFS_V2_ONLY)) || defined(NFSSERVER)
    200 	nqnfs_lease_updatetime(delta.tv_sec);
    201 #endif
    202 	splx(s);
    203 	resettodr();
    204 	return (0);
    205 }
    206 
    207 /* ARGSUSED */
    208 int
    209 sys_clock_gettime(struct lwp *l, void *v, register_t *retval)
    210 {
    211 	struct sys_clock_gettime_args /* {
    212 		syscallarg(clockid_t) clock_id;
    213 		syscallarg(struct timespec *) tp;
    214 	} */ *uap = v;
    215 	clockid_t clock_id;
    216 	struct timespec ats;
    217 
    218 	clock_id = SCARG(uap, clock_id);
    219 	switch (clock_id) {
    220 	case CLOCK_REALTIME:
    221 		nanotime(&ats);
    222 		break;
    223 	case CLOCK_MONOTONIC:
    224 #ifdef __HAVE_TIMECOUNTER
    225 		nanouptime(&ats);
    226 #else /* !__HAVE_TIMECOUNTER */
    227 		{
    228 		int s;
    229 
    230 		/* XXX "hz" granularity */
    231 		s = splclock();
    232 		TIMEVAL_TO_TIMESPEC(&mono_time,&ats);
    233 		splx(s);
    234 		}
    235 #endif /* !__HAVE_TIMECOUNTER */
    236 		break;
    237 	default:
    238 		return (EINVAL);
    239 	}
    240 
    241 	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
    242 }
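/*
 * Userland counterpart of the syscall above, as a rough sketch only:
 * CLOCK_MONOTONIC (nanouptime/mono_time here) is the clock to use for
 * measuring elapsed time, since CLOCK_REALTIME can jump when the
 * time-of-day clock is set.  The function name is illustrative.
 */
#if 0
#include <time.h>

static void
measure_elapsed_sketch(void)
{
	struct timespec t0, t1, elapsed;

	(void)clock_gettime(CLOCK_MONOTONIC, &t0);
	/* ... the work being timed ... */
	(void)clock_gettime(CLOCK_MONOTONIC, &t1);

	/* Manual subtraction with borrow; timespecsub() does the same. */
	elapsed.tv_sec = t1.tv_sec - t0.tv_sec;
	elapsed.tv_nsec = t1.tv_nsec - t0.tv_nsec;
	if (elapsed.tv_nsec < 0) {
		elapsed.tv_nsec += 1000000000;
		elapsed.tv_sec--;
	}
}
#endif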
    243 
    244 /* ARGSUSED */
    245 int
    246 sys_clock_settime(struct lwp *l, void *v, register_t *retval)
    247 {
    248 	struct sys_clock_settime_args /* {
    249 		syscallarg(clockid_t) clock_id;
    250 		syscallarg(const struct timespec *) tp;
    251 	} */ *uap = v;
    252 	struct proc *p = l->l_proc;
    253 	int error;
    254 
    255 	if ((error = kauth_authorize_generic(p->p_cred, KAUTH_GENERIC_ISSUSER,
    256 				       &p->p_acflag)) != 0)
    257 		return (error);
    258 
    259 	return (clock_settime1(p, SCARG(uap, clock_id), SCARG(uap, tp)));
    260 }
    261 
    262 
    263 int
    264 clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp)
    265 {
    266 	struct timespec ats;
    267 	int error;
    268 
    269 	if ((error = copyin(tp, &ats, sizeof(ats))) != 0)
    270 		return (error);
    271 
    272 	switch (clock_id) {
    273 	case CLOCK_REALTIME:
    274 		if ((error = settime(p, &ats)) != 0)
    275 			return (error);
    276 		break;
    277 	case CLOCK_MONOTONIC:
    278 		return (EINVAL);	/* read-only clock */
    279 	default:
    280 		return (EINVAL);
    281 	}
    282 
    283 	return 0;
    284 }
    285 
    286 int
    287 sys_clock_getres(struct lwp *l, void *v, register_t *retval)
    288 {
    289 	struct sys_clock_getres_args /* {
    290 		syscallarg(clockid_t) clock_id;
    291 		syscallarg(struct timespec *) tp;
    292 	} */ *uap = v;
    293 	clockid_t clock_id;
    294 	struct timespec ts;
    295 	int error = 0;
    296 
    297 	clock_id = SCARG(uap, clock_id);
    298 	switch (clock_id) {
    299 	case CLOCK_REALTIME:
    300 	case CLOCK_MONOTONIC:
    301 		ts.tv_sec = 0;
    302 		ts.tv_nsec = 1000000000 / hz;
    303 		break;
    304 	default:
    305 		return (EINVAL);
    306 	}
    307 
    308 	if (SCARG(uap, tp))
    309 		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
    310 
    311 	return error;
    312 }
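/*
 * Both clocks above advance with the hardclock() tick, so the reported
 * resolution is 1/hz seconds (10 ms at hz = 100).  A hedged userland
 * sketch of querying it; the function name is illustrative.
 */
#if 0
#include <time.h>

static long
clock_resolution_sketch(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_MONOTONIC, &res) != 0)
		return -1;
	return res.tv_nsec;	/* 1000000000 / hz on this implementation */
}
#endif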
    313 
    314 /* ARGSUSED */
    315 int
    316 sys_nanosleep(struct lwp *l, void *v, register_t *retval)
    317 {
    318 #ifdef __HAVE_TIMECOUNTER
    319 	static int nanowait;
    320 	struct sys_nanosleep_args/* {
    321 		syscallarg(struct timespec *) rqtp;
    322 		syscallarg(struct timespec *) rmtp;
    323 	} */ *uap = v;
    324 	struct timespec rmt, rqt;
    325 	int error, timo;
    326 
    327 	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
    328 	if (error)
    329 		return (error);
    330 
    331 	if (itimespecfix(&rqt))
    332 		return (EINVAL);
    333 
    334 	timo = tstohz(&rqt);
    335 	/*
     336 	 * Avoid inadvertently sleeping forever
    337 	 */
    338 	if (timo == 0)
    339 		timo = 1;
    340 
    341 	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
    342 	if (error == ERESTART)
    343 		error = EINTR;
    344 	if (error == EWOULDBLOCK)
    345 		error = 0;
    346 
    347 	if (SCARG(uap, rmtp)) {
    348 		int error1;
    349 
    350 		getnanotime(&rmt);
    351 
    352 		timespecsub(&rqt, &rmt, &rmt);
    353 		if (rmt.tv_sec < 0)
    354 			timespecclear(&rmt);
    355 
    356 		error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
    357 			sizeof(rmt));
    358 		if (error1)
    359 			return (error1);
    360 	}
    361 
    362 	return error;
    363 #else /* !__HAVE_TIMECOUNTER */
    364 	static int nanowait;
    365 	struct sys_nanosleep_args/* {
    366 		syscallarg(struct timespec *) rqtp;
    367 		syscallarg(struct timespec *) rmtp;
    368 	} */ *uap = v;
    369 	struct timespec rqt;
    370 	struct timespec rmt;
    371 	struct timeval atv, utv;
    372 	int error, s, timo;
    373 
    374 	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
    375 	if (error)
    376 		return (error);
    377 
    378 	TIMESPEC_TO_TIMEVAL(&atv,&rqt);
    379 	if (itimerfix(&atv))
    380 		return (EINVAL);
    381 
    382 	s = splclock();
    383 	timeradd(&atv,&time,&atv);
    384 	timo = hzto(&atv);
    385 	/*
     386 	 * Avoid inadvertently sleeping forever
    387 	 */
    388 	if (timo == 0)
    389 		timo = 1;
    390 	splx(s);
    391 
    392 	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
    393 	if (error == ERESTART)
    394 		error = EINTR;
    395 	if (error == EWOULDBLOCK)
    396 		error = 0;
    397 
    398 	if (SCARG(uap, rmtp)) {
    399 		int error1;
    400 
    401 		s = splclock();
    402 		utv = time;
    403 		splx(s);
    404 
    405 		timersub(&atv, &utv, &utv);
    406 		if (utv.tv_sec < 0)
    407 			timerclear(&utv);
    408 
    409 		TIMEVAL_TO_TIMESPEC(&utv,&rmt);
    410 		error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
    411 			sizeof(rmt));
    412 		if (error1)
    413 			return (error1);
    414 	}
    415 
    416 	return error;
    417 #endif /* !__HAVE_TIMECOUNTER */
    418 }
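/*
 * The rmtp handling above exists so that an interrupted sleep can be
 * resumed.  A common userland pattern (sketch only, illustrative name)
 * is to loop on EINTR with the remaining time:
 */
#if 0
#include <errno.h>
#include <time.h>

static void
sleep_full_interval_sketch(struct timespec want)
{
	struct timespec remain;

	while (nanosleep(&want, &remain) == -1 && errno == EINTR)
		want = remain;		/* resume with whatever time is left */
}
#endif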
    419 
    420 /* ARGSUSED */
    421 int
    422 sys_gettimeofday(struct lwp *l, void *v, register_t *retval)
    423 {
    424 	struct sys_gettimeofday_args /* {
    425 		syscallarg(struct timeval *) tp;
    426 		syscallarg(void *) tzp;		really "struct timezone *"
    427 	} */ *uap = v;
    428 	struct timeval atv;
    429 	int error = 0;
    430 	struct timezone tzfake;
    431 
    432 	if (SCARG(uap, tp)) {
    433 		microtime(&atv);
    434 		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
    435 		if (error)
    436 			return (error);
    437 	}
    438 	if (SCARG(uap, tzp)) {
    439 		/*
    440 		 * NetBSD has no kernel notion of time zone, so we just
    441 		 * fake up a timezone struct and return it if demanded.
    442 		 */
    443 		tzfake.tz_minuteswest = 0;
    444 		tzfake.tz_dsttime = 0;
    445 		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
    446 	}
    447 	return (error);
    448 }
    449 
    450 /* ARGSUSED */
    451 int
    452 sys_settimeofday(struct lwp *l, void *v, register_t *retval)
    453 {
    454 	struct sys_settimeofday_args /* {
    455 		syscallarg(const struct timeval *) tv;
    456 		syscallarg(const void *) tzp;	really "const struct timezone *"
    457 	} */ *uap = v;
    458 	struct proc *p = l->l_proc;
    459 	int error;
    460 
    461 	if ((error = kauth_authorize_generic(p->p_cred, KAUTH_GENERIC_ISSUSER,
    462 				       &p->p_acflag)) != 0)
    463 		return (error);
    464 
    465 	return settimeofday1(SCARG(uap, tv), SCARG(uap, tzp), p);
    466 }
    467 
    468 int
    469 settimeofday1(const struct timeval *utv, const struct timezone *utzp,
    470     struct proc *p)
    471 {
    472 	struct timeval atv;
    473 	struct timespec ts;
    474 	int error;
    475 
    476 	/* Verify all parameters before changing time. */
    477 	/*
    478 	 * NetBSD has no kernel notion of time zone, and only an
    479 	 * obsolete program would try to set it, so we log a warning.
    480 	 */
    481 	if (utzp)
    482 		log(LOG_WARNING, "pid %d attempted to set the "
    483 		    "(obsolete) kernel time zone\n", p->p_pid);
    484 
    485 	if (utv == NULL)
    486 		return 0;
    487 
    488 	if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
    489 		return error;
    490 	TIMEVAL_TO_TIMESPEC(&atv, &ts);
    491 	return settime(p, &ts);
    492 }
    493 
    494 #ifndef __HAVE_TIMECOUNTER
    495 int	tickdelta;			/* current clock skew, us. per tick */
    496 long	timedelta;			/* unapplied time correction, us. */
    497 long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
    498 #endif
    499 
    500 int	time_adjusted;			/* set if an adjustment is made */
    501 
    502 /* ARGSUSED */
    503 int
    504 sys_adjtime(struct lwp *l, void *v, register_t *retval)
    505 {
    506 	struct sys_adjtime_args /* {
    507 		syscallarg(const struct timeval *) delta;
    508 		syscallarg(struct timeval *) olddelta;
    509 	} */ *uap = v;
    510 	struct proc *p = l->l_proc;
    511 	int error;
    512 
    513 	if ((error = kauth_authorize_generic(p->p_cred, KAUTH_GENERIC_ISSUSER,
    514 				       &p->p_acflag)) != 0)
    515 		return (error);
    516 
    517 	return adjtime1(SCARG(uap, delta), SCARG(uap, olddelta), p);
    518 }
    519 
    520 int
    521 adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
    522 {
    523 	struct timeval atv;
    524 	int error = 0;
    525 
    526 #ifdef __HAVE_TIMECOUNTER
    527 	extern int64_t time_adjtime;  /* in kern_ntptime.c */
    528 #else /* !__HAVE_TIMECOUNTER */
    529 	long ndelta, ntickdelta, odelta;
    530 	int s;
    531 #endif /* !__HAVE_TIMECOUNTER */
    532 
    533 #ifdef __HAVE_TIMECOUNTER
    534 	if (olddelta) {
    535 		atv.tv_sec = time_adjtime / 1000000;
    536 		atv.tv_usec = time_adjtime % 1000000;
    537 		if (atv.tv_usec < 0) {
    538 			atv.tv_usec += 1000000;
    539 			atv.tv_sec--;
    540 		}
    541 		error = copyout(&atv, olddelta, sizeof(struct timeval));
    542 		if (error)
    543 			return (error);
    544 	}
    545 
    546 	if (delta) {
    547 		error = copyin(delta, &atv, sizeof(struct timeval));
    548 		if (error)
    549 			return (error);
    550 
    551 		time_adjtime = (int64_t)atv.tv_sec * 1000000 +
    552 			atv.tv_usec;
    553 
    554 		if (time_adjtime)
    555 			/* We need to save the system time during shutdown */
    556 			time_adjusted |= 1;
    557 	}
    558 #else /* !__HAVE_TIMECOUNTER */
    559 	error = copyin(delta, &atv, sizeof(struct timeval));
    560 	if (error)
    561 		return (error);
    562 
    563 	/*
    564 	 * Compute the total correction and the rate at which to apply it.
    565 	 * Round the adjustment down to a whole multiple of the per-tick
    566 	 * delta, so that after some number of incremental changes in
    567 	 * hardclock(), tickdelta will become zero, lest the correction
    568 	 * overshoot and start taking us away from the desired final time.
    569 	 */
    570 	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
    571 	if (ndelta > bigadj || ndelta < -bigadj)
    572 		ntickdelta = 10 * tickadj;
    573 	else
    574 		ntickdelta = tickadj;
    575 	if (ndelta % ntickdelta)
    576 		ndelta = ndelta / ntickdelta * ntickdelta;
    577 
    578 	/*
    579 	 * To make hardclock()'s job easier, make the per-tick delta negative
    580 	 * if we want time to run slower; then hardclock can simply compute
    581 	 * tick + tickdelta, and subtract tickdelta from timedelta.
    582 	 */
    583 	if (ndelta < 0)
    584 		ntickdelta = -ntickdelta;
    585 	if (ndelta != 0)
    586 		/* We need to save the system clock time during shutdown */
    587 		time_adjusted |= 1;
    588 	s = splclock();
    589 	odelta = timedelta;
    590 	timedelta = ndelta;
    591 	tickdelta = ntickdelta;
    592 	splx(s);
    593 
    594 	if (olddelta) {
    595 		atv.tv_sec = odelta / 1000000;
    596 		atv.tv_usec = odelta % 1000000;
    597 		error = copyout(&atv, olddelta, sizeof(struct timeval));
    598 	}
     599 #endif /* !__HAVE_TIMECOUNTER */
    600 
    601 	return error;
    602 }
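/*
 * Hedged userland sketch of the interface implemented above: adjtime(2)
 * slews the clock gradually instead of stepping it, and optionally
 * reports any correction still outstanding from an earlier call.  The
 * delta chosen and the function name are illustrative.
 */
#if 0
#include <sys/time.h>

static int
slew_clock_sketch(void)
{
	struct timeval delta = { 0, 250000 };	/* advance by 0.25 s, gradually */
	struct timeval olddelta;

	return adjtime(&delta, &olddelta);	/* superuser only, as checked above */
}
#endif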
    603 
    604 /*
    605  * Interval timer support. Both the BSD getitimer() family and the POSIX
    606  * timer_*() family of routines are supported.
    607  *
    608  * All timers are kept in an array pointed to by p_timers, which is
    609  * allocated on demand - many processes don't use timers at all. The
    610  * first three elements in this array are reserved for the BSD timers:
    611  * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
    612  * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
    613  * syscall.
    614  *
    615  * Realtime timers are kept in the ptimer structure as an absolute
    616  * time; virtual time timers are kept as a linked list of deltas.
    617  * Virtual time timers are processed in the hardclock() routine of
    618  * kern_clock.c.  The real time timer is processed by a callout
    619  * routine, called from the softclock() routine.  Since a callout may
    620  * be delayed in real time due to interrupt processing in the system,
     621  * it is possible for the real time timeout routine (realtimerexpire,
     622  * given below) to be delayed in real time past when it is supposed
     623  * to occur.  It does not suffice, therefore, to reload the real timer's
     624  * .it_value from the real time timer's .it_interval.  Rather, we
     625  * compute the next time in absolute time the timer should go off.  */
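/*
 * A small sketch (not compiled) of the delta encoding described above for
 * virtual-time timers: the list stores each timer's offset from its
 * predecessor, so hardclock() only ever decrements the head entry.  The
 * tick values are arbitrary.
 */
#if 0
static void
delta_list_sketch(void)
{
	/* Absolute expiries 3, 5 and 9 ticks from now ... */
	int absolute[3] = { 3, 5, 9 };
	/* ... are kept on the list as the deltas 3, 2 and 4. */
	int delta[3];
	int i;

	delta[0] = absolute[0];
	for (i = 1; i < 3; i++)
		delta[i] = absolute[i] - absolute[i - 1];
	/* Each tick decrements delta[0] alone; when it reaches 0 the head fires. */
}
#endif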
    626 
    627 /* Allocate a POSIX realtime timer. */
    628 int
    629 sys_timer_create(struct lwp *l, void *v, register_t *retval)
    630 {
    631 	struct sys_timer_create_args /* {
    632 		syscallarg(clockid_t) clock_id;
    633 		syscallarg(struct sigevent *) evp;
    634 		syscallarg(timer_t *) timerid;
    635 	} */ *uap = v;
    636 
    637 	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
    638 	    SCARG(uap, evp), copyin, l->l_proc);
    639 }
    640 
    641 int
    642 timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    643     copyin_t fetch_event, struct proc *p)
    644 {
    645 	int error;
    646 	timer_t timerid;
    647 	struct ptimer *pt;
    648 
    649 	if (id < CLOCK_REALTIME ||
    650 	    id > CLOCK_PROF)
    651 		return (EINVAL);
    652 
    653 	if (p->p_timers == NULL)
    654 		timers_alloc(p);
    655 
    656 	/* Find a free timer slot, skipping those reserved for setitimer(). */
    657 	for (timerid = 3; timerid < TIMER_MAX; timerid++)
    658 		if (p->p_timers->pts_timers[timerid] == NULL)
    659 			break;
    660 
    661 	if (timerid == TIMER_MAX)
    662 		return EAGAIN;
    663 
    664 	pt = pool_get(&ptimer_pool, PR_WAITOK);
    665 	if (evp) {
    666 		if (((error =
    667 		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
    668 		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
    669 			(pt->pt_ev.sigev_notify > SIGEV_SA))) {
    670 			pool_put(&ptimer_pool, pt);
    671 			return (error ? error : EINVAL);
    672 		}
    673 	} else {
    674 		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
    675 		switch (id) {
    676 		case CLOCK_REALTIME:
    677 			pt->pt_ev.sigev_signo = SIGALRM;
    678 			break;
    679 		case CLOCK_VIRTUAL:
    680 			pt->pt_ev.sigev_signo = SIGVTALRM;
    681 			break;
    682 		case CLOCK_PROF:
    683 			pt->pt_ev.sigev_signo = SIGPROF;
    684 			break;
    685 		}
    686 		pt->pt_ev.sigev_value.sival_int = timerid;
    687 	}
    688 	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
    689 	pt->pt_info.ksi_errno = 0;
    690 	pt->pt_info.ksi_code = 0;
    691 	pt->pt_info.ksi_pid = p->p_pid;
    692 	pt->pt_info.ksi_uid = kauth_cred_getuid(p->p_cred);
    693 	pt->pt_info.ksi_sigval = pt->pt_ev.sigev_value;
    694 
    695 	pt->pt_type = id;
    696 	pt->pt_proc = p;
    697 	pt->pt_overruns = 0;
    698 	pt->pt_poverruns = 0;
    699 	pt->pt_entry = timerid;
    700 	timerclear(&pt->pt_time.it_value);
    701 	if (id == CLOCK_REALTIME)
    702 		callout_init(&pt->pt_ch);
    703 	else
    704 		pt->pt_active = 0;
    705 
    706 	p->p_timers->pts_timers[timerid] = pt;
    707 
    708 	return copyout(&timerid, tid, sizeof(timerid));
    709 }
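/*
 * Userland sketch of creating and arming one of the timers allocated
 * above, using the SIGEV_SIGNAL notification that timer_create1()
 * defaults to.  Assumes the standard POSIX headers; the function name
 * and the one-second interval are illustrative.
 */
#if 0
#include <signal.h>
#include <string.h>
#include <time.h>

static int
posix_timer_sketch(void)
{
	struct sigevent sev;
	struct itimerspec its;
	timer_t tid;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGALRM;
	sev.sigev_value.sival_int = 42;		/* arbitrary cookie */
	if (timer_create(CLOCK_REALTIME, &sev, &tid) == -1)
		return -1;

	its.it_value.tv_sec = 1;		/* first expiry in 1 s */
	its.it_value.tv_nsec = 0;
	its.it_interval.tv_sec = 1;		/* then every 1 s */
	its.it_interval.tv_nsec = 0;
	return timer_settime(tid, 0, &its, NULL);
}
#endif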
    710 
    711 /* Delete a POSIX realtime timer */
    712 int
    713 sys_timer_delete(struct lwp *l, void *v, register_t *retval)
    714 {
    715 	struct sys_timer_delete_args /*  {
    716 		syscallarg(timer_t) timerid;
    717 	} */ *uap = v;
    718 	struct proc *p = l->l_proc;
    719 	timer_t timerid;
    720 	struct ptimer *pt, *ptn;
    721 	int s;
    722 
    723 	timerid = SCARG(uap, timerid);
    724 
    725 	if ((p->p_timers == NULL) ||
    726 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
    727 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
    728 		return (EINVAL);
    729 
    730 	if (pt->pt_type == CLOCK_REALTIME)
    731 		callout_stop(&pt->pt_ch);
    732 	else if (pt->pt_active) {
    733 		s = splclock();
    734 		ptn = LIST_NEXT(pt, pt_list);
    735 		LIST_REMOVE(pt, pt_list);
    736 		for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
    737 			timeradd(&pt->pt_time.it_value, &ptn->pt_time.it_value,
    738 			    &ptn->pt_time.it_value);
    739 		splx(s);
    740 	}
    741 
    742 	p->p_timers->pts_timers[timerid] = NULL;
    743 	pool_put(&ptimer_pool, pt);
    744 
    745 	return (0);
    746 }
    747 
    748 /*
    749  * Set up the given timer. The value in pt->pt_time.it_value is taken
    750  * to be an absolute time for CLOCK_REALTIME timers and a relative
    751  * time for virtual timers.
    752  * Must be called at splclock().
    753  */
    754 void
    755 timer_settime(struct ptimer *pt)
    756 {
    757 	struct ptimer *ptn, *pptn;
    758 	struct ptlist *ptl;
    759 
    760 	if (pt->pt_type == CLOCK_REALTIME) {
    761 		callout_stop(&pt->pt_ch);
    762 		if (timerisset(&pt->pt_time.it_value)) {
    763 			/*
    764 			 * Don't need to check hzto() return value, here.
    765 			 * callout_reset() does it for us.
    766 			 */
    767 			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
    768 			    realtimerexpire, pt);
    769 		}
    770 	} else {
    771 		if (pt->pt_active) {
    772 			ptn = LIST_NEXT(pt, pt_list);
    773 			LIST_REMOVE(pt, pt_list);
    774 			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
    775 				timeradd(&pt->pt_time.it_value,
    776 				    &ptn->pt_time.it_value,
    777 				    &ptn->pt_time.it_value);
    778 		}
    779 		if (timerisset(&pt->pt_time.it_value)) {
    780 			if (pt->pt_type == CLOCK_VIRTUAL)
    781 				ptl = &pt->pt_proc->p_timers->pts_virtual;
    782 			else
    783 				ptl = &pt->pt_proc->p_timers->pts_prof;
    784 
    785 			for (ptn = LIST_FIRST(ptl), pptn = NULL;
    786 			     ptn && timercmp(&pt->pt_time.it_value,
    787 				 &ptn->pt_time.it_value, >);
    788 			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
    789 				timersub(&pt->pt_time.it_value,
    790 				    &ptn->pt_time.it_value,
    791 				    &pt->pt_time.it_value);
    792 
    793 			if (pptn)
    794 				LIST_INSERT_AFTER(pptn, pt, pt_list);
    795 			else
    796 				LIST_INSERT_HEAD(ptl, pt, pt_list);
    797 
    798 			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
    799 				timersub(&ptn->pt_time.it_value,
    800 				    &pt->pt_time.it_value,
    801 				    &ptn->pt_time.it_value);
    802 
    803 			pt->pt_active = 1;
    804 		} else
    805 			pt->pt_active = 0;
    806 	}
    807 }
    808 
    809 void
    810 timer_gettime(struct ptimer *pt, struct itimerval *aitv)
    811 {
    812 #ifdef __HAVE_TIMECOUNTER
    813 	struct timeval now;
    814 #endif
    815 	struct ptimer *ptn;
    816 
    817 	*aitv = pt->pt_time;
    818 	if (pt->pt_type == CLOCK_REALTIME) {
    819 		/*
    820 		 * Convert from absolute to relative time in .it_value
    821 		 * part of real time timer.  If time for real time
    822 		 * timer has passed return 0, else return difference
    823 		 * between current time and time for the timer to go
    824 		 * off.
    825 		 */
    826 		if (timerisset(&aitv->it_value)) {
    827 #ifdef __HAVE_TIMECOUNTER
    828 			getmicrotime(&now);
    829 			if (timercmp(&aitv->it_value, &now, <))
    830 				timerclear(&aitv->it_value);
    831 			else
    832 				timersub(&aitv->it_value, &now,
    833 				    &aitv->it_value);
    834 #else /* !__HAVE_TIMECOUNTER */
    835 			if (timercmp(&aitv->it_value, &time, <))
    836 				timerclear(&aitv->it_value);
    837 			else
    838 				timersub(&aitv->it_value, &time,
    839 				    &aitv->it_value);
    840 #endif /* !__HAVE_TIMECOUNTER */
    841 		}
    842 	} else if (pt->pt_active) {
    843 		if (pt->pt_type == CLOCK_VIRTUAL)
    844 			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
    845 		else
    846 			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
    847 		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
    848 			timeradd(&aitv->it_value,
    849 			    &ptn->pt_time.it_value, &aitv->it_value);
    850 		KASSERT(ptn != NULL); /* pt should be findable on the list */
    851 	} else
    852 		timerclear(&aitv->it_value);
    853 }
    854 
    855 
    856 
    857 /* Set and arm a POSIX realtime timer */
    858 int
    859 sys_timer_settime(struct lwp *l, void *v, register_t *retval)
    860 {
    861 	struct sys_timer_settime_args /* {
    862 		syscallarg(timer_t) timerid;
    863 		syscallarg(int) flags;
    864 		syscallarg(const struct itimerspec *) value;
    865 		syscallarg(struct itimerspec *) ovalue;
    866 	} */ *uap = v;
    867 	int error;
    868 	struct itimerspec value, ovalue, *ovp = NULL;
    869 
    870 	if ((error = copyin(SCARG(uap, value), &value,
    871 	    sizeof(struct itimerspec))) != 0)
    872 		return (error);
    873 
    874 	if (SCARG(uap, ovalue))
    875 		ovp = &ovalue;
    876 
    877 	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
    878 	    SCARG(uap, flags), l->l_proc)) != 0)
    879 		return error;
    880 
    881 	if (ovp)
    882 		return copyout(&ovalue, SCARG(uap, ovalue),
    883 		    sizeof(struct itimerspec));
    884 	return 0;
    885 }
    886 
    887 int
    888 dotimer_settime(int timerid, struct itimerspec *value,
    889     struct itimerspec *ovalue, int flags, struct proc *p)
    890 {
    891 #ifdef __HAVE_TIMECOUNTER
    892 	struct timeval now;
    893 #endif
    894 	struct itimerval val, oval;
    895 	struct ptimer *pt;
    896 	int s;
    897 
    898 	if ((p->p_timers == NULL) ||
    899 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
    900 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
    901 		return (EINVAL);
    902 
    903 	TIMESPEC_TO_TIMEVAL(&val.it_value, &value->it_value);
    904 	TIMESPEC_TO_TIMEVAL(&val.it_interval, &value->it_interval);
    905 	if (itimerfix(&val.it_value) || itimerfix(&val.it_interval))
    906 		return (EINVAL);
    907 
    908 	oval = pt->pt_time;
    909 	pt->pt_time = val;
    910 
    911 	s = splclock();
    912 	/*
    913 	 * If we've been passed a relative time for a realtime timer,
    914 	 * convert it to absolute; if an absolute time for a virtual
    915 	 * timer, convert it to relative and make sure we don't set it
    916 	 * to zero, which would cancel the timer, or let it go
    917 	 * negative, which would confuse the comparison tests.
    918 	 */
    919 	if (timerisset(&pt->pt_time.it_value)) {
    920 		if (pt->pt_type == CLOCK_REALTIME) {
    921 #ifdef __HAVE_TIMECOUNTER
    922 			if ((flags & TIMER_ABSTIME) == 0) {
    923 				getmicrotime(&now);
    924 				timeradd(&pt->pt_time.it_value, &now,
    925 				    &pt->pt_time.it_value);
    926 			}
    927 #else /* !__HAVE_TIMECOUNTER */
    928 			if ((flags & TIMER_ABSTIME) == 0)
    929 				timeradd(&pt->pt_time.it_value, &time,
    930 				    &pt->pt_time.it_value);
    931 #endif /* !__HAVE_TIMECOUNTER */
    932 		} else {
    933 			if ((flags & TIMER_ABSTIME) != 0) {
    934 #ifdef __HAVE_TIMECOUNTER
    935 				getmicrotime(&now);
    936 				timersub(&pt->pt_time.it_value, &now,
    937 				    &pt->pt_time.it_value);
    938 #else /* !__HAVE_TIMECOUNTER */
    939 				timersub(&pt->pt_time.it_value, &time,
    940 				    &pt->pt_time.it_value);
    941 #endif /* !__HAVE_TIMECOUNTER */
    942 				if (!timerisset(&pt->pt_time.it_value) ||
    943 				    pt->pt_time.it_value.tv_sec < 0) {
    944 					pt->pt_time.it_value.tv_sec = 0;
    945 					pt->pt_time.it_value.tv_usec = 1;
    946 				}
    947 			}
    948 		}
    949 	}
    950 
    951 	timer_settime(pt);
    952 	splx(s);
    953 
    954 	if (ovalue) {
    955 		TIMEVAL_TO_TIMESPEC(&oval.it_value, &ovalue->it_value);
    956 		TIMEVAL_TO_TIMESPEC(&oval.it_interval, &ovalue->it_interval);
    957 	}
    958 
    959 	return (0);
    960 }
    961 
    962 /* Return the time remaining until a POSIX timer fires. */
    963 int
    964 sys_timer_gettime(struct lwp *l, void *v, register_t *retval)
    965 {
    966 	struct sys_timer_gettime_args /* {
    967 		syscallarg(timer_t) timerid;
    968 		syscallarg(struct itimerspec *) value;
    969 	} */ *uap = v;
    970 	struct itimerspec its;
    971 	int error;
    972 
    973 	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
    974 	    &its)) != 0)
    975 		return error;
    976 
    977 	return copyout(&its, SCARG(uap, value), sizeof(its));
    978 }
    979 
    980 int
    981 dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
    982 {
    983 	int s;
    984 	struct ptimer *pt;
    985 	struct itimerval aitv;
    986 
    987 	if ((p->p_timers == NULL) ||
    988 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
    989 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
    990 		return (EINVAL);
    991 
    992 	s = splclock();
    993 	timer_gettime(pt, &aitv);
    994 	splx(s);
    995 
    996 	TIMEVAL_TO_TIMESPEC(&aitv.it_interval, &its->it_interval);
    997 	TIMEVAL_TO_TIMESPEC(&aitv.it_value, &its->it_value);
    998 
    999 	return 0;
   1000 }
   1001 
   1002 /*
   1003  * Return the count of the number of times a periodic timer expired
   1004  * while a notification was already pending. The counter is reset when
   1005  * a timer expires and a notification can be posted.
   1006  */
   1007 int
   1008 sys_timer_getoverrun(struct lwp *l, void *v, register_t *retval)
   1009 {
   1010 	struct sys_timer_getoverrun_args /* {
   1011 		syscallarg(timer_t) timerid;
   1012 	} */ *uap = v;
   1013 	struct proc *p = l->l_proc;
   1014 	int timerid;
   1015 	struct ptimer *pt;
   1016 
   1017 	timerid = SCARG(uap, timerid);
   1018 
   1019 	if ((p->p_timers == NULL) ||
   1020 	    (timerid < 2) || (timerid >= TIMER_MAX) ||
   1021 	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
   1022 		return (EINVAL);
   1023 
   1024 	*retval = pt->pt_poverruns;
   1025 
   1026 	return (0);
   1027 }
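/*
 * Sketch of how the count kept above is consumed: after handling the
 * timer's signal, userland can ask how many expirations were merged into
 * that single notification.  Illustrative only.
 */
#if 0
#include <time.h>

static int
overrun_sketch(timer_t tid)
{
	/* Returns pt_poverruns as maintained above, or -1 on error. */
	return timer_getoverrun(tid);
}
#endif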
   1028 
   1029 /* Glue function that triggers an upcall; called from userret(). */
   1030 static void
   1031 timerupcall(struct lwp *l, void *arg)
   1032 {
   1033 	struct ptimers *pt = (struct ptimers *)arg;
   1034 	unsigned int i, fired, done;
   1035 
   1036 	KDASSERT(l->l_proc->p_sa);
   1037 	/* Bail out if we do not own the virtual processor */
   1038 	if (l->l_savp->savp_lwp != l)
   1039 		return ;
   1040 
   1041 	KERNEL_PROC_LOCK(l);
   1042 
   1043 	fired = pt->pts_fired;
   1044 	done = 0;
   1045 	while ((i = ffs(fired)) != 0) {
   1046 		siginfo_t *si;
   1047 		int mask = 1 << --i;
   1048 		int f;
   1049 
   1050 		f = l->l_flag & L_SA;
   1051 		l->l_flag &= ~L_SA;
   1052 		si = siginfo_alloc(PR_WAITOK);
   1053 		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
   1054 		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
   1055 		    sizeof(*si), si, siginfo_free) != 0) {
   1056 			siginfo_free(si);
   1057 			/* XXX What do we do here?? */
   1058 		} else
   1059 			done |= mask;
   1060 		fired &= ~mask;
   1061 		l->l_flag |= f;
   1062 	}
   1063 	pt->pts_fired &= ~done;
   1064 	if (pt->pts_fired == 0)
   1065 		l->l_proc->p_userret = NULL;
   1066 
   1067 	KERNEL_PROC_UNLOCK(l);
   1068 }
   1069 
   1070 /*
   1071  * Real interval timer expired:
   1072  * send process whose timer expired an alarm signal.
   1073  * If time is not set up to reload, then just return.
   1074  * Else compute next time timer should go off which is > current time.
   1075  * This is where delay in processing this timeout causes multiple
   1076  * SIGALRM calls to be compressed into one.
   1077  */
   1078 void
   1079 realtimerexpire(void *arg)
   1080 {
   1081 #ifdef __HAVE_TIMECOUNTER
   1082 	struct timeval now;
   1083 #endif
   1084 	struct ptimer *pt;
   1085 	int s;
   1086 
   1087 	pt = (struct ptimer *)arg;
   1088 
   1089 	itimerfire(pt);
   1090 
   1091 	if (!timerisset(&pt->pt_time.it_interval)) {
   1092 		timerclear(&pt->pt_time.it_value);
   1093 		return;
   1094 	}
   1095 #ifdef __HAVE_TIMECOUNTER
   1096 	for (;;) {
   1097 		s = splclock();	/* XXX need spl now? */
   1098 		timeradd(&pt->pt_time.it_value,
   1099 		    &pt->pt_time.it_interval, &pt->pt_time.it_value);
   1100 		getmicrotime(&now);
   1101 		if (timercmp(&pt->pt_time.it_value, &now, >)) {
   1102 			/*
   1103 			 * Don't need to check hzto() return value, here.
   1104 			 * callout_reset() does it for us.
   1105 			 */
   1106 			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
   1107 			    realtimerexpire, pt);
   1108 			splx(s);
   1109 			return;
   1110 		}
   1111 		splx(s);
   1112 		pt->pt_overruns++;
   1113 	}
   1114 #else /* !__HAVE_TIMECOUNTER */
   1115 	for (;;) {
   1116 		s = splclock();
   1117 		timeradd(&pt->pt_time.it_value,
   1118 		    &pt->pt_time.it_interval, &pt->pt_time.it_value);
   1119 		if (timercmp(&pt->pt_time.it_value, &time, >)) {
   1120 			/*
   1121 			 * Don't need to check hzto() return value, here.
   1122 			 * callout_reset() does it for us.
   1123 			 */
   1124 			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
   1125 			    realtimerexpire, pt);
   1126 			splx(s);
   1127 			return;
   1128 		}
   1129 		splx(s);
   1130 		pt->pt_overruns++;
   1131 	}
   1132 #endif /* !__HAVE_TIMECOUNTER */
   1133 }
   1134 
   1135 /* BSD routine to get the value of an interval timer. */
   1136 /* ARGSUSED */
   1137 int
   1138 sys_getitimer(struct lwp *l, void *v, register_t *retval)
   1139 {
   1140 	struct sys_getitimer_args /* {
   1141 		syscallarg(int) which;
   1142 		syscallarg(struct itimerval *) itv;
   1143 	} */ *uap = v;
   1144 	struct proc *p = l->l_proc;
   1145 	struct itimerval aitv;
   1146 	int error;
   1147 
   1148 	error = dogetitimer(p, SCARG(uap, which), &aitv);
   1149 	if (error)
   1150 		return error;
   1151 	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
   1152 }
   1153 
   1154 int
   1155 dogetitimer(struct proc *p, int which, struct itimerval *itvp)
   1156 {
   1157 	int s;
   1158 
   1159 	if ((u_int)which > ITIMER_PROF)
   1160 		return (EINVAL);
   1161 
   1162 	if ((p->p_timers == NULL) || (p->p_timers->pts_timers[which] == NULL)){
   1163 		timerclear(&itvp->it_value);
   1164 		timerclear(&itvp->it_interval);
   1165 	} else {
   1166 		s = splclock();
   1167 		timer_gettime(p->p_timers->pts_timers[which], itvp);
   1168 		splx(s);
   1169 	}
   1170 
   1171 	return 0;
   1172 }
   1173 
   1174 /* BSD routine to set/arm an interval timer. */
   1175 /* ARGSUSED */
   1176 int
   1177 sys_setitimer(struct lwp *l, void *v, register_t *retval)
   1178 {
   1179 	struct sys_setitimer_args /* {
   1180 		syscallarg(int) which;
   1181 		syscallarg(const struct itimerval *) itv;
   1182 		syscallarg(struct itimerval *) oitv;
   1183 	} */ *uap = v;
   1184 	struct proc *p = l->l_proc;
   1185 	int which = SCARG(uap, which);
   1186 	struct sys_getitimer_args getargs;
   1187 	const struct itimerval *itvp;
   1188 	struct itimerval aitv;
   1189 	int error;
   1190 
   1191 	if ((u_int)which > ITIMER_PROF)
   1192 		return (EINVAL);
   1193 	itvp = SCARG(uap, itv);
   1194 	if (itvp &&
    1195 	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
   1196 		return (error);
   1197 	if (SCARG(uap, oitv) != NULL) {
   1198 		SCARG(&getargs, which) = which;
   1199 		SCARG(&getargs, itv) = SCARG(uap, oitv);
   1200 		if ((error = sys_getitimer(l, &getargs, retval)) != 0)
   1201 			return (error);
   1202 	}
   1203 	if (itvp == 0)
   1204 		return (0);
   1205 
   1206 	return dosetitimer(p, which, &aitv);
   1207 }
   1208 
   1209 int
   1210 dosetitimer(struct proc *p, int which, struct itimerval *itvp)
   1211 {
   1212 #ifdef __HAVE_TIMECOUNTER
   1213 	struct timeval now;
   1214 #endif
   1215 	struct ptimer *pt;
   1216 	int s;
   1217 
   1218 	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
   1219 		return (EINVAL);
   1220 
   1221 	/*
   1222 	 * Don't bother allocating data structures if the process just
   1223 	 * wants to clear the timer.
   1224 	 */
   1225 	if (!timerisset(&itvp->it_value) &&
   1226 	    ((p->p_timers == NULL) ||(p->p_timers->pts_timers[which] == NULL)))
   1227 		return (0);
   1228 
   1229 	if (p->p_timers == NULL)
   1230 		timers_alloc(p);
   1231 	if (p->p_timers->pts_timers[which] == NULL) {
   1232 		pt = pool_get(&ptimer_pool, PR_WAITOK);
   1233 		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
   1234 		pt->pt_ev.sigev_value.sival_int = which;
   1235 		pt->pt_overruns = 0;
   1236 		pt->pt_proc = p;
   1237 		pt->pt_type = which;
   1238 		pt->pt_entry = which;
   1239 		switch (which) {
   1240 		case ITIMER_REAL:
   1241 			callout_init(&pt->pt_ch);
   1242 			pt->pt_ev.sigev_signo = SIGALRM;
   1243 			break;
   1244 		case ITIMER_VIRTUAL:
   1245 			pt->pt_active = 0;
   1246 			pt->pt_ev.sigev_signo = SIGVTALRM;
   1247 			break;
   1248 		case ITIMER_PROF:
   1249 			pt->pt_active = 0;
   1250 			pt->pt_ev.sigev_signo = SIGPROF;
   1251 			break;
   1252 		}
   1253 	} else
   1254 		pt = p->p_timers->pts_timers[which];
   1255 
   1256 	pt->pt_time = *itvp;
   1257 	p->p_timers->pts_timers[which] = pt;
   1258 
   1259 	s = splclock();
   1260 	if ((which == ITIMER_REAL) && timerisset(&pt->pt_time.it_value)) {
   1261 		/* Convert to absolute time */
   1262 #ifdef __HAVE_TIMECOUNTER
   1263 		/* XXX need to wrap in splclock for timecounters case? */
   1264 		getmicrotime(&now);
   1265 		timeradd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
   1266 #else /* !__HAVE_TIMECOUNTER */
   1267 		timeradd(&pt->pt_time.it_value, &time, &pt->pt_time.it_value);
   1268 #endif /* !__HAVE_TIMECOUNTER */
   1269 	}
   1270 	timer_settime(pt);
   1271 	splx(s);
   1272 
   1273 	return (0);
   1274 }
   1275 
   1276 /* Utility routines to manage the array of pointers to timers. */
   1277 void
   1278 timers_alloc(struct proc *p)
   1279 {
   1280 	int i;
   1281 	struct ptimers *pts;
   1282 
   1283 	pts = pool_get(&ptimers_pool, PR_WAITOK);
   1284 	LIST_INIT(&pts->pts_virtual);
   1285 	LIST_INIT(&pts->pts_prof);
   1286 	for (i = 0; i < TIMER_MAX; i++)
   1287 		pts->pts_timers[i] = NULL;
   1288 	pts->pts_fired = 0;
   1289 	p->p_timers = pts;
   1290 }
   1291 
   1292 /*
   1293  * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
   1294  * then clean up all timers and free all the data structures. If
   1295  * "which" is set to TIMERS_POSIX, only clean up the timers allocated
   1296  * by timer_create(), not the BSD setitimer() timers, and only free the
   1297  * structure if none of those remain.
   1298  */
   1299 void
   1300 timers_free(struct proc *p, int which)
   1301 {
   1302 	int i, s;
   1303 	struct ptimers *pts;
   1304 	struct ptimer *pt, *ptn;
   1305 	struct timeval tv;
   1306 
   1307 	if (p->p_timers) {
   1308 		pts = p->p_timers;
   1309 		if (which == TIMERS_ALL)
   1310 			i = 0;
   1311 		else {
   1312 			s = splclock();
   1313 			timerclear(&tv);
   1314 			for (ptn = LIST_FIRST(&p->p_timers->pts_virtual);
   1315 			     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
   1316 			     ptn = LIST_NEXT(ptn, pt_list))
   1317 				timeradd(&tv, &ptn->pt_time.it_value, &tv);
   1318 			LIST_FIRST(&p->p_timers->pts_virtual) = NULL;
   1319 			if (ptn) {
   1320 				timeradd(&tv, &ptn->pt_time.it_value,
   1321 				    &ptn->pt_time.it_value);
   1322 				LIST_INSERT_HEAD(&p->p_timers->pts_virtual,
   1323 				    ptn, pt_list);
   1324 			}
   1325 
   1326 			timerclear(&tv);
   1327 			for (ptn = LIST_FIRST(&p->p_timers->pts_prof);
   1328 			     ptn && ptn != pts->pts_timers[ITIMER_PROF];
   1329 			     ptn = LIST_NEXT(ptn, pt_list))
   1330 				timeradd(&tv, &ptn->pt_time.it_value, &tv);
   1331 			LIST_FIRST(&p->p_timers->pts_prof) = NULL;
   1332 			if (ptn) {
   1333 				timeradd(&tv, &ptn->pt_time.it_value,
   1334 				    &ptn->pt_time.it_value);
   1335 				LIST_INSERT_HEAD(&p->p_timers->pts_prof, ptn,
   1336 				    pt_list);
   1337 			}
   1338 			splx(s);
   1339 			i = 3;
   1340 		}
   1341 		for ( ; i < TIMER_MAX; i++)
   1342 			if ((pt = pts->pts_timers[i]) != NULL) {
   1343 				if (pt->pt_type == CLOCK_REALTIME)
   1344 					callout_stop(&pt->pt_ch);
   1345 				pts->pts_timers[i] = NULL;
   1346 				pool_put(&ptimer_pool, pt);
   1347 			}
   1348 		if ((pts->pts_timers[0] == NULL) &&
   1349 		    (pts->pts_timers[1] == NULL) &&
   1350 		    (pts->pts_timers[2] == NULL)) {
   1351 			p->p_timers = NULL;
   1352 			pool_put(&ptimers_pool, pts);
   1353 		}
   1354 	}
   1355 }
   1356 
   1357 /*
   1358  * Check that a proposed value to load into the .it_value or
   1359  * .it_interval part of an interval timer is acceptable, and
   1360  * fix it to have at least minimal value (i.e. if it is less
   1361  * than the resolution of the clock, round it up.)
   1362  */
   1363 int
   1364 itimerfix(struct timeval *tv)
   1365 {
   1366 
   1367 	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
   1368 		return (EINVAL);
   1369 	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
   1370 		tv->tv_usec = tick;
   1371 	return (0);
   1372 }
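/*
 * Rounding sketch for the helper above: with hz = 100 (tick = 10000 us),
 * a request for a 1 us interval is rounded up to one clock tick rather
 * than rejected.  Not compiled; the function name is illustrative.
 */
#if 0
static void
itimerfix_sketch(void)
{
	struct timeval tv = { 0, 1 };		/* 1 us, below the tick size */

	if (itimerfix(&tv) == 0) {
		/* tv is now { 0, tick }, i.e. { 0, 10000 } at hz = 100 */
	}
}
#endif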
   1373 
   1374 #ifdef __HAVE_TIMECOUNTER
   1375 int
   1376 itimespecfix(struct timespec *ts)
   1377 {
   1378 
   1379 	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
   1380 		return (EINVAL);
   1381 	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
   1382 		ts->tv_nsec = tick * 1000;
   1383 	return (0);
   1384 }
   1385 #endif /* __HAVE_TIMECOUNTER */
   1386 
   1387 /*
   1388  * Decrement an interval timer by a specified number
   1389  * of microseconds, which must be less than a second,
   1390  * i.e. < 1000000.  If the timer expires, then reload
   1391  * it.  In this case, carry over (usec - old value) to
   1392  * reduce the value reloaded into the timer so that
   1393  * the timer does not drift.  This routine assumes
   1394  * that it is called in a context where the timers
   1395  * on which it is operating cannot change in value.
   1396  */
   1397 int
   1398 itimerdecr(struct ptimer *pt, int usec)
   1399 {
   1400 	struct itimerval *itp;
   1401 
   1402 	itp = &pt->pt_time;
   1403 	if (itp->it_value.tv_usec < usec) {
   1404 		if (itp->it_value.tv_sec == 0) {
   1405 			/* expired, and already in next interval */
   1406 			usec -= itp->it_value.tv_usec;
   1407 			goto expire;
   1408 		}
   1409 		itp->it_value.tv_usec += 1000000;
   1410 		itp->it_value.tv_sec--;
   1411 	}
   1412 	itp->it_value.tv_usec -= usec;
   1413 	usec = 0;
   1414 	if (timerisset(&itp->it_value))
   1415 		return (1);
   1416 	/* expired, exactly at end of interval */
   1417 expire:
   1418 	if (timerisset(&itp->it_interval)) {
   1419 		itp->it_value = itp->it_interval;
   1420 		itp->it_value.tv_usec -= usec;
   1421 		if (itp->it_value.tv_usec < 0) {
   1422 			itp->it_value.tv_usec += 1000000;
   1423 			itp->it_value.tv_sec--;
   1424 		}
   1425 		timer_settime(pt);
   1426 	} else
   1427 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
   1428 	return (0);
   1429 }
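/*
 * Worked example of the carry-over above (sketch only): a 50000 us
 * interval whose decrement overshoots expiry by 3000 us is reloaded with
 * 47000 us, so the long-run period does not drift late.
 */
#if 0
static void
itimerdecr_sketch(struct ptimer *pt)
{
	pt->pt_time.it_value.tv_sec = 0;
	pt->pt_time.it_value.tv_usec = 2000;
	pt->pt_time.it_interval.tv_sec = 0;
	pt->pt_time.it_interval.tv_usec = 50000;

	/* Decrementing 2000 us remaining by 5000 us overshoots by 3000 us ... */
	(void)itimerdecr(pt, 5000);
	/* ... so it_value is reloaded as 50000 - 3000 = 47000 us. */
}
#endif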
   1430 
   1431 void
   1432 itimerfire(struct ptimer *pt)
   1433 {
   1434 	struct proc *p = pt->pt_proc;
   1435 	struct sadata_vp *vp;
   1436 	int s;
   1437 	unsigned int i;
   1438 
   1439 	if (pt->pt_ev.sigev_notify == SIGEV_SIGNAL) {
   1440 		/*
   1441 		 * No RT signal infrastructure exists at this time;
   1442 		 * just post the signal number and throw away the
   1443 		 * value.
   1444 		 */
   1445 		if (sigismember(&p->p_sigctx.ps_siglist, pt->pt_ev.sigev_signo))
   1446 			pt->pt_overruns++;
   1447 		else {
   1448 			ksiginfo_t ksi;
   1449 			(void)memset(&ksi, 0, sizeof(ksi));
   1450 			ksi.ksi_signo = pt->pt_ev.sigev_signo;
   1451 			ksi.ksi_code = SI_TIMER;
   1452 			ksi.ksi_sigval = pt->pt_ev.sigev_value;
   1453 			pt->pt_poverruns = pt->pt_overruns;
   1454 			pt->pt_overruns = 0;
   1455 			kpsignal(p, &ksi, NULL);
   1456 		}
   1457 	} else if (pt->pt_ev.sigev_notify == SIGEV_SA && (p->p_flag & P_SA)) {
   1458 		/* Cause the process to generate an upcall when it returns. */
   1459 
   1460 		if (p->p_userret == NULL) {
   1461 			/*
   1462 			 * XXX stop signals can be processed inside tsleep,
   1463 			 * which can be inside sa_yield's inner loop, which
    1464 			 * makes testing for sa_idle alone insufficient to
   1465 			 * determine if we really should call setrunnable.
   1466 			 */
   1467 			pt->pt_poverruns = pt->pt_overruns;
   1468 			pt->pt_overruns = 0;
   1469 			i = 1 << pt->pt_entry;
   1470 			p->p_timers->pts_fired = i;
   1471 			p->p_userret = timerupcall;
   1472 			p->p_userret_arg = p->p_timers;
   1473 
   1474 			SCHED_LOCK(s);
   1475 			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
   1476 				if (vp->savp_lwp->l_flag & L_SA_IDLE) {
   1477 					vp->savp_lwp->l_flag &= ~L_SA_IDLE;
   1478 					sched_wakeup(vp->savp_lwp);
   1479 					break;
   1480 				}
   1481 			}
   1482 			SCHED_UNLOCK(s);
   1483 		} else if (p->p_userret == timerupcall) {
   1484 			i = 1 << pt->pt_entry;
   1485 			if ((p->p_timers->pts_fired & i) == 0) {
   1486 				pt->pt_poverruns = pt->pt_overruns;
   1487 				pt->pt_overruns = 0;
   1488 				p->p_timers->pts_fired |= i;
   1489 			} else
   1490 				pt->pt_overruns++;
   1491 		} else {
   1492 			pt->pt_overruns++;
   1493 			if ((p->p_flag & P_WEXIT) == 0)
   1494 				printf("itimerfire(%d): overrun %d on timer %x (userret is %p)\n",
   1495 				    p->p_pid, pt->pt_overruns,
   1496 				    pt->pt_ev.sigev_value.sival_int,
   1497 				    p->p_userret);
   1498 		}
   1499 	}
   1500 
   1501 }
   1502 
   1503 /*
   1504  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
   1505  * for usage and rationale.
   1506  */
   1507 int
   1508 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
   1509 {
   1510 	struct timeval tv, delta;
   1511 	int rv = 0;
   1512 #ifndef __HAVE_TIMECOUNTER
   1513 	int s;
   1514 #endif
   1515 
   1516 #ifdef __HAVE_TIMECOUNTER
   1517 	getmicrouptime(&tv);
   1518 #else /* !__HAVE_TIMECOUNTER */
   1519 	s = splclock();
   1520 	tv = mono_time;
   1521 	splx(s);
   1522 #endif /* !__HAVE_TIMECOUNTER */
   1523 	timersub(&tv, lasttime, &delta);
   1524 
   1525 	/*
    1526 	 * The check for 0,0 is so that the message will be seen at least
    1527 	 * once, even if the interval is huge.
   1528 	 */
   1529 	if (timercmp(&delta, mininterval, >=) ||
   1530 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
   1531 		*lasttime = tv;
   1532 		rv = 1;
   1533 	}
   1534 
   1535 	return (rv);
   1536 }
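/*
 * Typical use of ratecheck() (see ratecheck(9)): emit a diagnostic at
 * most once per interval.  Sketch only; the message, level and interval
 * are arbitrary.
 */
#if 0
static void
ratecheck_sketch(void)
{
	static struct timeval lasterr;
	static const struct timeval errinterval = { 5, 0 };	/* 5 seconds */

	if (ratecheck(&lasterr, &errinterval))
		log(LOG_ERR, "something bad keeps happening\n");
}
#endif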
   1537 
   1538 /*
   1539  * ppsratecheck(): packets (or events) per second limitation.
   1540  */
   1541 int
   1542 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
   1543 {
   1544 	struct timeval tv, delta;
   1545 	int rv;
   1546 #ifndef __HAVE_TIMECOUNTER
   1547 	int s;
   1548 #endif
   1549 
   1550 #ifdef __HAVE_TIMECOUNTER
   1551 	getmicrouptime(&tv);
   1552 #else /* !__HAVE_TIMECOUNTER */
   1553 	s = splclock();
   1554 	tv = mono_time;
   1555 	splx(s);
   1556 #endif /* !__HAVE_TIMECOUNTER */
   1557 	timersub(&tv, lasttime, &delta);
   1558 
   1559 	/*
    1560 	 * The check for 0,0 is so that the message will be seen at least once.
    1561 	 * If more than one second has passed since the last update of
    1562 	 * lasttime, reset the counter.
    1563 	 *
    1564 	 * We increment *curpps even in the *curpps < maxpps case, as some
    1565 	 * callers may use *curpps for statistics as well.
   1566 	 */
   1567 	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
   1568 	    delta.tv_sec >= 1) {
   1569 		*lasttime = tv;
   1570 		*curpps = 0;
   1571 	}
   1572 	if (maxpps < 0)
   1573 		rv = 1;
   1574 	else if (*curpps < maxpps)
   1575 		rv = 1;
   1576 	else
   1577 		rv = 0;
   1578 
   1579 #if 1 /*DIAGNOSTIC?*/
   1580 	/* be careful about wrap-around */
   1581 	if (*curpps + 1 > *curpps)
   1582 		*curpps = *curpps + 1;
   1583 #else
   1584 	/*
    1585 	 * Assume that there are not too many calls to this function.
    1586 	 * It is not certain that the assumption holds, as it depends on the
    1587 	 * *caller's* behavior, not the behavior of this function.
    1588 	 * IMHO it is wrong to make assumptions about the caller's behavior,
    1589 	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
   1590 	 */
   1591 	*curpps = *curpps + 1;
   1592 #endif
   1593 
   1594 	return (rv);
   1595 }
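/*
 * Typical use of ppsratecheck(): allow at most maxpps messages (or
 * events) per second while still counting the suppressed ones.  Sketch
 * only; the message and limit are arbitrary.
 */
#if 0
static void
ppsratecheck_sketch(void)
{
	static struct timeval lastmsg;
	static int curmsgs;

	if (ppsratecheck(&lastmsg, &curmsgs, 1))	/* at most 1 per second */
		log(LOG_WARNING, "dropping packets\n");
}
#endif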
   1596