kern_time.c revision 1.98.6.1 1 /* $NetBSD: kern_time.c,v 1.98.6.1 2006/02/04 14:39:43 simonb Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2004, 2005 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Christopher G. Demetriou.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1982, 1986, 1989, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
67 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
68 */
69
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.98.6.1 2006/02/04 14:39:43 simonb Exp $");
72
73 #include "fs_nfs.h"
74 #include "opt_nfs.h"
75 #include "opt_nfsserver.h"
76
77 #include <sys/param.h>
78 #include <sys/resourcevar.h>
79 #include <sys/kernel.h>
80 #include <sys/systm.h>
81 #include <sys/proc.h>
82 #include <sys/sa.h>
83 #include <sys/savar.h>
84 #include <sys/vnode.h>
85 #include <sys/signalvar.h>
86 #include <sys/syslog.h>
87 #ifdef __HAVE_TIMECOUNTER
88 #include <sys/timetc.h>
89 #else /* !__HAVE_TIMECOUNTER */
90 #include <sys/timevar.h>
91 #endif /* !__HAVE_TIMECOUNTER */
92
93 #include <sys/mount.h>
94 #include <sys/syscallargs.h>
95
96 #include <uvm/uvm_extern.h>
97
98 #if defined(NFS) || defined(NFSSERVER)
99 #include <nfs/rpcv2.h>
100 #include <nfs/nfsproto.h>
101 #include <nfs/nfs.h>
102 #include <nfs/nfs_var.h>
103 #endif
104
105 #include <machine/cpu.h>
106
/* Pools for POSIX timer bookkeeping; allocated from non-interrupt context. */
POOL_INIT(ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
    &pool_allocator_nointr);
POOL_INIT(ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
    &pool_allocator_nointr);

/* SA upcall glue, installed as p_userret and invoked from userret(). */
static void timerupcall(struct lwp *, void *);
#ifdef __HAVE_TIMECOUNTER
static int itimespecfix(struct timespec *);	/* XXX move itimerfix to timespecs */
#endif /* __HAVE_TIMECOUNTER */
116
117 /* Time of day and interval timer support.
118 *
119 * These routines provide the kernel entry points to get and set
120 * the time-of-day and per-process interval timers. Subroutines
121 * here provide support for adding and subtracting timeval structures
122 * and decrementing interval timers, optionally reloading the interval
123 * timers when they expire.
124 */
125
/*
 * settime():
 *	Common back-end for clock_settime(2) and settimeofday(2).
 *	Sets the system time-of-day clock to *ts, subject to securelevel
 *	policy, and compensates the current CPU's scheduler run time so
 *	accounting is not skewed by the jump.  Returns 0 or EPERM.
 */
int
settime(struct proc *p, struct timespec *ts)
{
	struct timeval delta, tv;
#ifdef __HAVE_TIMECOUNTER
	struct timeval now;
	struct timespec ts1;
#endif /* __HAVE_TIMECOUNTER */
	struct cpu_info *ci;
	int s;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below. The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against INT_MAX since on 64-bit
	 * platforms, sizeof(int) != sizeof(long) and
	 * time_t is 32 bits even when atv.tv_sec is 64 bits.
	 */
	if (ts->tv_sec > INT_MAX - 365*24*60*60) {
		struct proc *pp = p->p_pptr;
		log(LOG_WARNING, "pid %d (%s) "
		    "invoked by uid %d ppid %d (%s) "
		    "tried to set clock forward to %ld\n",
		    p->p_pid, p->p_comm, pp->p_ucred->cr_uid,
		    pp->p_pid, pp->p_comm, (long)ts->tv_sec);
		return (EPERM);
	}
	TIMESPEC_TO_TIMEVAL(&tv, ts);

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
#ifdef __HAVE_TIMECOUNTER
	microtime(&now);
	timersub(&tv, &now, &delta);
#else /* !__HAVE_TIMECOUNTER */
	timersub(&tv, &time, &delta);
#endif /* !__HAVE_TIMECOUNTER */
	/* At securelevel > 1 the clock may never be turned back. */
	if ((delta.tv_sec < 0 || delta.tv_usec < 0) && securelevel > 1) {
		splx(s);
		return (EPERM);
	}
#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {
		splx(s);
		return (EPERM);
	}
#endif
#ifdef __HAVE_TIMECOUNTER
	ts1.tv_sec = tv.tv_sec;
	ts1.tv_nsec = tv.tv_usec * 1000;
	tc_setclock(&ts1);
	(void) spllowersoftclock();
#else /* !__HAVE_TIMECOUNTER */
	time = tv;
	(void) spllowersoftclock();
	/* Shift boottime by the same delta so uptime stays continuous. */
	timeradd(&boottime, &delta, &boottime);
#endif /* !__HAVE_TIMECOUNTER */
	/*
	 * XXXSMP
	 * This is wrong. We should traverse a list of all
	 * CPUs and add the delta to the runtime of those
	 * CPUs which have a process on them.
	 */
	ci = curcpu();
	timeradd(&ci->ci_schedstate.spc_runtime, &delta,
	    &ci->ci_schedstate.spc_runtime);
#if (defined(NFS) && !defined (NFS_V2_ONLY)) || defined(NFSSERVER)
	/* NQNFS leases are wall-clock based; shift them by the delta. */
	nqnfs_lease_updatetime(delta.tv_sec);
#endif
	splx(s);
	resettodr();	/* push the new time down to the battery-backed clock */
	return (0);
}
205
206 /* ARGSUSED */
207 int
208 sys_clock_gettime(struct lwp *l, void *v, register_t *retval)
209 {
210 struct sys_clock_gettime_args /* {
211 syscallarg(clockid_t) clock_id;
212 syscallarg(struct timespec *) tp;
213 } */ *uap = v;
214 clockid_t clock_id;
215 struct timespec ats;
216
217 clock_id = SCARG(uap, clock_id);
218 switch (clock_id) {
219 case CLOCK_REALTIME:
220 nanotime(&ats);
221 break;
222 case CLOCK_MONOTONIC:
223 #ifdef __HAVE_TIMECOUNTER
224 nanouptime(&ats);
225 #else /* !__HAVE_TIMECOUNTER */
226 {
227 int s;
228
229 /* XXX "hz" granularity */
230 s = splclock();
231 TIMEVAL_TO_TIMESPEC(&mono_time,&ats);
232 splx(s);
233 }
234 #endif /* !__HAVE_TIMECOUNTER */
235 break;
236 default:
237 return (EINVAL);
238 }
239
240 return copyout(&ats, SCARG(uap, tp), sizeof(ats));
241 }
242
243 /* ARGSUSED */
244 int
245 sys_clock_settime(struct lwp *l, void *v, register_t *retval)
246 {
247 struct sys_clock_settime_args /* {
248 syscallarg(clockid_t) clock_id;
249 syscallarg(const struct timespec *) tp;
250 } */ *uap = v;
251 struct proc *p = l->l_proc;
252 int error;
253
254 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
255 return (error);
256
257 return (clock_settime1(p, SCARG(uap, clock_id), SCARG(uap, tp)));
258 }
259
260
261 int
262 clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp)
263 {
264 struct timespec ats;
265 int error;
266
267 if ((error = copyin(tp, &ats, sizeof(ats))) != 0)
268 return (error);
269
270 switch (clock_id) {
271 case CLOCK_REALTIME:
272 if ((error = settime(p, &ats)) != 0)
273 return (error);
274 break;
275 case CLOCK_MONOTONIC:
276 return (EINVAL); /* read-only clock */
277 default:
278 return (EINVAL);
279 }
280
281 return 0;
282 }
283
284 int
285 sys_clock_getres(struct lwp *l, void *v, register_t *retval)
286 {
287 struct sys_clock_getres_args /* {
288 syscallarg(clockid_t) clock_id;
289 syscallarg(struct timespec *) tp;
290 } */ *uap = v;
291 clockid_t clock_id;
292 struct timespec ts;
293 int error = 0;
294
295 clock_id = SCARG(uap, clock_id);
296 switch (clock_id) {
297 case CLOCK_REALTIME:
298 case CLOCK_MONOTONIC:
299 ts.tv_sec = 0;
300 ts.tv_nsec = 1000000000 / hz;
301 break;
302 default:
303 return (EINVAL);
304 }
305
306 if (SCARG(uap, tp))
307 error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
308
309 return error;
310 }
311
312 /* ARGSUSED */
313 int
314 sys_nanosleep(struct lwp *l, void *v, register_t *retval)
315 {
316 #ifdef __HAVE_TIMECOUNTER
317 static int nanowait;
318 struct sys_nanosleep_args/* {
319 syscallarg(struct timespec *) rqtp;
320 syscallarg(struct timespec *) rmtp;
321 } */ *uap = v;
322 struct timespec rmt, rqt;
323 int error, timo;
324
325 error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
326 if (error)
327 return (error);
328
329 if (itimespecfix(&rqt))
330 return (EINVAL);
331
332 timo = tstohz(&rqt);
333 /*
334 * Avoid inadvertantly sleeping forever
335 */
336 if (timo == 0)
337 timo = 1;
338
339 error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
340 if (error == ERESTART)
341 error = EINTR;
342 if (error == EWOULDBLOCK)
343 error = 0;
344
345 if (SCARG(uap, rmtp)) {
346 int error1;
347
348 getnanotime(&rmt);
349
350 timespecsub(&rqt, &rmt, &rmt);
351 if (rmt.tv_sec < 0)
352 timespecclear(&rmt);
353
354 error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
355 sizeof(rmt));
356 if (error1)
357 return (error1);
358 }
359
360 return error;
361 #else /* !__HAVE_TIMECOUNTER */
362 static int nanowait;
363 struct sys_nanosleep_args/* {
364 syscallarg(struct timespec *) rqtp;
365 syscallarg(struct timespec *) rmtp;
366 } */ *uap = v;
367 struct timespec rqt;
368 struct timespec rmt;
369 struct timeval atv, utv;
370 int error, s, timo;
371
372 error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
373 if (error)
374 return (error);
375
376 TIMESPEC_TO_TIMEVAL(&atv,&rqt);
377 if (itimerfix(&atv))
378 return (EINVAL);
379
380 s = splclock();
381 timeradd(&atv,&time,&atv);
382 timo = hzto(&atv);
383 /*
384 * Avoid inadvertantly sleeping forever
385 */
386 if (timo == 0)
387 timo = 1;
388 splx(s);
389
390 error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
391 if (error == ERESTART)
392 error = EINTR;
393 if (error == EWOULDBLOCK)
394 error = 0;
395
396 if (SCARG(uap, rmtp)) {
397 int error1;
398
399 s = splclock();
400 utv = time;
401 splx(s);
402
403 timersub(&atv, &utv, &utv);
404 if (utv.tv_sec < 0)
405 timerclear(&utv);
406
407 TIMEVAL_TO_TIMESPEC(&utv,&rmt);
408 error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
409 sizeof(rmt));
410 if (error1)
411 return (error1);
412 }
413
414 return error;
415 #endif /* !__HAVE_TIMECOUNTER */
416 }
417
418 /* ARGSUSED */
419 int
420 sys_gettimeofday(struct lwp *l, void *v, register_t *retval)
421 {
422 struct sys_gettimeofday_args /* {
423 syscallarg(struct timeval *) tp;
424 syscallarg(void *) tzp; really "struct timezone *"
425 } */ *uap = v;
426 struct timeval atv;
427 int error = 0;
428 struct timezone tzfake;
429
430 if (SCARG(uap, tp)) {
431 microtime(&atv);
432 error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
433 if (error)
434 return (error);
435 }
436 if (SCARG(uap, tzp)) {
437 /*
438 * NetBSD has no kernel notion of time zone, so we just
439 * fake up a timezone struct and return it if demanded.
440 */
441 tzfake.tz_minuteswest = 0;
442 tzfake.tz_dsttime = 0;
443 error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
444 }
445 return (error);
446 }
447
448 /* ARGSUSED */
449 int
450 sys_settimeofday(struct lwp *l, void *v, register_t *retval)
451 {
452 struct sys_settimeofday_args /* {
453 syscallarg(const struct timeval *) tv;
454 syscallarg(const void *) tzp; really "const struct timezone *"
455 } */ *uap = v;
456 struct proc *p = l->l_proc;
457 int error;
458
459 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
460 return (error);
461
462 return settimeofday1(SCARG(uap, tv), SCARG(uap, tzp), p);
463 }
464
465 int
466 settimeofday1(const struct timeval *utv, const struct timezone *utzp,
467 struct proc *p)
468 {
469 struct timeval atv;
470 struct timespec ts;
471 int error;
472
473 /* Verify all parameters before changing time. */
474 /*
475 * NetBSD has no kernel notion of time zone, and only an
476 * obsolete program would try to set it, so we log a warning.
477 */
478 if (utzp)
479 log(LOG_WARNING, "pid %d attempted to set the "
480 "(obsolete) kernel time zone\n", p->p_pid);
481
482 if (utv == NULL)
483 return 0;
484
485 if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
486 return error;
487 TIMEVAL_TO_TIMESPEC(&atv, &ts);
488 return settime(p, &ts);
489 }
490
/* adjtime(2) slew state, consumed incrementally by hardclock(). */
int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
int	time_adjusted;			/* set if an adjustment is made */
495
496 /* ARGSUSED */
497 int
498 sys_adjtime(struct lwp *l, void *v, register_t *retval)
499 {
500 struct sys_adjtime_args /* {
501 syscallarg(const struct timeval *) delta;
502 syscallarg(struct timeval *) olddelta;
503 } */ *uap = v;
504 struct proc *p = l->l_proc;
505 int error;
506
507 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
508 return (error);
509
510 return adjtime1(SCARG(uap, delta), SCARG(uap, olddelta), p);
511 }
512
513 int
514 adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
515 {
516 struct timeval atv;
517 long ndelta, ntickdelta, odelta;
518 int error;
519 int s;
520
521 error = copyin(delta, &atv, sizeof(struct timeval));
522 if (error)
523 return (error);
524
525 /*
526 * Compute the total correction and the rate at which to apply it.
527 * Round the adjustment down to a whole multiple of the per-tick
528 * delta, so that after some number of incremental changes in
529 * hardclock(), tickdelta will become zero, lest the correction
530 * overshoot and start taking us away from the desired final time.
531 */
532 ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
533 if (ndelta > bigadj || ndelta < -bigadj)
534 ntickdelta = 10 * tickadj;
535 else
536 ntickdelta = tickadj;
537 if (ndelta % ntickdelta)
538 ndelta = ndelta / ntickdelta * ntickdelta;
539
540 /*
541 * To make hardclock()'s job easier, make the per-tick delta negative
542 * if we want time to run slower; then hardclock can simply compute
543 * tick + tickdelta, and subtract tickdelta from timedelta.
544 */
545 if (ndelta < 0)
546 ntickdelta = -ntickdelta;
547 if (ndelta != 0)
548 /* We need to save the system clock time during shutdown */
549 time_adjusted |= 1;
550 s = splclock();
551 odelta = timedelta;
552 timedelta = ndelta;
553 tickdelta = ntickdelta;
554 splx(s);
555
556 if (olddelta) {
557 atv.tv_sec = odelta / 1000000;
558 atv.tv_usec = odelta % 1000000;
559 error = copyout(&atv, olddelta, sizeof(struct timeval));
560 }
561 return error;
562 }
563
564 /*
565 * Interval timer support. Both the BSD getitimer() family and the POSIX
566 * timer_*() family of routines are supported.
567 *
568 * All timers are kept in an array pointed to by p_timers, which is
569 * allocated on demand - many processes don't use timers at all. The
570 * first three elements in this array are reserved for the BSD timers:
571 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
572 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
573 * syscall.
574 *
575 * Realtime timers are kept in the ptimer structure as an absolute
576 * time; virtual time timers are kept as a linked list of deltas.
577 * Virtual time timers are processed in the hardclock() routine of
578 * kern_clock.c. The real time timer is processed by a callout
579 * routine, called from the softclock() routine. Since a callout may
580 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
582 * given below), to be delayed in real time past when it is supposed
583 * to occur. It does not suffice, therefore, to reload the real timer
584 * .it_value from the real time timers .it_interval. Rather, we
585 * compute the next time in absolute time the timer should go off. */
586
587 /* Allocate a POSIX realtime timer. */
588 int
589 sys_timer_create(struct lwp *l, void *v, register_t *retval)
590 {
591 struct sys_timer_create_args /* {
592 syscallarg(clockid_t) clock_id;
593 syscallarg(struct sigevent *) evp;
594 syscallarg(timer_t *) timerid;
595 } */ *uap = v;
596
597 return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
598 SCARG(uap, evp), copyin, l->l_proc);
599 }
600
/*
 * timer_create1():
 *	Guts of timer_create(2), shared with the COMPAT code (which
 *	supplies its own fetch_event routine).  Allocates a struct
 *	ptimer, fills in its notification info — either from the
 *	user-supplied sigevent or with the clock's default signal —
 *	installs it in the first free slot past the three entries
 *	reserved for setitimer(), and copies the new id out to *tid.
 */
int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct proc *p)
{
	int error;
	timer_t timerid;
	struct ptimer *pt;

	if (id < CLOCK_REALTIME ||
	    id > CLOCK_PROF)
		return (EINVAL);

	/* Per-process timer table is allocated lazily on first use. */
	if (p->p_timers == NULL)
		timers_alloc(p);

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (p->p_timers->pts_timers[timerid] == NULL)
			break;

	if (timerid == TIMER_MAX)
		return EAGAIN;

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp) {
		/* Reject both fetch failures and out-of-range notify types. */
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	} else {
		/* No sigevent supplied: deliver the clock's default signal. */
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	/* Pre-build the siginfo delivered when this timer fires. */
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = p->p_cred->p_ruid;
	pt->pt_info.ksi_sigval = pt->pt_ev.sigev_value;

	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	timerclear(&pt->pt_time.it_value);
	/* Only real-time timers are callout-driven; others use delta lists. */
	if (id == CLOCK_REALTIME)
		callout_init(&pt->pt_ch);
	else
		pt->pt_active = 0;

	p->p_timers->pts_timers[timerid] = pt;

	/*
	 * NOTE(review): if this copyout fails the timer stays installed
	 * but the caller never learns its id, so the slot is effectively
	 * leaked until process exit — consider undoing on failure.
	 */
	return copyout(&timerid, tid, sizeof(timerid));
}
670
/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, void *v, register_t *retval)
{
	struct sys_timer_delete_args /* {
		syscallarg(timer_t) timerid;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimer *pt, *ptn;
	int s;

	timerid = SCARG(uap, timerid);

	/* Reject unknown ids and the first two (setitimer-reserved) slots. */
	if ((p->p_timers == NULL) ||
	    (timerid < 2) || (timerid >= TIMER_MAX) ||
	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
		return (EINVAL);

	if (pt->pt_type == CLOCK_REALTIME)
		callout_stop(&pt->pt_ch);
	else if (pt->pt_active) {
		/*
		 * Virtual timers live on a delta list: unlink this one
		 * and add its remaining value onto the entries that
		 * followed it, so their expiry is unchanged.
		 */
		s = splclock();
		ptn = LIST_NEXT(pt, pt_list);
		LIST_REMOVE(pt, pt_list);
		for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
			timeradd(&pt->pt_time.it_value, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
		splx(s);
	}

	p->p_timers->pts_timers[timerid] = NULL;
	pool_put(&ptimer_pool, pt);

	return (0);
}
707
708 /*
709 * Set up the given timer. The value in pt->pt_time.it_value is taken
710 * to be an absolute time for CLOCK_REALTIME timers and a relative
711 * time for virtual timers.
712 * Must be called at splclock().
713 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	if (pt->pt_type == CLOCK_REALTIME) {
		/* Real-time timers are driven by a callout. */
		callout_stop(&pt->pt_ch);
		if (timerisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			/*
			 * Already on a delta list: unlink, pushing our
			 * delta onto the entries that followed us.
			 */
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timeradd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timerisset(&pt->pt_time.it_value)) {
			/* Pick the per-process virtual or profiling list. */
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			/*
			 * Walk forward, converting our value into a delta
			 * against each earlier-expiring entry, until the
			 * insertion point is found.
			 */
			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timercmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timersub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			/* Re-express all later entries relative to us. */
			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timersub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}
768
/*
 * timer_gettime():
 *	Report the timer's current value into *aitv as a relative time,
 *	regardless of how it is stored internally.
 *	Assumes splclock() protection — TODO confirm; callers here all
 *	wrap it in splclock()/splx().
 */
void
timer_gettime(struct ptimer *pt, struct itimerval *aitv)
{
#ifdef __HAVE_TIMECOUNTER
	struct timeval now;
#endif
	struct ptimer *ptn;

	*aitv = pt->pt_time;
	if (pt->pt_type == CLOCK_REALTIME) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer. If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timerisset(&aitv->it_value)) {
#ifdef __HAVE_TIMECOUNTER
			getmicrotime(&now);
			if (timercmp(&aitv->it_value, &now, <))
				timerclear(&aitv->it_value);
			else
				timersub(&aitv->it_value, &now,
				    &aitv->it_value);
#else /* !__HAVE_TIMECOUNTER */
			if (timercmp(&aitv->it_value, &time, <))
				timerclear(&aitv->it_value);
			else
				timersub(&aitv->it_value, &time,
				    &aitv->it_value);
#endif /* !__HAVE_TIMECOUNTER */
		}
	} else if (pt->pt_active) {
		/*
		 * Virtual timers are stored as deltas: sum the values of
		 * every entry ahead of us on the list to recover our
		 * relative expiry time.
		 */
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timeradd(&aitv->it_value,
			    &ptn->pt_time.it_value, &aitv->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timerclear(&aitv->it_value);
}
814
815
816
817 /* Set and arm a POSIX realtime timer */
818 int
819 sys_timer_settime(struct lwp *l, void *v, register_t *retval)
820 {
821 struct sys_timer_settime_args /* {
822 syscallarg(timer_t) timerid;
823 syscallarg(int) flags;
824 syscallarg(const struct itimerspec *) value;
825 syscallarg(struct itimerspec *) ovalue;
826 } */ *uap = v;
827 int error;
828 struct itimerspec value, ovalue, *ovp = NULL;
829
830 if ((error = copyin(SCARG(uap, value), &value,
831 sizeof(struct itimerspec))) != 0)
832 return (error);
833
834 if (SCARG(uap, ovalue))
835 ovp = &ovalue;
836
837 if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
838 SCARG(uap, flags), l->l_proc)) != 0)
839 return error;
840
841 if (ovp)
842 return copyout(&ovalue, SCARG(uap, ovalue),
843 sizeof(struct itimerspec));
844 return 0;
845 }
846
/*
 * dotimer_settime():
 *	Guts of timer_settime(2).  Loads the new value/interval into
 *	the timer — converting between absolute and relative form as
 *	required by the timer type and TIMER_ABSTIME — arms it via
 *	timer_settime(), and optionally reports the previous setting
 *	through *ovalue (in-kernel pointer; callers copy out).
 */
int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
#ifdef __HAVE_TIMECOUNTER
	struct timeval now;
#endif
	struct itimerval val, oval;
	struct ptimer *pt;
	int s;

	/* Reject unknown ids and the first two (setitimer-reserved) slots. */
	if ((p->p_timers == NULL) ||
	    (timerid < 2) || (timerid >= TIMER_MAX) ||
	    ((pt = p->p_timers->pts_timers[timerid]) == NULL))
		return (EINVAL);

	/* Timers are kept internally with microsecond (timeval) precision. */
	TIMESPEC_TO_TIMEVAL(&val.it_value, &value->it_value);
	TIMESPEC_TO_TIMEVAL(&val.it_interval, &value->it_interval);
	if (itimerfix(&val.it_value) || itimerfix(&val.it_interval))
		return (EINVAL);

	/*
	 * NOTE(review): pt_time is swapped before splclock() below; a
	 * clock interrupt in between could observe the new value before
	 * the timer is re-armed — confirm this race is benign.
	 */
	oval = pt->pt_time;
	pt->pt_time = val;

	s = splclock();
	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timerisset(&pt->pt_time.it_value)) {
		if (pt->pt_type == CLOCK_REALTIME) {
#ifdef __HAVE_TIMECOUNTER
			if ((flags & TIMER_ABSTIME) == 0) {
				getmicrotime(&now);
				timeradd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
#else /* !__HAVE_TIMECOUNTER */
			if ((flags & TIMER_ABSTIME) == 0)
				timeradd(&pt->pt_time.it_value, &time,
				    &pt->pt_time.it_value);
#endif /* !__HAVE_TIMECOUNTER */
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
#ifdef __HAVE_TIMECOUNTER
				getmicrotime(&now);
				timersub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
#else /* !__HAVE_TIMECOUNTER */
				timersub(&pt->pt_time.it_value, &time,
				    &pt->pt_time.it_value);
#endif /* !__HAVE_TIMECOUNTER */
				if (!timerisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					/* Clamp to the minimum arming value. */
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_usec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	splx(s);

	if (ovalue) {
		TIMEVAL_TO_TIMESPEC(&oval.it_value, &ovalue->it_value);
		TIMEVAL_TO_TIMESPEC(&oval.it_interval, &ovalue->it_interval);
	}

	return (0);
}
921
922 /* Return the time remaining until a POSIX timer fires. */
923 int
924 sys_timer_gettime(struct lwp *l, void *v, register_t *retval)
925 {
926 struct sys_timer_gettime_args /* {
927 syscallarg(timer_t) timerid;
928 syscallarg(struct itimerspec *) value;
929 } */ *uap = v;
930 struct itimerspec its;
931 int error;
932
933 if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
934 &its)) != 0)
935 return error;
936
937 return copyout(&its, SCARG(uap, value), sizeof(its));
938 }
939
940 int
941 dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
942 {
943 int s;
944 struct ptimer *pt;
945 struct itimerval aitv;
946
947 if ((p->p_timers == NULL) ||
948 (timerid < 2) || (timerid >= TIMER_MAX) ||
949 ((pt = p->p_timers->pts_timers[timerid]) == NULL))
950 return (EINVAL);
951
952 s = splclock();
953 timer_gettime(pt, &aitv);
954 splx(s);
955
956 TIMEVAL_TO_TIMESPEC(&aitv.it_interval, &its->it_interval);
957 TIMEVAL_TO_TIMESPEC(&aitv.it_value, &its->it_value);
958
959 return 0;
960 }
961
962 /*
963 * Return the count of the number of times a periodic timer expired
964 * while a notification was already pending. The counter is reset when
965 * a timer expires and a notification can be posted.
966 */
967 int
968 sys_timer_getoverrun(struct lwp *l, void *v, register_t *retval)
969 {
970 struct sys_timer_getoverrun_args /* {
971 syscallarg(timer_t) timerid;
972 } */ *uap = v;
973 struct proc *p = l->l_proc;
974 int timerid;
975 struct ptimer *pt;
976
977 timerid = SCARG(uap, timerid);
978
979 if ((p->p_timers == NULL) ||
980 (timerid < 2) || (timerid >= TIMER_MAX) ||
981 ((pt = p->p_timers->pts_timers[timerid]) == NULL))
982 return (EINVAL);
983
984 *retval = pt->pt_poverruns;
985
986 return (0);
987 }
988
/* Glue function that triggers an upcall; called from userret(). */
static void
timerupcall(struct lwp *l, void *arg)
{
	struct ptimers *pt = (struct ptimers *)arg;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return ;

	KERNEL_PROC_LOCK(l);

	/* Deliver an SA upcall for each timer bit latched in pts_fired. */
	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		/*
		 * Temporarily clear L_SA around the allocation and
		 * upcall; restored below from the saved flag bit.
		 */
		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		/* On success sa_upcall takes ownership of si (siginfo_free). */
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_flag |= f;
	}
	/* Acknowledge delivered timers; drop the hook when none remain. */
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_userret = NULL;

	KERNEL_PROC_UNLOCK(l);
}
1029
1030 /*
1031 * Real interval timer expired:
1032 * send process whose timer expired an alarm signal.
1033 * If time is not set up to reload, then just return.
1034 * Else compute next time timer should go off which is > current time.
1035 * This is where delay in processing this timeout causes multiple
1036 * SIGALRM calls to be compressed into one.
1037 */
void
realtimerexpire(void *arg)
{
#ifdef __HAVE_TIMECOUNTER
	struct timeval now;
#endif
	struct ptimer *pt;
	int s;

	pt = (struct ptimer *)arg;

	/* Post this expiry's notification (see itimerfire()). */
	itimerfire(pt);

	/* One-shot timer: disarm and we are done. */
	if (!timerisset(&pt->pt_time.it_interval)) {
		timerclear(&pt->pt_time.it_value);
		return;
	}
#ifdef __HAVE_TIMECOUNTER
	/*
	 * Periodic timer: advance the absolute expiry by whole
	 * intervals until it lands in the future, counting every
	 * skipped interval as an overrun, then re-arm the callout.
	 */
	for (;;) {
		s = splclock();	/* XXX need spl now? */
		timeradd(&pt->pt_time.it_value,
		    &pt->pt_time.it_interval, &pt->pt_time.it_value);
		getmicrotime(&now);
		if (timercmp(&pt->pt_time.it_value, &now, >)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
			splx(s);
			return;
		}
		splx(s);
		pt->pt_overruns++;
	}
#else /* !__HAVE_TIMECOUNTER */
	/* Same as above, but comparing against the global "time". */
	for (;;) {
		s = splclock();
		timeradd(&pt->pt_time.it_value,
		    &pt->pt_time.it_interval, &pt->pt_time.it_value);
		if (timercmp(&pt->pt_time.it_value, &time, >)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, hzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
			splx(s);
			return;
		}
		splx(s);
		pt->pt_overruns++;
	}
#endif /* !__HAVE_TIMECOUNTER */
}
1094
1095 /* BSD routine to get the value of an interval timer. */
1096 /* ARGSUSED */
1097 int
1098 sys_getitimer(struct lwp *l, void *v, register_t *retval)
1099 {
1100 struct sys_getitimer_args /* {
1101 syscallarg(int) which;
1102 syscallarg(struct itimerval *) itv;
1103 } */ *uap = v;
1104 struct proc *p = l->l_proc;
1105 struct itimerval aitv;
1106 int error;
1107
1108 error = dogetitimer(p, SCARG(uap, which), &aitv);
1109 if (error)
1110 return error;
1111 return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
1112 }
1113
1114 int
1115 dogetitimer(struct proc *p, int which, struct itimerval *itvp)
1116 {
1117 int s;
1118
1119 if ((u_int)which > ITIMER_PROF)
1120 return (EINVAL);
1121
1122 if ((p->p_timers == NULL) || (p->p_timers->pts_timers[which] == NULL)){
1123 timerclear(&itvp->it_value);
1124 timerclear(&itvp->it_interval);
1125 } else {
1126 s = splclock();
1127 timer_gettime(p->p_timers->pts_timers[which], itvp);
1128 splx(s);
1129 }
1130
1131 return 0;
1132 }
1133
1134 /* BSD routine to set/arm an interval timer. */
1135 /* ARGSUSED */
1136 int
1137 sys_setitimer(struct lwp *l, void *v, register_t *retval)
1138 {
1139 struct sys_setitimer_args /* {
1140 syscallarg(int) which;
1141 syscallarg(const struct itimerval *) itv;
1142 syscallarg(struct itimerval *) oitv;
1143 } */ *uap = v;
1144 struct proc *p = l->l_proc;
1145 int which = SCARG(uap, which);
1146 struct sys_getitimer_args getargs;
1147 const struct itimerval *itvp;
1148 struct itimerval aitv;
1149 int error;
1150
1151 if ((u_int)which > ITIMER_PROF)
1152 return (EINVAL);
1153 itvp = SCARG(uap, itv);
1154 if (itvp &&
1155 (error = copyin(itvp, &aitv, sizeof(struct itimerval)) != 0))
1156 return (error);
1157 if (SCARG(uap, oitv) != NULL) {
1158 SCARG(&getargs, which) = which;
1159 SCARG(&getargs, itv) = SCARG(uap, oitv);
1160 if ((error = sys_getitimer(l, &getargs, retval)) != 0)
1161 return (error);
1162 }
1163 if (itvp == 0)
1164 return (0);
1165
1166 return dosetitimer(p, which, &aitv);
1167 }
1168
/*
 * Set/arm BSD interval timer "which" for process p from *itvp.
 * Allocates the per-process timer table and the ptimer on first use.
 * Returns 0 on success or EINVAL for an unrepresentable time value.
 */
int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
#ifdef __HAVE_TIMECOUNTER
	struct timeval now;
#endif
	struct ptimer *pt;
	int s;

	/* Validate both halves; rounds sub-tick values up to one tick. */
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	if (!timerisset(&itvp->it_value) &&
	    ((p->p_timers == NULL) ||(p->p_timers->pts_timers[which] == NULL)))
		return (0);

	/* Lazily allocate the timer table and this particular timer. */
	if (p->p_timers == NULL)
		timers_alloc(p);
	if (p->p_timers->pts_timers[which] == NULL) {
		pt = pool_get(&ptimer_pool, PR_WAITOK);
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		switch (which) {
		case ITIMER_REAL:
			/* Real-time timers expire from a callout. */
			callout_init(&pt->pt_ch);
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			/* Virtual/prof timers tick with process runtime. */
			pt->pt_active = 0;
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_active = 0;
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
	} else
		pt = p->p_timers->pts_timers[which];

	pt->pt_time = *itvp;
	p->p_timers->pts_timers[which] = pt;

	s = splclock();
	if ((which == ITIMER_REAL) && timerisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
#ifdef __HAVE_TIMECOUNTER
		/* XXX need to wrap in splclock for timecounters case? */
		getmicrotime(&now);
		timeradd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
#else /* !__HAVE_TIMECOUNTER */
		timeradd(&pt->pt_time.it_value, &time, &pt->pt_time.it_value);
#endif /* !__HAVE_TIMECOUNTER */
	}
	/* Arm (or disarm) the timer under splclock(). */
	timer_settime(pt);
	splx(s);

	return (0);
}
1235
1236 /* Utility routines to manage the array of pointers to timers. */
1237 void
1238 timers_alloc(struct proc *p)
1239 {
1240 int i;
1241 struct ptimers *pts;
1242
1243 pts = pool_get(&ptimers_pool, 0);
1244 LIST_INIT(&pts->pts_virtual);
1245 LIST_INIT(&pts->pts_prof);
1246 for (i = 0; i < TIMER_MAX; i++)
1247 pts->pts_timers[i] = NULL;
1248 pts->pts_fired = 0;
1249 p->p_timers = pts;
1250 }
1251
/*
 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures. If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	int i, s;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;
	struct timeval tv;

	if (p->p_timers) {
		pts = p->p_timers;
		if (which == TIMERS_ALL)
			i = 0;
		else {
			/*
			 * POSIX-only cleanup: unlink every timer queued
			 * ahead of the surviving BSD itimer on each countdown
			 * list, folding the removed entries' remaining time
			 * into the survivor so it doesn't lose time, then
			 * re-insert the survivor as the sole list element.
			 */
			s = splclock();
			timerclear(&tv);
			for (ptn = LIST_FIRST(&p->p_timers->pts_virtual);
			     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
			     ptn = LIST_NEXT(ptn, pt_list))
				timeradd(&tv, &ptn->pt_time.it_value, &tv);
			LIST_FIRST(&p->p_timers->pts_virtual) = NULL;
			if (ptn) {
				timeradd(&tv, &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
				LIST_INSERT_HEAD(&p->p_timers->pts_virtual,
				    ptn, pt_list);
			}

			/* Same treatment for the profiling list. */
			timerclear(&tv);
			for (ptn = LIST_FIRST(&p->p_timers->pts_prof);
			     ptn && ptn != pts->pts_timers[ITIMER_PROF];
			     ptn = LIST_NEXT(ptn, pt_list))
				timeradd(&tv, &ptn->pt_time.it_value, &tv);
			LIST_FIRST(&p->p_timers->pts_prof) = NULL;
			if (ptn) {
				timeradd(&tv, &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
				LIST_INSERT_HEAD(&p->p_timers->pts_prof, ptn,
				    pt_list);
			}
			splx(s);
			/* Skip slots 0-2 (the BSD setitimer() timers). */
			i = 3;
		}
		/* Release every remaining timer from slot i upward. */
		for ( ; i < TIMER_MAX; i++)
			if ((pt = pts->pts_timers[i]) != NULL) {
				/* Real-time timers have a pending callout. */
				if (pt->pt_type == CLOCK_REALTIME)
					callout_stop(&pt->pt_ch);
				pts->pts_timers[i] = NULL;
				pool_put(&ptimer_pool, pt);
			}
		/* Free the whole table only if no BSD itimer survives. */
		if ((pts->pts_timers[0] == NULL) &&
		    (pts->pts_timers[1] == NULL) &&
		    (pts->pts_timers[2] == NULL)) {
			p->p_timers = NULL;
			pool_put(&ptimers_pool, pts);
		}
	}
}
1316
1317 /*
1318 * Check that a proposed value to load into the .it_value or
1319 * .it_interval part of an interval timer is acceptable, and
1320 * fix it to have at least minimal value (i.e. if it is less
1321 * than the resolution of the clock, round it up.)
1322 */
1323 int
1324 itimerfix(struct timeval *tv)
1325 {
1326
1327 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
1328 return (EINVAL);
1329 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
1330 tv->tv_usec = tick;
1331 return (0);
1332 }
1333
#ifdef __HAVE_TIMECOUNTER
/*
 * timespec analogue of itimerfix(): validate a proposed timer value
 * and round a nonzero sub-tick nanosecond count up to one clock tick.
 */
int
itimespecfix(struct timespec *ts)
{

	/* Reject negative times and out-of-range nanoseconds. */
	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000 || ts->tv_sec < 0)
		return (EINVAL);
	/* Round a nonzero sub-tick value up to one tick (in ns). */
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}
#endif /* __HAVE_TIMECOUNTER */
1346
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000. If the timer expires, then reload
 * it. In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift. This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 *
 * Returns 1 if the timer is still running, 0 if it expired.
 */
int
itimerdecr(struct ptimer *pt, int usec)
{
	struct itimerval *itp;

	itp = &pt->pt_time;
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* Borrow one second into the microsecond field. */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		/* Reload, carrying the leftover usec to avoid drift. */
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}
1390
/*
 * Deliver the expiration of timer pt to its owning process: either
 * post the configured signal (SIGEV_SIGNAL) or, for scheduler
 * activations (SIGEV_SA), arrange a timer upcall on return to
 * userland.  Expirations that cannot be delivered are counted as
 * overruns.
 */
void
itimerfire(struct ptimer *pt)
{
	struct proc *p = pt->pt_proc;
	struct sadata_vp *vp;
	int s;
	unsigned int i;

	if (pt->pt_ev.sigev_notify == SIGEV_SIGNAL) {
		/*
		 * No RT signal infrastructure exists at this time;
		 * just post the signal number and throw away the
		 * value.
		 */
		if (sigismember(&p->p_sigctx.ps_siglist, pt->pt_ev.sigev_signo))
			/* Signal already pending: count an overrun. */
			pt->pt_overruns++;
		else {
			ksiginfo_t ksi;
			(void)memset(&ksi, 0, sizeof(ksi));
			ksi.ksi_signo = pt->pt_ev.sigev_signo;
			ksi.ksi_code = SI_TIMER;
			ksi.ksi_sigval = pt->pt_ev.sigev_value;
			/* Publish the overrun count for timer_getoverrun(). */
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			kpsignal(p, &ksi, NULL);
		}
	} else if (pt->pt_ev.sigev_notify == SIGEV_SA && (p->p_flag & P_SA)) {
		/* Cause the process to generate an upcall when it returns. */

		if (p->p_userret == NULL) {
			/*
			 * XXX stop signals can be processed inside tsleep,
			 * which can be inside sa_yield's inner loop, which
			 * makes testing for sa_idle alone insufficient to
			 * determine if we really should call setrunnable.
			 */
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			/* Mark this timer's slot as fired. */
			i = 1 << pt->pt_entry;
			p->p_timers->pts_fired = i;
			p->p_userret = timerupcall;
			p->p_userret_arg = p->p_timers;

			/* Wake an idle SA virtual processor, if any. */
			SCHED_LOCK(s);
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				if (vp->savp_lwp->l_flag & L_SA_IDLE) {
					vp->savp_lwp->l_flag &= ~L_SA_IDLE;
					sched_wakeup(vp->savp_lwp);
					break;
				}
			}
			SCHED_UNLOCK(s);
		} else if (p->p_userret == timerupcall) {
			/* Upcall already queued: OR in this timer's bit. */
			i = 1 << pt->pt_entry;
			if ((p->p_timers->pts_fired & i) == 0) {
				pt->pt_poverruns = pt->pt_overruns;
				pt->pt_overruns = 0;
				p->p_timers->pts_fired |= i;
			} else
				pt->pt_overruns++;
		} else {
			/* Some other userret hook is installed: overrun. */
			pt->pt_overruns++;
			if ((p->p_flag & P_WEXIT) == 0)
				printf("itimerfire(%d): overrun %d on timer %x (userret is %p)\n",
				    p->p_pid, pt->pt_overruns,
				    pt->pt_ev.sigev_value.sival_int,
				    p->p_userret);
		}
	}

}
1462
1463 /*
1464 * ratecheck(): simple time-based rate-limit checking. see ratecheck(9)
1465 * for usage and rationale.
1466 */
1467 int
1468 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
1469 {
1470 struct timeval tv, delta;
1471 int rv = 0;
1472 #ifndef __HAVE_TIMECOUNTER
1473 int s;
1474 #endif
1475
1476 #ifdef __HAVE_TIMECOUNTER
1477 getmicrouptime(&tv);
1478 #else /* !__HAVE_TIMECOUNTER */
1479 s = splclock();
1480 tv = mono_time;
1481 splx(s);
1482 #endif /* !__HAVE_TIMECOUNTER */
1483 timersub(&tv, lasttime, &delta);
1484
1485 /*
1486 * check for 0,0 is so that the message will be seen at least once,
1487 * even if interval is huge.
1488 */
1489 if (timercmp(&delta, mininterval, >=) ||
1490 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
1491 *lasttime = tv;
1492 rv = 1;
1493 }
1494
1495 return (rv);
1496 }
1497
1498 /*
1499 * ppsratecheck(): packets (or events) per second limitation.
1500 */
1501 int
1502 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
1503 {
1504 struct timeval tv, delta;
1505 int rv;
1506 #ifndef __HAVE_TIMECOUNTER
1507 int s;
1508 #endif
1509
1510 #ifdef __HAVE_TIMECOUNTER
1511 getmicrouptime(&tv);
1512 #else /* !__HAVE_TIMECOUNTER */
1513 s = splclock();
1514 tv = mono_time;
1515 splx(s);
1516 #endif /* !__HAVE_TIMECOUNTER */
1517 timersub(&tv, lasttime, &delta);
1518
1519 /*
1520 * check for 0,0 is so that the message will be seen at least once.
1521 * if more than one second have passed since the last update of
1522 * lasttime, reset the counter.
1523 *
1524 * we do increment *curpps even in *curpps < maxpps case, as some may
1525 * try to use *curpps for stat purposes as well.
1526 */
1527 if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
1528 delta.tv_sec >= 1) {
1529 *lasttime = tv;
1530 *curpps = 0;
1531 }
1532 if (maxpps < 0)
1533 rv = 1;
1534 else if (*curpps < maxpps)
1535 rv = 1;
1536 else
1537 rv = 0;
1538
1539 #if 1 /*DIAGNOSTIC?*/
1540 /* be careful about wrap-around */
1541 if (*curpps + 1 > *curpps)
1542 *curpps = *curpps + 1;
1543 #else
1544 /*
1545 * assume that there's not too many calls to this function.
1546 * not sure if the assumption holds, as it depends on *caller's*
1547 * behavior, not the behavior of this function.
1548 * IMHO it is wrong to make assumption on the caller's behavior,
1549 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
1550 */
1551 *curpps = *curpps + 1;
1552 #endif
1553
1554 return (rv);
1555 }
1556