/*	$NetBSD: kern_time.c,v 1.189.8.6 2020/05/25 17:48:16 martin Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.189.8.6 2020/05/25 17:48:16 martin Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

#define	DELAYTIMER_MAX	32

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

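/*
 * Second-stage initialization: set up the timer queue, its lock, and
 * the soft interrupt used to deliver timer signals.
 */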
void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers. Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}

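/*
 * Common code for clock_settime(): only CLOCK_REALTIME may be set;
 * CLOCK_MONOTONIC is read-only.
 */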
int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

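/*
 * Common code for clock_getres(): the resolution of both supported
 * clocks is derived from the timecounter frequency.
 */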
int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		goto out;

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		goto out;

	if ((SCARG(uap, flags) & TIMER_ABSTIME) == 0 &&
	    (error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt))) != 0)
		error = error1;
out:
	*retval = error;
	return 0;
}

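/*
 * Common code for nanosleep() and clock_nanosleep(): sleep on the
 * given clock for the requested (absolute or relative) time and, on
 * early wakeup, report the time remaining to the caller.
 */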
int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;
		int err;

		err = clock_gettime1(clock_id, &rmtend);
		if (err != 0)
			return err;

		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}

int
sys_clock_getcpuclockid2(struct lwp *l,
    const struct sys_clock_getcpuclockid2_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(clockid_t *) clock_id;
	} */
	pid_t pid;
	lwpid_t lid;
	clockid_t clock_id;
	id_t id = SCARG(uap, id);

	switch (SCARG(uap, idtype)) {
	case P_PID:
		pid = id == 0 ? l->l_proc->p_pid : id;
		clock_id = CLOCK_PROCESS_CPUTIME_ID | pid;
		break;
	case P_LWPID:
		lid = id == 0 ? l->l_lid : id;
		clock_id = CLOCK_THREAD_CPUTIME_ID | lid;
		break;
	default:
		return EINVAL;
	}
	return copyout(&clock_id, SCARG(uap, clock_id), sizeof(clock_id));
}

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

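/*
 * Common code for settimeofday(): copy the new time in from userspace
 * if necessary, warn about attempts to set the obsolete kernel time
 * zone, and hand the time to settime1() as a timespec.
 */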
int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

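/*
 * Common code for adjtime(): report the correction still pending in
 * time_adjtime and/or replace it with the new delta, both under
 * timecounter_lock.
 */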
void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;	/* in kern_ntptime.c */

	if (olddelta) {
		memset(olddelta, 0, sizeof(*olddelta));
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC. The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c. The real time timer is processed by a callout
 * routine, called from the softclock() routine. Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur. It does not suffice, therefore, to reload the real timer's
 * .it_value from the real time timer's .it_interval. Rather, we
 * compute the next time in absolute time the timer should go off.
 */

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

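/*
 * Common code for timer_create(): validate the sigevent, allocate a
 * ptimer, and install it in the first free slot of the process's
 * timer array, skipping the slots reserved for setitimer().
 */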
int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	memset(pt, 0, sizeof(*pt));
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
		     (pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
		     (pt->pt_ev.sigev_signo <= 0 ||
		      pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		callout_halt(&pt->pt_ch, &timer_lock);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * No need to check the tshzto() return value here;
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

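/*
 * Return the remaining time and interval of the given timer. For
 * real-time timers the stored absolute expiry time is converted back
 * to a relative value; for virtual timers the deltas along the queue
 * up to the timer are summed.
 */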
void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert the .it_value part of a real time timer from
		 * absolute to relative time. If the timer's expiry time
		 * has already passed, return 0; otherwise return the
		 * difference between the current time and the time the
		 * timer is due to go off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}

/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

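/*
 * Common code for timer_settime(): validate the new values, swap them
 * into the timer, converting between relative and absolute time as
 * the clock type requires, and arm the timer.
 */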
int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

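/*
 * Common code for timer_gettime(): look up the timer and fetch its
 * remaining time under timer_lock.
 */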
int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the number of times a periodic timer expired while a
 * notification was already pending. The counter is reset when a timer
 * expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	if (*retval >= DELAYTIMER_MAX)
		*retval = DELAYTIMER_MAX;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * No need to check the tshzto() return value here;
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, pt->pt_type == CLOCK_MONOTONIC ?
	    tshztoup(&pt->pt_time.it_value) : tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	memset(&aitv, 0, sizeof(aitv));
	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

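/*
 * Common code for getitimer(): fetch the timer's remaining time, or
 * zeroes if it was never set, and convert the result to timevals.
 */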
int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == NULL)
		return (0);

	return dosetitimer(p, which, &aitv);
}

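/*
 * Common code for setitimer(): allocate a timer if the process does
 * not already have one for this slot, convert the new value to an
 * absolute time for the real-time clocks, and arm the timer.
 */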
int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			memset(spare, 0, sizeof(*spare));
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (!CLOCK_VIRTUAL_P(which))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures. If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = TIMER_MIN;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

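/*
 * Free the timer in the given slot. Called with timer_lock held;
 * the lock is dropped before the callout is destroyed and the timer
 * is returned to the pool.
 */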
static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000. If the timer expires, then reload
 * it. In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift. This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}

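/*
 * A timer has expired: queue it for signal delivery from the soft
 * interrupt handler, unless a notification is already pending.
 */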
static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) {
		return;
	}
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

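/*
 * Called on each clock tick charged to the running LWP: decrement the
 * process's virtual timer (on user ticks) and profiling timer, and
 * fire any that expire.
 */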
void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

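/*
 * Soft interrupt handler: deliver the queued timer signals, counting
 * an overrun instead when the previous signal is still pending.
 */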
static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below. The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}