/*	$NetBSD: kern_time.c,v 1.224 2024/12/22 23:16:26 riastradh Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, by Andrew Doran, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.224 2024/12/22 23:16:26 riastradh Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timevar.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

kmutex_t	itimer_mutex __cacheline_aligned;	/* XXX static */
static struct itlist itimer_realtime_changed_notify;

static void	itimer_callout(void *);
static void	ptimer_intr(void *);
static void	*ptimer_sih __read_mostly;
static TAILQ_HEAD(, ptimer) ptimer_queue;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

#define	DELAYTIMER_MAX	32

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	mutex_init(&itimer_mutex, MUTEX_DEFAULT, IPL_SCHED);
	LIST_INIT(&itimer_realtime_changed_notify);

	TAILQ_INIT(&ptimer_queue);
	ptimer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    ptimer_intr, NULL);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, which would allow an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}
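
/*
 * Illustrative sketch (not part of the original source): a caller
 * validating a proposed new time, here a hypothetical timespec
 * "proposed", might use time_wraps() like this:
 *
 *	struct timespec proposed, now, delta;
 *
 *	nanotime(&now);
 *	timespecsub(&proposed, &now, &delta);
 *	if (time_wraps(&proposed, &delta))
 *		return EPERM;
 */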

/*
 * itimer_lock:
 *
 * Acquire the interval timer data lock.
 */
void
itimer_lock(void)
{
	mutex_spin_enter(&itimer_mutex);
}

/*
 * itimer_unlock:
 *
 * Release the interval timer data lock.
 */
void
itimer_unlock(void)
{
	mutex_spin_exit(&itimer_mutex);
}

/*
 * itimer_lock_held:
 *
 * Check that the interval timer lock is held for diagnostic
 * assertions.
 */
inline bool __diagused
itimer_lock_held(void)
{
	return mutex_owned(&itimer_mutex);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;

	/*
	 * The time being set to an unreasonable value will cause
	 * unreasonable system behaviour.
	 */
	if (ts->tv_sec < 0 || ts->tv_sec > (1LL << 36))
		return EINVAL;

	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		return EPERM;
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		return EPERM;
	}
#endif

	tc_setclock(ts);

	resettodr();

	/*
	 * Notify pending CLOCK_REALTIME timers about the real time change.
	 * There may be inactive timers on this list, but this happens
	 * comparatively less often than timers firing, and so it's better
	 * to put the extra checks here than to complicate the other code
	 * path.
	 */
	struct itimer *it;
	itimer_lock();
	LIST_FOREACH(it, &itimer_realtime_changed_notify, it_rtchgq) {
		KASSERT(it->it_ops->ito_realtime_changed != NULL);
		if (timespecisset(&it->it_time.it_value)) {
			(*it->it_ops->ito_realtime_changed)(it);
		}
	}
	itimer_unlock();

	return 0;
}

int
settime(struct proc *p, struct timespec *ts)
{
	return settime1(p, ts, true);
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}


int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	if (tp->tv_nsec < 0 || tp->tv_nsec >= 1000000000L)
		return EINVAL;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return error;
		break;
	case CLOCK_MONOTONIC:
		return EINVAL;	/* read-only clock */
	default:
		return EINVAL;
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}
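
/*
 * Worked example (illustrative): with a timecounter running at the
 * classic i8254 rate of 1193182 Hz, the computation above reports a
 * resolution of 1000000000 / 1193182 = 838 ns.  Any frequency above
 * 1 GHz is clamped to the finest reportable resolution, 1 ns.
 */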

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return error;

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		goto out;

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		goto out;

	if ((SCARG(uap, flags) & TIMER_ABSTIME) == 0 &&
	    (error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt))) != 0)
		error = error1;
out:
	*retval = error;
	return 0;
}

int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (error == EWOULDBLOCK)
		error = 0;
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;
		int err;

		err = clock_gettime1(clock_id, &rmtend);
		if (err != 0)
			return err;

		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			if (timespeccmp(&rmtend, &rmtstart, <))
				timespecclear(t); /* clock wound back */
			else
				timespecsub(&rmtend, &rmtstart, t);
			if (timespeccmp(rqt, t, <))
				timespecclear(t);
			else
				timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;

	return error;
}
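
/*
 * Userland sketch (illustrative, not part of the kernel source):
 * sleeping to an absolute CLOCK_MONOTONIC deadline avoids the drift
 * that accumulates when a relative sleep is restarted after signals,
 * since nanosleep1() recomputes the remaining time from the same
 * absolute request on every pass:
 *
 *	#include <time.h>
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 5;
 *	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *	    &deadline, NULL) == EINTR)
 *		continue;
 */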

int
sys_clock_getcpuclockid2(struct lwp *l,
    const struct sys_clock_getcpuclockid2_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(clockid_t *) clock_id;
	} */
	pid_t pid;
	lwpid_t lid;
	clockid_t clock_id;
	id_t id = SCARG(uap, id);

	switch (SCARG(uap, idtype)) {
	case P_PID:
		pid = id == 0 ? l->l_proc->p_pid : id;
		clock_id = CLOCK_PROCESS_CPUTIME_ID | pid;
		break;
	case P_LWPID:
		lid = id == 0 ? l->l_lid : id;
		clock_id = CLOCK_THREAD_CPUTIME_ID | lid;
		break;
	default:
		return EINVAL;
	}
	return copyout(&clock_id, SCARG(uap, clock_id), sizeof(clock_id));
}

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return error;
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return error;
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	if (utv->tv_usec < 0 || utv->tv_usec >= 1000000)
		return EINVAL;

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return error;
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{

	if (olddelta) {
		memset(olddelta, 0, sizeof(*olddelta));
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		/*
		 * XXX This should maybe just report failure to
		 * userland for nonsense deltas.
		 */
		if (delta->tv_sec > INT64_MAX/1000000 - 1) {
			time_adjtime = INT64_MAX;
		} else if (delta->tv_sec < INT64_MIN/1000000 + 1) {
			time_adjtime = INT64_MIN;
		} else {
			time_adjtime = delta->tv_sec * 1000000
			    + MAX(-999999, MIN(999999, delta->tv_usec));
		}

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support.
 *
 * The itimer_*() routines provide generic support for interval timers,
 * both real (CLOCK_REALTIME, CLOCK_MONOTONIC), and virtual (CLOCK_VIRTUAL,
 * CLOCK_PROF).
 *
 * Real timers keep their deadline as an absolute time, and are fired
 * by a callout.  Virtual timers are kept as a linked-list of deltas,
 * and are processed by hardclock().
 *
 * Because the real time timer callout may be delayed in real time due
 * to interrupt processing on the system, it is possible for the real
 * time timeout routine (itimer_callout()) to run after its deadline
 * has passed.  It does not suffice, therefore, to reload the real timer
 * .it_value from the timer's .it_interval.  Rather, we compute the next
 * deadline in absolute time based on the current time and the
 * .it_interval value, and report any overruns.
 *
 * Note that while the virtual timers are supported in a generic fashion
 * here, they only (currently) make sense as per-process timers, and thus
 * only really work for that case.
 */
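
/*
 * Illustration (not part of the original source) of the virtual
 * timer delta encoding: three timers due 5 s, 8 s and 12 s from now
 * sit on their itlist as the deltas 5, 3 and 4, so hardclock() only
 * ever decrements the head entry:
 *
 *	head -> [5] -> [3] -> [4]	(absolute: 5 s, 8 s, 12 s)
 *
 * After the head expires and is removed, the remaining timers are
 * due 3 s and 7 s later, which is exactly what the stored deltas
 * [3] -> [4] already say.
 */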

/*
 * itimer_init:
 *
 * Initialize the common data for an interval timer.
 */
void
itimer_init(struct itimer * const it, const struct itimer_ops * const ops,
    clockid_t const id, struct itlist * const itl)
{

	KASSERT(itimer_lock_held());
	KASSERT(ops != NULL);

	timespecclear(&it->it_time.it_value);
	it->it_ops = ops;
	it->it_clockid = id;
	it->it_overruns = 0;
	it->it_dying = false;
	if (!CLOCK_VIRTUAL_P(id)) {
		KASSERT(itl == NULL);
		callout_init(&it->it_ch, CALLOUT_MPSAFE);
		callout_setfunc(&it->it_ch, itimer_callout, it);
		if (id == CLOCK_REALTIME && ops->ito_realtime_changed != NULL) {
			LIST_INSERT_HEAD(&itimer_realtime_changed_notify,
			    it, it_rtchgq);
		}
	} else {
		KASSERT(itl != NULL);
		it->it_vlist = itl;
		it->it_active = false;
	}
}

/*
 * itimer_poison:
 *
 * Poison an interval timer, preventing it from being scheduled
 * or processed, in preparation for freeing the timer.
 */
void
itimer_poison(struct itimer * const it)
{

	KASSERT(itimer_lock_held());

	it->it_dying = true;

	/*
	 * For non-virtual timers, stop the callout, or wait for it to
	 * run if it has already fired.  It cannot restart again after
	 * this point: the callout won't restart itself when dying, no
	 * other users holding the lock can restart it, and any other
	 * users waiting for callout_halt concurrently (itimer_settime)
	 * will restart from the top.
	 */
	if (!CLOCK_VIRTUAL_P(it->it_clockid)) {
		callout_halt(&it->it_ch, &itimer_mutex);
		if (it->it_clockid == CLOCK_REALTIME &&
		    it->it_ops->ito_realtime_changed != NULL) {
			LIST_REMOVE(it, it_rtchgq);
		}
	}
}

/*
 * itimer_fini:
 *
 * Release resources used by an interval timer.
 *
 * N.B. itimer_lock must be held on entry, and is released on exit.
 */
void
itimer_fini(struct itimer * const it)
{

	KASSERT(itimer_lock_held());

	/* All done with the global state. */
	itimer_unlock();

	/* Destroy the callout, if needed. */
	if (!CLOCK_VIRTUAL_P(it->it_clockid))
		callout_destroy(&it->it_ch);
}

/*
 * itimer_decr:
 *
 * Decrement an interval timer by a specified number of nanoseconds,
 * which must be less than a second, i.e. < 1000000000.  If the timer
 * expires, then reload it.  In this case, carry over (nsec - old value)
 * to reduce the value reloaded into the timer so that the timer does
 * not drift.  This routine assumes that it is called in a context where
 * the timers on which it is operating cannot change in value.
 *
 * Returns true if the timer has expired.
 */
static bool
itimer_decr(struct itimer *it, int nsec)
{
	struct itimerspec *itp;
	int error __diagused;

	KASSERT(itimer_lock_held());
	KASSERT(CLOCK_VIRTUAL_P(it->it_clockid));

	itp = &it->it_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return false;
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		error = itimer_settime(it);
		KASSERT(error == 0); /* virtual, never fails */
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return true;
}
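
/*
 * Worked example (illustrative): at HZ=100, ptimer_tick() calls
 * itimer_decr() with nsec = 10000000 (10 ms) per tick.  If the head
 * timer has it_value = 4 ms and it_interval = 100 ms, it expires
 * with 6 ms of the tick left over, so it is reloaded with
 * 100 - 6 = 94 ms.  The following expiry thus lands 100 ms after
 * the previous one rather than 100 ms after this tick, which is the
 * carry-over that keeps the timer from drifting.
 */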

/*
 * itimer_arm_real:
 *
 * Arm a non-virtual timer.
 */
static void
itimer_arm_real(struct itimer * const it)
{

	KASSERT(!it->it_dying);
	KASSERT(!CLOCK_VIRTUAL_P(it->it_clockid));
	KASSERT(!callout_pending(&it->it_ch));

	/*
	 * No need to check the tshzto() return value here;
	 * callout_schedule() does it for us.
	 */
	callout_schedule(&it->it_ch,
	    (it->it_clockid == CLOCK_MONOTONIC
		? tshztoup(&it->it_time.it_value)
		: tshzto(&it->it_time.it_value)));
}

/*
 * itimer_callout:
 *
 * Callout to expire a non-virtual timer.  Queue it up for processing,
 * and then reload, if it is configured to do so.
 *
 * N.B. A delay in processing this callout causes multiple
 * SIGALRM calls to be compressed into one.
 */
static void
itimer_callout(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct itimer * const it = arg;
	int backwards;

	itimer_lock();
	(*it->it_ops->ito_fire)(it);

	if (!timespecisset(&it->it_time.it_interval)) {
		timespecclear(&it->it_time.it_value);
		itimer_unlock();
		return;
	}

	if (it->it_clockid == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}

	backwards = (timespeccmp(&it->it_time.it_value, &now, >));

	/* Nonnegative interval guaranteed by itimerfix. */
	KASSERT(it->it_time.it_interval.tv_sec >= 0);
	KASSERT(it->it_time.it_interval.tv_nsec >= 0);

	/* Handle the easy case of non-overflown timers first. */
	if (!backwards &&
	    timespecaddok(&it->it_time.it_value, &it->it_time.it_interval)) {
		timespecadd(&it->it_time.it_value, &it->it_time.it_interval,
		    &next);
		it->it_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&it->it_time.it_value);
		interval = timespec2ns(&it->it_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			it->it_overruns += (now_ns - last_val) / interval;

		it->it_time.it_value.tv_sec = next_val / 1000000000;
		it->it_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Reset the callout, if it's not going away.
	 */
	if (!it->it_dying)
		itimer_arm_real(it);
	itimer_unlock();
}
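
/*
 * Worked example (illustrative) of the modular-arithmetic path
 * above, which runs when the clock was stepped backwards or when
 * the plain timespecadd() would overflow.  Suppose interval = 1 s,
 * the old deadline was last_val = 1.0 s and now_ns = 3.5 s (not
 * backwards):
 *
 *	next_val = 3.5e9 + (3.5e9 - 1.0e9 + 1e9 - 1) % 1e9 ~ 4.0e9
 *	it_overruns += (3.5e9 - 1.0e9) / 1e9 = 2
 *
 * The next deadline stays on the original 1 s phase, and the missed
 * expirations are reported as overruns instead of being replayed.
 */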

/*
 * itimer_settime:
 *
 * Set up the given interval timer.  The value in it->it_time.it_value
 * is taken to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC
 * timers and a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 *
 * If the callout had already fired but not yet run, fails with
 * ERESTART -- caller must restart from the top to look up a timer.
 *
 * Caller is responsible for validating it->it_time.it_value and
 * it->it_time.it_interval, e.g. with itimerfix or itimespecfix.
 */
int
itimer_settime(struct itimer *it)
{
	struct itimer *itn, *pitn;
	struct itlist *itl;

	KASSERT(itimer_lock_held());
	KASSERT(!it->it_dying);
	KASSERT(it->it_time.it_value.tv_sec >= 0);
	KASSERT(it->it_time.it_value.tv_nsec >= 0);
	KASSERT(it->it_time.it_value.tv_nsec < 1000000000);
	KASSERT(it->it_time.it_interval.tv_sec >= 0);
	KASSERT(it->it_time.it_interval.tv_nsec >= 0);
	KASSERT(it->it_time.it_interval.tv_nsec < 1000000000);

	if (!CLOCK_VIRTUAL_P(it->it_clockid)) {
		/*
		 * Try to stop the callout.  However, if it had already
		 * fired, we have to drop the lock to wait for it, so
		 * the world may have changed and the timer may not be
		 * there any more.  In that case, tell the caller to
		 * start over from the top.
		 */
		if (callout_halt(&it->it_ch, &itimer_mutex))
			return ERESTART;
		KASSERT(!it->it_dying);

		/* Now we can touch it and start it up again. */
		if (timespecisset(&it->it_time.it_value))
			itimer_arm_real(it);
	} else {
		if (it->it_active) {
			itn = LIST_NEXT(it, it_list);
			LIST_REMOVE(it, it_list);
			for ( ; itn; itn = LIST_NEXT(itn, it_list))
				timespecadd(&it->it_time.it_value,
				    &itn->it_time.it_value,
				    &itn->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			itl = it->it_vlist;
			for (itn = LIST_FIRST(itl), pitn = NULL;
			    itn && timespeccmp(&it->it_time.it_value,
				&itn->it_time.it_value, >);
			    pitn = itn, itn = LIST_NEXT(itn, it_list))
				timespecsub(&it->it_time.it_value,
				    &itn->it_time.it_value,
				    &it->it_time.it_value);

			if (pitn)
				LIST_INSERT_AFTER(pitn, it, it_list);
			else
				LIST_INSERT_HEAD(itl, it, it_list);

			for ( ; itn ; itn = LIST_NEXT(itn, it_list))
				timespecsub(&itn->it_time.it_value,
				    &it->it_time.it_value,
				    &itn->it_time.it_value);

			it->it_active = true;
		} else {
			it->it_active = false;
		}
	}

	/* Success! */
	return 0;
}

/*
 * itimer_gettime:
 *
 * Return the remaining time of an interval timer.
 */
void
itimer_gettime(const struct itimer *it, struct itimerspec *aits)
{
	struct timespec now;
	struct itimer *itn;

	KASSERT(itimer_lock_held());
	KASSERT(!it->it_dying);

	*aits = it->it_time;
	if (!CLOCK_VIRTUAL_P(it->it_clockid)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (it->it_clockid == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (it->it_active) {
		for (itn = LIST_FIRST(it->it_vlist); itn && itn != it;
		    itn = LIST_NEXT(itn, it_list))
			timespecadd(&aits->it_value,
			    &itn->it_time.it_value, &aits->it_value);
		KASSERT(itn != NULL); /* it should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}

/*
 * Per-process timer support.
 *
 * Both the BSD getitimer() family and the POSIX timer_*() family of
 * routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all.  The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC.  The rest may be
 * allocated by the timer_create() syscall.
 *
 * These timers are a "sub-class" of interval timer.
 */
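
/*
 * Userland sketch (illustrative, not part of the kernel source):
 * both interfaces below end up in the same pts_timers array;
 * setitimer(ITIMER_REAL, ...) uses reserved slot 0, while
 * timer_create() hands out slots from TIMER_MIN upward:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	struct sigevent ev = { .sigev_notify = SIGEV_SIGNAL,
 *	    .sigev_signo = SIGUSR1 };
 *	struct itimerspec its = { .it_value = { .tv_sec = 1 },
 *	    .it_interval = { .tv_sec = 1 } };
 *	timer_t tid;
 *
 *	if (timer_create(CLOCK_MONOTONIC, &ev, &tid) == 0)
 *		timer_settime(tid, 0, &its, NULL);	(arm, 1 Hz)
 */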

/*
 * ptimer_free:
 *
 * Free the per-process timer at the specified index.
 */
static void
ptimer_free(struct ptimers *pts, int index)
{
	struct itimer *it;
	struct ptimer *pt;

	KASSERT(itimer_lock_held());

	it = pts->pts_timers[index];
	pt = container_of(it, struct ptimer, pt_itimer);
	pts->pts_timers[index] = NULL;
	itimer_poison(it);

	/*
	 * Remove it from the queue to be signalled.  Must be done
	 * after itimer is poisoned, because we may have had to wait
	 * for the callout to complete.
	 */
	if (pt->pt_queued) {
		TAILQ_REMOVE(&ptimer_queue, pt, pt_chain);
		pt->pt_queued = false;
	}

	itimer_fini(it);	/* releases itimer_lock */
	kmem_free(pt, sizeof(*pt));
}

/*
 * ptimers_alloc:
 *
 * Allocate a ptimers for the specified process.
 */
static struct ptimers *
ptimers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = kmem_alloc(sizeof(*pts), KM_SLEEP);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	itimer_lock();
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		itimer_unlock();
		return pts;
	}
	itimer_unlock();
	kmem_free(pts, sizeof(*pts));
	return p->p_timers;
}

/*
 * ptimers_free:
 *
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 *
 * This function is exported because it is needed in the exec and
 * exit code paths.
 */
void
ptimers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct itimer *itn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	itimer_lock();
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (itn = LIST_FIRST(&pts->pts_virtual);
		    itn && itn != pts->pts_timers[ITIMER_VIRTUAL];
		    itn = LIST_NEXT(itn, it_list)) {
			KASSERT(itn->it_clockid == CLOCK_VIRTUAL);
			timespecadd(&ts, &itn->it_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (itn) {
			KASSERT(itn->it_clockid == CLOCK_VIRTUAL);
			timespecadd(&ts, &itn->it_time.it_value,
			    &itn->it_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, itn, it_list);
		}
		timespecclear(&ts);
		for (itn = LIST_FIRST(&pts->pts_prof);
		    itn && itn != pts->pts_timers[ITIMER_PROF];
		    itn = LIST_NEXT(itn, it_list)) {
			KASSERT(itn->it_clockid == CLOCK_PROF);
			timespecadd(&ts, &itn->it_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (itn) {
			KASSERT(itn->it_clockid == CLOCK_PROF);
			timespecadd(&ts, &itn->it_time.it_value,
			    &itn->it_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, itn, it_list);
		}
		i = TIMER_MIN;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			/* Free the timer and release the lock. */
			ptimer_free(pts, i);
			/* Reacquire the lock for the next one. */
			itimer_lock();
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		itimer_unlock();
		kmem_free(pts, sizeof(*pts));
	} else
		itimer_unlock();
}

/*
 * ptimer_fire:
 *
 * Fire a per-process timer.
 */
static void
ptimer_fire(struct itimer *it)
{
	struct ptimer *pt = container_of(it, struct ptimer, pt_itimer);

	KASSERT(itimer_lock_held());

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
		return;
	}

	if (!pt->pt_queued) {
		TAILQ_INSERT_TAIL(&ptimer_queue, pt, pt_chain);
		pt->pt_queued = true;
		softint_schedule(ptimer_sih);
	}
}

/*
 * Operations vector for per-process timers (BSD and POSIX).
 */
static const struct itimer_ops ptimer_itimer_ops = {
	.ito_fire = ptimer_fire,
};

/*
 * sys_timer_create:
 *
 * System call to create a POSIX timer.
 */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct itlist *itl;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return EINVAL;

	if ((pts = p->p_timers) == NULL)
		pts = ptimers_alloc(p);

	pt = kmem_zalloc(sizeof(*pt), KM_SLEEP);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			(pt->pt_ev.sigev_signo <= 0 ||
			pt->pt_ev.sigev_signo >= NSIG))) {
			kmem_free(pt, sizeof(*pt));
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	itimer_lock();
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		itimer_unlock();
		kmem_free(pt, sizeof(*pt));
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}

	switch (id) {
	case CLOCK_VIRTUAL:
		itl = &pts->pts_virtual;
		break;
	case CLOCK_PROF:
		itl = &pts->pts_prof;
		break;
	default:
		itl = NULL;
	}

	itimer_init(&pt->pt_itimer, &ptimer_itimer_ops, id, itl);
	pt->pt_proc = p;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;

	pts->pts_timers[timerid] = &pt->pt_itimer;
	itimer_unlock();

	return copyout(&timerid, tid, sizeof(timerid));
}

/*
 * sys_timer_delete:
 *
 * System call to delete a POSIX timer.
 */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct itimer *it, *itn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;

	itimer_lock();
	if ((it = pts->pts_timers[timerid]) == NULL) {
		itimer_unlock();
		return EINVAL;
	}

	if (CLOCK_VIRTUAL_P(it->it_clockid)) {
		if (it->it_active) {
			itn = LIST_NEXT(it, it_list);
			LIST_REMOVE(it, it_list);
			for ( ; itn; itn = LIST_NEXT(itn, it_list))
				timespecadd(&it->it_time.it_value,
				    &itn->it_time.it_value,
				    &itn->it_time.it_value);
			it->it_active = false;
		}
	}

	/* Free the timer and release the lock. */
	ptimer_free(pts, timerid);

	return 0;
}

/*
 * sys___timer_settime50:
 *
 * System call to set/arm a POSIX timer.
 */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return error;

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val;
	struct ptimers *pts;
	struct itimer *it;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if (itimespecfix(&val.it_value) != 0 ||
	    itimespecfix(&val.it_interval) != 0)
		return EINVAL;

	itimer_lock();
restart:
	if ((it = pts->pts_timers[timerid]) == NULL) {
		itimer_unlock();
		return EINVAL;
	}

	if (ovalue)
		itimer_gettime(it, ovalue);
	it->it_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&it->it_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(it->it_clockid)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (it->it_clockid == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&it->it_time.it_value, &now,
				    &it->it_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&it->it_time.it_value, &now,
				    &it->it_time.it_value);
				if (!timespecisset(&it->it_time.it_value) ||
				    it->it_time.it_value.tv_sec < 0) {
					it->it_time.it_value.tv_sec = 0;
					it->it_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	error = itimer_settime(it);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(it->it_clockid));
		goto restart;
	}
	KASSERT(error == 0);
	itimer_unlock();

	return 0;
}
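
/*
 * Example (illustrative) of the conversion above: arming a
 * CLOCK_REALTIME timer with a relative it_value of 2 s at
 * now = 1000.0 s stores an absolute deadline of 1002.0 s, which is
 * what lets settime1() find and notify the timer if the wall clock
 * is later stepped.  Conversely, a virtual timer armed with
 * TIMER_ABSTIME has "now" subtracted out, and a deadline already in
 * the past is clamped to (0 s, 1 ns) so the timer fires promptly
 * instead of the zero value being read as "disarm".
 */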

/*
 * sys___timer_gettime50:
 *
 * System call to return the time remaining until a POSIX timer fires.
 */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct itimer *it;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	itimer_lock();
	if ((it = pts->pts_timers[timerid]) == NULL) {
		itimer_unlock();
		return EINVAL;
	}
	itimer_gettime(it, its);
	itimer_unlock();

	return 0;
}

/*
 * sys_timer_getoverrun:
 *
 * System call to return the number of times a POSIX timer has
 * expired while a notification was already pending.  The counter
 * is reset when a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct itimer *it;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	itimer_lock();
	if ((it = pts->pts_timers[timerid]) == NULL) {
		itimer_unlock();
		return EINVAL;
	}
	pt = container_of(it, struct ptimer, pt_itimer);
	*retval = pt->pt_poverruns;
	if (*retval >= DELAYTIMER_MAX)
		*retval = DELAYTIMER_MAX;
	itimer_unlock();

	return 0;
}

/*
 * sys___getitimer50:
 *
 * System call to get the time remaining before a BSD timer fires.
 */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	memset(&aitv, 0, sizeof(aitv));
	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct itimer *it;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return EINVAL;

	itimer_lock();
	pts = p->p_timers;
	if (pts == NULL || (it = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		itimer_gettime(it, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	itimer_unlock();

	return 0;
}

/*
 * sys___setitimer50:
 *
 * System call to set/arm a BSD timer.
 */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return error;
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return error;
	}
	if (itvp == NULL)
		return 0;

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *spare;
	struct itimer *it;
	struct itlist *itl;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return EINVAL;
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return EINVAL;

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return 0;
	if (pts == NULL)
		pts = ptimers_alloc(p);
	itimer_lock();
restart:
	it = pts->pts_timers[which];
	if (it == NULL) {
		struct ptimer *pt;

		if (spare == NULL) {
			itimer_unlock();
			spare = kmem_zalloc(sizeof(*spare), KM_SLEEP);
			goto retry;
		}
		pt = spare;
		spare = NULL;

		it = &pt->pt_itimer;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			itl = NULL;
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			itl = &pts->pts_virtual;
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			itl = &pts->pts_prof;
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		default:
			panic("%s: can't happen %d", __func__, which);
		}
		itimer_init(it, &ptimer_itimer_ops, which, itl);
		pt->pt_proc = p;
		pt->pt_entry = which;

		pts->pts_timers[which] = it;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &it->it_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &it->it_time.it_interval);

	error = 0;
	if (timespecisset(&it->it_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			if (!timespecaddok(&it->it_time.it_value, &now)) {
				error = EINVAL;
				goto out;
			}
			timespecadd(&it->it_time.it_value, &now,
			    &it->it_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			if (!timespecaddok(&it->it_time.it_value, &now)) {
				error = EINVAL;
				goto out;
			}
			timespecadd(&it->it_time.it_value, &now,
			    &it->it_time.it_value);
			break;
		default:
			break;
		}
	}

	error = itimer_settime(it);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(it->it_clockid));
		goto restart;
	}
	KASSERT(error == 0);
out:
	itimer_unlock();
	if (spare != NULL)
		kmem_free(spare, sizeof(*spare));

	return error;
}

/*
 * ptimer_tick:
 *
 * Called from hardclock() to decrement per-process virtual timers.
 */
void
ptimer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct itimer *it;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	itimer_lock();
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (it = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimer_decr(it, tick * 1000))
				(*it->it_ops->ito_fire)(it);
		if ((it = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimer_decr(it, tick * 1000))
				(*it->it_ops->ito_fire)(it);
	}
	itimer_unlock();
}

/*
 * ptimer_intr:
 *
 * Software interrupt handler for processing per-process
 * timer expiration.
 */
static void
ptimer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct itimer *it;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(&proc_lock);
	itimer_lock();
	while ((pt = TAILQ_FIRST(&ptimer_queue)) != NULL) {
		it = &pt->pt_itimer;

		TAILQ_REMOVE(&ptimer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		p = pt->pt_proc;
		if (p->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			it->it_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = it->it_overruns;
		it->it_overruns = 0;
		itimer_unlock();
		kpsignal(p, &ksi, NULL);
		itimer_lock();
	}
	itimer_unlock();
	mutex_exit(&proc_lock);
}