/*	$NetBSD: subr_time.c,v 1.28 2021/03/18 13:45:15 nia Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 *	@(#)kern_time.c		8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.28 2021/03/18 13:45:15 nia Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/timex.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/intr.h>

#ifdef DEBUG_STICKS
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif

/*
 * Compute number of hz until specified time.  Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
tvhzto(const struct timeval *tvp)
{
        struct timeval now, tv;

        tv = *tvp;      /* Don't modify original tvp. */
        getmicrotime(&now);
        timersub(&tv, &now, &tv);
        return tvtohz(&tv);
}
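
/*
 * Illustrative usage sketch only, not taken from an actual caller in
 * the tree: a driver holding an absolute deadline in a struct timeval
 * could arm a callout with it roughly as follows, where sc, sc_ch and
 * sc_timeout_handler are hypothetical names:
 *
 *      struct timeval deadline;        (absolute wall-clock time)
 *      callout_reset(&sc->sc_ch, tvhzto(&deadline), sc_timeout_handler, sc);
 */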

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(const struct timeval *tv)
{
        unsigned long ticks;
        long sec, usec;

        /*
         * If the number of usecs in the whole seconds part of the time
         * difference fits in a long, then the total number of usecs will
         * fit in an unsigned long.  Compute the total and convert it to
         * ticks, rounding up and adding 1 to allow for the current tick
         * to expire.  Rounding also depends on unsigned long arithmetic
         * to avoid overflow.
         *
         * Otherwise, if the number of ticks in the whole seconds part of
         * the time difference fits in a long, then convert the parts to
         * ticks separately and add, using similar rounding methods and
         * overflow avoidance.  This method would work in the previous
         * case, but it is slightly slower and assumes that hz is integral.
         *
         * Otherwise, round the time difference down to the maximum
         * representable value.
         *
         * If ints are 32-bit, then the maximum value for any timeout in
         * 10ms ticks is 248 days.
         */
        sec = tv->tv_sec;
        usec = tv->tv_usec;

        KASSERT(usec >= 0 && usec < 1000000);

        /* catch overflows in conversion time_t->int */
        if (tv->tv_sec > INT_MAX)
                return INT_MAX;
        if (tv->tv_sec < 0)
                return 0;

        if (sec < 0 || (sec == 0 && usec == 0)) {
                /*
                 * Would expire now or in the past.  Return 0 ticks.
                 * This is different from the legacy tvhzto() interface,
                 * and callers need to check for it.
                 */
                ticks = 0;
        } else if (sec <= (LONG_MAX / 1000000))
                ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
                    / tick) + 1;
        else if (sec <= (LONG_MAX / hz))
                ticks = (sec * hz) +
                    (((unsigned long)usec + (tick - 1)) / tick) + 1;
        else
                ticks = LONG_MAX;

        if (ticks > INT_MAX)
                ticks = INT_MAX;

        return ((int)ticks);
}
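
/*
 * Worked example (assuming hz = 100, so tick = 10000 microseconds): a
 * difference of 0 seconds and 25000 microseconds takes the first
 * conversion branch above and yields
 *
 *      ((0 * 1000000 + 25000 + 9999) / 10000) + 1 = 3 + 1 = 4 ticks,
 *
 * i.e. the rounded-up tick count plus one, so that the partially
 * elapsed current tick cannot make the timeout fire early.
 */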

int
tshzto(const struct timespec *tsp)
{
        struct timespec now, ts;

        ts = *tsp;      /* Don't modify original tsp. */
        getnanotime(&now);
        timespecsub(&ts, &now, &ts);
        return tstohz(&ts);
}

int
tshztoup(const struct timespec *tsp)
{
        struct timespec now, ts;

        ts = *tsp;      /* Don't modify original tsp. */
        getnanouptime(&now);
        timespecsub(&ts, &now, &ts);
        return tstohz(&ts);
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tstohz(const struct timespec *ts)
{
        struct timeval tv;

        /*
         * usec has great enough resolution for hz, so convert to a
         * timeval and use tvtohz() above.
         */
        TIMESPEC_TO_TIMEVAL(&tv, ts);
        return tvtohz(&tv);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).  We do not
 * round up the 0,0 value, because it means to disable the
 * timer or the interval.
 */
int
itimerfix(struct timeval *tv)
{

        if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
                return EINVAL;
        if (tv->tv_sec < 0)
                return ETIMEDOUT;
        if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
                tv->tv_usec = tick;
        return 0;
}

int
itimespecfix(struct timespec *ts)
{

        if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
                return EINVAL;
        if (ts->tv_sec < 0)
                return ETIMEDOUT;
        if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
                ts->tv_nsec = tick * 1000;
        return 0;
}
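
/*
 * Example of the rounding behaviour above (illustrative only, assuming
 * hz = 100 so tick = 10000): itimerfix() turns { .tv_sec = 0,
 * .tv_usec = 1 } into { 0, 10000 }, i.e. one clock resolution, while
 * { 0, 0 } is left alone because it means "disable the timer or the
 * interval".  itimespecfix() does the same in nanoseconds, rounding
 * { 0, 1 } up to { 0, 10000000 }.
 */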

int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{

        if (itimespecfix(ts)) {
                return -1;
        }
        getnanouptime(sleepts);
        return 0;
}

int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
        struct timespec sleptts;

        /*
         * Reduce ts by elapsed time based on monotonic time scale.
         */
        getnanouptime(&sleptts);
        timespecadd(ts, sleepts, ts);
        timespecsub(ts, &sleptts, ts);
        *sleepts = sleptts;

        return tstohz(ts);
}
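
/*
 * Typical usage sketch for the pair above (illustrative only; cv, lock
 * and done are hypothetical): initialise once, then call gettimeleft()
 * on each iteration of a wait loop so the total timeout stays bounded
 * across multiple wakeups:
 *
 *      if (inittimeleft(&ts, &sleepts) == -1)
 *              return EINVAL;
 *      while (!done) {
 *              timo = gettimeleft(&ts, &sleepts);
 *              if (timo <= 0)
 *                      return EWOULDBLOCK;
 *              error = cv_timedwait(&cv, &lock, timo);
 *              ...
 *      }
 */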

void
clock_timeleft(clockid_t clockid, struct timespec *ts, struct timespec *sleepts)
{
        struct timespec sleptts;

        clock_gettime1(clockid, &sleptts);
        timespecadd(ts, sleepts, ts);
        timespecsub(ts, &sleptts, ts);
        *sleepts = sleptts;
}

static void
ticks2ts(uint64_t ticks, struct timespec *ts)
{
        ts->tv_sec = ticks / hz;
        uint64_t sticks = ticks - ts->tv_sec * hz;
        if (sticks > BINTIME_SCALE_MS)  /* floor(2^64 / 1000) */
                ts->tv_nsec = sticks / hz * 1000000000LL;
        else if (sticks > BINTIME_SCALE_US) /* floor(2^64 / 1000000) */
                ts->tv_nsec = sticks * 1000LL / hz * 1000000LL;
        else
                ts->tv_nsec = sticks * 1000000000LL / hz;
        DPRINTF(("%s: %ju/%ju -> %ju.%ju\n", __func__,
            (uintmax_t)ticks, (uintmax_t)sticks,
            (uintmax_t)ts->tv_sec, (uintmax_t)ts->tv_nsec));
}
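
/*
 * Worked example (assuming hz = 100): ticks = 250 gives tv_sec = 2 and
 * a remainder of 50 ticks; 50 is far below both overflow thresholds, so
 * the last branch applies and tv_nsec = 50 * 1000000000 / 100 =
 * 500000000, i.e. 2.5 seconds in total.
 */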

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{
        int error;
        uint64_t ticks;
        struct proc *p;

#define CPUCLOCK_ID_MASK (~(CLOCK_THREAD_CPUTIME_ID|CLOCK_PROCESS_CPUTIME_ID))
        if (clock_id & CLOCK_PROCESS_CPUTIME_ID) {
                pid_t pid = clock_id & CPUCLOCK_ID_MASK;

                mutex_enter(&proc_lock);
                p = pid == 0 ? curproc : proc_find(pid);
                if (p == NULL) {
                        mutex_exit(&proc_lock);
                        return ESRCH;
                }
                ticks = p->p_uticks + p->p_sticks + p->p_iticks;
                DPRINTF(("%s: u=%ju, s=%ju, i=%ju\n", __func__,
                    (uintmax_t)p->p_uticks, (uintmax_t)p->p_sticks,
                    (uintmax_t)p->p_iticks));
                mutex_exit(&proc_lock);

                // XXX: Perhaps create a special kauth type
                error = kauth_authorize_process(curlwp->l_cred,
                    KAUTH_PROCESS_PTRACE, p,
                    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
                if (error)
                        return error;
        } else if (clock_id & CLOCK_THREAD_CPUTIME_ID) {
                struct lwp *l;
                lwpid_t lid = clock_id & CPUCLOCK_ID_MASK;
                p = curproc;
                mutex_enter(p->p_lock);
                l = lid == 0 ? curlwp : lwp_find(p, lid);
                if (l == NULL) {
                        mutex_exit(p->p_lock);
                        return ESRCH;
                }
                ticks = l->l_rticksum + l->l_slpticksum;
                DPRINTF(("%s: r=%ju, s=%ju\n", __func__,
                    (uintmax_t)l->l_rticksum, (uintmax_t)l->l_slpticksum));
                mutex_exit(p->p_lock);
        } else
                ticks = (uint64_t)-1;

        if (ticks != (uint64_t)-1) {
                ticks2ts(ticks, ts);
                return 0;
        }

        switch (clock_id) {
        case CLOCK_REALTIME:
                nanotime(ts);
                break;
        case CLOCK_MONOTONIC:
                nanouptime(ts);
                break;
        default:
                return EINVAL;
        }

        return 0;
}
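
/*
 * Usage sketch (illustrative only): ordinary callers pass a plain clock
 * id, e.g.
 *
 *      error = clock_gettime1(CLOCK_MONOTONIC, &ts);
 *
 * As decoded above, a process CPU-time clock is requested by setting
 * CLOCK_PROCESS_CPUTIME_ID and encoding the pid in the remaining bits
 * (0 meaning curproc), and a thread CPU-time clock by setting
 * CLOCK_THREAD_CPUTIME_ID with the lwpid of an LWP in the current
 * process.
 */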

/*
 * Calculate the delta and convert it from a struct timespec to ticks.
 */
int
ts2timo(clockid_t clock_id, int flags, struct timespec *ts,
    int *timo, struct timespec *start)
{
        int error;
        struct timespec tsd;

        if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000L)
                return EINVAL;

        if (start == NULL)
                start = &tsd;

        if (flags != TIMER_RELTIME || start != &tsd) {
                error = clock_gettime1(clock_id, start);
                if (error != 0)
                        return error;
        }

        if (flags != TIMER_RELTIME)
                timespecsub(ts, start, ts);

        error = itimespecfix(ts);
        if (error != 0)
                return error;

        if (ts->tv_sec == 0 && ts->tv_nsec == 0)
                return ETIMEDOUT;

        *timo = tstohz(ts);
        KASSERT(*timo > 0);

        return 0;
}
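
/*
 * Usage sketch (illustrative only): a sleep primitive that accepts
 * either a relative or an absolute timespec can convert it in one step,
 * passing NULL for start when the sampled start time is not needed
 * afterwards:
 *
 *      error = ts2timo(CLOCK_MONOTONIC, TIMER_ABSTIME, &ts, &timo, NULL);
 *      if (error != 0)
 *              return error;   (EINVAL, ETIMEDOUT, ...)
 *      (timo is now a strictly positive tick count, e.g. for cv_timedwait())
 */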