/*	$NetBSD: subr_time.c,v 1.20.8.2 2024/10/13 15:33:17 martin Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.20.8.2 2024/10/13 15:33:17 martin Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/timex.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/intr.h>

/*
 * Compute number of hz until specified time.  Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
tvhzto(const struct timeval *tvp)
{
	struct timeval now, tv;

	tv = *tvp;	/* Don't modify original tvp. */
	getmicrotime(&now);
	timersub(&tv, &now, &tv);
	return tvtohz(&tv);
}
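
/*
 * Illustrative use (hypothetical driver code, not taken from this file):
 * a subsystem holding an absolute deadline in a struct timeval can arm
 * a callout with the tick count computed above, e.g.
 *
 *	callout_reset(&sc->sc_tick_ch, tvhzto(&sc->sc_deadline),
 *	    example_timeout, sc);
 *
 * where sc, sc_tick_ch, sc_deadline and example_timeout are made-up
 * names; only tvhzto() and callout_reset(9) are real interfaces here.
 */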

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(const struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;

	KASSERT(usec >= 0);
	KASSERT(usec < 1000000);

	/* catch overflows in conversion time_t->int */
	if (tv->tv_sec > INT_MAX)
		return INT_MAX;
	if (tv->tv_sec < 0)
		return 0;

	if (sec < 0 || (sec == 0 && usec == 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy tvhzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
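
/*
 * Worked example (assuming hz = 100, so tick = 10000 usec): a timeval
 * of 0 s / 15000 us takes the first branch above and yields
 *
 *	ticks = ((15000 + 9999) / 10000) + 1 = 2 + 1 = 3
 *
 * i.e. the delay is rounded up to whole ticks and one extra tick is
 * added so the timeout cannot fire before the requested time.
 */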

int
tshzto(const struct timespec *tsp)
{
	struct timespec now, ts;

	ts = *tsp;	/* Don't modify original tsp. */
	getnanotime(&now);
	timespecsub(&ts, &now, &ts);
	return tstohz(&ts);
}

int
tshztoup(const struct timespec *tsp)
{
	struct timespec now, ts;

	ts = *tsp;	/* Don't modify original tsp. */
	getnanouptime(&now);
	timespecsub(&ts, &now, &ts);
	return tstohz(&ts);
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tstohz(const struct timespec *ts)
{
	struct timeval tv;

	/*
	 * usec has great enough resolution for hz, so convert to a
	 * timeval and use tvtohz() above.
	 */
	TIMESPEC_TO_TIMEVAL(&tv, ts);
	return tvtohz(&tv);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).  We don't
 * time out the 0,0 value because it means to disable the
 * timer or the interval.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return EINVAL;
	if (tv->tv_sec < 0)
		return ETIMEDOUT;
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return 0;
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return EINVAL;
	if (ts->tv_sec < 0)
		return ETIMEDOUT;
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return 0;
}
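
/*
 * Worked example (assuming hz = 100, so tick = 10000 usec): a request
 * of 0 s / 1000000 ns (1 ms) is below the clock resolution, so
 * itimespecfix() rounds tv_nsec up to tick * 1000 = 10000000 ns (10 ms);
 * a negative tv_nsec returns EINVAL and a negative tv_sec returns
 * ETIMEDOUT, exactly as in the code above.
 */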

int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{

	if (itimespecfix(ts)) {
		return -1;
	}
	getnanouptime(sleepts);
	return 0;
}

int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	struct timespec sleptts;

	/*
	 * Reduce ts by elapsed time based on monotonic time scale.
	 */
	getnanouptime(&sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;

	return tstohz(ts);
}
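
/*
 * Illustrative usage pattern (a sketch, not code from this file): a
 * syscall with a relative timeout typically validates it once with
 * inittimeleft() and then calls gettimeleft() around each sleep, so
 * that time spent sleeping is charged against the remaining timeout:
 *
 *	struct timespec ts, sleepts;	// ts holds the relative timeout
 *	int timo, error;
 *
 *	if (inittimeleft(&ts, &sleepts) == -1)
 *		return EINVAL;
 *	for (;;) {
 *		timo = gettimeleft(&ts, &sleepts);
 *		if (timo <= 0)
 *			return EWOULDBLOCK;
 *		error = cv_timedwait(&example_cv, &example_lock, timo);
 *		...
 *	}
 *
 * example_cv and example_lock are hypothetical names; cv_timedwait(9)
 * is the real interface.
 */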

void
clock_timeleft(clockid_t clockid, struct timespec *ts, struct timespec *sleepts)
{
	struct timespec sleptts;

	clock_gettime1(clockid, &sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
}

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{
	int error;
	struct proc *p;

#define CPUCLOCK_ID_MASK (~(CLOCK_THREAD_CPUTIME_ID|CLOCK_PROCESS_CPUTIME_ID))
	if (clock_id & CLOCK_PROCESS_CPUTIME_ID) {
		pid_t pid = clock_id & CPUCLOCK_ID_MASK;
		struct timeval cputime;

		mutex_enter(proc_lock);
		p = pid == 0 ? curproc : proc_find(pid);
		if (p == NULL) {
			mutex_exit(proc_lock);
			return ESRCH;
		}
		mutex_enter(p->p_lock);
		calcru(p, /*usertime*/NULL, /*systime*/NULL, /*intrtime*/NULL,
		    &cputime);
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);

		// XXX: Perhaps create a special kauth type
		error = kauth_authorize_process(curlwp->l_cred,
		    KAUTH_PROCESS_PTRACE, p,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
		if (error)
			return error;

		TIMEVAL_TO_TIMESPEC(&cputime, ts);
		return 0;
	} else if (clock_id & CLOCK_THREAD_CPUTIME_ID) {
		struct lwp *l;
		lwpid_t lid = clock_id & CPUCLOCK_ID_MASK;
		struct bintime tm = {0, 0};

		p = curproc;
		mutex_enter(p->p_lock);
		l = lid == 0 ? curlwp : lwp_find(p, lid);
		if (l == NULL) {
			mutex_exit(p->p_lock);
			return ESRCH;
		}
		addrulwp(l, &tm);
		mutex_exit(p->p_lock);

		bintime2timespec(&tm, ts);
		return 0;
	}

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}
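
/*
 * Illustrative call (hypothetical caller code): fetching the current
 * monotonic time into a local timespec, with the usual error check:
 *
 *	struct timespec now;
 *	int error;
 *
 *	error = clock_gettime1(CLOCK_MONOTONIC, &now);
 *	if (error != 0)
 *		return error;
 *
 * CLOCK_REALTIME works the same way; other clock ids encode a pid or
 * lwpid together with CLOCK_PROCESS_CPUTIME_ID/CLOCK_THREAD_CPUTIME_ID
 * as handled above, or yield EINVAL.
 */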

/*
 * Calculate the delta and convert from struct timespec to ticks.
 */
int
ts2timo(clockid_t clock_id, int flags, struct timespec *ts,
    int *timo, struct timespec *start)
{
	int error;
	struct timespec tsd;

	flags &= TIMER_ABSTIME;
	if (start == NULL)
		start = &tsd;

	if (flags || start != &tsd)
		if ((error = clock_gettime1(clock_id, start)) != 0)
			return error;

	if (flags) {
		timespecsub(ts, start, &tsd);
		ts = &tsd;
	}

	if ((error = itimespecfix(ts)) != 0)
		return error;

	if (ts->tv_sec == 0 && ts->tv_nsec == 0)
		return ETIMEDOUT;

	*timo = tstohz(ts);
	KASSERT(*timo > 0);

	return 0;
}
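
/*
 * Illustrative use (a sketch under assumed names, not from this file):
 * converting an absolute CLOCK_MONOTONIC deadline into a tick count
 * for a timed sleep:
 *
 *	struct timespec deadline;	// absolute wake-up time
 *	int timo, error;
 *
 *	error = ts2timo(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline,
 *	    &timo, NULL);
 *	if (error == 0)
 *		error = kpause("example", false, timo, NULL);
 *
 * ETIMEDOUT from ts2timo() means the deadline has already passed.
 */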