/*	$NetBSD: kern_time.c,v 1.53 2000/08/02 12:24:11 itojun Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include "fs_nfs.h"
#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#if defined(NFS) || defined(NFSSERVER)
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs_var.h>
#endif

#include <machine/cpu.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(tv)
	struct timeval *tv;
{
	struct timeval delta;
	struct cpu_info *ci;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	timersub(tv, &time, &delta);
	if ((delta.tv_sec < 0 || delta.tv_usec < 0) && securelevel > 1) {
		splx(s);
		return (EPERM);
	}
#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {
		splx(s);
		return (EPERM);
	}
#endif
	time = *tv;
	(void) spllowersoftclock();
	timeradd(&boottime, &delta, &boottime);
	/*
	 * XXXSMP
	 * This is wrong.  We should traverse a list of all
	 * CPUs and add the delta to the runtime of those
	 * CPUs which have a process on them.
	 */
	ci = curcpu();
	timeradd(&ci->ci_schedstate.spc_runtime, &delta,
	    &ci->ci_schedstate.spc_runtime);
#	if defined(NFS) || defined(NFSSERVER)
	nqnfs_lease_updatetime(delta.tv_sec);
#	endif
	splx(s);
	resettodr();
	return (0);
}

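/*
 * clock_gettime(2): return the current value of the named clock.
 * Only CLOCK_REALTIME is supported here; its value is taken from
 * microtime().
 */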
/* ARGSUSED */
int
sys_clock_gettime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timeval atv;
	struct timespec ats;

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	microtime(&atv);
	TIMEVAL_TO_TIMESPEC(&atv,&ats);

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

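/*
 * clock_settime(2): set the named clock.  Only CLOCK_REALTIME may be
 * set, and only by the superuser; the new value is handed to settime().
 */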
/* ARGSUSED */
int
sys_clock_settime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	TIMESPEC_TO_TIMEVAL(&atv,&ats);
	if ((error = settime(&atv)))
		return (error);

	return 0;
}

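/*
 * clock_getres(2): return the resolution of the named clock; for
 * CLOCK_REALTIME this is one clock tick, i.e. 1000000000 / hz
 * nanoseconds.
 */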
int
sys_clock_getres(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	if (SCARG(uap, tp)) {
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;

		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
	}

	return error;
}

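/*
 * nanosleep(2): suspend execution for the requested interval.  The
 * sleep is implemented with tsleep() on a private wait channel; if
 * rmtp is non-NULL, the unslept portion of the interval is copied
 * back out to the caller.
 */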
/* ARGSUSED */
int
sys_nanosleep(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	static int nanowait;
	struct sys_nanosleep_args/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt;
	struct timespec rmt;
	struct timeval atv, utv;
	int error, s, timo;

	error = copyin((caddr_t)SCARG(uap, rqtp), (caddr_t)&rqt,
	    sizeof(struct timespec));
	if (error)
		return (error);

	TIMESPEC_TO_TIMEVAL(&atv,&rqt);
	if (itimerfix(&atv))
		return (EINVAL);

	s = splclock();
	timeradd(&atv,&time,&atv);
	timo = hzto(&atv);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	splx(s);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (SCARG(uap, rmtp)) {
		int error1;	/* don't clobber the tsleep() status above */

		s = splclock();
		utv = time;
		splx(s);

		timersub(&atv, &utv, &utv);
		if (utv.tv_sec < 0)
			timerclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv,&rmt);
		error1 = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap,rmtp),
		    sizeof(rmt));
		if (error1)
			return (error1);
	}

	return error;
}

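/*
 * gettimeofday(2): return the current time of day and, if requested,
 * a zero-filled struct timezone, since the kernel keeps no time zone.
 */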
/* ARGSUSED */
int
sys_gettimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

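/*
 * settimeofday(2): set the time of day (superuser only).  Attempts to
 * set the obsolete kernel time zone only produce a warning.
 */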
/* ARGSUSED */
int
sys_settimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
	/* Verify all parameters before changing time. */
	if (SCARG(uap, tv) && (error = copyin(SCARG(uap, tv),
	    &atv, sizeof(atv))))
		return (error);
	/* XXX since we don't use tz, probably no point in doing copyin. */
	if (SCARG(uap, tzp) && (error = copyin(SCARG(uap, tzp),
	    &atz, sizeof(atz))))
		return (error);
	if (SCARG(uap, tv))
		if ((error = settime(&atv)))
			return (error);
	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (SCARG(uap, tzp))
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", p->p_pid);
	return (0);
}

int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */

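/*
 * adjtime(2): skew the clock gradually toward the requested correction
 * (superuser only).  hardclock() applies tickdelta microseconds per tick
 * until timedelta has been consumed; the previously outstanding
 * correction is optionally returned in olddelta.
 */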
/* ARGSUSED */
int
sys_adjtime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	long ndelta, ntickdelta, odelta;
	int s, error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	error = copyin(SCARG(uap, delta), &atv, sizeof(struct timeval));
	if (error)
		return (error);
	if (SCARG(uap, olddelta) != NULL &&
	    uvm_useracc((caddr_t)SCARG(uap, olddelta), sizeof(struct timeval),
	    B_WRITE) == FALSE)
		return (EFAULT);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
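	/*
	 * Illustrative arithmetic (tickadj is machine-dependent, so the
	 * figures here are only an example): with tickadj = 40 us and a
	 * requested correction of 99990 us, ntickdelta is 40 and ndelta
	 * is rounded down to 99960 us, exactly 2499 ticks' worth, so
	 * tickdelta reaches zero with no overshoot.
	 */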
	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

	if (SCARG(uap, olddelta)) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		(void) copyout(&atv, SCARG(uap, olddelta),
		    sizeof(struct timeval));
	}
	return (0);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct itimerval aitv;
	int s;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	s = splclock();
	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If the time for the real time
		 * timer has passed, return 0; else return the difference
		 * between the current time and the time the timer is set
		 * to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &time, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &time, &aitv.it_value);
		}
	} else
		aitv = p->p_stats->p_timer[which];
	splx(s);
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

/* ARGSUSED */
int
sys_setitimer(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	int s, error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin(itvp, &aitv, sizeof(struct itimerval))))
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys_getitimer(p, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock();
	if (which == ITIMER_REAL) {
		callout_stop(&p->p_realit_ch);
		if (timerisset(&aitv.it_value)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			timeradd(&aitv.it_value, &time, &aitv.it_value);
			callout_reset(&p->p_realit_ch, hzto(&aitv.it_value),
			    realitexpire, p);
		}
		p->p_realtimer = aitv;
	} else
		p->p_stats->p_timer[which] = aitv;
	splx(s);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(arg)
	void *arg;
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		s = splclock();
		timeradd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval, &p->p_realtimer.it_value);
		if (timercmp(&p->p_realtimer.it_value, &time, >)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&p->p_realit_ch,
			    hzto(&p->p_realtimer.it_value), realitexpire, p);
			splx(s);
			return;
		}
		splx(s);
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(itp, usec)
	struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(lasttime, mininterval)
	struct timeval *lasttime;
	const struct timeval *mininterval;
{
	struct timeval tv, delta;
	int s, rv = 0;

	s = splclock();
	tv = mono_time;
	splx(s);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
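
/*
 * Example use of ratecheck() (a sketch only; the variable names and the
 * one-minute interval are illustrative, not taken from real callers):
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval msginterval = { 60, 0 };
 *
 *	if (ratecheck(&lastmsg, &msginterval))
 *		log(LOG_WARNING, "transfer stalled\n");
 *
 * which prints the warning at most once per minute.
 */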

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(lasttime, curpps, maxpps)
	struct timeval *lasttime;
	int *curpps;
	int maxpps;	/* maximum pps allowed */
{
	struct timeval tv, delta;
	int s, rv;

	s = splclock();
	tv = mono_time;
	splx(s);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for statistical purposes as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not certain that the assumption holds, as it depends on
	 * the *caller's* behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's
	 * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
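
/*
 * Example use of ppsratecheck() (a sketch only; the names and the limit
 * of 10 messages per second are illustrative):
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 10))
 *		log(LOG_INFO, "dropped packet\n");
 *
 * which keeps the message to roughly 10 per second while still counting
 * every event in curpps.
 */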