/*	$NetBSD: kern_timeout.c,v 1.52.2.1 2018/01/26 15:41:12 martin Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.52.2.1 2018/01/26 15:41:12 martin Exp $");

/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of c_cpu->cc_ticks when the timeout should be called.  There are
 * four levels with 256 buckets each.  See 'Scheme 7' in "Hashed and
 * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
 * a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time.  That means that any element `to' on the queue cannot
 * be scheduled to time out further in the future than INT_MAX, but
 * c->c_time can be positive or negative, so comparing it with anything
 * is dangerous.  The only way we can use the c->c_time value in any
 * predictable way is when we calculate how far in the future `to' will
 * time out - "c->c_time - c->c_cpu->cc_ticks".  The result will always
 * be positive for future timeouts and 0 or negative for due timeouts.
 */
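
/*
 * Illustrative sketch (an addition, not in the original source): with
 * a 32-bit tick counter, only the signed difference against cc_ticks
 * is meaningful.  Suppose cc_ticks has reached 0x7ffffff0 and a
 * timeout is scheduled 0x20 ticks into the future:
 *
 *	c->c_time = 0x20 + cc->cc_ticks;	wraps to 0x80000010
 *	delta = c->c_time - cc->cc_ticks;	0x20 > 0: still pending
 *
 * A direct comparison such as "c->c_time > cc->cc_ticks" would give
 * the wrong answer here, since the wrapped c_time is negative.
 */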

#define _CALLOUT_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_cpu.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8

#define MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(cc, rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
	    ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
	    : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
	: ((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
	    : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
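
/*
 * Worked example (an addition, not in the original source): a timeout
 * due in rel = 300 ticks satisfies 256 < rel <= 65536, so BUCKET()
 * places it on the second wheel, at index MASKWHEEL(1, abs) +
 * WHEELSIZE, i.e. hashed on bits 8-15 of its absolute expiry time.
 * A rel of at most 256 lands on the first wheel, hashed on bits 0-7.
 */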

#define MOVEBUCKET(cc, wheel, time)					\
    CIRCQ_APPEND(&(cc)->cc_todo,					\
	&(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem, list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))
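
/*
 * Usage sketch (an addition, not in the original source): the list
 * head and its elements share one node layout, so traversal runs from
 * CIRCQ_FIRST() until CIRCQ_LAST() reports that the next link points
 * back at the head:
 *
 *	callout_impl_t *c;
 *
 *	if (!CIRCQ_EMPTY(&cc->cc_todo)) {
 *		for (c = CIRCQ_FIRST(&cc->cc_todo);;
 *		    c = CIRCQ_NEXT(&c->c_list)) {
 *			... examine c ...
 *			if (CIRCQ_LAST(&c->c_list, &cc->cc_todo))
 *				break;
 *		}
 *	}
 *
 * db_show_callout_bucket() below walks buckets the same way.
 */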

struct callout_cpu {
	kmutex_t	*cc_lock;
	sleepq_t	cc_sleepq;
	u_int		cc_nwait;
	u_int		cc_ticks;
	lwp_t		*cc_lwp;
	callout_impl_t	*cc_active;
	callout_impl_t	*cc_cancel;
	struct evcnt	cc_ev_late;
	struct evcnt	cc_ev_block;
	struct callout_circq cc_todo;		/* Worklist */
	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
	char		cc_name1[12];
	char		cc_name2[12];
};

#ifndef CRASH

static void	callout_softclock(void *);
static struct callout_cpu callout_cpu0;
static void *callout_sih;

static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(cc == c->c_cpu))
			return lock;
		mutex_spin_exit(lock);
	}
}
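
/*
 * Note (an addition, not in the original source): the retry loop above
 * exists because an unlocked callout may migrate between CPUs (see
 * callout_schedule_locked()).  Between loading c->c_cpu and acquiring
 * that CPU's lock, the callout can move, so the binding is re-checked
 * under the lock and the acquisition retried on a mismatch.
 */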

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}

/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.  This must be quick, so we fill
 *	only the minimum number of fields.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	cc = curcpu()->ci_data.cpu_callout;
	c->c_func = NULL;
	c->c_magic = CALLOUT_MAGIC;
	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
		c->c_flags = flags;
		c->c_cpu = cc;
		return;
	}
	c->c_flags = flags | CALLOUT_BOUND;
	c->c_cpu = &callout_cpu0;
}
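
/*
 * Usage sketch (an addition, not in the original source): a typical
 * MP-safe consumer initializes a callout once and then arms it.  The
 * names sc, sc_tick and sc_tick_ch here are hypothetical:
 *
 *	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
 *	callout_reset(&sc->sc_tick_ch, hz, sc_tick, sc);
 *
 * This schedules sc_tick(sc) to run in about one second (hz ticks).
 */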

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERTMSG((c->c_flags & CALLOUT_PENDING) == 0,
	    "callout %p: c_func (%p) c_flags (%#x) destroyed from %p",
	    c, c->c_func, c->c_flags, __builtin_return_address(0));
	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
	KASSERTMSG(c->c_magic == CALLOUT_MAGIC,
	    "callout %p: c_magic (%#x) != CALLOUT_MAGIC (%#x)",
	    c, c->c_magic, CALLOUT_MAGIC);
	c->c_magic = 0;
}

/*
 * callout_schedule_locked:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.  Must be called with
 *	callout_lock.
 */
static void
callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
{
	struct callout_cpu *cc, *occ;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_func != NULL);

	/* Initialize the time here; it won't change. */
	occ = c->c_cpu;
	c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and is now being moved
	 * earlier, reschedule it now.  Otherwise leave it in place and
	 * let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		/* Leave on existing CPU. */
		old_time = c->c_time;
		c->c_time = to_ticks + occ->cc_ticks;
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
		}
		mutex_spin_exit(lock);
		return;
	}

	cc = curcpu()->ci_data.cpu_callout;
	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
	    !mutex_tryenter(cc->cc_lock)) {
		/* Leave on existing CPU. */
		c->c_time = to_ticks + occ->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
	} else {
		/* Move to this CPU. */
		c->c_cpu = cc;
		c->c_time = to_ticks + cc->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
		mutex_spin_exit(cc->cc_lock);
	}
	mutex_spin_exit(lock);
}

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}
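
/*
 * Note (an addition, not in the original source): callout_stop() only
 * deschedules.  If the handler is already running it does not wait for
 * it to finish, which is what makes it usable from interrupt context.
 * When the caller must know the handler has completed, for example
 * before freeing its argument, use callout_halt() below.
 */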

/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());
	KASSERT(interlock == NULL || mutex_owned(interlock));

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
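
/*
 * Usage sketch (an addition, not in the original source): a detach
 * path that shares a lock with its handler can pass that lock as the
 * interlock, so the wait does not deadlock.  sc, sc_lock, sc_dying
 * and sc_tick_ch are hypothetical names:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	callout_halt(&sc->sc_tick_ch, &sc->sc_lock);
 *	mutex_exit(&sc->sc_lock);
 *	callout_destroy(&sc->sc_tick_ch);
 *
 * If a wait is necessary, sc_lock is dropped and re-acquired inside
 * callout_halt(), exactly as described above.
 */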

#ifdef notyet
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
#endif

void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}

bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}
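
/*
 * Usage sketch (an addition, not in the original source): the
 * INVOKING flag marks a callout whose handler dispatch has begun;
 * callers can poll callout_invoking() to detect a race with
 * callout_stop(), and the handler clears the flag with callout_ack().
 * A periodic handler (sc_tick and sc_tick_ch are hypothetical)
 * typically acknowledges first and then re-arms itself:
 *
 *	static void
 *	sc_tick(void *arg)
 *	{
 *		struct softc *sc = arg;
 *
 *		callout_ack(&sc->sc_tick_ch);
 *		... periodic work ...
 *		callout_schedule(&sc->sc_tick_ch, hz);
 *	}
 */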

/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}
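
/*
 * Worked example (an addition, not in the original source): when
 * cc_ticks advances to 0x100, MASKWHEEL(0, 0x100) == 0, so besides
 * the first-wheel bucket, MOVEBUCKET(cc, 1, 0x100) flushes the
 * second-wheel bucket MASKWHEEL(1, 0x100) + WHEELSIZE = 1 + 256 onto
 * cc_todo.  Timeouts cascading down this way are re-bucketed (or run,
 * if due) by the soft interrupt handler below.
 */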

/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done.  Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct callout_cpu *cc;
	void (*func)(void *);
	void *arg;
	int mpsafe, count, ticks, delta;
	lwp_t *l;

	l = curlwp;
	KASSERT(l->l_cpu == curcpu());
	cc = l->l_cpu->ci_data.cpu_callout;

	mutex_spin_enter(cc->cc_lock);
	cc->cc_lwp = l;
	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
		c = CIRCQ_FIRST(&cc->cc_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT(c->c_cpu == cc);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due, run it; otherwise insert it into the right bucket. */
		ticks = cc->cc_ticks;
		delta = c->c_time - ticks;
		if (delta > 0) {
			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
			continue;
		}
		if (delta < 0)
			cc->cc_ev_late.ev_count++;

		c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
		    (CALLOUT_FIRED | CALLOUT_INVOKING);
		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
		func = c->c_func;
		arg = c->c_arg;
		cc->cc_active = c;

		mutex_spin_exit(cc->cc_lock);
		KASSERT(func != NULL);
		if (__predict_false(!mpsafe)) {
			KERNEL_LOCK(1, NULL);
			(*func)(arg);
			KERNEL_UNLOCK_ONE(NULL);
		} else
			(*func)(arg);
		mutex_spin_enter(cc->cc_lock);

		/*
		 * We can't touch 'c' here because it might already have
		 * been freed.  If any LWPs are waiting for the callout
		 * to complete, awaken them.
		 */
		cc->cc_active = NULL;
		if ((count = cc->cc_nwait) != 0) {
			cc->cc_nwait = 0;
			/* sleepq_wake() drops the lock. */
			sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
			mutex_spin_enter(cc->cc_lock);
		}
	}
	cc->cc_lwp = NULL;
	mutex_spin_exit(cc->cc_lock);
}
#endif

#ifdef DDB
static void
db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *kbucket,
    struct callout_circq *bucket)
{
	callout_impl_t *c, ci;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";
	int b;

	if (CIRCQ_LAST(bucket, kbucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_read_bytes((db_addr_t)c, sizeof(ci), (char *)&ci);
		c = &ci;
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
		b = (bucket - cc->cc_wheel);
		if (b < 0)
			b = -WHEELSIZE;
		db_printf("%9d %2d/%-4d %16lx  %s\n",
		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
		    (u_long)c->c_arg, name);
		if (CIRCQ_LAST(&c->c_list, kbucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	struct callout_cpu *cc, ccb;
	struct cpu_info *ci, cib;
	int b;

#ifndef CRASH
	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
#endif
	db_printf("    ticks  wheel               arg  func\n");

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */
	for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
		db_read_bytes((db_addr_t)ci, sizeof(cib), (char *)&cib);
		cc = cib.ci_data.cpu_callout;
		db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
		db_show_callout_bucket(&ccb, &cc->cc_todo, &ccb.cc_todo);
	}
	for (b = 0; b < BUCKETS; b++) {
		for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
			db_read_bytes((db_addr_t)ci, sizeof(cib), (char *)&cib);
			cc = cib.ci_data.cpu_callout;
			db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
			db_show_callout_bucket(&ccb, &cc->cc_wheel[b],
			    &ccb.cc_wheel[b]);
		}
	}
}
#endif	/* DDB */