/*	$NetBSD: kern_timeout.c,v 1.21.4.3 2007/07/01 21:37:34 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin (at) openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art (at) openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.21.4.3 2007/07/01 21:37:34 ad Exp $");

/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of the global variable "hardclock_ticks" when the timeout should
 * be called.  There are four levels with 256 buckets each.  See 'Scheme 7'
 * in "Hashed and Hierarchical Timing Wheels: Efficient Data Structures
 * for Implementing a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time.  That means that any element `to' on the queue cannot
 * be scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way is
 * when we calculate how far in the future `to' will timeout - "c->c_time
 * - hardclock_ticks".  The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
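
/*
 * For example, suppose hardclock_ticks is within a few ticks of INT_MAX
 * and a timeout is scheduled 100 ticks from now.  c->c_time then wraps
 * to a value near INT_MIN, so a direct comparison such as
 * "c->c_time > hardclock_ticks" would wrongly report the timeout as
 * already due.  The difference "c->c_time - hardclock_ticks", however,
 * wraps back around to 100, which is why every test below is phrased as
 * a sign test on that difference.
 */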

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/callout.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/intr.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8
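
/*
 * Four wheels of WHEELSIZE (256) buckets each make up the BUCKETS (1024)
 * buckets below.  Wheel N is indexed by bits N*WHEELBITS through
 * N*WHEELBITS+7 of the expiration time (see MASKWHEEL), so each wheel
 * covers a span 256 times longer than the wheel beneath it.
 */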

/* The following funkyness is to appease gcc3's strict aliasing. */
struct callout_circq {
	/* next element */
	union {
		struct callout_impl	*elem;
		struct callout_circq	*list;
	} cq_next;
	/* previous element */
	union {
		struct callout_impl	*elem;
		struct callout_circq	*list;
	} cq_prev;
};
#define cq_next_e	cq_next.elem
#define cq_prev_e	cq_prev.elem
#define cq_next_l	cq_next.list
#define cq_prev_l	cq_prev.list

typedef struct callout_impl {
	struct callout_circq c_list;	/* linkage on queue */
	void	(*c_func)(void *);	/* function to call */
	void	*c_arg;			/* function argument */
	void	*c_oncpu;		/* non-NULL while running */
	void	*c_onlwp;		/* non-NULL while running */
	int	c_time;			/* when callout fires */
	u_int	c_flags;		/* state of this entry */
	u_int	c_runwait;		/* number of waiters */
	u_int	c_magic;		/* magic number */
} callout_impl_t;
#define CALLOUT_MAGIC	0x11deeba1

static struct callout_circq timeout_wheel[BUCKETS];	/* Queues of timeouts */
static struct callout_circq timeout_todo;		/* Worklist */

#define MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
	    ? &timeout_wheel[MASKWHEEL(0, (abs))]			\
	    : &timeout_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
	: ((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &timeout_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]		\
	    : &timeout_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
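
/*
 * BUCKET() picks the wheel according to how far in the future ("rel"
 * ticks) a timeout lies, then indexes that wheel with the matching byte
 * of the absolute expiration time ("abs").
 */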

#define MOVEBUCKET(wheel, time)						\
    CIRCQ_APPEND(&timeout_todo,						\
	&timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem, list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))
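
/*
 * Both the queue heads above and the c_list member of each callout are
 * struct callout_circq, so an empty queue is simply a head that points
 * back at itself (CIRCQ_EMPTY), and CIRCQ_LAST tests whether an element
 * is the final one by checking whether its next pointer is the head.
 */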

static void	callout_softclock(void *);

/*
 * All wheels are locked with the same lock (which must also block out
 * all interrupts).  Eventually this should become per-CPU.
 */
kmutex_t	callout_lock;
sleepq_t	callout_sleepq;
void		*callout_si;

static struct evcnt callout_ev_late;
static struct evcnt callout_ev_block;

/*
 * callout_barrier:
 *
 *	If the callout is already running, wait until it completes.
 *	XXX This should do priority inheritance.
 */
static void
callout_barrier(callout_impl_t *c)
{
	extern syncobj_t sleep_syncobj;
	struct cpu_info *ci;
	struct lwp *l;

	l = curlwp;

	if ((c->c_flags & CALLOUT_MPSAFE) == 0) {
		/*
		 * Note: we must be called with the kernel lock held,
		 * as we use it to synchronize with callout_softclock().
		 */
		ci = c->c_oncpu;
		ci->ci_data.cpu_callout_cancel = c;
		return;
	}

	while ((ci = c->c_oncpu) != NULL && ci->ci_data.cpu_callout == c) {
		KASSERT(l->l_wchan == NULL);

		ci->ci_data.cpu_callout_nwait++;
		callout_ev_block.ev_count++;

		lwp_lock(l);
		lwp_unlock_to(l, &callout_lock);
		sleepq_enqueue(&callout_sleepq, sched_kpri(l), ci,
		    "callout", &sleep_syncobj);
		sleepq_block(0, false);
		mutex_spin_enter(&callout_lock);
	}
}

/*
 * callout_running:
 *
 *	Return true if callout 'c' is currently executing on another LWP.
 */
static inline bool
callout_running(callout_impl_t *c)
{
	struct cpu_info *ci;

	if ((ci = c->c_oncpu) == NULL)
		return false;
	if (ci->ci_data.cpu_callout != c)
		return false;
	if (c->c_onlwp == curlwp)
		return false;
	return true;
}

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 */
void
callout_startup(void)
{
	int b;

	KASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

	CIRCQ_INIT(&timeout_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&timeout_wheel[b]);

	mutex_init(&callout_lock, MUTEX_SPIN, IPL_SCHED);
	sleepq_init(&callout_sleepq, &callout_lock);

	evcnt_attach_dynamic(&callout_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", "late");
	evcnt_attach_dynamic(&callout_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", "block waiting");
}

/*
 * callout_startup2:
 *
 *	Complete initialization once soft interrupts are available.
 */
void
callout_startup2(void)
{

	callout_si = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    callout_softclock, NULL);
	if (callout_si == NULL)
		panic("callout_startup2: unable to register softclock intr");
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	memset(c, 0, sizeof(*c));
	c->c_flags = flags;
	c->c_magic = CALLOUT_MAGIC;
}
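
/*
 * A minimal usage sketch (illustrative only; "expire_func" and "sc" are
 * placeholder names for a caller's handler and its argument):
 *
 *	callout_t c;
 *
 *	callout_init(&c, 0);
 *	callout_reset(&c, hz, expire_func, sc);    fire in about one second
 *	...
 *	callout_stop(&c);
 *	callout_destroy(&c);
 */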

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	if (c->c_oncpu != NULL) {
		KASSERT(
		    ((struct cpu_info *)c->c_oncpu)->ci_data.cpu_callout != c);
	}
	KASSERT(c->c_magic == CALLOUT_MAGIC);

	c->c_magic = 0;
}


/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	mutex_spin_enter(&callout_lock);

	/* Initialize the time here; it won't change. */
	old_time = c->c_time;
	c->c_time = to_ticks + hardclock_ticks;
	c->c_flags &= ~CALLOUT_FIRED;

	c->c_func = func;
	c->c_arg = arg;

	/*
	 * If this timeout is already scheduled and is now moved to an
	 * earlier time, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &timeout_todo);
		}
	} else {
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &timeout_todo);
	}

	mutex_spin_exit(&callout_lock);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(c->c_func != NULL);

	mutex_spin_enter(&callout_lock);

	/* Initialize the time here; it won't change. */
	old_time = c->c_time;
	c->c_time = to_ticks + hardclock_ticks;
	c->c_flags &= ~CALLOUT_FIRED;

	/*
	 * If this timeout is already scheduled and is now moved to an
	 * earlier time, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &timeout_todo);
		}
	} else {
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &timeout_todo);
	}

	mutex_spin_exit(&callout_lock);
}

/*
 * callout_stop:
 *
 *	Cancel a pending callout.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);

	if (callout_running(c))
		callout_barrier(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	mutex_spin_exit(&callout_lock);

	return expired;
}

void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(&callout_lock);
}

bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(&callout_lock);
}

/*
 * This is called from hardclock() once every tick.
 * We schedule callout_softclock() if there is work
 * to be done.
 */
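/*
 * Wheel 0 is emptied onto the todo worklist on every tick.  Whenever the
 * low-order byte of hardclock_ticks rolls over to zero, the matching
 * bucket of the next wheel up is cascaded onto the worklist as well, and
 * so on up the hierarchy.  callout_softclock() then runs whatever is due
 * and re-sorts the rest into their new, lower-level buckets.
 */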
void
callout_hardclock(void)
{
	int needsoftclock;

	mutex_spin_enter(&callout_lock);

	MOVEBUCKET(0, hardclock_ticks);
	if (MASKWHEEL(0, hardclock_ticks) == 0) {
		MOVEBUCKET(1, hardclock_ticks);
		if (MASKWHEEL(1, hardclock_ticks) == 0) {
			MOVEBUCKET(2, hardclock_ticks);
			if (MASKWHEEL(2, hardclock_ticks) == 0)
				MOVEBUCKET(3, hardclock_ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&timeout_todo);
	mutex_spin_exit(&callout_lock);

	if (needsoftclock)
		softint_schedule(callout_si);
}

/* ARGSUSED */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct cpu_info *ci;
	void (*func)(void *);
	void *arg;
	u_int mpsafe, count;
	lwp_t *l;

	l = curlwp;
	ci = l->l_cpu;

	mutex_spin_enter(&callout_lock);

	while (!CIRCQ_EMPTY(&timeout_todo)) {
		c = CIRCQ_FIRST(&timeout_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		CIRCQ_REMOVE(&c->c_list);

		/* If due, run it; otherwise insert it into the right bucket. */
		if (c->c_time - hardclock_ticks > 0) {
			CIRCQ_INSERT(&c->c_list,
			    BUCKET((c->c_time - hardclock_ticks), c->c_time));
		} else {
			if (c->c_time - hardclock_ticks < 0)
				callout_ev_late.ev_count++;

			c->c_flags ^= (CALLOUT_PENDING | CALLOUT_FIRED);
			mpsafe = (c->c_flags & CALLOUT_MPSAFE);
			func = c->c_func;
			arg = c->c_arg;
			c->c_oncpu = ci;
			c->c_onlwp = l;

			mutex_spin_exit(&callout_lock);
			if (!mpsafe) {
				KERNEL_LOCK(1, curlwp);
				if (ci->ci_data.cpu_callout_cancel != c)
					(*func)(arg);
				KERNEL_UNLOCK_ONE(curlwp);
			} else
				(*func)(arg);
			mutex_spin_enter(&callout_lock);

			/*
			 * We can't touch 'c' here because it might be
			 * freed already.  If any LWPs are waiting for the
			 * callout to complete, awaken them.
			 */
			ci->ci_data.cpu_callout_cancel = NULL;
			ci->ci_data.cpu_callout = NULL;
			if ((count = ci->ci_data.cpu_callout_nwait) != 0) {
				ci->ci_data.cpu_callout_nwait = 0;
				/* sleepq_wake() drops the lock. */
				sleepq_wake(&callout_sleepq, ci, count);
				mutex_spin_enter(&callout_lock);
			}
		}
	}

	mutex_spin_exit(&callout_lock);
}

#ifdef DDB
static void
db_show_callout_bucket(struct callout_circq *bucket)
{
	callout_impl_t *c;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";

	if (CIRCQ_EMPTY(bucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
#ifdef _LP64
#define POINTER_WIDTH	"%16lx"
#else
#define POINTER_WIDTH	"%8lx"
#endif
		db_printf("%9d %2d/%-4d " POINTER_WIDTH " %s\n",
		    c->c_time - hardclock_ticks,
		    (int)((bucket - timeout_wheel) / WHEELSIZE),
		    (int)(bucket - timeout_wheel), (u_long) c->c_arg, name);

		if (CIRCQ_LAST(&c->c_list, bucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	int b;

	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
#ifdef _LP64
	db_printf("    ticks  wheel              arg  func\n");
#else
	db_printf("    ticks  wheel      arg  func\n");
#endif

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */

	db_show_callout_bucket(&timeout_todo);
	for (b = 0; b < BUCKETS; b++)
		db_show_callout_bucket(&timeout_wheel[b]);
}
#endif /* DDB */