/*	$NetBSD: kern_timeout.c,v 1.43.8.1 2009/05/13 17:21:57 jym Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.43.8.1 2009/05/13 17:21:57 jym Exp $");

/*
 * Timeouts are kept in a hierarchical timing wheel. The c_time is the
 * value of c_cpu->cc_ticks when the timeout should be called. There are
 * four levels with 256 buckets each. See 'Scheme 7' in "Hashed and
 * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
 * a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky. We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time. That means that any element `to' on the queue cannot
 * be scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way is
 * when we calculate how far in the future `to' will timeout - "c->c_time
 * - c->c_cpu->cc_ticks". The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
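
/*
 * For example, with 32-bit ticks: if cc_ticks is 0x80000005 and a timeout
 * was scheduled 10 ticks earlier with to_ticks == 100, then c_time is
 * 0x8000005f. Neither value is meaningful on its own, but the difference
 * c_time - cc_ticks == 90, so the timeout is correctly seen as being due
 * 90 ticks in the future despite the wrap.
 */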

#define	_CALLOUT_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8

#define MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(cc, rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
	    ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
	    : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
	: ((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
	    : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
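
/*
 * For example, a timeout due 300 ticks from now (256 < rel <= 65536) goes
 * into the second wheel, in the bucket indexed by bits 8..15 of its
 * absolute expiry time. When the hardclock cascade later dumps that
 * bucket onto cc_todo, callout_softclock() re-sorts it into the first
 * wheel if it still has time remaining.
 */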

#define MOVEBUCKET(cc, wheel, time)					\
    CIRCQ_APPEND(&(cc)->cc_todo,					\
	&(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem,list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))

static void	callout_softclock(void *);

struct callout_cpu {
	kmutex_t	*cc_lock;
	sleepq_t	cc_sleepq;
	u_int		cc_nwait;
	u_int		cc_ticks;
	lwp_t		*cc_lwp;
	callout_impl_t	*cc_active;
	callout_impl_t	*cc_cancel;
	struct evcnt	cc_ev_late;
	struct evcnt	cc_ev_block;
	struct callout_circq cc_todo;		/* Worklist */
	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
	char		cc_name1[12];
	char		cc_name2[12];
};

static struct callout_cpu callout_cpu0;
static void *callout_sih;

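/*
 * callout_lock:
 *
 *	Lock the per-CPU state that a callout is currently associated
 *	with and return the lock taken.  Since a callout can migrate
 *	between CPUs, re-check c_cpu after acquiring the lock and retry
 *	if it changed in the meantime.
 */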
static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(cc == c->c_cpu))
			return lock;
		mutex_spin_exit(lock);
	}
}

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}

/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		if (cc == NULL)
			panic("callout_init_cpu (1)");
		cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.  This must be quick, so we fill
 *	only the minimum number of fields.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	cc = curcpu()->ci_data.cpu_callout;
	c->c_func = NULL;
	c->c_magic = CALLOUT_MAGIC;
	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
		c->c_flags = flags;
		c->c_cpu = cc;
		return;
	}
	c->c_flags = flags | CALLOUT_BOUND;
	c->c_cpu = &callout_cpu0;
}

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	c->c_magic = 0;
}
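
/*
 * Typical lifecycle of a callout, for reference (illustrative sketch
 * only; "sc" and foo_tick() are hypothetical driver names):
 *
 *	callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
 *	callout_reset(&sc->sc_tick, hz, foo_tick, sc);	(arm for ~1 second)
 *	...
 *	callout_halt(&sc->sc_tick, NULL);	(cancel; wait if running)
 *	callout_destroy(&sc->sc_tick);
 */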

/*
 * callout_schedule_locked:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.  Must be called with
 *	callout_lock.
 */
static void
callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
{
	struct callout_cpu *cc, *occ;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_func != NULL);

	/* Initialize the time here, it won't change. */
	occ = c->c_cpu;
	c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and now is moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		/* Leave on existing CPU. */
		old_time = c->c_time;
		c->c_time = to_ticks + occ->cc_ticks;
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
		}
		mutex_spin_exit(lock);
		return;
	}

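	/*
	 * Not pending.  Leave the callout on its current CPU if it is
	 * bound, already queued here, or this CPU's lock cannot be
	 * taken without blocking; otherwise migrate it to the current
	 * CPU.  Using mutex_tryenter() avoids lock ordering problems
	 * between the two per-CPU locks.
	 */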
	cc = curcpu()->ci_data.cpu_callout;
	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
	    !mutex_tryenter(cc->cc_lock)) {
		/* Leave on existing CPU. */
		c->c_time = to_ticks + occ->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
	} else {
		/* Move to this CPU. */
		c->c_cpu = cc;
		c->c_time = to_ticks + cc->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
		mutex_spin_exit(cc->cc_lock);
	}
	mutex_spin_exit(lock);
}

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}

/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
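
/*
 * For example, a driver whose callout handler takes sc->sc_lock would
 * halt it with the interlock form, so that sc_lock is dropped while we
 * wait and the handler cannot deadlock against us (illustrative sketch
 * only; "sc" is a hypothetical softc):
 *
 *	mutex_enter(&sc->sc_lock);
 *	...
 *	callout_halt(&sc->sc_tick, &sc->sc_lock);
 *	mutex_exit(&sc->sc_lock);
 */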

#ifdef notyet
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
#endif

void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}

bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}

/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

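	/*
	 * Dump the current bucket of the lowest wheel onto the work
	 * list.  Each time a wheel's index wraps to zero (every 256,
	 * 65536, ... ticks), cascade the matching bucket of the next
	 * wheel down as well; callout_softclock() re-sorts any entries
	 * that are not yet due.
	 */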
	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}

/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done.  Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct callout_cpu *cc;
	void (*func)(void *);
	void *arg;
	int mpsafe, count, ticks, delta;
	lwp_t *l;

	l = curlwp;
	KASSERT(l->l_cpu == curcpu());
	cc = l->l_cpu->ci_data.cpu_callout;

	mutex_spin_enter(cc->cc_lock);
	cc->cc_lwp = l;
	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
		c = CIRCQ_FIRST(&cc->cc_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT(c->c_cpu == cc);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due, run it; otherwise insert it into the right bucket. */
		ticks = cc->cc_ticks;
		delta = c->c_time - ticks;
		if (delta > 0) {
			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
			continue;
		}
		if (delta < 0)
			cc->cc_ev_late.ev_count++;

		c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
		    (CALLOUT_FIRED | CALLOUT_INVOKING);
		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
		func = c->c_func;
		arg = c->c_arg;
		cc->cc_active = c;

		mutex_spin_exit(cc->cc_lock);
		KASSERT(func != NULL);
		if (__predict_false(!mpsafe)) {
			KERNEL_LOCK(1, NULL);
			(*func)(arg);
			KERNEL_UNLOCK_ONE(NULL);
		} else
			(*func)(arg);
		mutex_spin_enter(cc->cc_lock);

		/*
		 * We can't touch 'c' here because it might be
		 * freed already.  If there are LWPs waiting for the
		 * callout to complete, awaken them.
		 */
		cc->cc_active = NULL;
		if ((count = cc->cc_nwait) != 0) {
			cc->cc_nwait = 0;
			/* sleepq_wake() drops the lock. */
			sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
			mutex_spin_enter(cc->cc_lock);
		}
	}
	cc->cc_lwp = NULL;
	mutex_spin_exit(cc->cc_lock);
}

#ifdef DDB
static void
db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *bucket)
{
	callout_impl_t *c;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";
	int b;

	if (CIRCQ_EMPTY(bucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
		b = (bucket - cc->cc_wheel);
		if (b < 0)
			b = -WHEELSIZE;
		db_printf("%9d %2d/%-4d %16lx %s\n",
		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
		    (u_long)c->c_arg, name);
		if (CIRCQ_LAST(&c->c_list, bucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	CPU_INFO_ITERATOR cii;
	struct callout_cpu *cc;
	struct cpu_info *ci;
	int b;

	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
	db_printf("    ticks  wheel               arg  func\n");

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cc = ci->ci_data.cpu_callout;
		db_show_callout_bucket(cc, &cc->cc_todo);
	}
	for (b = 0; b < BUCKETS; b++) {
		for (CPU_INFO_FOREACH(cii, ci)) {
			cc = ci->ci_data.cpu_callout;
			db_show_callout_bucket(cc, &cc->cc_wheel[b]);
		}
	}
}
#endif /* DDB */