      1 /*	$NetBSD: kern_timeout.c,v 1.21.4.2 2007/06/16 19:02:55 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2003, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
      40  * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
      41  * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
     42  * All rights reserved.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  *
     48  * 1. Redistributions of source code must retain the above copyright
     49  *    notice, this list of conditions and the following disclaimer.
     50  * 2. Redistributions in binary form must reproduce the above copyright
     51  *    notice, this list of conditions and the following disclaimer in the
     52  *    documentation and/or other materials provided with the distribution.
     53  * 3. The name of the author may not be used to endorse or promote products
     54  *    derived from this software without specific prior written permission.
     55  *
     56  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
     57  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
     58  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
     59  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     60  * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     61  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     62  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     63  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     64  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     65  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 __KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.21.4.2 2007/06/16 19:02:55 ad Exp $");
     70 
     71 /*
     72  * Adapted from OpenBSD: kern_timeout.c,v 1.15 2002/12/08 04:21:07 art Exp,
     73  * modified to match NetBSD's pre-existing callout API.
     74  */
     75 
     76 #include <sys/param.h>
     77 #include <sys/systm.h>
     78 #include <sys/kernel.h>
     79 #include <sys/lock.h>
     80 #include <sys/callout.h>
     81 #include <sys/mutex.h>
     82 
     83 #ifdef DDB
     84 #include <machine/db_machdep.h>
     85 #include <ddb/db_interface.h>
     86 #include <ddb/db_access.h>
     87 #include <ddb/db_sym.h>
     88 #include <ddb/db_output.h>
     89 #endif
     90 
     91 /*
     92  * Timeouts are kept in a hierarchical timing wheel. The c_time is the value
     93  * of the global variable "hardclock_ticks" when the timeout should be called.
     94  * There are four levels with 256 buckets each. See 'Scheme 7' in
     95  * "Hashed and Hierarchical Timing Wheels: Efficient Data Structures for
     96  * Implementing a Timer Facility" by George Varghese and Tony Lauck.
     97  */
     98 #define BUCKETS 1024
     99 #define WHEELSIZE 256
    100 #define WHEELMASK 255
    101 #define WHEELBITS 8
    102 
    103 static struct callout_circq timeout_wheel[BUCKETS];	/* Queues of timeouts */
    104 static struct callout_circq timeout_todo;		/* Worklist */
    105 
    106 #define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)
    107 
    108 #define BUCKET(rel, abs)						\
    109     (((rel) <= (1 << (2*WHEELBITS)))					\
    110     	? ((rel) <= (1 << WHEELBITS))					\
    111             ? &timeout_wheel[MASKWHEEL(0, (abs))]			\
    112             : &timeout_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
    113         : ((rel) <= (1 << (3*WHEELBITS)))				\
    114             ? &timeout_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]		\
    115             : &timeout_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
    116 
    117 #define MOVEBUCKET(wheel, time)						\
    118     CIRCQ_APPEND(&timeout_todo,						\
    119         &timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
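
/*
 * Worked example of the bucket selection above (the numbers are purely
 * illustrative): the wheel level is chosen from the relative time and
 * the index within that wheel from the absolute expiry time.  Suppose
 * hardclock_ticks is 1000 when softclock() re-buckets a callout:
 *
 *	c_time = 1005, rel =   5 <= 256:	wheel 0,
 *		BUCKET() = &timeout_wheel[1005 & 255] = &timeout_wheel[237]
 *	c_time = 1300, rel = 300 <= 65536:	wheel 1,
 *		BUCKET() = &timeout_wheel[((1300 >> 8) & 255) + 256]
 *			 = &timeout_wheel[261]
 *
 * Higher wheels are coarser; MOVEBUCKET() periodically dumps a whole
 * bucket onto timeout_todo so that softclock() can re-sort its entries
 * onto lower wheels as they come due.
 */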
    120 
    121 /*
    122  * All wheels are locked with the same lock (which must also block out all
    123  * interrupts).
    124  */
    125 kmutex_t callout_mutex;
    126 
    127 /*
    128  * Circular queue definitions.
    129  */
    130 
    131 #define CIRCQ_INIT(list)						\
    132 do {									\
    133         (list)->cq_next_l = (list);					\
    134         (list)->cq_prev_l = (list);					\
    135 } while (/*CONSTCOND*/0)
    136 
    137 #define CIRCQ_INSERT(elem, list)					\
    138 do {									\
    139         (elem)->cq_prev_e = (list)->cq_prev_e;				\
    140         (elem)->cq_next_l = (list);					\
    141         (list)->cq_prev_l->cq_next_l = (elem);				\
    142         (list)->cq_prev_l = (elem);					\
    143 } while (/*CONSTCOND*/0)
    144 
    145 #define CIRCQ_APPEND(fst, snd)						\
    146 do {									\
    147         if (!CIRCQ_EMPTY(snd)) {					\
    148                 (fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
    149                 (snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
    150                 (snd)->cq_prev_l->cq_next_l = (fst);			\
    151                 (fst)->cq_prev_l = (snd)->cq_prev_l;			\
    152                 CIRCQ_INIT(snd);					\
    153         }								\
    154 } while (/*CONSTCOND*/0)
    155 
    156 #define CIRCQ_REMOVE(elem)						\
    157 do {									\
    158         (elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
    159         (elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
    160 } while (/*CONSTCOND*/0)
    161 
    162 #define CIRCQ_FIRST(list)	((list)->cq_next_e)
    163 #define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
    164 #define CIRCQ_LAST(elem,list)	((elem)->cq_next_l == (list))
    165 #define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))
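
/*
 * A note on the accessors used above: struct callout_circq lives in
 * <sys/callout.h>, and the "_l" and "_e" members are assumed to be two
 * views of the same link, "_l" as a plain queue node (struct
 * callout_circq *) and "_e" as the enclosing struct callout *.  That is
 * what lets list heads and elements share the macros above.
 */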
    166 
    167 /*
    168  * Some of the "math" in here is a bit tricky.
    169  *
    170  * We have to beware of wrapping ints.
     171  * We use the fact that any element added to the queue must be added
     172  * with a positive time.  That means that any element `c' on the queue
     173  * cannot be scheduled to time out further in the future than INT_MAX
     174  * ticks, but c->c_time itself can be positive or negative, so comparing
     175  * it directly with anything is dangerous.  The only way to use c->c_time
     176  * predictably is to compute how far in the future `c' will time out:
     177  * "c->c_time - hardclock_ticks".  The result is always positive for
     178  * future timeouts and zero or negative for due timeouts.
    179  */
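
/*
 * Worked example of the subtraction trick (illustrative; it assumes the
 * two's-complement int wraparound the kernel relies on): suppose
 * hardclock_ticks has reached 0x7ffffff0 and a callout is set with
 * to_ticks = 0x20.  Then
 *
 *	c->c_time = 0x7ffffff0 + 0x20 = 0x80000010	(negative as an int)
 *	c->c_time - hardclock_ticks = 0x20 = 32		(positive: still in
 *							 the future)
 *
 * so the subtraction gives the right answer even though a direct
 * comparison such as "c->c_time > hardclock_ticks" would not.
 */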
    180 
    181 #ifdef CALLOUT_EVENT_COUNTERS
    182 static struct evcnt callout_ev_late;
    183 #endif
    184 
    185 /*
    186  * callout_barrier:
    187  *
    188  *	If the callout is running on another CPU, busy wait until it
    189  *	completes.
    190  */
    191 static inline void
    192 callout_barrier(struct callout *c)
    193 {
    194 #ifdef MULTIPROCESSOR
    195 	struct cpu_info *ci, *ci_cur;
    196 
    197 	KASSERT(mutex_owned(&callout_mutex));
    198 
    199 	/*
    200 	 * The callout may have already been dispatched to run on the
    201 	 * current CPU.  It's possible for us to arrive here before it
    202 	 * actually runs because the SPL is dropped from IPL_SCHED in
    203 	 * softclock(), and IPL_SOFTCLOCK is low priority. We can't deal
    204 	 * with that race easily, so for now the caller must deal with
    205 	 * it.
    206 	 */
    207 #if 1
    208 	ci_cur = curcpu();	/* XXXgcc get around alpha problem */
    209 	while ((ci = c->c_oncpu) != NULL && ci != ci_cur &&
    210 	    ci->ci_data.cpu_callout == c) {
    211 #else
    212 	while ((ci = c->c_oncpu) != NULL && ci != curcpu() &&
    213 	    ci->ci_data.cpu_callout == c) {
    214 #endif
    215 		mutex_spin_exit(&callout_mutex);
    216 		while (ci->ci_data.cpu_callout == c)
    217 			;
    218 		mutex_spin_enter(&callout_mutex);
    219 	}
    220 	c->c_oncpu = NULL;
    221 #endif
    222 }
    223 
    224 /*
    225  * callout_startup:
    226  *
    227  *	Initialize the callout facility, called at system startup time.
    228  */
    229 void
    230 callout_startup(void)
    231 {
    232 	int b;
    233 
    234 	CIRCQ_INIT(&timeout_todo);
    235 	for (b = 0; b < BUCKETS; b++)
    236 		CIRCQ_INIT(&timeout_wheel[b]);
    237 	mutex_init(&callout_mutex, MUTEX_SPIN, IPL_SCHED);
    238 
    239 #ifdef CALLOUT_EVENT_COUNTERS
    240 	evcnt_attach_dynamic(&callout_ev_late, EVCNT_TYPE_MISC,
    241 	    NULL, "callout", "late");
    242 #endif
    243 }
    244 
    245 /*
    246  * callout_init:
    247  *
    248  *	Initialize a callout structure.
    249  */
    250 void
    251 callout_init(struct callout *c)
    252 {
    253 
    254 	memset(c, 0, sizeof(*c));
    255 }
    256 
    257 /*
    258  * callout_reset:
    259  *
    260  *	Reset a callout structure with a new function and argument, and
    261  *	schedule it to run.
    262  */
    263 void
    264 callout_reset(struct callout *c, int to_ticks, void (*func)(void *), void *arg)
    265 {
    266 	int old_time;
    267 
    268 	KASSERT(to_ticks >= 0);
    269 
    270 	mutex_spin_enter(&callout_mutex);
    271 
    272 	callout_barrier(c);
    273 
     274 	/* Initialize the time here; it won't change. */
    275 	old_time = c->c_time;
    276 	c->c_time = to_ticks + hardclock_ticks;
    277 	c->c_flags &= ~(CALLOUT_FIRED|CALLOUT_INVOKING);
    278 
    279 	c->c_func = func;
    280 	c->c_arg = arg;
    281 
    282 	/*
    283 	 * If this timeout is already scheduled and now is moved
    284 	 * earlier, reschedule it now. Otherwise leave it in place
    285 	 * and let it be rescheduled later.
    286 	 */
    287 	if ((c->c_flags & CALLOUT_PENDING) != 0) {
    288 		if (c->c_time - old_time < 0) {
    289 			CIRCQ_REMOVE(&c->c_list);
    290 			CIRCQ_INSERT(&c->c_list, &timeout_todo);
    291 		}
    292 	} else {
    293 		c->c_flags |= CALLOUT_PENDING;
    294 		CIRCQ_INSERT(&c->c_list, &timeout_todo);
    295 	}
    296 
    297 	mutex_spin_exit(&callout_mutex);
    298 }
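
/*
 * Typical usage sketch ("foo_softc", "sc_tick_ch" and "foo_tick" are
 * hypothetical driver names, not anything defined in this file):
 *
 *	callout_init(&sc->sc_tick_ch);
 *	callout_reset(&sc->sc_tick_ch, hz, foo_tick, sc);
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... periodic work ...
 *		callout_reset(&sc->sc_tick_ch, hz, foo_tick, sc);
 *	}
 *
 * The handler is called from softclock() with the kernel lock held (see
 * below), so it is free to re-arm itself as shown.
 */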
    299 
    300 /*
    301  * callout_schedule:
    302  *
    303  *	Schedule a callout to run.  The function and argument must
    304  *	already be set in the callout structure.
    305  */
    306 void
    307 callout_schedule(struct callout *c, int to_ticks)
    308 {
    309 	int old_time;
    310 
    311 	KASSERT(to_ticks >= 0);
    312 
    313 	mutex_spin_enter(&callout_mutex);
    314 
    315 	callout_barrier(c);
    316 
     317 	/* Initialize the time here; it won't change. */
    318 	old_time = c->c_time;
    319 	c->c_time = to_ticks + hardclock_ticks;
    320 	c->c_flags &= ~(CALLOUT_FIRED|CALLOUT_INVOKING);
    321 
    322 	/*
    323 	 * If this timeout is already scheduled and now is moved
    324 	 * earlier, reschedule it now. Otherwise leave it in place
    325 	 * and let it be rescheduled later.
    326 	 */
    327 	if ((c->c_flags & CALLOUT_PENDING) != 0) {
    328 		if (c->c_time - old_time < 0) {
    329 			CIRCQ_REMOVE(&c->c_list);
    330 			CIRCQ_INSERT(&c->c_list, &timeout_todo);
    331 		}
    332 	} else {
    333 		c->c_flags |= CALLOUT_PENDING;
    334 		CIRCQ_INSERT(&c->c_list, &timeout_todo);
    335 	}
    336 
    337 	mutex_spin_exit(&callout_mutex);
    338 }
    339 
    340 /*
    341  * callout_stop:
    342  *
    343  *	Cancel a pending callout.
    344  */
    345 void
    346 callout_stop(struct callout *c)
    347 {
    348 
    349 	mutex_spin_enter(&callout_mutex);
    350 
    351 	callout_barrier(c);
    352 
    353 	if ((c->c_flags & CALLOUT_PENDING) != 0)
    354 		CIRCQ_REMOVE(&c->c_list);
    355 
    356 	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);
    357 
    358 	mutex_spin_exit(&callout_mutex);
    359 }
    360 
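/*
 * callout_setfunc:
 *
 *	Set the function and argument to be used by a subsequent
 *	callout_schedule(), without scheduling the callout.
 */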
    361 void
    362 callout_setfunc(struct callout *c, void (*func)(void *), void *arg)
    363 {
    364 
    365 	mutex_spin_enter(&callout_mutex);
    366 	c->c_func = func;
    367 	c->c_arg = arg;
    368 	mutex_spin_exit(&callout_mutex);
    369 }
    370 
    371 bool
    372 callout_pending(struct callout *c)
    373 {
    374 	bool rv;
    375 
    376 	mutex_spin_enter(&callout_mutex);
    377 	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
    378 	mutex_spin_exit(&callout_mutex);
    379 
    380 	return rv;
    381 }
    382 
    383 bool
    384 callout_expired(struct callout *c)
    385 {
    386 	bool rv;
    387 
    388 	mutex_spin_enter(&callout_mutex);
    389 	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
    390 	mutex_spin_exit(&callout_mutex);
    391 
    392 	return rv;
    393 }
    394 
    395 bool
    396 callout_active(struct callout *c)
    397 {
    398 	bool rv;
    399 
    400 	mutex_spin_enter(&callout_mutex);
    401 	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
    402 	mutex_spin_exit(&callout_mutex);
    403 
    404 	return rv;
    405 }
    406 
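/*
 * callout_invoking / callout_ack:
 *
 *	softclock() sets CALLOUT_INVOKING just before calling the handler;
 *	callout_invoking() tests the flag and callout_ack() clears it.
 *	The pair is presumably used by handlers and the code stopping them
 *	to detect that a callout had already been dispatched.
 */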
    407 bool
    408 callout_invoking(struct callout *c)
    409 {
    410 	bool rv;
    411 
    412 	mutex_spin_enter(&callout_mutex);
    413 	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
    414 	mutex_spin_exit(&callout_mutex);
    415 
    416 	return rv;
    417 }
    418 
    419 void
    420 callout_ack(struct callout *c)
    421 {
    422 
    423 	mutex_spin_enter(&callout_mutex);
    424 	c->c_flags &= ~CALLOUT_INVOKING;
    425 	mutex_spin_exit(&callout_mutex);
    426 }
    427 
    428 /*
    429  * This is called from hardclock() once every tick.
    430  * We return !0 if we need to schedule a softclock.
    431  */
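
/*
 * Cascade timing, for illustration (this follows from MOVEBUCKET above):
 * wheel 0's current bucket is dumped onto timeout_todo every tick,
 * wheel 1's every 256 ticks (whenever the low 8 bits of hardclock_ticks
 * are zero), wheel 2's every 65536 ticks and wheel 3's every 2^24 ticks.
 * softclock() then re-buckets anything that is not yet due.
 */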
    432 int
    433 callout_hardclock(void)
    434 {
    435 	int needsoftclock;
    436 
    437 	mutex_spin_enter(&callout_mutex);
    438 
    439 	MOVEBUCKET(0, hardclock_ticks);
    440 	if (MASKWHEEL(0, hardclock_ticks) == 0) {
    441 		MOVEBUCKET(1, hardclock_ticks);
    442 		if (MASKWHEEL(1, hardclock_ticks) == 0) {
    443 			MOVEBUCKET(2, hardclock_ticks);
    444 			if (MASKWHEEL(2, hardclock_ticks) == 0)
    445 				MOVEBUCKET(3, hardclock_ticks);
    446 		}
    447 	}
    448 
    449 	needsoftclock = !CIRCQ_EMPTY(&timeout_todo);
    450 	mutex_spin_exit(&callout_mutex);
    451 
    452 	return needsoftclock;
    453 }
    454 
    455 /* ARGSUSED */
    456 void
    457 softclock(void *v)
    458 {
    459 #ifdef MULTIPROCESSOR
    460 	struct cpu_info *ci = curcpu();
    461 #endif
    462 	struct callout *c;
    463 	void (*func)(void *);
    464 	void *arg;
    465 
    466 	mutex_spin_enter(&callout_mutex);
    467 
    468 	while (!CIRCQ_EMPTY(&timeout_todo)) {
    469 		c = CIRCQ_FIRST(&timeout_todo);
    470 		CIRCQ_REMOVE(&c->c_list);
    471 
     472 		/* If due, run it; otherwise insert it into the right bucket. */
    473 		if (c->c_time - hardclock_ticks > 0) {
    474 			CIRCQ_INSERT(&c->c_list,
    475 			    BUCKET((c->c_time - hardclock_ticks), c->c_time));
    476 		} else {
    477 #ifdef CALLOUT_EVENT_COUNTERS
    478 			if (c->c_time - hardclock_ticks < 0)
    479 				callout_ev_late.ev_count++;
    480 #endif
    481 			c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
    482 			    (CALLOUT_FIRED|CALLOUT_INVOKING);
    483 
    484 			func = c->c_func;
    485 			arg = c->c_arg;
    486 
    487 #ifdef MULTIPROCESSOR
    488 			c->c_oncpu = ci;
    489 			ci->ci_data.cpu_callout = c;
    490 #endif
    491 			mutex_spin_exit(&callout_mutex);
    492 			KERNEL_LOCK(1, curlwp);
    493 			(*func)(arg);
    494 			KERNEL_UNLOCK_ONE(curlwp);
    495 			mutex_spin_enter(&callout_mutex);
    496 #ifdef MULTIPROCESSOR
    497 			ci->ci_data.cpu_callout = NULL;
    498 			/*
     499 			 * We can't touch 'c' here because it may
     500 			 * already have been freed.
    501 			 */
    502 #endif
    503 		}
    504 	}
    505 
    506 	mutex_spin_exit(&callout_mutex);
    507 }
    508 
    509 #ifdef DDB
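
/*
 * db_show_callout_bucket:
 *
 *	Print each callout in one bucket: ticks until it expires, its
 *	wheel and bucket indices, its argument and its handler's symbol
 *	name (or "?" if the symbol cannot be found).
 */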
    510 static void
    511 db_show_callout_bucket(struct callout_circq *bucket)
    512 {
    513 	struct callout *c;
    514 	db_expr_t offset;
    515 	const char *name;
    516 	static char question[] = "?";
    517 
    518 	if (CIRCQ_EMPTY(bucket))
    519 		return;
    520 
    521 	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
    522 		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
    523 		    &offset);
    524 		name = name ? name : question;
    525 #ifdef _LP64
    526 #define	POINTER_WIDTH	"%16lx"
    527 #else
    528 #define	POINTER_WIDTH	"%8lx"
    529 #endif
    530 		db_printf("%9d %2d/%-4d " POINTER_WIDTH "  %s\n",
    531 		    c->c_time - hardclock_ticks,
    532 		    (int)((bucket - timeout_wheel) / WHEELSIZE),
    533 		    (int)(bucket - timeout_wheel), (u_long) c->c_arg, name);
    534 
    535 		if (CIRCQ_LAST(&c->c_list, bucket))
    536 			break;
    537 	}
    538 }
    539 
    540 void
    541 db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
    542 {
    543 	int b;
    544 
    545 	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
    546 #ifdef _LP64
    547 	db_printf("    ticks  wheel               arg  func\n");
    548 #else
    549 	db_printf("    ticks  wheel       arg  func\n");
    550 #endif
    551 
    552 	/*
    553 	 * Don't lock the callwheel; all the other CPUs are paused
    554 	 * anyhow, and we might be called in a circumstance where
    555 	 * some other CPU was paused while holding the lock.
    556 	 */
    557 
    558 	db_show_callout_bucket(&timeout_todo);
    559 	for (b = 0; b < BUCKETS; b++)
    560 		db_show_callout_bucket(&timeout_wheel[b]);
    561 }
    562 #endif /* DDB */
    563