kern_condvar.c revision 1.22.2.1
/*	$NetBSD: kern_condvar.c,v 1.22.2.1 2008/06/18 16:33:35 simonb Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel condition variable implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.22.2.1 2008/06/18 16:33:35 simonb Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	CV_SLEEPQ(cv)	((sleepq_t *)(cv)->cv_opaque)
#define	CV_DEBUG_P(cv)	((cv)->cv_wmesg != nodebug)
#define	CV_RA		((uintptr_t)__builtin_return_address(0))

static u_int	cv_unsleep(lwp_t *, bool);
static void	cv_wakeup_one(kcondvar_t *);
static void	cv_wakeup_all(kcondvar_t *);

static syncobj_t cv_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	cv_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

lockops_t cv_lockops = {
	"Condition variable",
	LOCKOPS_CV,
	NULL
};

static const char deadcv[] = "deadcv";
static const char nodebug[] = "nodebug";

/*
 * cv_init:
 *
 *	Initialize a condition variable for use.
 */
void
cv_init(kcondvar_t *cv, const char *wmesg)
{
#ifdef LOCKDEBUG
	bool dodebug;

	dodebug = LOCKDEBUG_ALLOC(cv, &cv_lockops,
	    (uintptr_t)__builtin_return_address(0));
	if (!dodebug) {
		/* XXX This will break vfs_lockf. */
		wmesg = nodebug;
	}
#endif
	KASSERT(wmesg != NULL);
	cv->cv_wmesg = wmesg;
	sleepq_init(CV_SLEEPQ(cv));
}

/*
 * cv_destroy:
 *
 *	Tear down a condition variable.
 */
void
cv_destroy(kcondvar_t *cv)
{

	LOCKDEBUG_FREE(CV_DEBUG_P(cv), cv);
#ifdef DIAGNOSTIC
	KASSERT(cv_is_valid(cv));
	cv->cv_wmesg = deadcv;
#endif
}
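
/*
 * Usage sketch (illustrative only, not part of the original source):
 * a condition variable is normally paired with the mutex that protects
 * the condition it describes.  The softc "sc" and its members below are
 * hypothetical names.
 *
 *	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	cv_init(&sc->sc_cv, "example");
 *	...
 *	cv_destroy(&sc->sc_cv);
 *	mutex_destroy(&sc->sc_lock);
 */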

/*
 * cv_enter:
 *
 *	Look up and lock the sleep queue corresponding to the given
 *	condition variable, and increment the number of waiters.
 */
static inline void
cv_enter(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l)
{
	sleepq_t *sq;
	kmutex_t *mp;

	KASSERT(cv_is_valid(cv));
	KASSERT(!cpu_intr_p());
	KASSERT((l->l_pflag & LP_INTR) == 0 || panicstr != NULL);

	LOCKDEBUG_LOCKED(CV_DEBUG_P(cv), cv, mtx, CV_RA, 0);

	l->l_kpriority = true;
	mp = sleepq_hashlock(cv);
	sq = CV_SLEEPQ(cv);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, cv, cv->cv_wmesg, &cv_syncobj);
	mutex_exit(mtx);
	KASSERT(cv_has_waiters(cv));
}

/*
 * cv_exit:
 *
 *	After resuming execution, check to see if we have been restarted
 *	as a result of cv_signal().  If we have, but cannot take the
 *	wakeup (because of, e.g., a pending Unix signal or timeout) then
 *	try to ensure that another LWP sees it.  This is necessary because
 *	there may be multiple waiters, and at least one should take the
 *	wakeup if possible.
 */
static inline int
cv_exit(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l, const int error)
{

	mutex_enter(mtx);
	if (__predict_false(error != 0))
		cv_signal(cv);

	LOCKDEBUG_UNLOCKED(CV_DEBUG_P(cv), cv, CV_RA, 0);
	KASSERT(cv_is_valid(cv));

	return error;
}

/*
 * cv_unsleep:
 *
 *	Remove an LWP from the condition variable and sleep queue.  This
 *	is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, when a signal is received.  Must be
 *	called with the LWP locked, and must return it unlocked.
 */
static u_int
cv_unsleep(lwp_t *l, bool cleanup)
{
	kcondvar_t *cv;

	cv = (kcondvar_t *)(uintptr_t)l->l_wchan;

	KASSERT(l->l_wchan == (wchan_t)cv);
	KASSERT(l->l_sleepq == CV_SLEEPQ(cv));
	KASSERT(cv_is_valid(cv));
	KASSERT(cv_has_waiters(cv));

	return sleepq_unsleep(l, cleanup);
}

/*
 * cv_wait:
 *
 *	Wait non-interruptibly on a condition variable until awoken.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
	lwp_t *l = curlwp;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	(void)sleepq_block(0, false);
	(void)cv_exit(cv, mtx, l, 0);
}
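
/*
 * Usage sketch (illustrative only, not part of the original source):
 * because the interlock is dropped while the LWP sleeps, the condition
 * must be re-checked in a loop after cv_wait() returns.  "sc", "sc_lock",
 * "sc_cv" and "sc_ready" are hypothetical names.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	sc->sc_ready = false;
 *	mutex_exit(&sc->sc_lock);
 */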

/*
 * cv_wait_sig:
 *
 *	Wait on a condition variable until awoken or a signal is received.
 *	Will also return early if the process is exiting.  Returns zero if
 *	awoken normally, ERESTART if a signal was received and the system
 *	call is restartable, or EINTR otherwise.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{
	lwp_t *l = curlwp;
	int error;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	error = sleepq_block(0, true);
	return cv_exit(cv, mtx, l, error);
}
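
/*
 * Usage sketch (illustrative only, not part of the original source):
 * an interruptible waiter should propagate a non-zero return (ERESTART
 * or EINTR) to its caller.  The surrounding names are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
 *		if (error != 0) {
 *			mutex_exit(&sc->sc_lock);
 *			return error;
 *		}
 *	}
 *	mutex_exit(&sc->sc_lock);
 */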

/*
 * cv_timedwait:
 *
 *	Wait on a condition variable until awoken or the specified timeout
 *	expires.  Returns zero if awoken normally or EWOULDBLOCK if the
 *	timeout expired.
 */
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
{
	lwp_t *l = curlwp;
	int error;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	error = sleepq_block(timo, false);
	return cv_exit(cv, mtx, l, error);
}
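
/*
 * Usage sketch (illustrative only, not part of the original source):
 * the timeout is given in ticks, so a value of "hz" waits roughly one
 * second; EWOULDBLOCK indicates that the timeout fired before the
 * condition variable was signalled.  The surrounding names are
 * hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_done) {
 *		error = cv_timedwait(&sc->sc_cv, &sc->sc_lock, hz);
 *		if (error == EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */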

/*
 * cv_timedwait_sig:
 *
 *	Wait on a condition variable until awoken, the timeout expires, or
 *	a signal is received.  Will also return early if the process is
 *	exiting.  Returns zero if awoken normally, EWOULDBLOCK if the
 *	timeout expires, ERESTART if a signal was received and the system
 *	call is restartable, or EINTR otherwise.
 */
int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
{
	lwp_t *l = curlwp;
	int error;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	error = sleepq_block(timo, true);
	return cv_exit(cv, mtx, l, error);
}
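
/*
 * Usage sketch (illustrative only, not part of the original source):
 * as with cv_timedwait(), but any non-zero return (EWOULDBLOCK, ERESTART
 * or EINTR) ends the wait here.  The surrounding names are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_done) {
 *		error = cv_timedwait_sig(&sc->sc_cv, &sc->sc_lock, hz / 2);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */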

/*
 * cv_signal:
 *
 *	Wake the highest priority LWP waiting on a condition variable.
 *	Must be called with the interlocking mutex held.
 */
void
cv_signal(kcondvar_t *cv)
{

	/* LOCKDEBUG_WAKEUP(CV_DEBUG_P(cv), cv, CV_RA); */
	KASSERT(cv_is_valid(cv));

	if (__predict_false(!TAILQ_EMPTY(CV_SLEEPQ(cv))))
		cv_wakeup_one(cv);
}
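
/*
 * Usage sketch (illustrative only, not part of the original source):
 * the producer updates the condition and signals while holding the same
 * interlock that the waiters pass to cv_wait().  The surrounding names
 * are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = true;
 *	cv_signal(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */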

/*
 * cv_wakeup_one:
 *
 *	Slow path for cv_signal(): remove the first (highest priority)
 *	LWP from the sleep queue and make it runnable.
 */
static void __noinline
cv_wakeup_one(kcondvar_t *cv)
{
	sleepq_t *sq;
	kmutex_t *mp;
	int swapin;
	lwp_t *l;

	KASSERT(cv_is_valid(cv));

	mp = sleepq_hashlock(cv);
	sq = CV_SLEEPQ(cv);
	l = TAILQ_FIRST(sq);
	if (l == NULL) {
		mutex_spin_exit(mp);
		return;
	}
	KASSERT(l->l_sleepq == sq);
	KASSERT(l->l_mutex == mp);
	KASSERT(l->l_wchan == cv);
	swapin = sleepq_remove(sq, l);
	mutex_spin_exit(mp);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();

	KASSERT(cv_is_valid(cv));
}

/*
 * cv_broadcast:
 *
 *	Wake all LWPs waiting on a condition variable.  Must be called
 *	with the interlocking mutex held.
 */
void
cv_broadcast(kcondvar_t *cv)
{

	/* LOCKDEBUG_WAKEUP(CV_DEBUG_P(cv), cv, CV_RA); */
	KASSERT(cv_is_valid(cv));

	if (__predict_false(!TAILQ_EMPTY(CV_SLEEPQ(cv))))
		cv_wakeup_all(cv);
}
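
/*
 * Usage sketch (illustrative only, not part of the original source):
 * cv_broadcast() suits the case where every waiter must re-evaluate the
 * condition, for example when tearing down shared state.  The
 * surrounding names are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */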

/*
 * cv_wakeup_all:
 *
 *	Slow path for cv_broadcast() and cv_wakeup(): remove every LWP
 *	from the sleep queue and make them all runnable.
 */
static void __noinline
cv_wakeup_all(kcondvar_t *cv)
{
	sleepq_t *sq;
	kmutex_t *mp;
	int swapin;
	lwp_t *l, *next;

	KASSERT(cv_is_valid(cv));

	mp = sleepq_hashlock(cv);
	swapin = 0;
	sq = CV_SLEEPQ(cv);
	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		KASSERT(l->l_wchan == cv);
		next = TAILQ_NEXT(l, l_sleepchain);
		swapin |= sleepq_remove(sq, l);
	}
	mutex_spin_exit(mp);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();

	KASSERT(cv_is_valid(cv));
}

/*
 * cv_wakeup:
 *
 *	Wake all LWPs waiting on a condition variable.  For cases
 *	where the address may be waited on by mtsleep()/tsleep().
 *	Not a documented call.
 */
void
cv_wakeup(kcondvar_t *cv)
{

	cv_wakeup_all(cv);
	wakeup(cv);
}

/*
 * cv_has_waiters:
 *
 *	For diagnostic assertions: return non-zero if a condition
 *	variable has waiters.
 */
bool
cv_has_waiters(kcondvar_t *cv)
{

	return !TAILQ_EMPTY(CV_SLEEPQ(cv));
}

/*
 * cv_is_valid:
 *
 *	For diagnostic assertions: return non-zero if a condition
 *	variable appears to be valid.  No locks need be held.
 */
bool
cv_is_valid(kcondvar_t *cv)
{

	return cv->cv_wmesg != deadcv && cv->cv_wmesg != NULL;
}