kern_condvar.c revision 1.4
/*	$NetBSD: kern_condvar.c,v 1.4 2007/02/26 09:20:52 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel condition variable implementation, modeled after those found in
 * Solaris, a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.4 2007/02/26 09:20:52 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/sleepq.h>

static void	cv_unsleep(struct lwp *);
static void	cv_changepri(struct lwp *, int);

/* Synchronization object operations for condition variables. */
syncobj_t cv_syncobj = {
	SOBJ_SLEEPQ_SORTED,	/* keep the sleep queue sorted by priority */
	cv_unsleep,		/* unsleep hook */
	cv_changepri,		/* priority change hook */
	sleepq_lendpri,		/* priority lending hook */
	syncobj_noowner,	/* owner lookup: CVs have no owner */
};

/*
 * cv_init:
 *
 *	Initialize a condition variable for use.
 */
void
cv_init(kcondvar_t *cv, const char *wmesg)
{

	KASSERT(wmesg != NULL);

	cv->cv_wmesg = wmesg;
	cv->cv_waiters = 0;
}

/*
 * cv_destroy:
 *
 *	Tear down a condition variable.
 */
void
cv_destroy(kcondvar_t *cv)
{

#ifdef DIAGNOSTIC
	KASSERT(cv->cv_waiters == 0 && cv->cv_wmesg != NULL);
	cv->cv_wmesg = NULL;
#endif
}
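
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a CV is normally paired for its whole lifetime with the mutex that is
 * used as its interlock.  The structure and field names below are
 * hypothetical.
 *
 *	struct example_softc {
 *		kmutex_t	sc_lock;
 *		kcondvar_t	sc_cv;
 *		int		sc_ready;
 *	} *sc = ...;
 *
 *	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	cv_init(&sc->sc_cv, "example");
 *	...
 *	cv_destroy(&sc->sc_cv);
 *	mutex_destroy(&sc->sc_lock);
 */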

/*
 * cv_enter:
 *
 *	Look up and lock the sleep queue corresponding to the given
 *	condition variable, and increment the number of waiters.
 */
static inline sleepq_t *
cv_enter(kcondvar_t *cv, kmutex_t *mtx, struct lwp *l)
{
	sleepq_t *sq;

	KASSERT(cv->cv_wmesg != NULL);

	sq = sleeptab_lookup(&sleeptab, cv);
	cv->cv_waiters++;
	sleepq_enter(sq, l);
	mutex_exit(mtx);

	return sq;
}

/*
 * cv_unsleep:
 *
 *	Remove an LWP from the condition variable and sleep queue.  This
 *	is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, when a signal is received.  Must be
 *	called with the LWP locked, and must return it unlocked.
 */
static void
cv_unsleep(struct lwp *l)
{
	uintptr_t addr;

	KASSERT(l->l_wchan != NULL);
	LOCK_ASSERT(lwp_locked(l, l->l_sleepq->sq_mutex));

	addr = (uintptr_t)l->l_wchan;
	((kcondvar_t *)addr)->cv_waiters--;

	sleepq_unsleep(l);
}

/*
 * cv_changepri:
 *
 *	Adjust the real (user) priority of an LWP blocked on a CV.
 */
static void
cv_changepri(struct lwp *l, int pri)
{
	sleepq_t *sq = l->l_sleepq;
	int opri;

	KASSERT(lwp_locked(l, sq->sq_mutex));

	opri = lwp_eprio(l);
	l->l_usrpri = pri;
	l->l_priority = sched_kpri(l);

	if (lwp_eprio(l) != opri) {
		TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);
		sleepq_insert(sq, l, l->l_syncobj);
	}
}

/*
 * cv_wait:
 *
 *	Wait non-interruptibly on a condition variable until awoken.  Must
 *	be called with the interlocking mutex held; the mutex is reacquired
 *	before returning.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;

	LOCK_ASSERT(mutex_owned(mtx));

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, 0);
		return;
	}

	sq = cv_enter(cv, mtx, l);
	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 0, &cv_syncobj);
	(void)sleepq_unblock(0, 0);
	mutex_enter(mtx);
}
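
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the caller re-checks its predicate in a loop, since a wakeup only
 * indicates that the condition may have changed.  "sc" and "sc_ready"
 * are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	... consume the now-valid state ...
 *	mutex_exit(&sc->sc_lock);
 */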

/*
 * cv_wait_sig:
 *
 *	Wait on a condition variable until awoken or a signal is received.
 *	Will also return early if the process is exiting.  Returns zero if
 *	awoken normally, ERESTART if a signal was received and the system
 *	call is restartable, or EINTR otherwise.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	LOCK_ASSERT(mutex_owned(mtx));

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, 0);

	sq = cv_enter(cv, mtx, l);
	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 1, &cv_syncobj);
	error = sleepq_unblock(0, 1);
	mutex_enter(mtx);

	return error;
}
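
/*
 * Example (editor's illustrative sketch): an interruptible wait loop.
 * A non-zero return (EINTR or ERESTART) is typically propagated so the
 * system call can be interrupted or restarted.  Names other than the
 * cv_* and mutex_* calls are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *	return error;
 */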

/*
 * cv_timedwait:
 *
 *	Wait on a condition variable until awoken or the specified timeout
 *	expires.  The timeout is specified in clock ticks (units of hz).
 *	Returns zero if awoken normally or EWOULDBLOCK if the timeout
 *	expired.
 */
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	LOCK_ASSERT(mutex_owned(mtx));

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, 0);

	sq = cv_enter(cv, mtx, l);
	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 0, &cv_syncobj);
	error = sleepq_unblock(timo, 0);
	mutex_enter(mtx);

	return error;
}
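
/*
 * Example (editor's illustrative sketch): wait for a condition with an
 * upper bound of roughly one second, using the global hz tick rate.
 * Names other than the cv_* and mutex_* calls are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = cv_timedwait(&sc->sc_cv, &sc->sc_lock, hz);
 *		if (error == EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */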

/*
 * cv_timedwait_sig:
 *
 *	Wait on a condition variable until awoken, a signal is received,
 *	or the specified timeout expires.  Will also return early if the
 *	process is exiting.  Returns zero if awoken normally, EWOULDBLOCK
 *	if the timeout expires, ERESTART if a signal was received and the
 *	system call is restartable, or EINTR otherwise.
 */
int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	LOCK_ASSERT(mutex_owned(mtx));

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, 0);

	sq = cv_enter(cv, mtx, l);
	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 1, &cv_syncobj);
	error = sleepq_unblock(timo, 1);
	mutex_enter(mtx);

	return error;
}
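
/*
 * Example (editor's illustrative sketch): callers usually distinguish a
 * timeout from an interruption.  The five-second bound and surrounding
 * names are hypothetical.
 *
 *	error = cv_timedwait_sig(&sc->sc_cv, &sc->sc_lock, 5 * hz);
 *	if (error == EWOULDBLOCK) {
 *		... the timeout expired before a wakeup ...
 *	} else if (error != 0) {
 *		... EINTR or ERESTART: abort or restart the system call ...
 *	}
 */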

/*
 * cv_signal:
 *
 *	Wake the highest priority LWP waiting on a condition variable.
 *	Must be called with the interlocking mutex held.
 */
void
cv_signal(kcondvar_t *cv)
{
	sleepq_t *sq;

	if (cv->cv_waiters == 0)
		return;

	/*
	 * cv->cv_waiters may be stale and have dropped to zero, but
	 * while holding the interlock (the mutex passed to cv_wait()
	 * and similar) we will see non-zero values when it matters.
	 */

	sq = sleeptab_lookup(&sleeptab, cv);
	if (cv->cv_waiters != 0) {
		cv->cv_waiters--;
		sleepq_wake(sq, cv, 1);
	} else
		sleepq_unlock(sq);
}
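
/*
 * Example (editor's illustrative sketch): the producer side updates the
 * shared state and signals under the same mutex that waiters pass to
 * cv_wait().  Names other than the cv_* and mutex_* calls are
 * hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = 1;
 *	cv_signal(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */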

/*
 * cv_broadcast:
 *
 *	Wake all LWPs waiting on a condition variable.  Must be called
 *	with the interlocking mutex held.
 */
void
cv_broadcast(kcondvar_t *cv)
{
	sleepq_t *sq;
	u_int cnt;

	if (cv->cv_waiters == 0)
		return;

	sq = sleeptab_lookup(&sleeptab, cv);
	if ((cnt = cv->cv_waiters) != 0) {
		cv->cv_waiters = 0;
		sleepq_wake(sq, cv, cnt);
	} else
		sleepq_unlock(sq);
}
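
/*
 * Example (editor's illustrative sketch): broadcast is the usual choice
 * when the state change matters to every waiter, such as a shutdown
 * flag.  "sc_dying" is hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = 1;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */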

/*
 * cv_wakeup:
 *
 *	Wake all LWPs waiting on a condition variable.  The interlock
 *	need not be held, but it is the caller's responsibility to
 *	ensure correct synchronization.
 */
void
cv_wakeup(kcondvar_t *cv)
{
	sleepq_t *sq;
	u_int cnt;

	sq = sleeptab_lookup(&sleeptab, cv);
	if ((cnt = cv->cv_waiters) != 0) {
		cv->cv_waiters = 0;
		sleepq_wake(sq, cv, cnt);
	} else
		sleepq_unlock(sq);
}

/*
 * cv_has_waiters:
 *
 *	For diagnostic assertions: return non-zero if a condition
 *	variable has waiters.
 */
int
cv_has_waiters(kcondvar_t *cv)
{

	/* No need to interlock here */
	return (int)cv->cv_waiters;
}
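
/*
 * Example (editor's illustrative sketch): cv_has_waiters() is intended
 * for assertions, for instance checking that no LWP is still blocked on
 * the CV before it is torn down.
 *
 *	KASSERT(!cv_has_waiters(&sc->sc_cv));
 *	cv_destroy(&sc->sc_cv);
 */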