      1 /*	$NetBSD: kern_turnstile.c,v 1.3 2007/02/15 20:21:13 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Turnstiles are described in detail in:
     41  *
     42  *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
     43  *	    Richard McDougall.
     44  *
     45  * Turnstiles are kept in a hash table.  There are likely to be many more
     46  * synchronisation objects than there are threads.  Since a thread can block
     47  * on only one lock at a time, we only need one turnstile per thread, and
     48  * so they are allocated at thread creation time.
     49  *
     50  * When a thread decides it needs to block on a lock, it looks up the
     51  * active turnstile for that lock.  If no active turnstile exists, then
     52  * the thread lends its turnstile to the lock.  If there is already an
     53  * active turnstile for the lock, the thread places its turnstile on a
     54  * list of free turnstiles, and references the active one instead.
     55  *
     56  * The act of looking up the turnstile acquires an interlock on the sleep
     57  * queue.  If a thread decides it doesn't need to block after all, then this
     58  * interlock must be released by explicitly aborting the turnstile
     59  * operation.
     60  *
     61  * When a thread is awakened, it needs to get its turnstile back.  If there
     62  * are still other threads waiting in the active turnstile, the thread
     63  * grabs a free turnstile off the free list.  Otherwise, it can take back
     64  * the active turnstile from the lock (thus deactivating the turnstile).
     65  *
     66  * Turnstiles are the place to do priority inheritance.  However, we do
     67  * not currently implement that.
     68  */
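        /*
         * An illustrative sketch: a hypothetical sleep lock might drive this
         * interface roughly as follows.  The lock type and the
         * example_lock_try() / example_lock_release_locked() helpers are
         * invented for illustration only.
         *
         *	void
         *	example_lock_acquire(struct example_lock *lk)
         *	{
         *		turnstile_t *ts;
         *
         *		for (;;) {
         *			ts = turnstile_lookup(lk);
         *			if (example_lock_try(lk)) {
         *				turnstile_exit(lk);
         *				return;
         *			}
         *			turnstile_block(ts, TS_WRITER_Q, lk);
         *		}
         *	}
         *
         *	void
         *	example_lock_release(struct example_lock *lk)
         *	{
         *		turnstile_t *ts;
         *
         *		ts = turnstile_lookup(lk);
         *		example_lock_release_locked(lk);
         *		if (ts == NULL)
         *			turnstile_exit(lk);
         *		else
         *			turnstile_wakeup(ts, TS_WRITER_Q,
         *			    TS_WAITERS(ts, TS_WRITER_Q), NULL);
         *	}
         *
         * turnstile_lookup() returns with the chain lock held; a caller that
         * then decides not to sleep must drop it with turnstile_exit().
         * turnstile_block() accepts a NULL turnstile (the first waiter lends
         * its own), and turnstile_wakeup() releases the chain lock on behalf
         * of the caller.
         */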
     69 
     70 #include <sys/cdefs.h>
     71 __KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.3 2007/02/15 20:21:13 ad Exp $");
     72 
     73 #include "opt_lockdebug.h"
     74 #include "opt_multiprocessor.h"
     75 #include "opt_ktrace.h"
     76 #include "opt_ddb.h"
     77 
     78 #include <sys/param.h>
     79 #include <sys/lock.h>
     80 #include <sys/pool.h>
     81 #include <sys/proc.h>
     82 #include <sys/sleepq.h>
     83 #include <sys/systm.h>
     84 
     85 #include <uvm/uvm_extern.h>
     86 
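        /*
         * Hash on the address of the synchronisation object.  The shift
         * discards the low-order address bits, which carry little
         * information since lock objects are word aligned or better.
         */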
     87 #define	TS_HASH_SIZE	64
     88 #define	TS_HASH_MASK	(TS_HASH_SIZE - 1)
     89 #define	TS_HASH(obj)	(((uintptr_t)(obj) >> 3) & TS_HASH_MASK)
     90 
     91 tschain_t	turnstile_tab[TS_HASH_SIZE];
     92 
     93 struct pool turnstile_pool;
     94 struct pool_cache turnstile_cache;
     95 
     96 int	turnstile_ctor(void *, void *, int);
     97 void	turnstile_unsleep(struct lwp *);
     98 void	turnstile_changepri(struct lwp *, int);
     99 
    100 extern turnstile_t turnstile0;
    101 
    102 syncobj_t turnstile_syncobj = {
    103 	SOBJ_SLEEPQ_FIFO,
    104 	turnstile_unsleep,
    105 	turnstile_changepri
    106 };
    107 
    108 /*
    109  * turnstile_init:
    110  *
    111  *	Initialize the turnstile mechanism.
    112  */
    113 void
    114 turnstile_init(void)
    115 {
    116 	tschain_t *tc;
    117 	int i;
    118 
    119 	for (i = 0; i < TS_HASH_SIZE; i++) {
    120 		tc = &turnstile_tab[i];
    121 		LIST_INIT(&tc->tc_chain);
    122 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    123 		mutex_init(&tc->tc_mutexstore, MUTEX_SPIN, IPL_SCHED);
    124 		tc->tc_mutex = &tc->tc_mutexstore;
    125 #else
    126 		tc->tc_mutex = &sched_mutex;
    127 #endif
    128 	}
    129 
    130 	pool_init(&turnstile_pool, sizeof(turnstile_t), 0, 0, 0,
    131 	    "tstilepl", &pool_allocator_nointr);
    132 	pool_cache_init(&turnstile_cache, &turnstile_pool,
    133 	    turnstile_ctor, NULL, NULL);
    134 
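        	/*
        	 * turnstile0 is allocated statically rather than from the
        	 * pool cache, so construct it by hand.
        	 */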
    135 	(void)turnstile_ctor(NULL, &turnstile0, 0);
    136 }
    137 
    138 /*
    139  * turnstile_ctor:
    140  *
    141  *	Constructor for turnstiles.
    142  */
    143 int
    144 turnstile_ctor(void *arg, void *obj, int flags)
    145 {
    146 	turnstile_t *ts = obj;
    147 
    148 	memset(ts, 0, sizeof(*ts));
    149 	sleepq_init(&ts->ts_sleepq[TS_READER_Q], NULL);
    150 	sleepq_init(&ts->ts_sleepq[TS_WRITER_Q], NULL);
    151 	return (0);
    152 }
    153 
    154 /*
    155  * turnstile_remove:
    156  *
    157  *	Remove an LWP from a turnstile sleep queue and wake it.
    158  */
    159 static inline int
    160 turnstile_remove(turnstile_t *ts, struct lwp *l, sleepq_t *sq)
    161 {
    162 	turnstile_t *nts;
    163 
    164 	KASSERT(l->l_ts == ts);
    165 
    166 	/*
    167 	 * This LWP is no longer using the active turnstile.
    168 	 * Find an inactive one on the free list to give to it.
    169 	 */
    170 	if ((nts = ts->ts_free) != NULL) {
    171 		KASSERT(TS_ALL_WAITERS(ts) > 1);
    172 		l->l_ts = nts;
    173 		ts->ts_free = nts->ts_free;
    174 		nts->ts_free = NULL;
    175 	} else {
    176 		/*
    177 		 * If the free list is empty, this is the last
    178 		 * waiter.
    179 		 */
    180 		KASSERT(TS_ALL_WAITERS(ts) == 1);
    181 		LIST_REMOVE(ts, ts_chain);
    182 	}
    183 
    184 	return sleepq_remove(sq, l);
    185 }
    186 
    187 /*
    188  * turnstile_lookup:
    189  *
    190  *	Look up the turnstile for the specified lock.  This acquires and
    191  *	holds the turnstile chain lock (sleep queue interlock).
    192  */
    193 turnstile_t *
    194 turnstile_lookup(wchan_t obj)
    195 {
    196 	turnstile_t *ts;
    197 	tschain_t *tc;
    198 
    199 	tc = &turnstile_tab[TS_HASH(obj)];
    200 	mutex_spin_enter(tc->tc_mutex);
    201 
    202 	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
    203 		if (ts->ts_obj == obj)
    204 			return (ts);
    205 
    206 	/*
    207 	 * No turnstile yet for this lock.  No problem, turnstile_block()
    208 	 * handles this by fetching the turnstile from the blocking thread.
    209 	 */
    210 	return (NULL);
    211 }
    212 
    213 /*
    214  * turnstile_exit:
    215  *
    216  *	Abort a turnstile operation.
    217  */
    218 void
    219 turnstile_exit(wchan_t obj)
    220 {
    221 	tschain_t *tc;
    222 
    223 	tc = &turnstile_tab[TS_HASH(obj)];
    224 	mutex_spin_exit(tc->tc_mutex);
    225 }
    226 
    227 /*
    228  * turnstile_block:
    229  *
    230  *	Enter an object into the turnstile chain and prepare the current
    231  *	LWP for sleep.
    232  */
    233 void
    234 turnstile_block(turnstile_t *ts, int q, wchan_t obj)
    235 {
    236 	struct lwp *l;
    237 	turnstile_t *ots;
    238 	tschain_t *tc;
    239 	sleepq_t *sq;
    240 
    241 	tc = &turnstile_tab[TS_HASH(obj)];
    242 	l = curlwp;
    243 
    244 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
    245 	KASSERT(mutex_owned(tc->tc_mutex));
    246 	KASSERT(l != NULL && l->l_ts != NULL);
    247 
    248 	if (ts == NULL) {
    249 		/*
    250 		 * We are the first thread to wait for this object;
    251 		 * lend our turnstile to it.
    252 		 */
    253 		ts = l->l_ts;
    254 		KASSERT(TS_ALL_WAITERS(ts) == 0);
    255 		KASSERT(TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) &&
    256 			TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
    257 		ts->ts_obj = obj;
    258 		ts->ts_sleepq[TS_READER_Q].sq_mutex = tc->tc_mutex;
    259 		ts->ts_sleepq[TS_WRITER_Q].sq_mutex = tc->tc_mutex;
    260 		LIST_INSERT_HEAD(&tc->tc_chain, ts, ts_chain);
    261 	} else {
    262 		/*
    263 		 * Object already has a turnstile.  Put our turnstile
    264 		 * onto the free list, and reference the existing
    265 		 * turnstile instead.
    266 		 */
    267 		ots = l->l_ts;
    268 		ots->ts_free = ts->ts_free;
    269 		ts->ts_free = ots;
    270 		l->l_ts = ts;
    271 
    272 		KASSERT(TS_ALL_WAITERS(ts) != 0);
    273 		KASSERT(!TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) ||
    274 			!TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
    275 	}
    276 
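        	/*
        	 * Put the current LWP to sleep on the chosen queue.  The chain
        	 * lock doubles as the sleep queue interlock, and we return here
        	 * once awoken via turnstile_wakeup().
        	 */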
    277 	sq = &ts->ts_sleepq[q];
    278 	sleepq_enter(sq, l);
    279 	sleepq_block(sq, sched_kpri(l), obj, "tstile", 0, 0,
    280 	    &turnstile_syncobj);
    281 }
    282 
    283 /*
    284  * turnstile_wakeup:
    285  *
    286  *	Wake up the specified number of threads that are blocked
    287  *	in a turnstile.
    288  */
    289 void
    290 turnstile_wakeup(turnstile_t *ts, int q, int count, struct lwp *nl)
    291 {
    292 	sleepq_t *sq;
    293 	tschain_t *tc;
    294 	struct lwp *l;
    295 	int swapin;
    296 
    297 	tc = &turnstile_tab[TS_HASH(ts->ts_obj)];
    298 	sq = &ts->ts_sleepq[q];
    299 	swapin = 0;
    300 
    301 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
    302 	KASSERT(count > 0 && count <= TS_WAITERS(ts, q));
    303 	KASSERT(mutex_owned(tc->tc_mutex) && sq->sq_mutex == tc->tc_mutex);
    304 
    305 	if (nl != NULL) {
    306 #if defined(DEBUG) || defined(LOCKDEBUG)
    307 		TAILQ_FOREACH(l, &sq->sq_queue, l_sleepchain) {
    308 			if (l == nl)
    309 				break;
    310 		}
    311 		if (l == NULL)
    312 			panic("turnstile_wakeup: nl not on sleepq");
    313 #endif
    314 		swapin |= turnstile_remove(ts, nl, sq);
    315 	} else {
    316 		while (count-- > 0) {
    317 			l = TAILQ_FIRST(&sq->sq_queue);
    318 			KASSERT(l != NULL);
    319 			swapin |= turnstile_remove(ts, l, sq);
    320 		}
    321 	}
    322 	mutex_spin_exit(tc->tc_mutex);
    323 
    324 	/*
    325 	 * If there are newly awakened threads that need to be swapped in,
    326 	 * then kick the swapper into action.
    327 	 */
    328 	if (swapin)
    329 		uvm_kick_scheduler();
    330 }
    331 
    332 /*
    333  * turnstile_unsleep:
    334  *
    335  *	Remove an LWP from the turnstile.  This is called when the LWP has
    336  *	not been awoken normally but instead interrupted: for example, if it
    337  *	has received a signal.  It's not a valid action for turnstiles,
    338  *	since LWPs blocking on a turnstile are not interruptible.
    339  */
    340 void
    341 turnstile_unsleep(struct lwp *l)
    342 {
    343 
    344 	lwp_unlock(l);
    345 	panic("turnstile_unsleep");
    346 }
    347 
    348 /*
    349  * turnstile_changepri:
    350  *
    351  *	Adjust the priority of an LWP residing on a turnstile.  Since we do
    352  *	not yet do priority inheritance, we mostly ignore this action.
    353  */
    354 void
    355 turnstile_changepri(struct lwp *l, int pri)
    356 {
    357 
    358 	/* LWPs on turnstiles always have kernel priority. */
    359 	l->l_usrpri = pri;
    360 	l->l_priority = sched_kpri(l);
    361 }
    362 
    363 #if defined(LOCKDEBUG)
    364 /*
    365  * turnstile_print:
    366  *
    367  *	Given the address of a lock object, print the contents of a
    368  *	turnstile.
    369  */
    370 void
    371 turnstile_print(volatile void *obj, void (*pr)(const char *, ...))
    372 {
    373 	turnstile_t *ts;
    374 	tschain_t *tc;
    375 	sleepq_t *rsq, *wsq;
    376 	struct lwp *l;
    377 
    378 	tc = &turnstile_tab[TS_HASH(obj)];
    379 
    380 	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
    381 		if (ts->ts_obj == obj)
    382 			break;
    383 
    384 	(*pr)("Turnstile chain at %p with tc_mutex at %p.\n", tc, tc->tc_mutex);
    385 	if (ts == NULL) {
    386 		(*pr)("=> No active turnstile for this lock.\n");
    387 		return;
    388 	}
    389 
    390 	rsq = &ts->ts_sleepq[TS_READER_Q];
    391 	wsq = &ts->ts_sleepq[TS_WRITER_Q];
    392 
    393 	(*pr)("=> Turnstile at %p (rdq=%p, wrq=%p).\n", ts, rsq, wsq);
    394 
    395 	(*pr)("=> %d waiting readers:", rsq->sq_waiters);
    396 	TAILQ_FOREACH(l, &rsq->sq_queue, l_sleepchain) {
    397 		(*pr)(" %p", l);
    398 	}
    399 	(*pr)("\n");
    400 
    401 	(*pr)("=> %d waiting writers:", wsq->sq_waiters);
    402 	TAILQ_FOREACH(l, &wsq->sq_queue, l_sleepchain) {
    403 		(*pr)(" %p", l);
    404 	}
    405 	(*pr)("\n");
    406 }
    407 #endif	/* LOCKDEBUG */
    408