kern_turnstile.c revision 1.2
      1 /*	$NetBSD: kern_turnstile.c,v 1.2 2007/02/09 21:55:31 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Turnstiles are described in detail in:
     41  *
     42  *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
     43  *	    Richard McDougall.
     44  *
     45  * Turnstiles are kept in a hash table.  There are likely to be many more
     46  * synchronisation objects than there are threads.  Since a thread can block
     47  * on only one lock at a time, we only need one turnstile per thread, and
     48  * so they are allocated at thread creation time.
     49  *
     50  * When a thread decides it needs to block on a lock, it looks up the
     51  * active turnstile for that lock.  If no active turnstile exists, then
     52  * the thread lends its turnstile to the lock.  If there is already an
     53  * active turnstile for the lock, the thread places its turnstile on a
     54  * list of free turnstiles, and references the active one instead.
     55  *
     56  * The act of looking up the turnstile acquires an interlock on the sleep
     57  * queue.  If a thread decides it doesn't need to block after all, then this
     58  * interlock must be released by explicitly aborting the turnstile
     59  * operation.
     60  *
     61  * When a thread is awakened, it needs to get its turnstile back.  If there
     62  * are still other threads waiting in the active turnstile, the thread
     63  * grabs a free turnstile off the free list.  Otherwise, it can take back
     64  * the active turnstile from the lock (thus deactivating the turnstile).
     65  *
     66  * Turnstiles are the place to do priority inheritance.  However, we do
     67  * not currently implement that.
     68  */
     69 
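/*
 * Illustrative sketch (editorial addition, never compiled): how a
 * hypothetical lock primitive might drive the interface described above.
 * The names my_lock_enter() and my_lock_try() are assumptions made up
 * for this example and do not exist in the tree.
 */
#if 0
static void
my_lock_enter(wchan_t lockobj)
{
	turnstile_t *ts;

	for (;;) {
		/* Look up the active turnstile; this takes the chain interlock. */
		ts = turnstile_lookup(lockobj);
		if (my_lock_try(lockobj)) {
			/* Acquired the lock after all: abort, dropping the interlock. */
			turnstile_exit(lockobj);
			return;
		}
		/*
		 * ts may be NULL here; turnstile_block() then lends our
		 * own turnstile to the lock.  The interlock is released
		 * when the LWP goes to sleep, so we simply retry on wakeup.
		 */
		turnstile_block(ts, TS_WRITER_Q, lockobj);
	}
}
#endif
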
     70 #include <sys/cdefs.h>
     71 __KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.2 2007/02/09 21:55:31 ad Exp $");
     72 
     73 #include "opt_lockdebug.h"
     74 #include "opt_multiprocessor.h"
     75 #include "opt_ktrace.h"
     76 #include "opt_ddb.h"
     77 
     78 #include <sys/param.h>
     79 #include <sys/lock.h>
     80 #include <sys/pool.h>
     81 #include <sys/proc.h>
     82 #include <sys/sleepq.h>
     83 #include <sys/systm.h>
     84 
     85 #define	TS_HASH_SIZE	64
     86 #define	TS_HASH_MASK	(TS_HASH_SIZE - 1)
     87 #define	TS_HASH(obj)	(((uintptr_t)(obj) >> 3) & TS_HASH_MASK)
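/*
 * Editorial note: the shift by three discards the low bits of the object
 * address, which carry little information since lock objects are normally
 * at least word aligned.  For example, an object at 0xdeadbe60 would hash
 * to bucket ((0xdeadbe60 >> 3) & 63) == 12.
 */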
     88 
     89 tschain_t	turnstile_tab[TS_HASH_SIZE];
     90 
     91 struct pool turnstile_pool;
     92 struct pool_cache turnstile_cache;
     93 
     94 int	turnstile_ctor(void *, void *, int);
     95 void	turnstile_unsleep(struct lwp *);
     96 void	turnstile_changepri(struct lwp *, int);
     97 
     98 extern turnstile_t turnstile0;
     99 
    100 syncobj_t turnstile_syncobj = {
    101 	SOBJ_SLEEPQ_FIFO,
    102 	turnstile_unsleep,
    103 	turnstile_changepri
    104 };
    105 
    106 /*
    107  * turnstile_init:
    108  *
    109  *	Initialize the turnstile mechanism.
    110  */
    111 void
    112 turnstile_init(void)
    113 {
    114 	tschain_t *tc;
    115 	int i;
    116 
    117 	for (i = 0; i < TS_HASH_SIZE; i++) {
    118 		tc = &turnstile_tab[i];
    119 		LIST_INIT(&tc->tc_chain);
    120 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    121 		mutex_init(&tc->tc_mutexstore, MUTEX_SPIN, IPL_SCHED);
    122 		tc->tc_mutex = &tc->tc_mutexstore;
    123 #else
    124 		tc->tc_mutex = &sched_mutex;
    125 #endif
    126 	}
    127 
    128 	pool_init(&turnstile_pool, sizeof(turnstile_t), 0, 0, 0,
    129 	    "tstilepl", &pool_allocator_nointr);
    130 	pool_cache_init(&turnstile_cache, &turnstile_pool,
    131 	    turnstile_ctor, NULL, NULL);
    132 
    133 	(void)turnstile_ctor(NULL, &turnstile0, 0);
    134 }
    135 
    136 /*
    137  * turnstile_ctor:
    138  *
    139  *	Constructor for turnstiles.
    140  */
    141 int
    142 turnstile_ctor(void *arg, void *obj, int flags)
    143 {
    144 	turnstile_t *ts = obj;
    145 
    146 	memset(ts, 0, sizeof(*ts));
    147 	sleepq_init(&ts->ts_sleepq[TS_READER_Q], NULL);
    148 	sleepq_init(&ts->ts_sleepq[TS_WRITER_Q], NULL);
    149 	return (0);
    150 }
    151 
    152 /*
    153  * turnstile_remove:
    154  *
    155  *	Remove an LWP from a turnstile sleep queue and wake it.
    156  */
    157 static inline int
    158 turnstile_remove(turnstile_t *ts, struct lwp *l, sleepq_t *sq)
    159 {
    160 	turnstile_t *nts;
    161 
    162 	KASSERT(l->l_ts == ts);
    163 
    164 	/*
    165 	 * This LWP is no longer using the active turnstile.
    166 	 * Find an inactive one on the free list to give to it.
    167 	 */
    168 	if ((nts = ts->ts_free) != NULL) {
    169 		KASSERT(TS_ALL_WAITERS(ts) > 1);
    170 		l->l_ts = nts;
    171 		ts->ts_free = nts->ts_free;
    172 		nts->ts_free = NULL;
    173 	} else {
    174 		/*
    175 		 * If the free list is empty, this is the last
    176 		 * waiter.
    177 		 */
    178 		KASSERT(TS_ALL_WAITERS(ts) == 1);
    179 		LIST_REMOVE(ts, ts_chain);
    180 	}
    181 
    182 	return sleepq_remove(sq, l);
    183 }
    184 
    185 /*
    186  * turnstile_lookup:
    187  *
    188  *	Look up the turnstile for the specified lock.  This acquires and
    189  *	holds the turnstile chain lock (sleep queue interlock).
    190  */
    191 turnstile_t *
    192 turnstile_lookup(wchan_t obj)
    193 {
    194 	turnstile_t *ts;
    195 	tschain_t *tc;
    196 
    197 	tc = &turnstile_tab[TS_HASH(obj)];
    198 	mutex_spin_enter(tc->tc_mutex);
    199 
    200 	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
    201 		if (ts->ts_obj == obj)
    202 			return (ts);
    203 
    204 	/*
    205 	 * No turnstile yet for this lock.  No problem, turnstile_block()
    206 	 * handles this by fetching the turnstile from the blocking thread.
    207 	 */
    208 	return (NULL);
    209 }
    210 
    211 /*
    212  * turnstile_exit:
    213  *
    214  *	Abort a turnstile operation.
    215  */
    216 void
    217 turnstile_exit(wchan_t obj)
    218 {
    219 	tschain_t *tc;
    220 
    221 	tc = &turnstile_tab[TS_HASH(obj)];
    222 	mutex_spin_exit(tc->tc_mutex);
    223 }
    224 
    225 /*
    226  * turnstile_block:
    227  *
    228  *	Enter an object into the turnstile chain and prepare the current
    229  *	LWP for sleep.
    230  */
    231 void
    232 turnstile_block(turnstile_t *ts, int q, wchan_t obj)
    233 {
    234 	struct lwp *l;
    235 	turnstile_t *ots;
    236 	tschain_t *tc;
    237 	sleepq_t *sq;
    238 
    239 	tc = &turnstile_tab[TS_HASH(obj)];
    240 	l = curlwp;
    241 
    242 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
    243 	KASSERT(mutex_owned(tc->tc_mutex));
    244 	KASSERT(l != NULL && l->l_ts != NULL);
    245 
    246 	if (ts == NULL) {
    247 		/*
    248 		 * We are the first thread to wait for this object;
    249 		 * lend our turnstile to it.
    250 		 */
    251 		ts = l->l_ts;
    252 		KASSERT(TS_ALL_WAITERS(ts) == 0);
    253 		KASSERT(TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) &&
    254 			TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
    255 		ts->ts_obj = obj;
    256 		ts->ts_sleepq[TS_READER_Q].sq_mutex = tc->tc_mutex;
    257 		ts->ts_sleepq[TS_WRITER_Q].sq_mutex = tc->tc_mutex;
    258 		LIST_INSERT_HEAD(&tc->tc_chain, ts, ts_chain);
    259 	} else {
    260 		/*
    261 		 * Object already has a turnstile.  Put our turnstile
    262 		 * onto the free list, and reference the existing
    263 		 * turnstile instead.
    264 		 */
    265 		ots = l->l_ts;
    266 		ots->ts_free = ts->ts_free;
    267 		ts->ts_free = ots;
    268 		l->l_ts = ts;
    269 
    270 		KASSERT(TS_ALL_WAITERS(ts) != 0);
    271 		KASSERT(!TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) ||
    272 			!TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
    273 	}
    274 
    275 	sq = &ts->ts_sleepq[q];
    276 	sleepq_enter(sq, l);
    277 	sleepq_block(sq, sched_kpri(l), obj, "tstile", 0, 0,
    278 	    &turnstile_syncobj);
    279 }
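
/*
 * Worked example (editorial note): if LWPs A, B and C block on the same
 * lock in that order, A's turnstile becomes the active one for the lock
 * and B's and C's turnstiles are pushed onto its ts_free list.  Each LWP
 * that is later woken reclaims a turnstile in turnstile_remove(): the
 * first two take one from ts_free, and the last waiter keeps the active
 * turnstile, removing it from the hash chain.
 */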
    280 
    281 /*
    282  * turnstile_wakeup:
    283  *
    284  *	Wake up the specified number of threads that are blocked
    285  *	in a turnstile.
    286  */
    287 void
    288 turnstile_wakeup(turnstile_t *ts, int q, int count, struct lwp *nl)
    289 {
    290 	sleepq_t *sq;
    291 	tschain_t *tc;
    292 	struct lwp *l;
    293 	int swapin;
    294 
    295 	tc = &turnstile_tab[TS_HASH(ts->ts_obj)];
    296 	sq = &ts->ts_sleepq[q];
    297 	swapin = 0;
    298 
    299 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
    300 	KASSERT(count > 0 && count <= TS_WAITERS(ts, q));
    301 	KASSERT(mutex_owned(tc->tc_mutex) && sq->sq_mutex == tc->tc_mutex);
    302 
    303 	if (nl != NULL) {
    304 #if defined(DEBUG) || defined(LOCKDEBUG)
    305 		TAILQ_FOREACH(l, &sq->sq_queue, l_sleepchain) {
    306 			if (l == nl)
    307 				break;
    308 		}
    309 		if (l == NULL)
    310 			panic("turnstile_wakeup: nl not on sleepq");
    311 #endif
    312 		swapin |= turnstile_remove(ts, nl, sq);
    313 	} else {
    314 		while (count-- > 0) {
    315 			l = TAILQ_FIRST(&sq->sq_queue);
    316 			KASSERT(l != NULL);
    317 			swapin |= turnstile_remove(ts, l, sq);
    318 		}
    319 	}
    320 	mutex_spin_exit(tc->tc_mutex);
    321 
    322 	/*
    323 	 * If there are newly awakened threads that need to be swapped in,
    324 	 * then kick the swapper into action.
    325 	 */
    326 	if (swapin)
    327 		wakeup(&proc0);
    328 }
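
/*
 * Illustrative sketch (editorial addition, never compiled): a release
 * path matching the hypothetical my_lock_enter() example above.  The
 * name my_lock_wake_writers() is made up for this example.
 */
#if 0
static void
my_lock_wake_writers(wchan_t lockobj)
{
	turnstile_t *ts;

	ts = turnstile_lookup(lockobj);
	if (ts == NULL || TS_WAITERS(ts, TS_WRITER_Q) == 0) {
		/* Nobody is blocked; just release the chain interlock. */
		turnstile_exit(lockobj);
		return;
	}
	/* Wake every blocked writer; this also releases the interlock. */
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif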
    329 
    330 /*
    331  * turnstile_unsleep:
    332  *
    333  *	Remove an LWP from the turnstile.  This is called when the LWP has
    334  *	not been awoken normally but instead interrupted: for example, if it
    335  *	has received a signal.  It's not a valid action for turnstiles,
    336  *	since LWPs blocking on a turnstile are not interruptible.
    337  */
    338 void
    339 turnstile_unsleep(struct lwp *l)
    340 {
    341 
    342 	lwp_unlock(l);
    343 	panic("turnstile_unsleep");
    344 }
    345 
    346 /*
    347  * turnstile_changepri:
    348  *
    349  *	Adjust the priority of an LWP residing on a turnstile.  Since we do
    350  *	not yet do priority inheritance, we mostly ignore this action.
    351  */
    352 void
    353 turnstile_changepri(struct lwp *l, int pri)
    354 {
    355 
    356 	/* LWPs on turnstiles always have kernel priority. */
    357 	l->l_usrpri = pri;
    358 	l->l_priority = sched_kpri(l);
    359 }
    360 
    361 #if defined(LOCKDEBUG)
    362 /*
    363  * turnstile_print:
    364  *
    365  *	Given the address of a lock object, print the contents of a
    366  *	turnstile.
    367  */
    368 void
    369 turnstile_print(volatile void *obj, void (*pr)(const char *, ...))
    370 {
    371 	turnstile_t *ts;
    372 	tschain_t *tc;
    373 	sleepq_t *rsq, *wsq;
    374 	struct lwp *l;
    375 
    376 	tc = &turnstile_tab[TS_HASH(obj)];
    377 
    378 	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
    379 		if (ts->ts_obj == obj)
    380 			break;
    381 
    382 	(*pr)("Turnstile chain at %p with tc_mutex at %p.\n", tc, tc->tc_mutex);
    383 	if (ts == NULL) {
    384 		(*pr)("=> No active turnstile for this lock.\n");
    385 		return;
    386 	}
    387 
    388 	rsq = &ts->ts_sleepq[TS_READER_Q];
    389 	wsq = &ts->ts_sleepq[TS_WRITER_Q];
    390 
    391 	(*pr)("=> Turnstile at %p (rdq=%p, wrq=%p).\n", ts, rsq, wsq);
    392 
    393 	(*pr)("=> %d waiting readers:", rsq->sq_waiters);
    394 	TAILQ_FOREACH(l, &rsq->sq_queue, l_sleepchain) {
    395 		(*pr)(" %p", l);
    396 	}
    397 	(*pr)("\n");
    398 
    399 	(*pr)("=> %d waiting writers:", wsq->sq_waiters);
    400 	TAILQ_FOREACH(l, &wsq->sq_queue, l_sleepchain) {
    401 		(*pr)(" %p", l);
    402 	}
    403 	(*pr)("\n");
    404 }
    405 #endif	/* LOCKDEBUG */
    406