/*	$NetBSD: kern_turnstile.c,v 1.1.2.3 2002/03/10 21:05:11 thorpej Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Turnstiles are specialized sleep queues for use by locks.  Turnstiles
 * are described in detail in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 *
 * Turnstiles are kept in a hash table.  There are likely to be many more
 * lock objects than there are threads.  Since a thread can block on only
 * one lock at a time, we only need one turnstile per thread, and so they
 * are allocated at thread creation time.
 *
 * When a thread decides it needs to block on a lock, it looks up the
 * active turnstile for that lock.  If no active turnstile exists, then
 * the thread lends its turnstile to the lock.  If there is already
 * an active turnstile for the lock, the thread places its turnstile on
 * a list of free turnstiles, and references the active one instead.
 *
 * The act of looking up the turnstile acquires an interlock on the sleep
 * queue.  If a thread decides it doesn't need to block after all, then
 * this interlock must be released by explicitly aborting the turnstile
 * operation.
 *
 * When a thread is awakened, it needs to get its turnstile back.  If
 * there are still other threads waiting in the active turnstile, the
 * thread grabs a free turnstile off the free list.  Otherwise, it
 * can take back the active turnstile from the lock (thus deactivating
 * the turnstile).
 *
 * Turnstiles are the place to do priority inheritance.  However, we do
 * not currently implement that.
 *
 * We also do not differentiate between the reader and writer queues,
 * although we currently provide for it in the API so that we can add
 * support for it later.
 *
 * XXX We currently have to interlock with the sched_lock.  The locking
 * order is:
 *
 *	turnstile chain -> sched_lock
 */
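
/*
 * Hypothetical usage sketch (not part of this file): a lock built on
 * top of this interface would typically block and wake waiters roughly
 * as follows.  The "lock" object and the still_need_to_block() check
 * are illustrative only, and the rw queue argument is passed as 0
 * since reader/writer queues are not yet differentiated (see above).
 *
 *	Blocking path:
 *
 *		struct turnstile *ts;
 *
 *		ts = turnstile_lookup(&lock);		(interlock now held)
 *		if (still_need_to_block(&lock))
 *			turnstile_block(ts, 0, &lock);	(sleeps; drops interlock)
 *		else
 *			turnstile_exit(&lock);		(drops interlock)
 *
 *	Release path:
 *
 *		ts = turnstile_lookup(&lock);
 *		if (ts != NULL)
 *			turnstile_wakeup(ts, 0, 1);	(wakes one waiter;
 *							 drops interlock)
 *		else
 *			turnstile_exit(&lock);
 */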

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.1.2.3 2002/03/10 21:05:11 thorpej Exp $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>

/*
 * Turnstile hash -- shift the lock object address right to discard the
 * low-order bits that are always zero due to alignment, and mask the
 * result with the turnstile table size (a power of two) minus one.
 */
#if LONG_BIT == 64
#define	TURNSTILE_HASH_SHIFT	3
#elif LONG_BIT == 32
#define	TURNSTILE_HASH_SHIFT	2
#else
#error "Don't know how big your pointers are."
#endif

#define	TURNSTILE_HASH_SIZE	64	/* XXXJRT tune */
#define	TURNSTILE_HASH_MASK	(TURNSTILE_HASH_SIZE - 1)

#define	TURNSTILE_HASH(obj)						\
	((((u_long)(obj)) >> TURNSTILE_HASH_SHIFT) & TURNSTILE_HASH_MASK)
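
/*
 * Worked example (illustrative address only): with TURNSTILE_HASH_SHIFT
 * == 3 on a 64-bit machine, a lock object at address 0xc0e8 hashes to
 * ((0xc0e8 >> 3) & TURNSTILE_HASH_MASK) == (6173 & 63) == 29, selecting
 * chain 29 of the 64-entry table.
 */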

struct turnstile_chain {
	__cpu_simple_lock_t tc_lock;	/* lock on hash chain */
	int		    tc_oldspl;	/* saved spl of lock holder
					   (only valid while tc_lock held) */
	LIST_HEAD(, turnstile) tc_chain;/* turnstile chain */
} turnstile_table[TURNSTILE_HASH_SIZE];

#define	TURNSTILE_CHAIN(obj)						\
	&turnstile_table[TURNSTILE_HASH(obj)]

#define	TURNSTILE_CHAIN_LOCK(tc)					\
do {									\
	int _s_ = splsched();						\
	__cpu_simple_lock(&(tc)->tc_lock);				\
	(tc)->tc_oldspl = _s_;						\
} while (/*CONSTCOND*/0)

#define	TURNSTILE_CHAIN_UNLOCK(tc)					\
do {									\
	int _s_ = (tc)->tc_oldspl;					\
	__cpu_simple_unlock(&(tc)->tc_lock);				\
	splx(_s_);							\
} while (/*CONSTCOND*/0)

static const char turnstile_wmesg[] = "tstile";

struct pool turnstile_pool;
struct pool_cache turnstile_cache;

int	turnstile_ctor(void *, void *, int);

/*
 * turnstile_init:
 *
 *	Initialize the turnstile mechanism.
 */
void
turnstile_init(void)
{
	struct turnstile_chain *tc;
	int i;

	for (i = 0; i < TURNSTILE_HASH_SIZE; i++) {
		tc = &turnstile_table[i];
		__cpu_simple_lock_init(&tc->tc_lock);
		LIST_INIT(&tc->tc_chain);
	}

	pool_init(&turnstile_pool, sizeof(struct turnstile), 0, 0, 0,
	    "tspool", &pool_allocator_nointr);
	pool_cache_init(&turnstile_cache, &turnstile_pool,
	    turnstile_ctor, NULL, NULL);
}

/*
 * turnstile_ctor:
 *
 *	Constructor for turnstiles.
 */
int
turnstile_ctor(void *arg, void *obj, int flags)
{
	struct turnstile *ts = obj;

	memset(ts, 0, sizeof(*ts));
	return (0);
}

/*
 * turnstile_remque:
 *
 *	Remove the specified process from the turnstile's sleep queue,
 *	giving it back a turnstile (either one from the free list or,
 *	if it is the last waiter, the active turnstile itself).
 */
static void
turnstile_remque(struct turnstile *ts, struct proc *p, struct slpque *qp)
{
	struct proc **q = &qp->sq_head;
	struct turnstile *nts;

	KASSERT(p->p_ts == ts);

	/*
	 * This process is no longer using the active turnstile.
	 * Find an inactive one on the free list to give to it.
	 */
	if ((nts = ts->ts_free) != NULL) {
		KASSERT(ts->ts_waiters > 1);
		p->p_ts = nts;
		ts->ts_free = nts->ts_free;
		nts->ts_free = NULL;
	} else {
		/*
		 * If the free list is empty, this is the last
		 * waiter; it keeps the active turnstile, which
		 * is deactivated by removing it from the chain.
		 */
		KASSERT(ts->ts_waiters == 1);
		LIST_REMOVE(ts, ts_chain);
	}

	ts->ts_waiters--;

	/* Unlink the process from the sleep queue. */
	*q = p->p_forw;
	if (qp->sq_tailp == &p->p_forw)
		qp->sq_tailp = q;

	KASSERT(ts->ts_waiters != 0 || ts->ts_sleepq.sq_head == NULL);
	KASSERT(ts->ts_waiters == 0 || ts->ts_sleepq.sq_head != NULL);
}

/*
 * turnstile_lookup:
 *
 *	Look up the turnstile for the specified lock object.  This
 *	acquires and holds the turnstile chain lock (sleep queue
 *	interlock).
 */
struct turnstile *
turnstile_lookup(void *lp)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(lp);
	struct turnstile *ts;

	TURNSTILE_CHAIN_LOCK(tc);

	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
		if (ts->ts_obj == lp)
			return (ts);

	/*
	 * No turnstile yet for this lock.  No problem; turnstile_block()
	 * will handle this by fetching the turnstile from the blocking
	 * thread.
	 */
	return (NULL);
}

/*
 * turnstile_exit:
 *
 *	Abort a turnstile operation.
 */
void
turnstile_exit(void *lp)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(lp);

	TURNSTILE_CHAIN_UNLOCK(tc);
}

/*
 * turnstile_block:
 *
 *	Block a thread on a lock object.
 */
int
turnstile_block(struct turnstile *ts, int rw, void *lp)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(lp);
	struct proc *p = curproc;
	struct turnstile *ots;
	struct slpque *qp;
	int s;

	KASSERT(p->p_ts != NULL);

	if (ts == NULL) {
		/*
		 * We are the first thread to wait for this lock;
		 * lend our turnstile to it.
		 */
		ts = p->p_ts;
		KASSERT(ts->ts_waiters == 0);
		KASSERT(ts->ts_sleepq.sq_head == NULL);
		ts->ts_obj = lp;
		LIST_INSERT_HEAD(&tc->tc_chain, ts, ts_chain);
	} else {
		/*
		 * Lock already has a turnstile.  Put our turnstile
		 * onto the free list, and reference the existing
		 * turnstile instead.
		 */
		ots = p->p_ts;
		ots->ts_free = ts->ts_free;
		ts->ts_free = ots;
		p->p_ts = ts;
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("turnstile_block: p_stat %d != SONPROC", p->p_stat);
	if (p->p_back != NULL)
		panic("turnstile_block: p_back != NULL");
#endif

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	/* XXXJRT PCATCH? */

	p->p_wchan = lp;
	p->p_wmesg = turnstile_wmesg;
	p->p_slptime = 0;
	/* p->p_priority = XXXJRT */

	ts->ts_waiters++;

	/* Insert ourselves at the tail of the turnstile's sleep queue. */
	qp = &ts->ts_sleepq;
	if (qp->sq_head == NULL)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = NULL;

	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;

	/*
	 * XXX We currently need to interlock with sched_lock.
	 * Note we're already at splsched().
	 */
	_SCHED_LOCK;

	/*
	 * We can now release the turnstile chain interlock; the
	 * scheduler lock is held, so a thread can't get in to
	 * do a turnstile_wakeup() before we do the switch.
	 *
	 * Note: we need to remember our old spl, which is currently
	 * stored in the turnstile chain, because we have to stay
	 * at splsched while the sched_lock is held.
	 */
	s = tc->tc_oldspl;
	__cpu_simple_unlock(&tc->tc_lock);

	mi_switch(p);

	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/*
	 * We are now back to the base spl level we were at when the
	 * caller called turnstile_lookup().
	 */

	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;

	KDASSERT((p->p_flag & (P_SINTR|P_TIMEOUT)) == 0);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif

	return (0);
}

/*
 * turnstile_wakeup:
 *
 *	Wake up the specified number of threads that are blocked
 *	in a turnstile.
 */
void
turnstile_wakeup(struct turnstile *ts, int rw, int count)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(ts->ts_obj);
	struct slpque *qp = &ts->ts_sleepq;
	struct proc *p;

	/* XXX We currently interlock with sched_lock. */
	_SCHED_LOCK;

	while (count-- > 0) {
		p = qp->sq_head;

		KASSERT(p != NULL);

		turnstile_remque(ts, p, qp);

		p->p_wchan = NULL;

		if (p->p_stat == SSLEEP)
			awaken(p);
	}

	_SCHED_UNLOCK;

	TURNSTILE_CHAIN_UNLOCK(tc);
}