/*	$NetBSD: kern_turnstile.c,v 1.1.2.1 2002/03/10 19:08:24 thorpej Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Turnstiles are specialized sleep queues for use by locks.  Turnstiles
 * are described in detail in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 *
 * Turnstiles are kept in a hash table, since there are likely to be
 * many more lock objects than there are threads.  Because a thread
 * can block on only one lock at a time, we only need one turnstile
 * per thread, and so they are allocated at thread creation time.
 *
 * When a thread decides it needs to block on a lock, it looks up the
 * active turnstile for that lock.  If no active turnstile exists, then
 * the thread lends its turnstile to the lock.  If there is already
 * an active turnstile for the lock, the thread places its turnstile on
 * a list of free turnstiles, and references the active one instead.
 *
 * The act of looking up the turnstile acquires an interlock on the sleep
 * queue.  If a thread decides it doesn't need to block after all, then
 * this interlock must be released by explicitly aborting the turnstile
 * operation.
 *
 * When a thread is awakened, it needs to get its turnstile back.  If
 * there are still other threads waiting in the active turnstile, the
 * thread grabs a free turnstile off the free list.  Otherwise, it
 * can take back the active turnstile from the lock (thus deactivating
 * the turnstile).
 *
 * Turnstiles are the place to do priority inheritance.  However, we do
 * not currently implement that.
 *
 * We also do not differentiate between the reader and writer queues,
 * although we currently provide for it in the API so that we can add
 * support for it later.
 *
 * XXX We currently have to interlock with the sched_lock.  The locking
 * order is:
 *
 *	turnstile chain -> sched_lock
 */
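
/*
 * Rough usage sketch (illustrative only, not part of this file): a lock
 * implementation would typically bracket its slow paths with the calls
 * below.  "obj" is a hypothetical lock object, and "lock is available"
 * stands in for whatever test the lock performs; the rw argument is
 * passed as 0 here since reader/writer queues are not yet differentiated.
 *
 *	Acquire (slow path):
 *		ts = turnstile_lookup(obj);
 *		if (lock is available)
 *			turnstile_exit(obj);		abort; drops interlock
 *		else
 *			turnstile_block(ts, 0, obj);	sleeps; drops interlock
 *
 *	Release (slow path):
 *		ts = turnstile_lookup(obj);
 *		if (ts != NULL)
 *			turnstile_wakeup(ts, 0, 1);	drops interlock
 *		else
 *			turnstile_exit(obj);		no waiters; drops interlock
 */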

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.1.2.1 2002/03/10 19:08:24 thorpej Exp $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>

/*
 * Turnstile hash -- shift the lock object to eliminate the zero bits
 * of the address, and mask it off with the turnstile table's size.
 */
#if LONG_BIT == 64
#define	TURNSTILE_HASH_SHIFT	3
#elif LONG_BIT == 32
#define	TURNSTILE_HASH_SHIFT	2
#else
#error "Don't know how big your pointers are."
#endif

#define	TURNSTILE_HASH_SIZE	64	/* XXXJRT tune */
#define	TURNSTILE_HASH_MASK	(TURNSTILE_HASH_SIZE - 1)

#define	TURNSTILE_HASH(obj)						\
	((((u_long)(obj)) >> TURNSTILE_HASH_SHIFT) & TURNSTILE_HASH_MASK)
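
/*
 * Worked example (illustrative only; the address is made up): with
 * LONG_BIT == 64, TURNSTILE_HASH_SHIFT is 3 and TURNSTILE_HASH_MASK is
 * 0x3f, so a lock object at address 0x1234560 hashes to
 *
 *	(0x1234560 >> 3) & 0x3f == 0x2468ac & 0x3f == 0x2c (bucket 44)
 *
 * i.e. the always-zero alignment bits do not feed the hash.
 */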

struct turnstile_chain {
	__cpu_simple_lock_t tc_lock;	/* lock on hash chain */
	int		    tc_oldspl;	/* saved spl of lock holder */
	LIST_HEAD(, turnstile) tc_chain;/* turnstile chain */
} turnstile_table[TURNSTILE_HASH_SIZE];

#define	TURNSTILE_CHAIN(obj)						\
	&turnstile_table[TURNSTILE_HASH(obj)]

#define	TURNSTILE_CHAIN_LOCK(tc)					\
do {									\
	int _s_ = splsched();						\
	__cpu_simple_lock(&(tc)->tc_lock);				\
	(tc)->tc_oldspl = _s_;						\
} while (/*CONSTCOND*/0)

#define	TURNSTILE_CHAIN_UNLOCK(tc)					\
do {									\
	__cpu_simple_unlock(&(tc)->tc_lock);				\
	splx((tc)->tc_oldspl);						\
} while (/*CONSTCOND*/0)
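
/*
 * Minimal sketch of how the chain lock macros pair up (the pattern
 * used by turnstile_lookup() and turnstile_exit() below; "obj" is a
 * hypothetical lock object):
 *
 *	struct turnstile_chain *tc = TURNSTILE_CHAIN(obj);
 *
 *	TURNSTILE_CHAIN_LOCK(tc);	raises to splsched(), takes tc_lock,
 *					and stashes the old spl in tc_oldspl
 *	... examine or modify tc->tc_chain ...
 *	TURNSTILE_CHAIN_UNLOCK(tc);	drops tc_lock, then splx()s back
 */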

static const char turnstile_wmesg[] = "tstile";

struct pool turnstile_pool;

/*
 * turnstile_init:
 *
 *	Initialize the turnstile mechanism.
 */
void
turnstile_init(void)
{
	struct turnstile_chain *tc;
	int i;

	for (i = 0; i < TURNSTILE_HASH_SIZE; i++) {
		tc = &turnstile_table[i];
		__cpu_simple_lock_init(&tc->tc_lock);
		LIST_INIT(&tc->tc_chain);
	}

	pool_init(&turnstile_pool, sizeof(struct turnstile), 0, 0, 0,
	    "tspool", &pool_allocator_nointr);
}

static void
turnstile_remque(struct turnstile *ts, struct proc *p, struct slpque *qp)
{
	struct proc **q = &qp->sq_head;
	struct turnstile *nts;

	KASSERT(p->p_ts == ts);

	/*
	 * This process is no longer using the active turnstile.
	 * Find an inactive one on the free list to give to it.
	 */
	if ((nts = ts->ts_free) != NULL) {
		KASSERT(ts->ts_waiters > 1);
		p->p_ts = nts;
		ts->ts_free = nts->ts_free;
		nts->ts_free = NULL;
	} else {
		/*
		 * If the free list is empty, this is the last
		 * waiter; it gets the active turnstile back, which
		 * is removed from the hash chain (deactivated).
		 */
		KASSERT(ts->ts_waiters == 1);
		LIST_REMOVE(ts, ts_chain);
	}

	ts->ts_waiters--;

	/*
	 * Unlink the process from the sleep queue.  The caller always
	 * removes the head of the queue, so fix up the tail pointer if
	 * this was the last entry.
	 */
	*q = p->p_forw;
	if (qp->sq_tailp == &p->p_forw)
		qp->sq_tailp = q;
}

/*
 * turnstile_lookup:
 *
 *	Look up the turnstile for the specified lock object.  This
 *	acquires and holds the turnstile chain lock (sleep queue
 *	interlock).
 */
struct turnstile *
turnstile_lookup(void *lp)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(lp);
	struct turnstile *ts;

	TURNSTILE_CHAIN_LOCK(tc);

	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
		if (ts->ts_obj == lp)
			return (ts);

	/*
	 * No turnstile yet for this lock.  No problem; turnstile_block()
	 * handles this by fetching the turnstile from the blocking thread.
	 */
	return (NULL);
}

/*
 * turnstile_exit:
 *
 *	Abort a turnstile operation.
 */
void
turnstile_exit(void *lp)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(lp);

	TURNSTILE_CHAIN_UNLOCK(tc);
}

/*
 * turnstile_block:
 *
 *	Block a thread on a lock object.
 */
int
turnstile_block(struct turnstile *ts, int rw, void *lp)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(lp);
	struct proc *p = curproc;
	struct turnstile *ots;
	struct slpque *qp;
	int s;

	KASSERT(p->p_ts != NULL);

	if (ts == NULL) {
		/*
		 * We are the first thread to wait for this lock;
		 * lend our turnstile to it.
		 */
		ts = p->p_ts;
		KASSERT(ts->ts_waiters == 0);
		KASSERT(ts->ts_sleepq.sq_head == NULL);
		ts->ts_obj = lp;
		LIST_INSERT_HEAD(&tc->tc_chain, ts, ts_chain);
	} else {
		/*
		 * Lock already has a turnstile.  Put our turnstile
		 * onto the free list, and reference the existing
		 * turnstile instead.
		 */
		ots = p->p_ts;
		ots->ts_free = ts->ts_free;
		ts->ts_free = ots;
		p->p_ts = ts;
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("turnstile_block: p_stat %d != SONPROC", p->p_stat);
	if (p->p_back != NULL)
		panic("turnstile_block: p_back != NULL");
#endif

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	/* XXXJRT PCATCH? */

	p->p_wchan = lp;
	p->p_wmesg = turnstile_wmesg;
	p->p_slptime = 0;
	/* p->p_priority = XXXJRT */

	ts->ts_waiters++;

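	/*
	 * Put ourselves at the tail of the turnstile's sleep queue.
	 * sq_tailp points to the p_forw link of the last process on
	 * the queue, so the queue stays NULL-terminated.
	 */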
	qp = &ts->ts_sleepq;
	if (qp->sq_head == NULL)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = NULL;

	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;

	/*
	 * XXX We currently need to interlock with sched_lock.
	 * Note we're already at splsched().
	 */
	_SCHED_LOCK;

	/*
	 * We can now release the turnstile chain interlock; the
	 * scheduler lock is held, so a thread can't get in to
	 * do a turnstile_wakeup() before we do the switch.
	 *
	 * Note: we need to remember our old spl, which is currently
	 * stored in the turnstile chain, because we have to stay
	 * at splsched while the sched_lock is held.
	 */
	s = tc->tc_oldspl;
	__cpu_simple_unlock(&tc->tc_lock);

	mi_switch(p);

	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/*
	 * We are now back at the base spl level we were at when the
	 * caller called turnstile_lookup().
	 */

	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;

	KDASSERT((p->p_flag & (P_SINTR|P_TIMEOUT)) == 0);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif

	return (0);
}

/*
 * turnstile_wakeup:
 *
 *	Wake up the specified number of threads that are blocked
 *	in a turnstile.
 */
void
turnstile_wakeup(struct turnstile *ts, int rw, int count)
{
	struct turnstile_chain *tc = TURNSTILE_CHAIN(ts->ts_obj);
	struct slpque *qp = &ts->ts_sleepq;
	struct proc *p;

	/* XXX We currently interlock with sched_lock. */
	_SCHED_LOCK;

	while (count-- > 0) {
		p = qp->sq_head;

		KASSERT(p != NULL);

		turnstile_remque(ts, p, qp);

		p->p_wchan = NULL;

		if (p->p_stat == SSLEEP)
			awaken(p);
	}

	_SCHED_UNLOCK;

	TURNSTILE_CHAIN_UNLOCK(tc);
}