      1  1.54  riastrad /*	$NetBSD: kern_turnstile.c,v 1.54 2023/10/15 10:27:11 riastradh Exp $	*/
      2   1.2        ad 
      3   1.2        ad /*-
      4  1.49        ad  * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020, 2023
      5  1.37        ad  *     The NetBSD Foundation, Inc.
      6   1.2        ad  * All rights reserved.
      7   1.2        ad  *
      8   1.2        ad  * This code is derived from software contributed to The NetBSD Foundation
      9   1.2        ad  * by Jason R. Thorpe and Andrew Doran.
     10   1.2        ad  *
     11   1.2        ad  * Redistribution and use in source and binary forms, with or without
     12   1.2        ad  * modification, are permitted provided that the following conditions
     13   1.2        ad  * are met:
     14   1.2        ad  * 1. Redistributions of source code must retain the above copyright
     15   1.2        ad  *    notice, this list of conditions and the following disclaimer.
     16   1.2        ad  * 2. Redistributions in binary form must reproduce the above copyright
     17   1.2        ad  *    notice, this list of conditions and the following disclaimer in the
     18   1.2        ad  *    documentation and/or other materials provided with the distribution.
     19   1.2        ad  *
     20   1.2        ad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21   1.2        ad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22   1.2        ad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23   1.2        ad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24   1.2        ad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   1.2        ad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   1.2        ad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   1.2        ad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   1.2        ad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   1.2        ad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   1.2        ad  * POSSIBILITY OF SUCH DAMAGE.
     31   1.2        ad  */
     32   1.2        ad 
     33   1.2        ad /*
     34   1.2        ad  * Turnstiles are described in detail in:
     35   1.2        ad  *
     36   1.2        ad  *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
     37   1.2        ad  *	    Richard McDougall.
     38   1.2        ad  *
     39   1.2        ad  * Turnstiles are kept in a hash table.  There are likely to be many more
     40   1.2        ad  * synchronisation objects than there are threads.  Since a thread can block
     41   1.2        ad  * on only one lock at a time, we only need one turnstile per thread, and
     42   1.2        ad  * so they are allocated at thread creation time.
     43   1.2        ad  *
     44   1.2        ad  * When a thread decides it needs to block on a lock, it looks up the
     45   1.2        ad  * active turnstile for that lock.  If no active turnstile exists, then
     46   1.2        ad  * the thread lends its turnstile to the lock.  If there is already an
     47   1.2        ad  * active turnstile for the lock, the thread places its turnstile on a
     48   1.2        ad  * list of free turnstiles, and references the active one instead.
     49   1.2        ad  *
     50   1.2        ad  * The act of looking up the turnstile acquires an interlock on the sleep
     51   1.2        ad  * queue.  If a thread decides it doesn't need to block after all, then this
     52   1.2        ad  * interlock must be released by explicitly aborting the turnstile
     53   1.2        ad  * operation.
     54   1.2        ad  *
     55   1.2        ad  * When a thread is awakened, it needs to get its turnstile back.  If there
     56  1.18       alc  * are still other threads waiting in the active turnstile, the thread
     57   1.2        ad  * grabs a free turnstile off the free list.  Otherwise, it can take back
     58   1.2        ad  * the active turnstile from the lock (thus deactivating the turnstile).
     59   1.2        ad  *
     60  1.33        ad  * Turnstiles are where we do priority inheritance.
     61   1.2        ad  */
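
                        /*
                         * An illustrative sketch of how a lock implementation is expected to
                         * use this interface (placeholder names, not lifted from any real
                         * lock provider).  Blocking on "lock", using the writer queue and a
                         * placeholder sync object "lock_syncobj":
                         *
                         *	ts = turnstile_lookup(lock);	(takes the chain interlock)
                         *	if (lock is still owned by another LWP)
                         *		turnstile_block(ts, TS_WRITER_Q, lock, lock_syncobj);
                         *	else
                         *		turnstile_exit(lock);	(abort; drops the interlock)
                         *
                         * turnstile_block() gives up the interlock on the way to sleep, so
                         * turnstile_exit() is only needed when the caller decides not to
                         * block after all.
                         */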
     62   1.2        ad 
     63   1.2        ad #include <sys/cdefs.h>
     64  1.54  riastrad __KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.54 2023/10/15 10:27:11 riastradh Exp $");
     65   1.2        ad 
     66   1.2        ad #include <sys/param.h>
     67   1.4      yamt #include <sys/lockdebug.h>
     68  1.45  riastrad #include <sys/proc.h>
     69   1.2        ad #include <sys/sleepq.h>
     70  1.45  riastrad #include <sys/sleeptab.h>
     71  1.54  riastrad #include <sys/syncobj.h>
     72   1.2        ad #include <sys/systm.h>
     73   1.2        ad 
     74  1.33        ad /*
     75  1.33        ad  * Shift of 6 aligns to typical cache line size of 64 bytes;  there's no
     76  1.33        ad  * point having two turnstile locks to back two lock objects that share one
     77  1.33        ad  * cache line.
     78  1.33        ad  */
     79  1.33        ad #define	TS_HASH_SIZE	128
     80   1.2        ad #define	TS_HASH_MASK	(TS_HASH_SIZE - 1)
     81  1.33        ad #define	TS_HASH(obj)	(((uintptr_t)(obj) >> 6) & TS_HASH_MASK)
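
                        /*
                         * Worked example with illustrative addresses: objects at 0x1000 and
                         * 0x1020 share one 64 byte cache line and both hash to bucket
                         * (0x1000 >> 6) & TS_HASH_MASK == 64, so they also share one
                         * turnstile lock; an object at 0x1040 is on the next cache line and
                         * hashes to bucket 65.
                         */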
     82   1.2        ad 
     83  1.33        ad static tschain_t	turnstile_chains[TS_HASH_SIZE] __cacheline_aligned;
     84   1.2        ad 
     85  1.35        ad static union {
     86  1.35        ad 	kmutex_t	lock;
     87  1.35        ad 	uint8_t		pad[COHERENCY_UNIT];
     88  1.35        ad } turnstile_locks[TS_HASH_SIZE] __cacheline_aligned;
     89  1.35        ad 
     90   1.2        ad /*
     91   1.2        ad  * turnstile_init:
     92   1.2        ad  *
     93   1.2        ad  *	Initialize the turnstile mechanism.
     94   1.2        ad  */
     95   1.2        ad void
     96   1.2        ad turnstile_init(void)
     97   1.2        ad {
     98   1.2        ad 	int i;
     99   1.2        ad 
    100   1.2        ad 	for (i = 0; i < TS_HASH_SIZE; i++) {
    101  1.33        ad 		LIST_INIT(&turnstile_chains[i]);
    102  1.35        ad 		mutex_init(&turnstile_locks[i].lock, MUTEX_DEFAULT, IPL_SCHED);
    103   1.2        ad 	}
    104   1.2        ad 
    105  1.40        ad 	turnstile_ctor(&turnstile0);
    106   1.2        ad }
    107   1.2        ad 
    108   1.2        ad /*
    109   1.2        ad  * turnstile_ctor:
    110   1.2        ad  *
    111   1.2        ad  *	Constructor for turnstiles.
    112   1.2        ad  */
    113  1.40        ad void
    114  1.40        ad turnstile_ctor(turnstile_t *ts)
    115   1.2        ad {
    116   1.2        ad 
    117   1.2        ad 	memset(ts, 0, sizeof(*ts));
    118  1.21        ad 	sleepq_init(&ts->ts_sleepq[TS_READER_Q]);
    119  1.21        ad 	sleepq_init(&ts->ts_sleepq[TS_WRITER_Q]);
    120   1.2        ad }
    121   1.2        ad 
    122   1.2        ad /*
    123   1.2        ad  * turnstile_remove:
    124   1.2        ad  *
    125   1.2        ad  *	Remove an LWP from a turnstile sleep queue and wake it.
    126   1.2        ad  */
    127   1.9      yamt static inline void
    128  1.21        ad turnstile_remove(turnstile_t *ts, lwp_t *l, int q)
    129   1.2        ad {
    130   1.2        ad 	turnstile_t *nts;
    131   1.2        ad 
    132   1.2        ad 	KASSERT(l->l_ts == ts);
    133   1.2        ad 
    134   1.2        ad 	/*
    135   1.2        ad 	 * This LWP is no longer using the active turnstile.
    136   1.2        ad 	 * Find an inactive one on the free list to give to it.
    137   1.2        ad 	 */
    138   1.2        ad 	if ((nts = ts->ts_free) != NULL) {
    139   1.2        ad 		KASSERT(TS_ALL_WAITERS(ts) > 1);
    140   1.2        ad 		l->l_ts = nts;
    141   1.2        ad 		ts->ts_free = nts->ts_free;
    142   1.2        ad 		nts->ts_free = NULL;
    143   1.2        ad 	} else {
    144   1.2        ad 		/*
    145   1.2        ad 		 * If the free list is empty, this is the last
    146   1.2        ad 		 * waiter.
    147   1.2        ad 		 */
    148   1.2        ad 		KASSERT(TS_ALL_WAITERS(ts) == 1);
    149   1.2        ad 		LIST_REMOVE(ts, ts_chain);
    150   1.2        ad 	}
    151   1.2        ad 
    152  1.21        ad 	ts->ts_waiters[q]--;
    153  1.53        ad 	sleepq_remove(&ts->ts_sleepq[q], l, true);
    154   1.2        ad }
    155   1.2        ad 
    156   1.2        ad /*
    157   1.2        ad  * turnstile_lookup:
    158   1.2        ad  *
    159   1.2        ad  *	Look up the turnstile for the specified lock.  This acquires and
    160   1.2        ad  *	holds the turnstile chain lock (sleep queue interlock).
    161   1.2        ad  */
    162   1.2        ad turnstile_t *
    163   1.2        ad turnstile_lookup(wchan_t obj)
    164   1.2        ad {
    165   1.2        ad 	turnstile_t *ts;
    166   1.2        ad 	tschain_t *tc;
    167  1.33        ad 	u_int hash;
    168   1.2        ad 
    169  1.33        ad 	hash = TS_HASH(obj);
    170  1.33        ad 	tc = &turnstile_chains[hash];
    171  1.35        ad 	mutex_spin_enter(&turnstile_locks[hash].lock);
    172   1.2        ad 
    173  1.33        ad 	LIST_FOREACH(ts, tc, ts_chain)
    174   1.2        ad 		if (ts->ts_obj == obj)
    175   1.2        ad 			return (ts);
    176   1.2        ad 
    177   1.2        ad 	/*
    178   1.2        ad 	 * No turnstile yet for this lock.  No problem, turnstile_block()
    179   1.2        ad 	 * handles this by fetching the turnstile from the blocking thread.
    180   1.2        ad 	 */
    181   1.2        ad 	return (NULL);
    182   1.2        ad }
    183   1.2        ad 
    184   1.2        ad /*
    185   1.2        ad  * turnstile_exit:
    186   1.2        ad  *
    187   1.2        ad  *	Abort a turnstile operation.
    188   1.2        ad  */
    189   1.2        ad void
    190   1.2        ad turnstile_exit(wchan_t obj)
    191   1.2        ad {
    192   1.2        ad 
    193  1.35        ad 	mutex_spin_exit(&turnstile_locks[TS_HASH(obj)].lock);
    194   1.2        ad }
    195   1.2        ad 
    196   1.2        ad /*
    197  1.31      yamt  * turnstile_lendpri:
    198  1.31      yamt  *
    199  1.31      yamt  *	Lend our priority to LWPs on the blocking chain.
    200  1.31      yamt  *
    201  1.32      yamt  *	If the current owner of the lock (l->l_wchan, set by sleepq_enqueue)
    202  1.32      yamt  *	has a priority lower than ours (lwp_eprio(l)), lend our priority to
    203  1.32      yamt  *	it to avoid priority inversions.
    204   1.2        ad  */
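
                        /*
                         * Illustrative example (hypothetical LWPs and locks): if a high
                         * priority LWP A blocks on a mutex owned by B, and B is itself
                         * blocked on a second mutex owned by a low priority LWP C, the loop
                         * below walks the chain A -> B -> C, lending A's priority first to B
                         * and then to C, so that C can run, release its mutex, and allow the
                         * chain to unwind.
                         */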
    205  1.31      yamt 
    206  1.31      yamt static void
    207  1.31      yamt turnstile_lendpri(lwp_t *cur)
    208   1.2        ad {
    209  1.31      yamt 	lwp_t * l = cur;
    210  1.31      yamt 	pri_t prio;
    211  1.19        ad 
    212  1.19        ad 	/*
    213  1.22        ad 	 * NOTE: if you get a panic in this code block, it is likely that
    214  1.22        ad 	 * a lock has been destroyed or corrupted while still in use.  Try
    215  1.22        ad 	 * compiling a kernel with LOCKDEBUG to pinpoint the problem.
    216   1.4      yamt 	 */
    217  1.31      yamt 
    218  1.31      yamt 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    219  1.31      yamt 	KASSERT(l == curlwp);
    220  1.11        ad 	prio = lwp_eprio(l);
    221   1.4      yamt 	for (;;) {
    222  1.31      yamt 		lwp_t *owner;
    223  1.31      yamt 		turnstile_t *ts;
    224   1.4      yamt 		bool dolock;
    225   1.4      yamt 
    226   1.4      yamt 		if (l->l_wchan == NULL)
    227   1.4      yamt 			break;
    228   1.4      yamt 
    229  1.32      yamt 		/*
    230  1.32      yamt 		 * Ask the syncobj who owns the lock.
    231  1.32      yamt 		 */
    232   1.4      yamt 		owner = (*l->l_syncobj->sobj_owner)(l->l_wchan);
    233   1.4      yamt 		if (owner == NULL)
    234   1.4      yamt 			break;
    235   1.4      yamt 
    236  1.32      yamt 		/*
    237  1.32      yamt 		 * The owner may have changed as we have dropped the tc lock.
    238  1.32      yamt 		 */
    239  1.25    bouyer 		if (cur == owner) {
    240  1.25    bouyer 			/*
    241  1.32      yamt 			 * We own the lock: stop here, sleepq_block()
    242  1.41    andvar 			 * should wake up immediately.
    243  1.25    bouyer 			 */
    244  1.25    bouyer 			break;
    245  1.25    bouyer 		}
    246  1.32      yamt 		/*
    247  1.32      yamt 		 * Acquire owner->l_mutex if we don't have it yet.
    248  1.32      yamt 		 * Because we already have another LWP lock (l->l_mutex) held,
    249  1.32      yamt 		 * we need to play a try lock dance to avoid deadlock.
    250  1.32      yamt 		 */
    251  1.42  riastrad 		dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);
    252  1.28      yamt 		if (l == owner || (dolock && !lwp_trylock(owner))) {
    253   1.4      yamt 			/*
    254  1.32      yamt 			 * The owner was changed behind us or trylock failed.
    255  1.32      yamt 			 * Restart from curlwp.
    256  1.32      yamt 			 *
    257  1.25    bouyer 			 * Note that there may be a livelock here:
    258  1.43    andvar 			 * the owner may try grabbing cur's lock (which is the
    259  1.32      yamt 			 * tc lock) while we're trying to grab the owner's lock.
    260   1.4      yamt 			 */
    261   1.4      yamt 			lwp_unlock(l);
    262   1.4      yamt 			l = cur;
    263   1.4      yamt 			lwp_lock(l);
    264   1.4      yamt 			prio = lwp_eprio(l);
    265   1.4      yamt 			continue;
    266   1.4      yamt 		}
    267  1.32      yamt 		/*
    268  1.32      yamt 		 * If the owner's priority is already higher than ours,
    269  1.32      yamt 		 * there's nothing to do anymore.
    270  1.32      yamt 		 */
    271  1.11        ad 		if (prio <= lwp_eprio(owner)) {
    272   1.4      yamt 			if (dolock)
    273   1.4      yamt 				lwp_unlock(owner);
    274   1.4      yamt 			break;
    275   1.4      yamt 		}
    276  1.32      yamt 		/*
    277  1.32      yamt 		 * Lend our priority to the 'owner' LWP.
    278  1.32      yamt 		 *
    279  1.32      yamt 		 * Update the lender info for turnstile_unlendpri().
    280  1.32      yamt 		 */
    281   1.4      yamt 		ts = l->l_ts;
    282   1.4      yamt 		KASSERT(ts->ts_inheritor == owner || ts->ts_inheritor == NULL);
    283   1.4      yamt 		if (ts->ts_inheritor == NULL) {
    284   1.4      yamt 			ts->ts_inheritor = owner;
    285   1.4      yamt 			ts->ts_eprio = prio;
    286   1.4      yamt 			SLIST_INSERT_HEAD(&owner->l_pi_lenders, ts, ts_pichain);
    287   1.4      yamt 			lwp_lendpri(owner, prio);
    288  1.11        ad 		} else if (prio > ts->ts_eprio) {
    289   1.4      yamt 			ts->ts_eprio = prio;
    290   1.4      yamt 			lwp_lendpri(owner, prio);
    291   1.4      yamt 		}
    292   1.4      yamt 		if (dolock)
    293   1.4      yamt 			lwp_unlock(l);
    294  1.32      yamt 		LOCKDEBUG_BARRIER(owner->l_mutex, 1);
    295   1.4      yamt 		l = owner;
    296   1.4      yamt 	}
    297   1.4      yamt 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    298  1.42  riastrad 	if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) {
    299   1.4      yamt 		lwp_unlock(l);
    300   1.4      yamt 		lwp_lock(cur);
    301   1.4      yamt 	}
    302   1.4      yamt 	LOCKDEBUG_BARRIER(cur->l_mutex, 1);
    303  1.31      yamt }
    304  1.31      yamt 
    305  1.31      yamt /*
    306  1.31      yamt  * turnstile_unlendpri: undo turnstile_lendpri
    307  1.31      yamt  */
    308  1.31      yamt 
    309  1.31      yamt static void
    310  1.31      yamt turnstile_unlendpri(turnstile_t *ts)
    311  1.31      yamt {
    312  1.31      yamt 	lwp_t * const l = curlwp;
    313  1.31      yamt 	turnstile_t *iter;
    314  1.31      yamt 	turnstile_t *next;
    315  1.31      yamt 	turnstile_t *prev = NULL;
    316  1.31      yamt 	pri_t prio;
    317  1.31      yamt 	bool dolock;
    318  1.31      yamt 
    319  1.31      yamt 	KASSERT(ts->ts_inheritor != NULL);
    320  1.31      yamt 	ts->ts_inheritor = NULL;
    321  1.42  riastrad 	dolock = (atomic_load_relaxed(&l->l_mutex) ==
    322  1.42  riastrad 	    l->l_cpu->ci_schedstate.spc_lwplock);
    323  1.31      yamt 	if (dolock) {
    324  1.31      yamt 		lwp_lock(l);
    325  1.31      yamt 	}
    326  1.31      yamt 
    327  1.31      yamt 	/*
    328  1.31      yamt 	 * The following loop does two things:
    329  1.31      yamt 	 *
    330  1.31      yamt 	 * - removes ts from the lender list;
    331  1.31      yamt 	 *
    332  1.31      yamt 	 * - finds the highest priority among the remaining entries.
    333  1.31      yamt 	 */
    334  1.31      yamt 
    335  1.31      yamt 	prio = -1;
    336  1.31      yamt 	KASSERT(!SLIST_EMPTY(&l->l_pi_lenders));
    337  1.31      yamt 	for (iter = SLIST_FIRST(&l->l_pi_lenders);
    338  1.31      yamt 	    iter != NULL; iter = next) {
    339  1.31      yamt 		KASSERT(lwp_eprio(l) >= ts->ts_eprio);
    340  1.31      yamt 		next = SLIST_NEXT(iter, ts_pichain);
    341  1.31      yamt 		if (iter == ts) {
    342  1.31      yamt 			if (prev == NULL) {
    343  1.31      yamt 				SLIST_REMOVE_HEAD(&l->l_pi_lenders,
    344  1.31      yamt 				    ts_pichain);
    345  1.31      yamt 			} else {
    346  1.31      yamt 				SLIST_REMOVE_AFTER(prev, ts_pichain);
    347  1.31      yamt 			}
    348  1.31      yamt 		} else if (prio < iter->ts_eprio) {
    349  1.31      yamt 			prio = iter->ts_eprio;
    350  1.31      yamt 		}
    351  1.31      yamt 		prev = iter;
    352  1.31      yamt 	}
    353  1.31      yamt 
    354  1.31      yamt 	lwp_lendpri(l, prio);
    355   1.4      yamt 
    356  1.31      yamt 	if (dolock) {
    357  1.31      yamt 		lwp_unlock(l);
    358  1.31      yamt 	}
    359  1.31      yamt }
    360  1.31      yamt 
    361  1.31      yamt /*
    362  1.31      yamt  * turnstile_block:
    363  1.31      yamt  *
    364  1.31      yamt  *	Enter an object into the turnstile chain and prepare the current
    365  1.31      yamt  *	LWP for sleep.
    366  1.31      yamt  */
    367  1.31      yamt void
    368  1.31      yamt turnstile_block(turnstile_t *ts, int q, wchan_t obj, syncobj_t *sobj)
    369  1.31      yamt {
    370  1.31      yamt 	lwp_t * const l = curlwp; /* cached curlwp */
    371  1.31      yamt 	turnstile_t *ots;
    372  1.31      yamt 	tschain_t *tc;
    373  1.33        ad 	kmutex_t *lock;
    374  1.31      yamt 	sleepq_t *sq;
    375  1.33        ad 	u_int hash;
    376  1.51        ad 	int nlocks;
    377  1.31      yamt 
    378  1.33        ad 	hash = TS_HASH(obj);
    379  1.33        ad 	tc = &turnstile_chains[hash];
    380  1.35        ad 	lock = &turnstile_locks[hash].lock;
    381  1.31      yamt 
    382  1.31      yamt 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
    383  1.33        ad 	KASSERT(mutex_owned(lock));
    384  1.46  riastrad 	KASSERT(l != NULL);
    385  1.46  riastrad 	KASSERT(l->l_ts != NULL);
    386  1.31      yamt 
    387  1.31      yamt 	if (ts == NULL) {
    388  1.31      yamt 		/*
    389  1.31      yamt 		 * We are the first thread to wait for this object;
    390  1.31      yamt 		 * lend our turnstile to it.
    391  1.31      yamt 		 */
    392  1.31      yamt 		ts = l->l_ts;
    393  1.31      yamt 		KASSERT(TS_ALL_WAITERS(ts) == 0);
    394  1.46  riastrad 		KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_READER_Q]));
    395  1.46  riastrad 		KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_WRITER_Q]));
    396  1.31      yamt 		ts->ts_obj = obj;
    397  1.31      yamt 		ts->ts_inheritor = NULL;
    398  1.33        ad 		LIST_INSERT_HEAD(tc, ts, ts_chain);
    399  1.31      yamt 	} else {
    400  1.31      yamt 		/*
    401  1.31      yamt 		 * Object already has a turnstile.  Put our turnstile
    402  1.31      yamt 		 * onto the free list, and reference the existing
    403  1.31      yamt 		 * turnstile instead.
    404  1.31      yamt 		 */
    405  1.31      yamt 		ots = l->l_ts;
    406  1.31      yamt 		KASSERT(ots->ts_free == NULL);
    407  1.31      yamt 		ots->ts_free = ts->ts_free;
    408  1.31      yamt 		ts->ts_free = ots;
    409  1.31      yamt 		l->l_ts = ts;
    410  1.31      yamt 
    411  1.31      yamt 		KASSERT(ts->ts_obj == obj);
    412  1.31      yamt 		KASSERT(TS_ALL_WAITERS(ts) != 0);
    413  1.37        ad 		KASSERT(!LIST_EMPTY(&ts->ts_sleepq[TS_READER_Q]) ||
    414  1.37        ad 			!LIST_EMPTY(&ts->ts_sleepq[TS_WRITER_Q]));
    415  1.31      yamt 	}
    416  1.31      yamt 
    417  1.31      yamt 	sq = &ts->ts_sleepq[q];
    418  1.31      yamt 	ts->ts_waiters[q]++;
    419  1.51        ad 	nlocks = sleepq_enter(sq, l, lock);
    420  1.33        ad 	LOCKDEBUG_BARRIER(lock, 1);
    421  1.52        ad 	sleepq_enqueue(sq, obj, sobj->sobj_name, sobj, false);
    422  1.31      yamt 
    423  1.31      yamt 	/*
    424  1.31      yamt 	 * Disable preemption across this entire block, as we may drop
    425  1.31      yamt 	 * scheduler locks (allowing preemption), and would prefer not
    426  1.31      yamt 	 * to be interrupted while in a state of flux.
    427  1.31      yamt 	 */
    428  1.31      yamt 	KPREEMPT_DISABLE(l);
    429  1.33        ad 	KASSERT(lock == l->l_mutex);
    430  1.31      yamt 	turnstile_lendpri(l);
    431  1.51        ad 	sleepq_block(0, false, sobj, nlocks);
    432  1.31      yamt 	KPREEMPT_ENABLE(l);
    433   1.2        ad }
    434   1.2        ad 
    435   1.2        ad /*
    436   1.2        ad  * turnstile_wakeup:
    437   1.2        ad  *
    438   1.2        ad  *	Wake up the specified number of threads that are blocked
    439   1.2        ad  *	in a turnstile.
    440   1.2        ad  */
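
                        /*
                         * Illustrative calls (placeholder arguments), assuming the chain lock
                         * is held from a prior turnstile_lookup() and that the named queue
                         * has at least that many waiters:
                         *
                         *	turnstile_wakeup(ts, TS_WRITER_Q, 1, NULL);
                         *		wakes a single writer
                         *	turnstile_wakeup(ts, TS_READER_Q, TS_WAITERS(ts, TS_READER_Q), NULL);
                         *		wakes all waiting readers
                         *
                         * The chain lock is released before returning.
                         */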
    441   1.2        ad void
    442  1.10        ad turnstile_wakeup(turnstile_t *ts, int q, int count, lwp_t *nl)
    443   1.2        ad {
    444   1.2        ad 	sleepq_t *sq;
    445  1.33        ad 	kmutex_t *lock;
    446  1.33        ad 	u_int hash;
    447  1.10        ad 	lwp_t *l;
    448   1.2        ad 
    449  1.33        ad 	hash = TS_HASH(ts->ts_obj);
    450  1.35        ad 	lock = &turnstile_locks[hash].lock;
    451   1.2        ad 	sq = &ts->ts_sleepq[q];
    452   1.2        ad 
    453   1.2        ad 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
    454  1.46  riastrad 	KASSERT(count > 0);
    455  1.46  riastrad 	KASSERT(count <= TS_WAITERS(ts, q));
    456  1.33        ad 	KASSERT(mutex_owned(lock));
    457   1.4      yamt 	KASSERT(ts->ts_inheritor == curlwp || ts->ts_inheritor == NULL);
    458   1.4      yamt 
    459   1.4      yamt 	/*
    460   1.4      yamt 	 * restore inherited priority if necessary.
    461   1.4      yamt 	 */
    462   1.4      yamt 
    463   1.4      yamt 	if (ts->ts_inheritor != NULL) {
    464  1.31      yamt 		turnstile_unlendpri(ts);
    465   1.4      yamt 	}
    466   1.2        ad 
    467   1.2        ad 	if (nl != NULL) {
    468   1.2        ad #if defined(DEBUG) || defined(LOCKDEBUG)
    469  1.37        ad 		LIST_FOREACH(l, sq, l_sleepchain) {
    470   1.2        ad 			if (l == nl)
    471   1.2        ad 				break;
    472   1.2        ad 		}
    473   1.2        ad 		if (l == NULL)
    474   1.2        ad 			panic("turnstile_wakeup: nl not on sleepq");
    475   1.2        ad #endif
    476  1.21        ad 		turnstile_remove(ts, nl, q);
    477   1.2        ad 	} else {
    478   1.2        ad 		while (count-- > 0) {
    479  1.37        ad 			l = LIST_FIRST(sq);
    480   1.2        ad 			KASSERT(l != NULL);
    481  1.21        ad 			turnstile_remove(ts, l, q);
    482   1.2        ad 		}
    483   1.2        ad 	}
    484  1.33        ad 	mutex_spin_exit(lock);
    485   1.2        ad }
    486   1.2        ad 
    487   1.2        ad /*
    488   1.2        ad  * turnstile_unsleep:
    489   1.2        ad  *
    490   1.2        ad  *	Remove an LWP from the turnstile.  This is called when the LWP has
    491   1.2        ad  *	not been awoken normally but instead interrupted: for example, if it
    492   1.2        ad  *	has received a signal.  It's not a valid action for turnstiles,
    493   1.2        ad  *	since LWPs blocking on a turnstile are not interruptible.
    494   1.2        ad  */
    495  1.26     rmind void
    496  1.16        ad turnstile_unsleep(lwp_t *l, bool cleanup)
    497   1.2        ad {
    498   1.2        ad 
    499   1.2        ad 	lwp_unlock(l);
    500   1.2        ad 	panic("turnstile_unsleep");
    501   1.2        ad }
    502   1.2        ad 
    503   1.2        ad /*
    504   1.2        ad  * turnstile_changepri:
    505   1.2        ad  *
    506   1.4      yamt  *	Adjust the priority of an LWP residing on a turnstile.
    507   1.2        ad  */
    508   1.2        ad void
    509  1.10        ad turnstile_changepri(lwp_t *l, pri_t pri)
    510   1.2        ad {
    511   1.2        ad 
    512   1.4      yamt 	/* XXX priority inheritance */
    513   1.4      yamt 	sleepq_changepri(l, pri);
    514   1.2        ad }
    515   1.2        ad 
    516   1.2        ad #if defined(LOCKDEBUG)
    517   1.2        ad /*
    518   1.2        ad  * turnstile_print:
    519   1.2        ad  *
    520   1.2        ad  *	Given the address of a lock object, print the contents of a
    521   1.2        ad  *	turnstile.
    522   1.2        ad  */
    523   1.2        ad void
    524   1.2        ad turnstile_print(volatile void *obj, void (*pr)(const char *, ...))
    525   1.2        ad {
    526   1.2        ad 	turnstile_t *ts;
    527   1.2        ad 	tschain_t *tc;
    528   1.2        ad 	sleepq_t *rsq, *wsq;
    529  1.33        ad 	u_int hash;
    530  1.10        ad 	lwp_t *l;
    531   1.2        ad 
    532  1.33        ad 	hash = TS_HASH(obj);
    533  1.33        ad 	tc = &turnstile_chains[hash];
    534   1.2        ad 
    535  1.33        ad 	LIST_FOREACH(ts, tc, ts_chain)
    536   1.2        ad 		if (ts->ts_obj == obj)
    537   1.2        ad 			break;
    538   1.2        ad 
    539   1.2        ad 	if (ts == NULL) {
    540  1.36        ad 		(*pr)("Turnstile: no active turnstile for this lock.\n");
    541   1.2        ad 		return;
    542   1.2        ad 	}
    543   1.2        ad 
    544   1.2        ad 	rsq = &ts->ts_sleepq[TS_READER_Q];
    545   1.2        ad 	wsq = &ts->ts_sleepq[TS_WRITER_Q];
    546   1.2        ad 
    547  1.36        ad 	(*pr)("Turnstile:\n");
    548  1.21        ad 	(*pr)("=> %d waiting readers:", TS_WAITERS(ts, TS_READER_Q));
    549  1.38        ad 	LIST_FOREACH(l, rsq, l_sleepchain) {
    550   1.2        ad 		(*pr)(" %p", l);
    551   1.2        ad 	}
    552   1.2        ad 	(*pr)("\n");
    553   1.2        ad 
    554  1.21        ad 	(*pr)("=> %d waiting writers:", TS_WAITERS(ts, TS_WRITER_Q));
    555  1.38        ad 	LIST_FOREACH(l, wsq, l_sleepchain) {
    556   1.2        ad 		(*pr)(" %p", l);
    557   1.2        ad 	}
    558   1.2        ad 	(*pr)("\n");
    559   1.2        ad }
    560   1.2        ad #endif	/* LOCKDEBUG */
    561