/*	$NetBSD: kern_turnstile.c,v 1.56 2025/06/27 21:36:24 andvar Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Turnstiles are described in detail in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 *
 * Turnstiles are kept in a hash table.  There are likely to be many more
 * synchronisation objects than there are threads.  Since a thread can block
 * on only one lock at a time, we only need one turnstile per thread, and
 * so they are allocated at thread creation time.
 *
 * When a thread decides it needs to block on a lock, it looks up the
 * active turnstile for that lock.  If no active turnstile exists, then
 * the thread lends its turnstile to the lock.  If there is already an
 * active turnstile for the lock, the thread places its turnstile on a
 * list of free turnstiles, and references the active one instead.
 *
 * The act of looking up the turnstile acquires an interlock on the sleep
 * queue.  If a thread decides it doesn't need to block after all, then this
 * interlock must be released by explicitly aborting the turnstile
 * operation.
 *
 * When a thread is awakened, it needs to get its turnstile back.  If there
 * are still other threads waiting in the active turnstile, the thread
 * grabs a free turnstile off the free list.  Otherwise, it can take back
 * the active turnstile from the lock (thus deactivating the turnstile).
 *
 * Turnstiles are where we do priority inheritance.
 */
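
/*
 * A minimal usage sketch, not code from this file: the lock object
 * "foo_lock", the syncobj "foo_syncobj" and the helper
 * "foo_still_needs_to_block()" are made-up names for illustration.
 * A lock primitive that wants to sleep typically looks up the turnstile
 * for its wait channel, then either blocks on it or aborts the operation,
 * and later wakes waiters through it from its release path:
 *
 *	turnstile_t *ts;
 *
 *	// acquire path: the lookup also takes the chain (interlock) lock
 *	ts = turnstile_lookup(foo_lock);
 *	if (foo_still_needs_to_block(foo_lock))
 *		turnstile_block(ts, TS_WRITER_Q, foo_lock, &foo_syncobj);
 *	else
 *		turnstile_exit(foo_lock);	// abort: drop the interlock
 *
 *	// release path: wake the writers, or just drop the interlock
 *	ts = turnstile_lookup(foo_lock);
 *	if (ts != NULL)
 *		turnstile_wakeup(ts, TS_WRITER_Q,
 *		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
 *	else
 *		turnstile_exit(foo_lock);
 */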

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.56 2025/06/27 21:36:24 andvar Exp $");

#include <sys/param.h>

#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/sleeptab.h>
#include <sys/syncobj.h>
#include <sys/systm.h>

/*
 * Shift of 6 aligns to typical cache line size of 64 bytes;  there's no
 * point having two turnstile locks to back two lock objects that share one
 * cache line.
 */
#define	TS_HASH_SIZE	128
#define	TS_HASH_MASK	(TS_HASH_SIZE - 1)
#define	TS_HASH(obj)	(((uintptr_t)(obj) >> 6) & TS_HASH_MASK)
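
/*
 * As a worked example of the arithmetic above (illustrative addresses, not
 * anything allocated by this code): lock objects at 0x1040 and 0x1078 lie
 * in the same 64-byte cache line, and TS_HASH() maps both to bucket
 * ((0x1040 >> 6) & TS_HASH_MASK) == ((0x1078 >> 6) & TS_HASH_MASK) == 65,
 * so they share one chain and one turnstile lock.
 */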

static tschain_t	turnstile_chains[TS_HASH_SIZE] __cacheline_aligned;

static union {
	kmutex_t	lock;
	uint8_t		pad[COHERENCY_UNIT];
} turnstile_locks[TS_HASH_SIZE] __cacheline_aligned;

/*
 * turnstile_init:
 *
 *	Initialize the turnstile mechanism.
 */
void
turnstile_init(void)
{
	int i;

	for (i = 0; i < TS_HASH_SIZE; i++) {
		LIST_INIT(&turnstile_chains[i]);
		mutex_init(&turnstile_locks[i].lock, MUTEX_DEFAULT, IPL_SCHED);
	}

	turnstile_ctor(&turnstile0);
}

/*
 * turnstile_ctor:
 *
 *	Constructor for turnstiles.
 */
void
turnstile_ctor(turnstile_t *ts)
{

	memset(ts, 0, sizeof(*ts));
	sleepq_init(&ts->ts_sleepq[TS_READER_Q]);
	sleepq_init(&ts->ts_sleepq[TS_WRITER_Q]);
}

/*
 * turnstile_remove:
 *
 *	Remove an LWP from a turnstile sleep queue and wake it.
 */
static inline void
turnstile_remove(turnstile_t *ts, lwp_t *l, int q)
{
	turnstile_t *nts;

	KASSERT(l->l_ts == ts);

	/*
	 * This LWP is no longer using the active turnstile.
	 * Find an inactive one on the free list to give to it.
	 */
	if ((nts = ts->ts_free) != NULL) {
		KASSERT(TS_ALL_WAITERS(ts) > 1);
		l->l_ts = nts;
		ts->ts_free = nts->ts_free;
		nts->ts_free = NULL;
	} else {
		/*
		 * If the free list is empty, this is the last
		 * waiter.
		 */
		KASSERT(TS_ALL_WAITERS(ts) == 1);
		LIST_REMOVE(ts, ts_chain);
	}

	ts->ts_waiters[q]--;
	sleepq_remove(&ts->ts_sleepq[q], l, true);
}

/*
 * turnstile_lookup:
 *
 *	Look up the turnstile for the specified lock.  This acquires and
 *	holds the turnstile chain lock (sleep queue interlock).
 */
turnstile_t *
turnstile_lookup(wchan_t obj)
{
	turnstile_t *ts;
	tschain_t *tc;
	u_int hash;

	hash = TS_HASH(obj);
	tc = &turnstile_chains[hash];
	mutex_spin_enter(&turnstile_locks[hash].lock);

	LIST_FOREACH(ts, tc, ts_chain)
		if (ts->ts_obj == obj)
			return (ts);

	/*
	 * No turnstile yet for this lock.  No problem, turnstile_block()
	 * handles this by fetching the turnstile from the blocking thread.
	 */
	return (NULL);
}

/*
 * turnstile_exit:
 *
 *	Abort a turnstile operation.
 */
void
turnstile_exit(wchan_t obj)
{

	mutex_spin_exit(&turnstile_locks[TS_HASH(obj)].lock);
}

/*
 * turnstile_lendpri:
 *
 *	Lend our priority to LWPs on the blocking chain.
 *
 *	If the current owner of the lock (l->l_wchan, set by sleepq_enqueue)
 *	has a priority lower than ours (lwp_eprio(l)), lend our priority to
 *	it to avoid priority inversions.
 */

static void
turnstile_lendpri(lwp_t *cur)
{
	lwp_t * l = cur;
	pri_t prio;

	/*
	 * NOTE: if you get a panic in this code block, it is likely that
	 * a lock has been destroyed or corrupted while still in use.  Try
	 * compiling a kernel with LOCKDEBUG to pinpoint the problem.
	 */

	LOCKDEBUG_BARRIER(l->l_mutex, 1);
	KASSERT(l == curlwp);
	prio = lwp_eprio(l);
	for (;;) {
		lwp_t *owner;
		turnstile_t *ts;
		bool dolock;

		if (l->l_wchan == NULL)
			break;

		/*
		 * Ask the syncobj for the owner of the lock.
		 */
		owner = (*l->l_syncobj->sobj_owner)(l->l_wchan);
		if (owner == NULL)
			break;

		/*
		 * The owner may have changed as we have dropped the tc lock.
		 */
		if (cur == owner) {
			/*
			 * We own the lock: stop here, sleepq_block()
			 * should wake up immediately.
			 */
			break;
		}
		/*
		 * Acquire owner->l_mutex if we don't have it yet.
		 * Because we already have another LWP lock (l->l_mutex) held,
		 * we need to play a try lock dance to avoid deadlock.
		 */
		dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);
		if (l == owner || (dolock && !lwp_trylock(owner))) {
			/*
			 * The owner was changed behind us or trylock failed.
			 * Restart from curlwp.
			 *
			 * Note that there may be a livelock here:
			 * the owner may try grabbing cur's lock (which is the
			 * tc lock) while we're trying to grab the owner's lock.
			 */
			lwp_unlock(l);
			l = cur;
			lwp_lock(l);
			prio = lwp_eprio(l);
			continue;
		}
		/*
		 * If the owner's priority is already higher than ours,
		 * there's nothing to do anymore.
		 */
		if (prio <= lwp_eprio(owner)) {
			if (dolock)
				lwp_unlock(owner);
			break;
		}
		/*
		 * Lend our priority to the 'owner' LWP.
		 *
		 * Update lenders info for turnstile_unlendpri.
		 */
		ts = l->l_ts;
		KASSERT(ts->ts_inheritor == owner || ts->ts_inheritor == NULL);
		if (ts->ts_inheritor == NULL) {
			ts->ts_inheritor = owner;
			ts->ts_eprio = prio;
			SLIST_INSERT_HEAD(&owner->l_pi_lenders, ts, ts_pichain);
			lwp_lendpri(owner, prio);
		} else if (prio > ts->ts_eprio) {
			ts->ts_eprio = prio;
			lwp_lendpri(owner, prio);
		}
		if (dolock)
			lwp_unlock(l);
		LOCKDEBUG_BARRIER(owner->l_mutex, 1);
		l = owner;
	}
	LOCKDEBUG_BARRIER(l->l_mutex, 1);
	if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) {
		lwp_unlock(l);
		lwp_lock(cur);
	}
	LOCKDEBUG_BARRIER(cur->l_mutex, 1);
}

/*
 * turnstile_unlendpri: undo turnstile_lendpri
 */

static void
turnstile_unlendpri(turnstile_t *ts)
{
	lwp_t * const l = curlwp;
	turnstile_t *iter;
	turnstile_t *next;
	turnstile_t *prev = NULL;
	pri_t prio;
	bool dolock;

	KASSERT(ts->ts_inheritor != NULL);
	ts->ts_inheritor = NULL;
	dolock = (atomic_load_relaxed(&l->l_mutex) ==
	    l->l_cpu->ci_schedstate.spc_lwplock);
	if (dolock) {
		lwp_lock(l);
	}

	/*
	 * the following loop does two things.
	 *
	 * - remove ts from the list.
	 *
	 * - from the rest of the list, find the highest priority.
	 */

	prio = -1;
	KASSERT(!SLIST_EMPTY(&l->l_pi_lenders));
	for (iter = SLIST_FIRST(&l->l_pi_lenders);
	    iter != NULL; iter = next) {
		KASSERT(lwp_eprio(l) >= ts->ts_eprio);
		next = SLIST_NEXT(iter, ts_pichain);
		if (iter == ts) {
			if (prev == NULL) {
				SLIST_REMOVE_HEAD(&l->l_pi_lenders,
				    ts_pichain);
			} else {
				SLIST_REMOVE_AFTER(prev, ts_pichain);
			}
		} else if (prio < iter->ts_eprio) {
			prio = iter->ts_eprio;
		}
		prev = iter;
	}

	lwp_lendpri(l, prio);

	if (dolock) {
		lwp_unlock(l);
	}
}

/*
 * turnstile_block:
 *
 *	 Enter an object into the turnstile chain and prepare the current
 *	 LWP for sleep.
 */
void
turnstile_block(turnstile_t *ts, int q, wchan_t obj, syncobj_t *sobj)
{
	lwp_t * const l = curlwp; /* cached curlwp */
	turnstile_t *ots;
	tschain_t *tc;
	kmutex_t *lock;
	sleepq_t *sq;
	u_int hash;
	int nlocks;

	hash = TS_HASH(obj);
	tc = &turnstile_chains[hash];
	lock = &turnstile_locks[hash].lock;

	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
	KASSERT(mutex_owned(lock));
	KASSERT(l != NULL);
	KASSERT(l->l_ts != NULL);

	if (ts == NULL) {
		/*
		 * We are the first thread to wait for this object;
		 * lend our turnstile to it.
		 */
		ts = l->l_ts;
		KASSERT(TS_ALL_WAITERS(ts) == 0);
		KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_READER_Q]));
		KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_WRITER_Q]));
		ts->ts_obj = obj;
		ts->ts_inheritor = NULL;
		LIST_INSERT_HEAD(tc, ts, ts_chain);
	} else {
		/*
		 * Object already has a turnstile.  Put our turnstile
		 * onto the free list, and reference the existing
		 * turnstile instead.
		 */
		ots = l->l_ts;
		KASSERT(ots->ts_free == NULL);
		ots->ts_free = ts->ts_free;
		ts->ts_free = ots;
		l->l_ts = ts;

		KASSERT(ts->ts_obj == obj);
		KASSERT(TS_ALL_WAITERS(ts) != 0);
		KASSERT(!LIST_EMPTY(&ts->ts_sleepq[TS_READER_Q]) ||
			!LIST_EMPTY(&ts->ts_sleepq[TS_WRITER_Q]));
	}

	sq = &ts->ts_sleepq[q];
	ts->ts_waiters[q]++;
	nlocks = sleepq_enter(sq, l, lock);
	LOCKDEBUG_BARRIER(lock, 1);
	sleepq_enqueue(sq, obj, sobj->sobj_name, sobj, false);

	/*
	 * Disable preemption across this entire block, as we may drop
	 * scheduler locks (allowing preemption), and would prefer not
	 * to be interrupted while in a state of flux.
	 */
	KPREEMPT_DISABLE(l);
	KASSERT(lock == l->l_mutex);
	turnstile_lendpri(l);
	sleepq_block(0, false, sobj, nlocks);
	KPREEMPT_ENABLE(l);
}

/*
 * turnstile_wakeup:
 *
 *	Wake up the specified number of threads that are blocked
 *	in a turnstile.
 */
void
turnstile_wakeup(turnstile_t *ts, int q, int count, lwp_t *nl)
{
	sleepq_t *sq;
	kmutex_t *lock;
	u_int hash;
	lwp_t *l;

	hash = TS_HASH(ts->ts_obj);
	lock = &turnstile_locks[hash].lock;
	sq = &ts->ts_sleepq[q];

	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
	KASSERT(count > 0);
	KASSERT(count <= TS_WAITERS(ts, q));
	KASSERT(mutex_owned(lock));
	KASSERT(ts->ts_inheritor == curlwp || ts->ts_inheritor == NULL);

	/*
	 * restore inherited priority if necessary.
	 */

	if (ts->ts_inheritor != NULL) {
		turnstile_unlendpri(ts);
	}

	if (nl != NULL) {
#if defined(DEBUG) || defined(LOCKDEBUG)
		LIST_FOREACH(l, sq, l_sleepchain) {
			if (l == nl)
				break;
		}
		if (l == NULL)
			panic("turnstile_wakeup: nl not on sleepq");
#endif
		turnstile_remove(ts, nl, q);
	} else {
		while (count-- > 0) {
			l = LIST_FIRST(sq);
			KASSERT(l != NULL);
			turnstile_remove(ts, l, q);
		}
	}
	mutex_spin_exit(lock);
}

/*
 * turnstile_unsleep:
 *
 *	Remove an LWP from the turnstile.  This is called when the LWP has
 *	not been awoken normally but instead interrupted: for example, if it
 *	has received a signal.  It's not a valid action for turnstiles,
 *	since LWPs blocking on a turnstile are not interruptible.
 */
void
turnstile_unsleep(lwp_t *l, bool cleanup)
{

	lwp_unlock(l);
	panic("turnstile_unsleep");
}

/*
 * turnstile_changepri:
 *
 *	Adjust the priority of an LWP residing on a turnstile.
 */
void
turnstile_changepri(lwp_t *l, pri_t pri)
{

	/* XXX priority inheritance */
	sleepq_changepri(l, pri);
}

#if defined(LOCKDEBUG)
/*
 * turnstile_print:
 *
 *	Given the address of a lock object, print the contents of a
 *	turnstile.
 */
void
turnstile_print(volatile void *obj, void (*pr)(const char *, ...))
{
	turnstile_t *ts;
	tschain_t *tc;
	sleepq_t *rsq, *wsq;
	u_int hash;
	lwp_t *l;

	hash = TS_HASH(obj);
	tc = &turnstile_chains[hash];

	LIST_FOREACH(ts, tc, ts_chain)
		if (ts->ts_obj == obj)
			break;

	if (ts == NULL) {
		(*pr)("Turnstile: no active turnstile for this lock.\n");
		return;
	}

	rsq = &ts->ts_sleepq[TS_READER_Q];
	wsq = &ts->ts_sleepq[TS_WRITER_Q];

	(*pr)("Turnstile:\n");
	(*pr)("=> %d waiting readers:", TS_WAITERS(ts, TS_READER_Q));
	LIST_FOREACH(l, rsq, l_sleepchain) {
		(*pr)(" %p", l);
	}
	(*pr)("\n");

	(*pr)("=> %d waiting writers:", TS_WAITERS(ts, TS_WRITER_Q));
	LIST_FOREACH(l, wsq, l_sleepchain) {
		(*pr)(" %p", l);
	}
	(*pr)("\n");
}
#endif	/* LOCKDEBUG */
    563