/*	$NetBSD: kern_sleepq.c,v 1.86 2023/10/15 10:29:02 riastradh Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.86 2023/10/15 10:29:02 riastradh Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/systm.h>

/*
 * for sleepq_abort:
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (IPL_SAFEPRI) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
#ifndef	IPL_SAFEPRI
#define	IPL_SAFEPRI	0
#endif

static int	sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by mtsleep() and condition variables. */
sleeptab_t	sleeptab __cacheline_aligned;
sleepqlock_t	sleepq_locks[SLEEPTAB_HASH_SIZE] __cacheline_aligned;
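
/*
 * Illustrative sketch of how a wait channel maps to a queue/lock pair
 * in the table above: sleeptab_lookup() in <sys/sleepq.h> hashes the
 * wait channel address and returns the queue with its spin mutex held,
 * roughly:
 *
 *	kmutex_t *mp;
 *	sleepq_t *sq;
 *
 *	sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *	... enqueue an LWP, or scan for LWPs to wake ...
 *	mutex_spin_exit(mp);
 *
 * Unrelated wait channels can collide in the hash and share a queue,
 * which is why wakers must check l_wchan on each LWP they visit.
 */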

/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	static bool again;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		if (!again) {
			mutex_init(&sleepq_locks[i].lock, MUTEX_DEFAULT,
			    IPL_SCHED);
		}
		sleepq_init(&st->st_queue[i]);
	}
	again = true;
}

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq)
{

	LIST_INIT(sq);
}

/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.  Distinguish
 *	between deliberate wakeups (which carry valuable information) and
 *	"unsleep" (where an out-of-band action must be taken).
 *
 *	For wakeup, convert any interruptible wait into a non-interruptible
 *	one before waking the LWP.  Otherwise, if only one LWP is awoken it
 *	could fail to do something useful with the wakeup due to an error
 *	return, and the caller of e.g. cv_signal() may not expect this.
 */
void
sleepq_remove(sleepq_t *sq, lwp_t *l, bool wakeup)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_NULL) == 0) {
		KASSERT(sq != NULL);
		LIST_REMOVE(l, l_sleepchain);
	} else {
		KASSERT(sq == NULL);
	}

	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= wakeup ? ~(LW_SINTR|LW_CATCHINTR|LW_STIMO) : ~LW_SINTR;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * holds it in the stopped state set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/* Update the sleep time delta and call the scheduler's wake-up hook. */
	l->l_slpticksum += (getticks() - l->l_slpticks);
	sched_wakeup(l);

	/* Look for a CPU to wake up on. */
	l->l_cpu = sched_takecpu(l);
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * Set it running.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	sched_enqueue(l);
	sched_resched_lwp(l, true);
	/* LWP & SPC now unlocked, but we still hold the sleep queue lock. */
}

/*
 * sleepq_insert:
 *
 *	Insert an LWP into the sleep queue, optionally sorting by priority.
 */
static void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_NULL) != 0) {
		KASSERT(sq == NULL);
		return;
	}
	KASSERT(sq != NULL);

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		lwp_t *l2, *l_last = NULL;
		const pri_t pri = lwp_eprio(l);

		LIST_FOREACH(l2, sq, l_sleepchain) {
			l_last = l2;
			if (lwp_eprio(l2) < pri) {
				LIST_INSERT_BEFORE(l2, l, l_sleepchain);
				return;
			}
		}
		/*
		 * Ensure FIFO ordering if no waiters are of lower priority.
		 */
		if (l_last != NULL) {
			LIST_INSERT_AFTER(l_last, l, l_sleepchain);
			return;
		}
	}

	LIST_INSERT_HEAD(sq, l, l_sleepchain);
}
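
/*
 * Worked example: with SOBJ_SLEEPQ_SORTED set, LWPs arriving with
 * effective priorities 10, 30, 20 end up queued as 30 -> 20 -> 10,
 * i.e. descending by priority; equal priorities keep FIFO order, so
 * the head is always a highest-priority, longest-waiting LWP.
 */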

/*
 * sleepq_enter:
 *
 *	Prepare to block on a sleep queue, after which any interlock can be
 *	safely released.
 */
int
sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
{
	int nlocks;

	KASSERT((sq != NULL) == (mp != NULL));

	/*
	 * Acquire the per-LWP mutex and lend it our sleep queue lock.
	 * Once interlocked, we can release the kernel lock.
	 */
	lwp_lock(l);
	if (mp != NULL) {
		lwp_unlock_to(l, mp);
	}
	if (__predict_false((nlocks = l->l_blcnt) != 0)) {
		KERNEL_UNLOCK_ALL(NULL, NULL);
	}
	return nlocks;
}
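
/*
 * Typical blocking sequence (a hedged sketch, modelled on the condition
 * variable code in kern_condvar.c; "interlock", "wchan" and "timo"
 * stand in for whatever the caller uses, and sleep_syncobj is the
 * syncobj mtsleep() uses in kern_synch.c):
 *
 *	kmutex_t *mp;
 *	sleepq_t *sq;
 *	int nlocks, error;
 *
 *	sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *	nlocks = sleepq_enter(sq, curlwp, mp);
 *	sleepq_enqueue(sq, wchan, "example", &sleep_syncobj, true);
 *	mutex_exit(interlock);
 *	error = sleepq_block(timo, true, &sleep_syncobj, nlocks);
 *
 * sleepq_block() returns with the LWP and sleep queue unlocked, and
 * with the kernel lock re-acquired nlocks deep.
 */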

/*
 * sleepq_enqueue:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *	queue must already be locked, and any interlock (such as the kernel
 *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj,
    bool catch_p)
{
	lwp_t *l = curlwp;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_wchan == NULL);
	KASSERT(l->l_sleepq == NULL);
	KASSERT((l->l_flag & LW_SINTR) == 0);

	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_stat = LSSLEEP;
	if (catch_p)
		l->l_flag |= LW_SINTR;

	sleepq_insert(sq, l, sobj);

	/* Save the time at which the LWP went to sleep. */
	l->l_slpticks = getticks();
	sched_slept(l);
}

/*
 * sleepq_transfer:
 *
 *	Move an LWP from one sleep queue to another.  Both sleep queues
 *	must already be locked.
 *
 *	The LWP will be updated with the new sleepq, wchan, wmesg,
 *	sobj, and mutex.  The interruptible flag will also be updated.
 */
void
sleepq_transfer(lwp_t *l, sleepq_t *from_sq, sleepq_t *sq, wchan_t wchan,
    const char *wmesg, syncobj_t *sobj, kmutex_t *mp, bool catch_p)
{

	KASSERT(l->l_sleepq == from_sq);

	LIST_REMOVE(l, l_sleepchain);
	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;

	if (catch_p)
		l->l_flag |= LW_SINTR | LW_CATCHINTR;
	else
		l->l_flag &= ~(LW_SINTR | LW_CATCHINTR);

	/*
	 * This allows the transfer from one sleepq to another where
	 * it is known that they're both protected by the same lock.
	 */
	if (mp != NULL)
		lwp_setlock(l, mp);

	sleepq_insert(sq, l, sobj);
}

/*
 * sleepq_uncatch:
 *
 *	Mark the LWP as no longer sleeping interruptibly.
 */
void
sleepq_uncatch(lwp_t *l)
{

	l->l_flag &= ~(LW_SINTR | LW_CATCHINTR | LW_STIMO);
}

/*
 * sleepq_block:
 *
 *	After any intermediate step such as releasing an interlock, switch.
 *	sleepq_block() may return early under exceptional conditions, for
 *	example if the LWP's containing process is exiting.
 *
 *	timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
 */
int
sleepq_block(int timo, bool catch_p, syncobj_t *syncobj, int nlocks)
{
	const int mask = LW_CANCELLED|LW_WEXIT|LW_WCORE|LW_PENDSIG|LW_RESTART;
	int error = 0, sig, flag;
	struct proc *p;
	lwp_t *l = curlwp;
	bool early = false;

	ktrcsw(1, 0, syncobj);

	/*
	 * If sleeping interruptibly, check for pending signals, exits or
	 * core dump events.
	 *
	 * Note the usage of LW_CATCHINTR.  This expresses our intent
	 * to catch or not catch sleep interruptions, which might change
	 * while we are sleeping.  It is independent from LW_SINTR because
	 * we don't want to leave LW_SINTR set when the LWP is not asleep.
	 */
	flag = l->l_flag;
	if (catch_p) {
		if ((flag & mask) != 0) {
			if ((flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
				l->l_flag = flag & ~LW_CANCELLED;
				error = EINTR;
				early = true;
			} else if ((flag & LW_PENDSIG) != 0 &&
			    sigispending(l, 0))
				early = true;
		}
		l->l_flag = (flag | LW_CATCHINTR) & ~LW_RESTART;
	} else
		l->l_flag = flag & ~(LW_CATCHINTR | LW_RESTART);

	if (early) {
		/* lwp_unsleep() will release the lock */
		lwp_unsleep(l, true);
	} else {
		/*
		 * The LWP may have already been awoken if the caller
		 * dropped the sleep queue lock between sleepq_enqueue() and
		 * sleepq_block().  If that happens l_stat will be LSONPROC
		 * and mi_switch() will treat this as a preemption.  No need
		 * to do anything special here.
		 */
		if (timo) {
			l->l_flag &= ~LW_STIMO;
			callout_schedule(&l->l_timeout_ch, timo);
		}
		l->l_boostpri = l->l_syncobj->sobj_boostpri;
		spc_lock(l->l_cpu);
		mi_switch(l);

		/* The LWP and sleep queue are now unlocked. */
		if (timo) {
			/*
			 * Even if the callout appears to have fired, we
			 * need to stop it in order to synchronise with
			 * other CPUs.  It's important that we do this in
			 * this LWP's context, and not during wakeup, in
			 * order to keep the callout & its cache lines
			 * co-located on the CPU with the LWP.
			 */
			(void)callout_halt(&l->l_timeout_ch, NULL);
			error = (l->l_flag & LW_STIMO) ? EWOULDBLOCK : 0;
		}
	}

	/*
	 * LW_CATCHINTR is only modified in this function OR when we
	 * are asleep (with the sleepq locked).  We can therefore safely
	 * test it unlocked here as it is guaranteed to be stable by
	 * virtue of us running.
	 *
	 * We do not bother clearing it if set; that would require us
	 * to take the LWP lock, and it doesn't seem worth the hassle
	 * considering it is only meaningful here inside this function,
	 * and is set to reflect intent upon entry.
	 */
	flag = atomic_load_relaxed(&l->l_flag);
	if (__predict_false((flag & mask) != 0)) {
		if ((flag & LW_CATCHINTR) == 0 || error != 0)
			/* nothing */;
		else if ((flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
			error = EINTR;
		else if ((flag & LW_PENDSIG) != 0) {
			/*
			 * Acquiring p_lock may cause us to recurse
			 * through the sleep path and back into this
			 * routine, but it is safe because LWPs sleeping
			 * on locks are non-interruptible and we will
			 * not recurse again.
			 */
			p = l->l_proc;
			mutex_enter(p->p_lock);
			if (((sig = sigispending(l, 0)) != 0 &&
			    (sigprop[sig] & SA_STOP) == 0) ||
			    (sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(p->p_lock);
		} else if ((flag & LW_RESTART) != 0)
			error = ERESTART;
	}

	ktrcsw(0, 0, syncobj);
	if (__predict_false(nlocks != 0)) {
		KERNEL_LOCK(nlocks, NULL);
	}
	return error;
}
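
/*
 * Example (sketch): a caller wanting an interruptible sleep bounded at
 * roughly 100ms can convert from milliseconds to ticks with mstohz();
 * EWOULDBLOCK then means the timeout fired before any wakeup arrived:
 *
 *	error = sleepq_block(mstohz(100), true, sobj, nlocks);
 *	if (error == EWOULDBLOCK) {
 *		... timed out ...
 *	}
 */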

/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
void
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;

	KASSERT(mutex_owned(mp));

	for (l = LIST_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = LIST_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		sleepq_remove(sq, l, true);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);
}
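
/*
 * Example (sketch): passing expected == 1 gives a cv_signal()-style
 * single wakeup, while (u_int)-1 in effect wakes every LWP whose
 * l_wchan matches.  The sleep queue lock mp is released on return,
 * so each call stands alone:
 *
 *	sleepq_wake(sq, wchan, 1, mp);		wake at most one LWP
 *	sleepq_wake(sq, wchan, (u_int)-1, mp);	wake all matching LWPs
 */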

/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	release it if "unlock" is true.
 */
void
sleepq_unsleep(lwp_t *l, bool unlock)
{
	sleepq_t *sq = l->l_sleepq;
	kmutex_t *mp = l->l_mutex;

	KASSERT(lwp_locked(l, mp));
	KASSERT(l->l_wchan != NULL);

	sleepq_remove(sq, l, false);
	if (unlock) {
		mutex_spin_exit(mp);
	}
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	lwp_t *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL || l->l_syncobj == &callout_syncobj) {
		/*
		 * Somebody beat us to it, or the LWP is blocked in
		 * callout_halt() waiting for us to finish here.  In
		 * neither case should the LWP produce EWOULDBLOCK.
		 */
		lwp_unlock(l);
		return;
	}

	l->l_flag |= LW_STIMO;
	lwp_unsleep(l, true);
}

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
static int
sleepq_sigtoerror(lwp_t *l, int sig)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * If this sleep was canceled, don't let the syscall restart.
	 */
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	int s;

	s = splhigh();
	splx(IPL_SAFEPRI);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}
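
/*
 * Typical caller pattern (a sketch after the style of mtsleep()):
 *
 *	if (sleepq_dontsleep(l))
 *		return sleepq_abort(mtx, unlock);
 */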

/*
 * sleepq_reinsert:
 *
 *	Move the position of the lwp in the sleep queue after a possible
 *	change of the lwp's effective priority.
 */
static void
sleepq_reinsert(sleepq_t *sq, lwp_t *l)
{

	KASSERT(l->l_sleepq == sq);
	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
		return;
	}

	/*
	 * Don't let the sleep queue become empty, even briefly.
	 * cv_signal() and cv_broadcast() inspect it without the
	 * sleep queue lock held and need to see a non-empty queue
	 * head if there are waiters.
	 */
	if (LIST_FIRST(sq) == l && LIST_NEXT(l, l_sleepchain) == NULL) {
		return;
	}
	LIST_REMOVE(l, l_sleepchain);
	sleepq_insert(sq, l, l->l_syncobj);
}

/*
 * sleepq_changepri:
 *
 *	Adjust the priority of an LWP residing on a sleepq.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_priority = pri;
	sleepq_reinsert(sq, l);
}

/*
 * sleepq_lendpri:
 *
 *	Adjust the lent priority of an LWP residing on a sleepq.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_inheritedprio = pri;
	l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
	sleepq_reinsert(sq, l);
}
    640