/*	$NetBSD: kern_synch.c,v 1.174 2007/02/09 21:55:31 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.174 2007/02/09 21:55:31 ad Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}
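
/*
 * Illustrative arithmetic: with the traditional hz == 100, the comment
 * above gives rrticks == hz / 10 == 10, i.e. roundrobin() is invoked
 * every 10 hardclock ticks, or once per 100ms.
 */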

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
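
/*
 * Worked example of the limits above (illustrative; assumes the usual
 * RUNQUE_NQS == 32 and PRIO_MAX == 20 from <sys/resource.h>):
 *
 *	PPQ        = 128 / 32 = 4
 *	ESTCPU_MAX = (2 * 20 - 4) << 11 = 36 << 11 = 73728
 *
 * so once shifted down by ESTCPU_SHIFT in resetpriority(), p_estcpu
 * contributes at most 36 priority levels.
 */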

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
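
/*
 * A quick sanity check of the power table above (illustrative): for
 * loadav == 2,
 *	b = 2 * 2 = 4, decay = b / (b + 1) = 0.8
 * and solving 0.8 ** power = 0.1 exactly:
 *	power = ln(.1) / ln(0.8) =~ 10.32
 * which matches the table entry.
 */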

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
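
/*
 * Fixed-point example (illustrative; assumes the usual FSHIFT == 11,
 * so FSCALE == 2048): with a load average of 1.0,
 *	loadfac = 2 * FSCALE = 4096
 *	decay_cpu(loadfac, estcpu) = estcpu * 4096 / (4096 + 2048)
 *	                           = estcpu * 2 / 3
 * i.e. one second of decay scales estcpu by b/(b+1) == 2/3, as derived
 * in the comment block above.
 */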

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
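
/*
 * Illustrative check of the 60-second claim above: schedcpu() scales
 * p_pctcpu by ccpu/FSCALE =~ 0.95122 once per second, and
 *	0.95122 ** 60 = exp(-60/20) =~ 0.0498
 * so roughly 95% of p_pctcpu has decayed away after 60 iterations.
 */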

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100)?
			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			    100 * (((fixpt_t) p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be relocked before returning to the caller unless the PNORELOCK flag is
 * specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
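
/*
 * Typical usage sketch for the interlock pattern (illustrative only;
 * "xc_lock" and "xc_done" are made-up names):
 *
 *	simple_lock(&xc_lock);
 *	while (!xc_done)
 *		(void)ltsleep(&xc_done, PWAIT, "xcwait", 0, &xc_lock);
 *	simple_unlock(&xc_lock);
 *
 * Because the interlock is only dropped once the LWP is on the sleep
 * queue, a wakeup(&xc_done) issued while holding xc_lock cannot be lost.
 */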

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
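
/*
 * Usage sketch (illustrative): sleep for a tenth of a second without
 * expecting a wakeup, not interruptible by signals, no lock to drop:
 *
 *	(void)kpause("pause", FALSE, hz / 10, NULL);
 */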

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per-CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_spin_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
	splx(oldspl);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}
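
/*
 * Worked example (illustrative; assumes PUSER == 50 and MAXPRI == 127,
 * per the sched_kpri() comment below): with p_estcpu == (20 << ESTCPU_SHIFT)
 * and p_nice == NZERO + 4,
 *	newpriority = 50 + 20 + 2 * 4 = 78
 * which is below MAXPRI, so no clamping occurs and lwp_changepri() moves
 * the LWP to priority 78.
 */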

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will forget 90% of the CPU time a process
 * used within 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}
/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to the
			 * user / kernel boundary, so that they will
			 * release any locks that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}
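
/*
 * Worked example (illustrative): a child forks with inherited estcpu E
 * at schedcpu tick T.  If the parent waits at tick T + n, the inherited
 * share has notionally decayed to
 *	estcpu = decay_cpu_batch(loadfac, E, n)
 * and the parent is charged only for what the child accumulated beyond
 * that, child->p_estcpu - estcpu, clamped by ESTCPULIM().
 */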

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return kpri_tab[l->l_usrpri];
}
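
/*
 * Example lookups in the table above (illustrative): kernel priorities
 * pass through unchanged, e.g. kpri_tab[20] == 20, while user priorities
 * are compressed into the kernel range, e.g. kpri_tab[50] == 8 and
 * kpri_tab[127] == 49.
 */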

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;

	if (l->l_priority < PUSER)
		return;
	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue; other processes are on a queue related to p->p_priority,
 * divided by PPQ (4) to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
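
/*
 * Mapping example (illustrative): with PPQ == 4, priorities 0-3 share
 * queue 0 and priorities 124-127 share queue 31; that is,
 *	whichq = l->l_priority / PPQ
 * and the corresponding ready bit in sched_whichqs is RQMASK(whichq).
 */
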
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */