Home | History | Annotate | Line # | Download | only in kern
kern_synch.c revision 1.179
      1 /*	$NetBSD: kern_synch.c,v 1.179 2007/02/18 16:03:06 dsl Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*-
     41  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     42  *	The Regents of the University of California.  All rights reserved.
     43  * (c) UNIX System Laboratories, Inc.
     44  * All or some portions of this file are derived from material licensed
     45  * to the University of California by American Telephone and Telegraph
     46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     47  * the permission of UNIX System Laboratories, Inc.
     48  *
     49  * Redistribution and use in source and binary forms, with or without
     50  * modification, are permitted provided that the following conditions
     51  * are met:
     52  * 1. Redistributions of source code must retain the above copyright
     53  *    notice, this list of conditions and the following disclaimer.
     54  * 2. Redistributions in binary form must reproduce the above copyright
     55  *    notice, this list of conditions and the following disclaimer in the
     56  *    documentation and/or other materials provided with the distribution.
     57  * 3. Neither the name of the University nor the names of its contributors
     58  *    may be used to endorse or promote products derived from this software
     59  *    without specific prior written permission.
     60  *
     61  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     62  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     63  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     64  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     65  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     66  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     67  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     68  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     69  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     70  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     71  * SUCH DAMAGE.
     72  *
     73  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
     74  */
     75 
     76 #include <sys/cdefs.h>
     77 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.179 2007/02/18 16:03:06 dsl Exp $");
     78 
     79 #include "opt_ddb.h"
     80 #include "opt_kstack.h"
     81 #include "opt_lockdebug.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #define	__MUTEX_PRIVATE
     86 
     87 #include <sys/param.h>
     88 #include <sys/systm.h>
     89 #include <sys/callout.h>
     90 #include <sys/proc.h>
     91 #include <sys/kernel.h>
     92 #include <sys/buf.h>
     93 #if defined(PERFCTRS)
     94 #include <sys/pmc.h>
     95 #endif
     96 #include <sys/signalvar.h>
     97 #include <sys/resourcevar.h>
     98 #include <sys/sched.h>
     99 #include <sys/syscall_stats.h>
    100 #include <sys/kauth.h>
    101 #include <sys/sleepq.h>
    102 #include <sys/lockdebug.h>
    103 
    104 #include <uvm/uvm_extern.h>
    105 
    106 #include <machine/cpu.h>
    107 
    108 int	lbolt;			/* once a second sleep address */
    109 int	rrticks;		/* number of hardclock ticks per roundrobin() */
    110 
    111 /*
    112  * The global scheduler state.
    113  */
    114 kmutex_t	sched_mutex;		/* global sched state mutex */
    115 struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
    116 volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */
    117 
    118 void	schedcpu(void *);
    119 void	updatepri(struct lwp *);
    120 
    121 void	sched_unsleep(struct lwp *);
    122 void	sched_changepri(struct lwp *, int);
    123 
    124 struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
    125 static unsigned int schedcpu_ticks;
    126 
    127 syncobj_t sleep_syncobj = {
    128 	SOBJ_SLEEPQ_SORTED,
    129 	sleepq_unsleep,
    130 	sleepq_changepri
    131 };
    132 
    133 syncobj_t sched_syncobj = {
    134 	SOBJ_SLEEPQ_SORTED,
    135 	sched_unsleep,
    136 	sched_changepri
    137 };
    138 
    139 /*
    140  * Force switch among equal priority processes every 100ms.
    141  * Called from hardclock every hz/10 == rrticks hardclock ticks.
    142  */
    143 /* ARGSUSED */
    144 void
    145 roundrobin(struct cpu_info *ci)
    146 {
    147 	struct schedstate_percpu *spc = &ci->ci_schedstate;
    148 
    149 	spc->spc_rrticks = rrticks;
    150 
    151 	if (curlwp != NULL) {
    152 		if (spc->spc_flags & SPCF_SEENRR) {
    153 			/*
    154 			 * The process has already been through a roundrobin
    155 			 * without switching and may be hogging the CPU.
    156 			 * Indicate that the process should yield.
    157 			 */
    158 			spc->spc_flags |= SPCF_SHOULDYIELD;
    159 		} else
    160 			spc->spc_flags |= SPCF_SEENRR;
    161 	}
    162 	cpu_need_resched(curcpu());
    163 }
    164 
    165 #define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
    166 #define	NICE_WEIGHT 2			/* priorities per nice level */
    167 
    168 #define	ESTCPU_SHIFT	11
    169 #define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
    170 #define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
    171 
    172 /*
    173  * Constants for digital decay and forget:
    174  *	90% of (p_estcpu) usage in 5 * loadav time
    175  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
    176  *          Note that, as ps(1) mentions, this can let percentages
    177  *          total over 100% (I've seen 137.9% for 3 processes).
    178  *
    179  * Note that hardclock updates p_estcpu and p_cpticks independently.
    180  *
    181  * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
    182  * That is, the system wants to compute a value of decay such
    183  * that the following for loop:
    184  * 	for (i = 0; i < (5 * loadavg); i++)
    185  * 		p_estcpu *= decay;
    186  * will compute
    187  * 	p_estcpu *= 0.1;
    188  * for all values of loadavg:
    189  *
    190  * Mathematically this loop can be expressed by saying:
    191  * 	decay ** (5 * loadavg) ~= .1
    192  *
    193  * The system computes decay as:
    194  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
    195  *
    196  * We wish to prove that the system's computation of decay
    197  * will always fulfill the equation:
    198  * 	decay ** (5 * loadavg) ~= .1
    199  *
    200  * If we compute b as:
    201  * 	b = 2 * loadavg
    202  * then
    203  * 	decay = b / (b + 1)
    204  *
    205  * We now need to prove two things:
    206  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
    207  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
    208  *
    209  * Facts:
    210  *         For x close to zero, exp(x) =~ 1 + x, since
    211  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
    212  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
    213  *         For x close to zero, ln(1+x) =~ x, since
    214  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
    215  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
    216  *         ln(.1) =~ -2.30
    217  *
    218  * Proof of (1):
    219  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
    220  *	solving for factor,
    221  *      ln(factor) =~ (-2.30/5*loadav), or
    222  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
    223  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
    224  *
    225  * Proof of (2):
    226  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
    227  *	solving for power,
    228  *      power*ln(b/(b+1)) =~ -2.30, or
    229  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
    230  *
    231  * Actual power values for the implemented algorithm are as follows:
    232  *      loadav: 1       2       3       4
    233  *      power:  5.68    10.32   14.94   19.55
    234  */
    235 
    236 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
    237 #define	loadfactor(loadav)	(2 * (loadav))
    238 
    239 static fixpt_t
    240 decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
    241 {
    242 
    243 	if (estcpu == 0) {
    244 		return 0;
    245 	}
    246 
    247 #if !defined(_LP64)
    248 	/* avoid 64bit arithmetics. */
    249 #define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
    250 	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
    251 		return estcpu * loadfac / (loadfac + FSCALE);
    252 	}
    253 #endif /* !defined(_LP64) */
    254 
    255 	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
    256 }
    257 
    258 /*
    259  * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
    260  * sleeping for at least seven times the loadfactor will decay p_estcpu to
    261  * less than (1 << ESTCPU_SHIFT).
    262  *
    263  * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
    264  */
    265 static fixpt_t
    266 decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
    267 {
    268 
    269 	if ((n << FSHIFT) >= 7 * loadfac) {
    270 		return 0;
    271 	}
    272 
    273 	while (estcpu != 0 && n > 1) {
    274 		estcpu = decay_cpu(loadfac, estcpu);
    275 		n--;
    276 	}
    277 
    278 	return estcpu;
    279 }
    280 
    281 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    282 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    283 
    284 /*
    285  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    286  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    287  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    288  *
    289  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    290  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    291  *
    292  * If you dont want to bother with the faster/more-accurate formula, you
    293  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    294  * (more general) method of calculating the %age of CPU used by a process.
    295  */
    296 #define	CCPU_SHIFT	11
    297 
    298 /*
    299  * schedcpu:
    300  *
    301  *	Recompute process priorities, every hz ticks.
    302  *
    303  *	XXXSMP This needs to be reorganised in order to reduce the locking
    304  *	burden.
    305  */
    306 /* ARGSUSED */
    307 void
    308 schedcpu(void *arg)
    309 {
    310 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
    311 	struct rlimit *rlim;
    312 	struct lwp *l;
    313 	struct proc *p;
    314 	int minslp, clkhz, sig;
    315 	long runtm;
    316 
    317 	schedcpu_ticks++;
    318 
    319 	mutex_enter(&proclist_mutex);
    320 	PROCLIST_FOREACH(p, &allproc) {
    321 		/*
    322 		 * Increment time in/out of memory and sleep time (if
    323 		 * sleeping).  We ignore overflow; with 16-bit int's
    324 		 * (remember them?) overflow takes 45 days.
    325 		 */
    326 		minslp = 2;
    327 		mutex_enter(&p->p_smutex);
    328 		runtm = p->p_rtime.tv_sec;
    329 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    330 			lwp_lock(l);
    331 			runtm += l->l_rtime.tv_sec;
    332 			l->l_swtime++;
    333 			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
    334 			    l->l_stat == LSSUSPENDED) {
    335 				l->l_slptime++;
    336 				minslp = min(minslp, l->l_slptime);
    337 			} else
    338 				minslp = 0;
    339 			lwp_unlock(l);
    340 		}
    341 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
    342 
    343 		/*
    344 		 * Check if the process exceeds its CPU resource allocation.
    345 		 * If over max, kill it.
    346 		 */
    347 		rlim = &p->p_rlimit[RLIMIT_CPU];
    348 		sig = 0;
    349 		if (runtm >= rlim->rlim_cur) {
    350 			if (runtm >= rlim->rlim_max)
    351 				sig = SIGKILL;
    352 			else {
    353 				sig = SIGXCPU;
    354 				if (rlim->rlim_cur < rlim->rlim_max)
    355 					rlim->rlim_cur += 5;
    356 			}
    357 		}
    358 
    359 		/*
    360 		 * If the process has run for more than autonicetime, reduce
    361 		 * priority to give others a chance.
    362 		 */
    363 		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
    364 		    && kauth_cred_geteuid(p->p_cred)) {
    365 			mutex_spin_enter(&p->p_stmutex);
    366 			p->p_nice = autoniceval + NZERO;
    367 			resetprocpriority(p);
    368 			mutex_spin_exit(&p->p_stmutex);
    369 		}
    370 
    371 		/*
    372 		 * If the process has slept the entire second,
    373 		 * stop recalculating its priority until it wakes up.
    374 		 */
    375 		if (minslp <= 1) {
    376 			/*
    377 			 * p_pctcpu is only for ps.
    378 			 */
    379 			mutex_spin_enter(&p->p_stmutex);
    380 			clkhz = stathz != 0 ? stathz : hz;
    381 #if	(FSHIFT >= CCPU_SHIFT)
    382 			p->p_pctcpu += (clkhz == 100)?
    383 			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
    384 			    100 * (((fixpt_t) p->p_cpticks)
    385 			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
    386 #else
    387 			p->p_pctcpu += ((FSCALE - ccpu) *
    388 			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
    389 #endif
    390 			p->p_cpticks = 0;
    391 			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
    392 
    393 			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    394 				lwp_lock(l);
    395 				if (l->l_slptime <= 1 &&
    396 				    l->l_priority >= PUSER)
    397 					resetpriority(l);
    398 				lwp_unlock(l);
    399 			}
    400 			mutex_spin_exit(&p->p_stmutex);
    401 		}
    402 
    403 		mutex_exit(&p->p_smutex);
    404 		if (sig) {
    405 			psignal(p, sig);
    406 		}
    407 	}
    408 	mutex_exit(&proclist_mutex);
    409 	uvm_meter();
    410 	wakeup((caddr_t)&lbolt);
    411 	callout_schedule(&schedcpu_ch, hz);
    412 }
    413 
    414 /*
    415  * Recalculate the priority of a process after it has slept for a while.
    416  */
    417 void
    418 updatepri(struct lwp *l)
    419 {
    420 	struct proc *p = l->l_proc;
    421 	fixpt_t loadfac;
    422 
    423 	LOCK_ASSERT(lwp_locked(l, NULL));
    424 	KASSERT(l->l_slptime > 1);
    425 
    426 	loadfac = loadfactor(averunnable.ldavg[0]);
    427 
    428 	l->l_slptime--; /* the first time was done in schedcpu */
    429 	/* XXX NJWLWP */
    430 	/* XXXSMP occasionally unlocked, should be per-LWP */
    431 	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
    432 	resetpriority(l);
    433 }
    434 
    435 /*
    436  * During autoconfiguration or after a panic, a sleep will simply lower the
    437  * priority briefly to allow interrupts, then return.  The priority to be
    438  * used (safepri) is machine-dependent, thus this value is initialized and
    439  * maintained in the machine-dependent layers.  This priority will typically
    440  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    441  * it can be made higher to block network software interrupts after panics.
    442  */
    443 int	safepri;
    444 
    445 /*
    446  * OBSOLETE INTERFACE
    447  *
    448  * General sleep call.  Suspends the current process until a wakeup is
    449  * performed on the specified identifier.  The process will then be made
    450  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
    451  * means no timeout).  If pri includes PCATCH flag, signals are checked
    452  * before and after sleeping, else signals are not checked.  Returns 0 if
    453  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
    454  * signal needs to be delivered, ERESTART is returned if the current system
    455  * call should be restarted if possible, and EINTR is returned if the system
    456  * call should be interrupted by the signal (return EINTR).
    457  *
    458  * The interlock is held until we are on a sleep queue. The interlock will
    459  * be locked before returning back to the caller unless the PNORELOCK flag
    460  * is specified, in which case the interlock will always be unlocked upon
    461  * return.
    462  */
    463 int
    464 ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
    465 	volatile struct simplelock *interlock)
    466 {
    467 	struct lwp *l = curlwp;
    468 	sleepq_t *sq;
    469 	int error, catch;
    470 
    471 	if (sleepq_dontsleep(l)) {
    472 		(void)sleepq_abort(NULL, 0);
    473 		if ((priority & PNORELOCK) != 0)
    474 			simple_unlock(interlock);
    475 		return 0;
    476 	}
    477 
    478 	sq = sleeptab_lookup(&sleeptab, ident);
    479 	sleepq_enter(sq, l);
    480 
    481 	if (interlock != NULL) {
    482 		LOCK_ASSERT(simple_lock_held(interlock));
    483 		simple_unlock(interlock);
    484 	}
    485 
    486 	catch = priority & PCATCH;
    487 	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
    488 	    &sleep_syncobj);
    489 	error = sleepq_unblock(timo, catch);
    490 
    491 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    492 		simple_lock(interlock);
    493 
    494 	return error;
    495 }
    496 
    497 /*
    498  * General sleep call for situations where a wake-up is not expected.
    499  */
    500 int
    501 kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
    502 {
    503 	struct lwp *l = curlwp;
    504 	sleepq_t *sq;
    505 	int error;
    506 
    507 	if (sleepq_dontsleep(l))
    508 		return sleepq_abort(NULL, 0);
    509 
    510 	if (mtx != NULL)
    511 		mutex_exit(mtx);
    512 	sq = sleeptab_lookup(&sleeptab, l);
    513 	sleepq_enter(sq, l);
    514 	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
    515 	error = sleepq_unblock(timo, intr);
    516 	if (mtx != NULL)
    517 		mutex_enter(mtx);
    518 
    519 	return error;
    520 }
    521 
    522 /*
    523  * OBSOLETE INTERFACE
    524  *
    525  * Make all processes sleeping on the specified identifier runnable.
    526  */
    527 void
    528 wakeup(wchan_t ident)
    529 {
    530 	sleepq_t *sq;
    531 
    532 	if (cold)
    533 		return;
    534 
    535 	sq = sleeptab_lookup(&sleeptab, ident);
    536 	sleepq_wake(sq, ident, (u_int)-1);
    537 }
    538 
    539 /*
    540  * OBSOLETE INTERFACE
    541  *
    542  * Make the highest priority process first in line on the specified
    543  * identifier runnable.
    544  */
    545 void
    546 wakeup_one(wchan_t ident)
    547 {
    548 	sleepq_t *sq;
    549 
    550 	if (cold)
    551 		return;
    552 
    553 	sq = sleeptab_lookup(&sleeptab, ident);
    554 	sleepq_wake(sq, ident, 1);
    555 }
    556 
    557 
    558 /*
    559  * General yield call.  Puts the current process back on its run queue and
    560  * performs a voluntary context switch.  Should only be called when the
    561  * current process explicitly requests it (eg sched_yield(2) in compat code).
    562  */
    563 void
    564 yield(void)
    565 {
    566 	struct lwp *l = curlwp;
    567 
    568 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    569 	lwp_lock(l);
    570 	if (l->l_stat == LSONPROC) {
    571 		KASSERT(lwp_locked(l, &sched_mutex));
    572 		l->l_priority = l->l_usrpri;
    573 	}
    574 	l->l_nvcsw++;
    575 	mi_switch(l, NULL);
    576 	KERNEL_LOCK(l->l_biglocks, l);
    577 }
    578 
    579 /*
    580  * General preemption call.  Puts the current process back on its run queue
    581  * and performs an involuntary context switch.
    582  */
    583 void
    584 preempt(void)
    585 {
    586 	struct lwp *l = curlwp;
    587 
    588 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    589 	lwp_lock(l);
    590 	if (l->l_stat == LSONPROC) {
    591 		KASSERT(lwp_locked(l, &sched_mutex));
    592 		l->l_priority = l->l_usrpri;
    593 	}
    594 	l->l_nivcsw++;
    595 	(void)mi_switch(l, NULL);
    596 	KERNEL_LOCK(l->l_biglocks, l);
    597 }
    598 
    599 /*
    600  * The machine independent parts of context switch.  Switch to "new"
    601  * if non-NULL, otherwise let cpu_switch choose the next lwp.
    602  *
    603  * Returns 1 if another process was actually run.
    604  */
    605 int
    606 mi_switch(struct lwp *l, struct lwp *newl)
    607 {
    608 	struct schedstate_percpu *spc;
    609 	struct timeval tv;
    610 	int retval, oldspl;
    611 	long s, u;
    612 
    613 	LOCK_ASSERT(lwp_locked(l, NULL));
    614 
    615 #ifdef LOCKDEBUG
    616 	spinlock_switchcheck();
    617 	simple_lock_switchcheck();
    618 #endif
    619 #ifdef KSTACK_CHECK_MAGIC
    620 	kstack_check_magic(l);
    621 #endif
    622 
    623 	/*
    624 	 * It's safe to read the per CPU schedstate unlocked here, as all we
    625 	 * are after is the run time and that's guarenteed to have been last
    626 	 * updated by this CPU.
    627 	 */
    628 	KDASSERT(l->l_cpu == curcpu());
    629 	spc = &l->l_cpu->ci_schedstate;
    630 
    631 	/*
    632 	 * Compute the amount of time during which the current
    633 	 * process was running.
    634 	 */
    635 	microtime(&tv);
    636 	u = l->l_rtime.tv_usec +
    637 	    (tv.tv_usec - spc->spc_runtime.tv_usec);
    638 	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
    639 	if (u < 0 || u >= 1000000) {
    640 		if (u < 0) {
    641 			u += 1000000;
    642 			s--;
    643 		} else  {
    644 			u -= 1000000;
    645 			s++;
    646 		}
    647 	}
    648 	l->l_rtime.tv_usec = u;
    649 	l->l_rtime.tv_sec = s;
    650 
    651 	/*
    652 	 * XXXSMP If we are using h/w performance counters, save context.
    653 	 */
    654 #if PERFCTRS
    655 	if (PMC_ENABLED(l->l_proc)) {
    656 		pmc_save_context(l->l_proc);
    657 	}
    658 #endif
    659 
    660 	/*
    661 	 * Acquire the sched_mutex if necessary.  It will be released by
    662 	 * cpu_switch once it has decided to idle, or picked another LWP
    663 	 * to run.
    664 	 */
    665 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    666 	if (l->l_mutex != &sched_mutex) {
    667 		mutex_spin_enter(&sched_mutex);
    668 		lwp_unlock(l);
    669 	}
    670 #endif
    671 
    672 	/*
    673 	 * If on the CPU and we have gotten this far, then we must yield.
    674 	 */
    675 	KASSERT(l->l_stat != LSRUN);
    676 	if (l->l_stat == LSONPROC) {
    677 		KASSERT(lwp_locked(l, &sched_mutex));
    678 		l->l_stat = LSRUN;
    679 		setrunqueue(l);
    680 	}
    681 	uvmexp.swtch++;
    682 
    683 	/*
    684 	 * Process is about to yield the CPU; clear the appropriate
    685 	 * scheduling flags.
    686 	 */
    687 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    688 
    689 	LOCKDEBUG_BARRIER(&sched_mutex, 1);
    690 
    691 	/*
    692 	 * Switch to the new current LWP.  When we run again, we'll
    693 	 * return back here.
    694 	 */
    695 	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
    696 
    697 	if (newl == NULL || newl->l_back == NULL)
    698 		retval = cpu_switch(l, NULL);
    699 	else {
    700 		KASSERT(lwp_locked(newl, &sched_mutex));
    701 		remrunqueue(newl);
    702 		cpu_switchto(l, newl);
    703 		retval = 0;
    704 	}
    705 
    706 	/*
    707 	 * XXXSMP If we are using h/w performance counters, restore context.
    708 	 */
    709 #if PERFCTRS
    710 	if (PMC_ENABLED(l->l_proc)) {
    711 		pmc_restore_context(l->l_proc);
    712 	}
    713 #endif
    714 
    715 	/*
    716 	 * We're running again; record our new start time.  We might
    717 	 * be running on a new CPU now, so don't use the cached
    718 	 * schedstate_percpu pointer.
    719 	 */
    720 	KDASSERT(l->l_cpu == curcpu());
    721 	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
    722 	splx(oldspl);
    723 
    724 	return retval;
    725 }
    726 
    727 /*
    728  * Initialize the (doubly-linked) run queues
    729  * to be empty.
    730  */
    731 void
    732 rqinit()
    733 {
    734 	int i;
    735 
    736 	for (i = 0; i < RUNQUE_NQS; i++)
    737 		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
    738 		    (struct lwp *)&sched_qs[i];
    739 
    740 	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
    741 }
    742 
    743 static inline void
    744 resched_lwp(struct lwp *l, u_char pri)
    745 {
    746 	struct cpu_info *ci;
    747 
    748 	/*
    749 	 * XXXSMP
    750 	 * Since l->l_cpu persists across a context switch,
    751 	 * this gives us *very weak* processor affinity, in
    752 	 * that we notify the CPU on which the process last
    753 	 * ran that it should try to switch.
    754 	 *
    755 	 * This does not guarantee that the process will run on
    756 	 * that processor next, because another processor might
    757 	 * grab it the next time it performs a context switch.
    758 	 *
    759 	 * This also does not handle the case where its last
    760 	 * CPU is running a higher-priority process, but every
    761 	 * other CPU is running a lower-priority process.  There
    762 	 * are ways to handle this situation, but they're not
    763 	 * currently very pretty, and we also need to weigh the
    764 	 * cost of moving a process from one CPU to another.
    765 	 *
    766 	 * XXXSMP
    767 	 * There is also the issue of locking the other CPU's
    768 	 * sched state, which we currently do not do.
    769 	 */
    770 	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
    771 	if (pri < ci->ci_schedstate.spc_curpriority)
    772 		cpu_need_resched(ci);
    773 }
    774 
    775 /*
    776  * Change process state to be runnable, placing it on the run queue if it is
    777  * in memory, and awakening the swapper if it isn't in memory.
    778  *
    779  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    780  */
    781 void
    782 setrunnable(struct lwp *l)
    783 {
    784 	struct proc *p = l->l_proc;
    785 	sigset_t *ss;
    786 
    787 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    788 	LOCK_ASSERT(lwp_locked(l, NULL));
    789 
    790 	switch (l->l_stat) {
    791 	case LSSTOP:
    792 		/*
    793 		 * If we're being traced (possibly because someone attached us
    794 		 * while we were stopped), check for a signal from the debugger.
    795 		 */
    796 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    797 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    798 				ss = &l->l_sigpend.sp_set;
    799 			else
    800 				ss = &p->p_sigpend.sp_set;
    801 			sigaddset(ss, p->p_xstat);
    802 			signotify(l);
    803 		}
    804 		p->p_nrlwps++;
    805 		break;
    806 	case LSSUSPENDED:
    807 		l->l_flag &= ~LW_WSUSPEND;
    808 		p->p_nrlwps++;
    809 		break;
    810 	case LSSLEEP:
    811 		KASSERT(l->l_wchan != NULL);
    812 		break;
    813 	default:
    814 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    815 	}
    816 
    817 	/*
    818 	 * If the LWP was sleeping interruptably, then it's OK to start it
    819 	 * again.  If not, mark it as still sleeping.
    820 	 */
    821 	if (l->l_wchan != NULL) {
    822 		l->l_stat = LSSLEEP;
    823 		if ((l->l_flag & LW_SINTR) != 0)
    824 			lwp_unsleep(l);
    825 		else {
    826 			lwp_unlock(l);
    827 #ifdef DIAGNOSTIC
    828 			panic("setrunnable: !L_SINTR");
    829 #endif
    830 		}
    831 		return;
    832 	}
    833 
    834 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
    835 
    836 	/*
    837 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    838 	 * about to call mi_switch(), in which case it will yield.
    839 	 *
    840 	 * XXXSMP Will need to change for preemption.
    841 	 */
    842 #ifdef MULTIPROCESSOR
    843 	if (l->l_cpu->ci_curlwp == l) {
    844 #else
    845 	if (l == curlwp) {
    846 #endif
    847 		l->l_stat = LSONPROC;
    848 		l->l_slptime = 0;
    849 		lwp_unlock(l);
    850 		return;
    851 	}
    852 
    853 	/*
    854 	 * Set the LWP runnable.  If it's swapped out, we need to wake the swapper
    855 	 * to bring it back in.  Otherwise, enter it into a run queue.
    856 	 */
    857 	if (l->l_slptime > 1)
    858 		updatepri(l);
    859 	l->l_stat = LSRUN;
    860 	l->l_slptime = 0;
    861 
    862 	if (l->l_flag & LW_INMEM) {
    863 		setrunqueue(l);
    864 		resched_lwp(l, l->l_priority);
    865 		lwp_unlock(l);
    866 	} else {
    867 		lwp_unlock(l);
    868 		uvm_kick_scheduler();
    869 	}
    870 }
    871 
    872 /*
    873  * Compute the priority of a process when running in user mode.
    874  * Arrange to reschedule if the resulting priority is better
    875  * than that of the current process.
    876  */
    877 void
    878 resetpriority(struct lwp *l)
    879 {
    880 	unsigned int newpriority;
    881 	struct proc *p = l->l_proc;
    882 
    883 	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
    884 	LOCK_ASSERT(lwp_locked(l, NULL));
    885 
    886 	if ((l->l_flag & LW_SYSTEM) != 0)
    887 		return;
    888 
    889 	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
    890 	    NICE_WEIGHT * (p->p_nice - NZERO);
    891 	newpriority = min(newpriority, MAXPRI);
    892 	lwp_changepri(l, newpriority);
    893 }
    894 
    895 /*
    896  * Recompute priority for all LWPs in a process.
    897  */
    898 void
    899 resetprocpriority(struct proc *p)
    900 {
    901 	struct lwp *l;
    902 
    903 	LOCK_ASSERT(mutex_owned(&p->p_stmutex));
    904 
    905 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    906 		lwp_lock(l);
    907 		resetpriority(l);
    908 		lwp_unlock(l);
    909 	}
    910 }
    911 
    912 /*
    913  * We adjust the priority of the current process.  The priority of a process
    914  * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
    915  * is increased here.  The formula for computing priorities (in kern_synch.c)
    916  * will compute a different value each time p_estcpu increases. This can
    917  * cause a switch, but unless the priority crosses a PPQ boundary the actual
    918  * queue will not change.  The CPU usage estimator ramps up quite quickly
    919  * when the process is running (linearly), and decays away exponentially, at
    920  * a rate which is proportionally slower when the system is busy.  The basic
    921  * principle is that the system will 90% forget that the process used a lot
    922  * of CPU time in 5 * loadav seconds.  This causes the system to favor
    923  * processes which haven't run much recently, and to round-robin among other
    924  * processes.
    925  */
    926 
    927 void
    928 schedclock(struct lwp *l)
    929 {
    930 	struct proc *p = l->l_proc;
    931 
    932 	mutex_spin_enter(&p->p_stmutex);
    933 	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
    934 	lwp_lock(l);
    935 	resetpriority(l);
    936 	mutex_spin_exit(&p->p_stmutex);
    937 	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
    938 		l->l_priority = l->l_usrpri;
    939 	lwp_unlock(l);
    940 }
    941 
    942 /*
    943  * suspendsched:
    944  *
    945  *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    946  */
    947 void
    948 suspendsched(void)
    949 {
    950 #ifdef MULTIPROCESSOR
    951 	CPU_INFO_ITERATOR cii;
    952 	struct cpu_info *ci;
    953 #endif
    954 	struct lwp *l;
    955 	struct proc *p;
    956 
    957 	/*
    958 	 * We do this by process in order not to violate the locking rules.
    959 	 */
    960 	mutex_enter(&proclist_mutex);
    961 	PROCLIST_FOREACH(p, &allproc) {
    962 		mutex_enter(&p->p_smutex);
    963 
    964 		if ((p->p_flag & PK_SYSTEM) != 0) {
    965 			mutex_exit(&p->p_smutex);
    966 			continue;
    967 		}
    968 
    969 		p->p_stat = SSTOP;
    970 
    971 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    972 			if (l == curlwp)
    973 				continue;
    974 
    975 			lwp_lock(l);
    976 
    977 			/*
    978 			 * Set L_WREBOOT so that the LWP will suspend itself
    979 			 * when it tries to return to user mode.  We want to
    980 			 * try and get to get as many LWPs as possible to
    981 			 * the user / kernel boundary, so that they will
    982 			 * release any locks that they hold.
    983 			 */
    984 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
    985 
    986 			if (l->l_stat == LSSLEEP &&
    987 			    (l->l_flag & LW_SINTR) != 0) {
    988 				/* setrunnable() will release the lock. */
    989 				setrunnable(l);
    990 				continue;
    991 			}
    992 
    993 			lwp_unlock(l);
    994 		}
    995 
    996 		mutex_exit(&p->p_smutex);
    997 	}
    998 	mutex_exit(&proclist_mutex);
    999 
   1000 	/*
   1001 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
   1002 	 * They'll trap into the kernel and suspend themselves in userret().
   1003 	 */
   1004 	sched_lock(0);
   1005 #ifdef MULTIPROCESSOR
   1006 	for (CPU_INFO_FOREACH(cii, ci))
   1007 		cpu_need_resched(ci);
   1008 #else
   1009 	cpu_need_resched(curcpu());
   1010 #endif
   1011 	sched_unlock(0);
   1012 }
   1013 
   1014 /*
   1015  * scheduler_fork_hook:
   1016  *
   1017  *	Inherit the parent's scheduler history.
   1018  */
   1019 void
   1020 scheduler_fork_hook(struct proc *parent, struct proc *child)
   1021 {
   1022 
   1023 	LOCK_ASSERT(mutex_owned(&parent->p_smutex));
   1024 
   1025 	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
   1026 	child->p_forktime = schedcpu_ticks;
   1027 }
   1028 
   1029 /*
   1030  * scheduler_wait_hook:
   1031  *
   1032  *	Chargeback parents for the sins of their children.
   1033  */
   1034 void
   1035 scheduler_wait_hook(struct proc *parent, struct proc *child)
   1036 {
   1037 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
   1038 	fixpt_t estcpu;
   1039 
   1040 	/* XXX Only if parent != init?? */
   1041 
   1042 	mutex_spin_enter(&parent->p_stmutex);
   1043 	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
   1044 	    schedcpu_ticks - child->p_forktime);
   1045 	if (child->p_estcpu > estcpu)
   1046 		parent->p_estcpu =
   1047 		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
   1048 	mutex_spin_exit(&parent->p_stmutex);
   1049 }
   1050 
   1051 /*
   1052  * sched_kpri:
   1053  *
   1054  *	Scale a priority level to a kernel priority level, usually
   1055  *	for an LWP that is about to sleep.
   1056  */
   1057 int
   1058 sched_kpri(struct lwp *l)
   1059 {
   1060 	/*
   1061 	 * Scale user priorities (127 -> 50) up to kernel priorities
   1062 	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
   1063 	 * for high priority kthreads.  Kernel priorities passed in
   1064 	 * are left "as is".  XXX This is somewhat arbitrary.
   1065 	 */
   1066 	static const uint8_t kpri_tab[] = {
   1067 		 0,   1,   2,   3,   4,   5,   6,   7,
   1068 		 8,   9,  10,  11,  12,  13,  14,  15,
   1069 		16,  17,  18,  19,  20,  21,  22,  23,
   1070 		24,  25,  26,  27,  28,  29,  30,  31,
   1071 		32,  33,  34,  35,  36,  37,  38,  39,
   1072 		40,  41,  42,  43,  44,  45,  46,  47,
   1073 		48,  49,   8,   8,   9,   9,  10,  10,
   1074 		11,  11,  12,  12,  13,  14,  14,  15,
   1075 		15,  16,  16,  17,  17,  18,  18,  19,
   1076 		20,  20,  21,  21,  22,  22,  23,  23,
   1077 		24,  24,  25,  26,  26,  27,  27,  28,
   1078 		28,  29,  29,  30,  30,  31,  32,  32,
   1079 		33,  33,  34,  34,  35,  35,  36,  36,
   1080 		37,  38,  38,  39,  39,  40,  40,  41,
   1081 		41,  42,  42,  43,  44,  44,  45,  45,
   1082 		46,  46,  47,  47,  48,  48,  49,  49,
   1083 	};
   1084 
   1085 	return kpri_tab[l->l_usrpri];
   1086 }
   1087 
   1088 /*
   1089  * sched_unsleep:
   1090  *
   1091  *	The is called when the LWP has not been awoken normally but instead
   1092  *	interrupted: for example, if the sleep timed out.  Because of this,
   1093  *	it's not a valid action for running or idle LWPs.
   1094  */
   1095 void
   1096 sched_unsleep(struct lwp *l)
   1097 {
   1098 
   1099 	lwp_unlock(l);
   1100 	panic("sched_unsleep");
   1101 }
   1102 
   1103 /*
   1104  * sched_changepri:
   1105  *
   1106  *	Adjust the priority of an LWP.
   1107  */
   1108 void
   1109 sched_changepri(struct lwp *l, int pri)
   1110 {
   1111 
   1112 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1113 
   1114 	l->l_usrpri = pri;
   1115 
   1116 	if (l->l_priority < PUSER)
   1117 		return;
   1118 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0 ||
   1119 	    (l->l_priority / PPQ) == (pri / PPQ)) {
   1120 		l->l_priority = pri;
   1121 		return;
   1122 	}
   1123 
   1124 	remrunqueue(l);
   1125 	l->l_priority = pri;
   1126 	setrunqueue(l);
   1127 	resched_lwp(l, pri);
   1128 }
   1129 
   1130 /*
   1131  * Low-level routines to access the run queue.  Optimised assembler
   1132  * routines can override these.
   1133  */
   1134 
   1135 #ifndef __HAVE_MD_RUNQUEUE
   1136 
   1137 /*
   1138  * On some architectures, it's faster to use a MSB ordering for the priorites
   1139  * than the traditional LSB ordering.
   1140  */
   1141 #ifdef __HAVE_BIGENDIAN_BITOPS
   1142 #define	RQMASK(n) (0x80000000 >> (n))
   1143 #else
   1144 #define	RQMASK(n) (0x00000001 << (n))
   1145 #endif
   1146 
   1147 /*
   1148  * The primitives that manipulate the run queues.  whichqs tells which
   1149  * of the 32 queues qs have processes in them.  Setrunqueue puts processes
   1150  * into queues, remrunqueue removes them from queues.  The running process is
   1151  * on no queue, other processes are on a queue related to p->p_priority,
   1152  * divided by 4 actually to shrink the 0-127 range of priorities into the 32
   1153  * available queues.
   1154  */
   1155 #ifdef RQDEBUG
   1156 static void
   1157 checkrunqueue(int whichq, struct lwp *l)
   1158 {
   1159 	const struct prochd * const rq = &sched_qs[whichq];
   1160 	struct lwp *l2;
   1161 	int found = 0;
   1162 	int die = 0;
   1163 	int empty = 1;
   1164 	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
   1165 		if (l2->l_stat != LSRUN) {
   1166 			printf("checkrunqueue[%d]: lwp %p state (%d) "
   1167 			    " != LSRUN\n", whichq, l2, l2->l_stat);
   1168 		}
   1169 		if (l2->l_back->l_forw != l2) {
   1170 			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
   1171 			    "corrupt %p\n", whichq, l2, l2->l_back,
   1172 			    l2->l_back->l_forw);
   1173 			die = 1;
   1174 		}
   1175 		if (l2->l_forw->l_back != l2) {
   1176 			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
   1177 			    "corrupt %p\n", whichq, l2, l2->l_forw,
   1178 			    l2->l_forw->l_back);
   1179 			die = 1;
   1180 		}
   1181 		if (l2 == l)
   1182 			found = 1;
   1183 		empty = 0;
   1184 	}
   1185 	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
   1186 		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
   1187 		    whichq, rq);
   1188 		die = 1;
   1189 	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
   1190 		printf("checkrunqueue[%d]: bit clear for non-empty "
   1191 		    "run-queue %p\n", whichq, rq);
   1192 		die = 1;
   1193 	}
   1194 	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
   1195 		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
   1196 		    whichq, l);
   1197 		die = 1;
   1198 	}
   1199 	if (l != NULL && empty) {
   1200 		printf("checkrunqueue[%d]: empty run-queue %p with "
   1201 		    "active lwp %p\n", whichq, rq, l);
   1202 		die = 1;
   1203 	}
   1204 	if (l != NULL && !found) {
   1205 		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
   1206 		    whichq, l, rq);
   1207 		die = 1;
   1208 	}
   1209 	if (die)
   1210 		panic("checkrunqueue: inconsistency found");
   1211 }
   1212 #endif /* RQDEBUG */
   1213 
   1214 void
   1215 setrunqueue(struct lwp *l)
   1216 {
   1217 	struct prochd *rq;
   1218 	struct lwp *prev;
   1219 	const int whichq = l->l_priority / PPQ;
   1220 
   1221 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1222 
   1223 #ifdef RQDEBUG
   1224 	checkrunqueue(whichq, NULL);
   1225 #endif
   1226 #ifdef DIAGNOSTIC
   1227 	if (l->l_back != NULL || l->l_stat != LSRUN)
   1228 		panic("setrunqueue");
   1229 #endif
   1230 	sched_whichqs |= RQMASK(whichq);
   1231 	rq = &sched_qs[whichq];
   1232 	prev = rq->ph_rlink;
   1233 	l->l_forw = (struct lwp *)rq;
   1234 	rq->ph_rlink = l;
   1235 	prev->l_forw = l;
   1236 	l->l_back = prev;
   1237 #ifdef RQDEBUG
   1238 	checkrunqueue(whichq, l);
   1239 #endif
   1240 }
   1241 
   1242 /*
   1243  * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
   1244  * drop of the effective priority level from kernel to user needs to be
   1245  * moved here from userret().  The assignment in userret() is currently
   1246  * done unlocked.
   1247  */
   1248 void
   1249 remrunqueue(struct lwp *l)
   1250 {
   1251 	struct lwp *prev, *next;
   1252 	const int whichq = l->l_priority / PPQ;
   1253 
   1254 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1255 
   1256 #ifdef RQDEBUG
   1257 	checkrunqueue(whichq, l);
   1258 #endif
   1259 
   1260 #if defined(DIAGNOSTIC)
   1261 	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
   1262 		/* Shouldn't happen - interrupts disabled. */
   1263 		panic("remrunqueue: bit %d not set", whichq);
   1264 	}
   1265 #endif
   1266 	prev = l->l_back;
   1267 	l->l_back = NULL;
   1268 	next = l->l_forw;
   1269 	prev->l_forw = next;
   1270 	next->l_back = prev;
   1271 	if (prev == next)
   1272 		sched_whichqs &= ~RQMASK(whichq);
   1273 #ifdef RQDEBUG
   1274 	checkrunqueue(whichq, NULL);
   1275 #endif
   1276 }
   1277 
   1278 #undef RQMASK
   1279 #endif /* !defined(__HAVE_MD_RUNQUEUE) */
   1280