      1 /*	$NetBSD: kern_synch.c,v 1.166.2.7 2006/12/29 20:27:44 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*-
     41  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     42  *	The Regents of the University of California.  All rights reserved.
     43  * (c) UNIX System Laboratories, Inc.
     44  * All or some portions of this file are derived from material licensed
     45  * to the University of California by American Telephone and Telegraph
     46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     47  * the permission of UNIX System Laboratories, Inc.
     48  *
     49  * Redistribution and use in source and binary forms, with or without
     50  * modification, are permitted provided that the following conditions
     51  * are met:
     52  * 1. Redistributions of source code must retain the above copyright
     53  *    notice, this list of conditions and the following disclaimer.
     54  * 2. Redistributions in binary form must reproduce the above copyright
     55  *    notice, this list of conditions and the following disclaimer in the
     56  *    documentation and/or other materials provided with the distribution.
     57  * 3. Neither the name of the University nor the names of its contributors
     58  *    may be used to endorse or promote products derived from this software
     59  *    without specific prior written permission.
     60  *
     61  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     62  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     63  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     64  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     65  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     66  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     67  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     68  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     69  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     70  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     71  * SUCH DAMAGE.
     72  *
     73  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
     74  */
     75 
     76 #include <sys/cdefs.h>
     77 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.7 2006/12/29 20:27:44 ad Exp $");
     78 
     79 #include "opt_ddb.h"
     80 #include "opt_kstack.h"
     81 #include "opt_lockdebug.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #define	__MUTEX_PRIVATE
     86 
     87 #include <sys/param.h>
     88 #include <sys/systm.h>
     89 #include <sys/callout.h>
     90 #include <sys/proc.h>
     91 #include <sys/kernel.h>
     92 #include <sys/buf.h>
     93 #if defined(PERFCTRS)
     94 #include <sys/pmc.h>
     95 #endif
     96 #include <sys/signalvar.h>
     97 #include <sys/resourcevar.h>
     98 #include <sys/sched.h>
     99 #include <sys/sa.h>
    100 #include <sys/savar.h>
    101 #include <sys/kauth.h>
    102 #include <sys/sleepq.h>
    103 #include <sys/lockdebug.h>
    104 
    105 #include <uvm/uvm_extern.h>
    106 
    107 #include <machine/cpu.h>
    108 
    109 int	lbolt;			/* once a second sleep address */
    110 int	rrticks;		/* number of hardclock ticks per roundrobin() */
    111 
    112 /*
    113  * The global scheduler state.
    114  */
    115 kmutex_t	sched_mutex;		/* global sched state mutex */
    116 struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
    117 volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */
    118 
    119 void	schedcpu(void *);
    120 void	updatepri(struct lwp *);
    121 void	sa_awaken(struct lwp *);
    122 
    123 void	sched_unsleep(struct lwp *);
    124 void	sched_changepri(struct lwp *, int);
    125 
    126 struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
    127 static unsigned int schedcpu_ticks;
    128 
    129 syncobj_t sleep_syncobj = {
    130 	SOBJ_SLEEPQ_SORTED,
    131 	sleepq_unsleep,
    132 	sleepq_changepri
    133 };
    134 
    135 syncobj_t sched_syncobj = {
    136 	SOBJ_SLEEPQ_SORTED,
    137 	sched_unsleep,
    138 	sched_changepri
    139 };
    140 
    141 /*
    142  * Force switch among equal priority processes every 100ms.
    143  * Called from hardclock every hz/10 == rrticks hardclock ticks.
    144  */
    145 /* ARGSUSED */
    146 void
    147 roundrobin(struct cpu_info *ci)
    148 {
    149 	struct schedstate_percpu *spc = &ci->ci_schedstate;
    150 
    151 	spc->spc_rrticks = rrticks;
    152 
    153 	if (curlwp != NULL) {
    154 		if (spc->spc_flags & SPCF_SEENRR) {
    155 			/*
    156 			 * The process has already been through a roundrobin
    157 			 * without switching and may be hogging the CPU.
    158 			 * Indicate that the process should yield.
    159 			 */
    160 			spc->spc_flags |= SPCF_SHOULDYIELD;
    161 		} else
    162 			spc->spc_flags |= SPCF_SEENRR;
    163 	}
    164 	cpu_need_resched(curcpu());
    165 }
    166 
    167 #define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
    168 #define	NICE_WEIGHT 2			/* priorities per nice level */
    169 
    170 #define	ESTCPU_SHIFT	11
    171 #define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
    172 #define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
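
        /*
         * Illustrative values (an added note, assuming the usual configuration
         * of RUNQUE_NQS == 32 and PRIO_MAX == 20): PPQ == 4 and
         * ESTCPU_MAX == (2 * 20 - 4) << ESTCPU_SHIFT == 36 << ESTCPU_SHIFT.
         * One step of scheduling priority thus corresponds to
         * (1 << ESTCPU_SHIFT) units of p_estcpu; see resetpriority() below.
         */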
    173 
    174 /*
    175  * Constants for digital decay and forget:
    176  *	90% of (p_estcpu) usage in 5 * loadav time
    177  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
    178  *          Note that, as ps(1) mentions, this can let percentages
    179  *          total over 100% (I've seen 137.9% for 3 processes).
    180  *
    181  * Note that hardclock updates p_estcpu and p_cpticks independently.
    182  *
    183  * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
    184  * That is, the system wants to compute a value of decay such
    185  * that the following for loop:
    186  * 	for (i = 0; i < (5 * loadavg); i++)
    187  * 		p_estcpu *= decay;
    188  * will compute
    189  * 	p_estcpu *= 0.1;
    190  * for all values of loadavg:
    191  *
    192  * Mathematically this loop can be expressed by saying:
    193  * 	decay ** (5 * loadavg) ~= .1
    194  *
    195  * The system computes decay as:
    196  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
    197  *
    198  * We wish to prove that the system's computation of decay
    199  * will always fulfill the equation:
    200  * 	decay ** (5 * loadavg) ~= .1
    201  *
    202  * If we compute b as:
    203  * 	b = 2 * loadavg
    204  * then
    205  * 	decay = b / (b + 1)
    206  *
    207  * We now need to prove two things:
    208  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
    209  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
    210  *
    211  * Facts:
    212  *         For x close to zero, exp(x) =~ 1 + x, since
    213  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
    214  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
    215  *         For x close to zero, ln(1+x) =~ x, since
    216  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
    217  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
    218  *         ln(.1) =~ -2.30
    219  *
    220  * Proof of (1):
    221  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
    222  *	solving for factor,
    223  *      ln(factor) =~ (-2.30/5*loadav), or
    224  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
    225  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
    226  *
    227  * Proof of (2):
    228  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
    229  *	solving for power,
    230  *      power*ln(b/(b+1)) =~ -2.30, or
    231  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
    232  *
    233  * Actual power values for the implemented algorithm are as follows:
    234  *      loadav: 1       2       3       4
    235  *      power:  5.68    10.32   14.94   19.55
    236  */
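
        /*
         * Worked example (added for illustration): with loadav == 2, b == 4
         * and decay == 4/5.  Applying p_estcpu *= 4/5 for 5 * loadav == 10
         * steps leaves (4/5)**10 =~ 0.107 of the original estimate, i.e.
         * roughly 90% of the accumulated CPU usage is forgotten, matching the
         * table above (power =~ 10.32 for loadav == 2).
         */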
    237 
    238 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
    239 #define	loadfactor(loadav)	(2 * (loadav))
    240 
    241 static fixpt_t
    242 decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
    243 {
    244 
    245 	if (estcpu == 0) {
    246 		return 0;
    247 	}
    248 
    249 #if !defined(_LP64)
    250 	/* avoid 64bit arithmetics. */
    251 #define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
    252 	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
    253 		return estcpu * loadfac / (loadfac + FSCALE);
    254 	}
    255 #endif /* !defined(_LP64) */
    256 
    257 	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
    258 }
    259 
    260 /*
    261  * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
    262  * sleeping for at least seven times the loadfactor will decay p_estcpu to
    263  * less than (1 << ESTCPU_SHIFT).
    264  *
    265  * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
    266  */
    267 static fixpt_t
    268 decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
    269 {
    270 
    271 	if ((n << FSHIFT) >= 7 * loadfac) {
    272 		return 0;
    273 	}
    274 
    275 	while (estcpu != 0 && n > 1) {
    276 		estcpu = decay_cpu(loadfac, estcpu);
    277 		n--;
    278 	}
    279 
    280 	return estcpu;
    281 }
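
        /*
         * Worked check of the early return above (added for illustration):
         * decay_cpu_batch() returns 0 outright once (n << FSHIFT) >= 7 * loadfac,
         * i.e. once n covers roughly 14 * loadav seconds.  At that point
         * decay**n = (b/(b+1))**(7*b) =~ exp(-7) =~ 0.0009, and even
         * (255 << ESTCPU_SHIFT) scaled by that factor is below
         * (1 << ESTCPU_SHIFT), consistent with the comment above.
         */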
    282 
    283 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    284 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    285 
    286 /*
    287  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    288  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    289  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    290  *
    291  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    292  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    293  *
    294  * If you don't want to bother with the faster/more-accurate formula, you
    295  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    296  * (more general) method of calculating the %age of CPU used by a process.
    297  */
    298 #define	CCPU_SHIFT	11
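
        /*
         * Illustrative steady state (added note, assuming FSHIFT == 11 so
         * FSCALE == 2048, and stathz == 100): a process that runs continuously
         * adds 100 << (FSHIFT - CCPU_SHIFT) == 100 to p_pctcpu every second in
         * schedcpu() below, while the old value decays by ccpu/FSCALE =~ 0.9512.
         * The fixed point of x = 0.9512 * x + 100 is x =~ 2050 =~ FSCALE, so
         * the estimate converges to roughly 100% as intended.
         */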
    299 
    300 /*
    301  * schedcpu:
    302  *
    303  *	Recompute process priorities, every hz ticks.
    304  *
    305  *	XXXSMP This needs to be reorganised in order to reduce the locking
    306  *	burden.
    307  */
    308 /* ARGSUSED */
    309 void
    310 schedcpu(void *arg)
    311 {
    312 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
    313 	struct rlimit *rlim;
    314 	struct lwp *l;
    315 	struct proc *p;
    316 	int minslp, clkhz;
    317 	long runtm;
    318 
    319 	schedcpu_ticks++;
    320 
    321 	mutex_enter(&proclist_mutex);
    322 	PROCLIST_FOREACH(p, &allproc) {
    323 		/*
    324 		 * Increment time in/out of memory and sleep time (if
    325 		 * sleeping).  We ignore overflow; with 16-bit int's
    326 		 * (remember them?) overflow takes 45 days.
    327 		 */
    328 		minslp = 2;
    329 		runtm = 0;
    330 		mutex_enter(&p->p_smutex);
    331 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    332 			lwp_lock(l);
    333 			runtm += l->l_rtime.tv_sec;
    334 			l->l_swtime++;
    335 			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
    336 			    l->l_stat == LSSUSPENDED) {
    337 				l->l_slptime++;
    338 				minslp = min(minslp, l->l_slptime);
    339 			} else
    340 				minslp = 0;
    341 			lwp_unlock(l);
    342 		}
    343 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
    344 
    345 		/*
    346 		 * Check if the process exceeds its CPU resource allocation.
    347 		 * If over max, kill it.
    348 		 */
    349 		rlim = &p->p_rlimit[RLIMIT_CPU];
    350 		if (runtm >= rlim->rlim_cur) {
    351 			if (runtm >= rlim->rlim_max)
    352 				psignal(p, SIGKILL);
    353 			else {
    354 				psignal(p, SIGXCPU);
    355 				if (rlim->rlim_cur < rlim->rlim_max)
    356 					rlim->rlim_cur += 5;
    357 			}
    358 		}
    359 
    360 		/*
    361 		 * If the process has run for more than autonicetime, reduce
    362 		 * priority to give others a chance.
    363 		 */
    364 		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
    365 		    && kauth_cred_geteuid(p->p_cred)) {
    366 			p->p_nice = autoniceval + NZERO;
    367 			resetprocpriority(p);
    368 		}
    369 
    370 		/*
    371 		 * If the process has slept the entire second,
    372 		 * stop recalculating its priority until it wakes up.
    373 		 */
    374 		if (minslp > 1) {
    375 			mutex_exit(&p->p_smutex);
    376 			continue;
    377 		}
    378 
    379 		/*
    380 		 * p_pctcpu is only for ps.
    381 		 */
    382 		mutex_enter(&p->p_stmutex);
    383 		clkhz = stathz != 0 ? stathz : hz;
    384 #if	(FSHIFT >= CCPU_SHIFT)
    385 		p->p_pctcpu += (clkhz == 100)?
    386 			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
    387                 	100 * (((fixpt_t) p->p_cpticks)
    388 				<< (FSHIFT - CCPU_SHIFT)) / clkhz;
    389 #else
    390 		p->p_pctcpu += ((FSCALE - ccpu) *
    391 			(p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
    392 #endif
    393 		p->p_cpticks = 0;
    394 		mutex_exit(&p->p_stmutex);
    395 		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
    396 
    397 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    398 			lwp_lock(l);
    399 			if (l->l_slptime <= 1)
    400 				resetpriority(l);
    401 			lwp_unlock(l);
    402 		}
    403 		mutex_exit(&p->p_smutex);
    404 	}
    405 	mutex_exit(&proclist_mutex);
    406 	uvm_meter();
    407 	wakeup((caddr_t)&lbolt);
    408 	callout_schedule(&schedcpu_ch, hz);
    409 }
    410 
    411 /*
    412  * Recalculate the priority of a process after it has slept for a while.
    413  */
    414 void
    415 updatepri(struct lwp *l)
    416 {
    417 	struct proc *p = l->l_proc;
    418 	fixpt_t loadfac;
    419 
    420 	LOCK_ASSERT(lwp_locked(l, NULL));
    421 	KASSERT(l->l_slptime > 1);
    422 
    423 	loadfac = loadfactor(averunnable.ldavg[0]);
    424 
    425 	l->l_slptime--; /* the first time was done in schedcpu */
    426 	/* XXX NJWLWP */
    427 	/* XXXSMP occasionally unlocked. */
    428 	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
    429 	resetpriority(l);
    430 }
    431 
    432 /*
    433  * During autoconfiguration or after a panic, a sleep will simply lower the
    434  * priority briefly to allow interrupts, then return.  The priority to be
    435  * used (safepri) is machine-dependent, thus this value is initialized and
    436  * maintained in the machine-dependent layers.  This priority will typically
    437  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    438  * it can be made higher to block network software interrupts after panics.
    439  */
    440 int	safepri;
    441 
    442 /*
    443  * ltsleep: see mtsleep() for comments.
    444  */
    445 int
    446 ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
    447 	volatile struct simplelock *interlock)
    448 {
    449 	struct lwp *l = curlwp;
    450 	sleepq_t *sq;
    451 	int error, catch;
    452 
    453 	if (sleepq_dontsleep(l)) {
    454 		(void)sleepq_abort(NULL, 0);
    455 		if ((priority & PNORELOCK) != 0)
    456 			simple_unlock(interlock);
    457 		return 0;
    458 	}
    459 
    460 	sq = sleeptab_lookup(&sleeptab, ident);
    461 	sleepq_enter(sq, l);
    462 
    463 	if (interlock != NULL) {
    464 		LOCK_ASSERT(simple_lock_held(interlock));
    465 		simple_unlock(interlock);
    466 	}
    467 
    468 	catch = priority & PCATCH;
    469 	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
    470 	    &sleep_syncobj);
    471 	error = sleepq_unblock(timo, catch);
    472 
    473 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    474 		simple_lock(interlock);
    475 
    476 	return error;
    477 }
    478 
    479 /*
    480  * General sleep call.  Suspends the current process until a wakeup is
    481  * performed on the specified identifier.  The process will then be made
    482  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
    483  * means no timeout).  If pri includes PCATCH flag, signals are checked
    484  * before and after sleeping, else signals are not checked.  Returns 0 if
    485  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
    486  * signal needs to be delivered, ERESTART is returned if the current system
    487  * call should be restarted if possible, and EINTR is returned if the system
    488  * call should be interrupted by the signal.
    489  *
    490  * The interlock is held until we are on a sleep queue. The interlock will
    491  * be locked before returning back to the caller unless the PNORELOCK flag
    492  * is specified, in which case the interlock will always be unlocked upon
    493  * return.
    494  */
    495 int
    496 mtsleep(wchan_t ident, int priority, const char *wmesg, int timo,
    497 	kmutex_t *mtx)
    498 {
    499 	struct lwp *l = curlwp;
    500 	sleepq_t *sq;
    501 	int error, catch;
    502 
    503 	if (sleepq_dontsleep(l))
    504 		return sleepq_abort(mtx, priority & PNORELOCK);
    505 
    506 	sq = sleeptab_lookup(&sleeptab, ident);
    507 	sleepq_enter(sq, l);
    508 
    509 	if (mtx != NULL) {
    510 		LOCK_ASSERT(mutex_owned(mtx));
    511 		mutex_exit(mtx);
    512 	}
    513 
    514 	catch = priority & PCATCH;
    515 	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
    516 	    &sleep_syncobj);
    517 	error = sleepq_unblock(timo, catch);
    518 
    519 	if (mtx != NULL && (priority & PNORELOCK) == 0)
    520 		mutex_enter(mtx);
    521 
    522 	return error;
    523 }
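
        /*
         * Typical usage sketch (added for illustration; 'sc', 'sc_lock' and
         * 'sc_ready' are hypothetical driver members, not defined here):
         *
         *	mutex_enter(&sc->sc_lock);
         *	while (!sc->sc_ready) {
         *		error = mtsleep(&sc->sc_ready, PWAIT, "scwait", 0,
         *		    &sc->sc_lock);
         *		if (error != 0)
         *			break;
         *	}
         *	mutex_exit(&sc->sc_lock);
         *
         * The waking side sets sc_ready and calls wakeup(&sc->sc_ready).
         * Because PNORELOCK is not passed, mtsleep() reacquires sc_lock
         * before returning, even on error.
         */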
    524 
    525 /*
    526  * sched_pause:
    527  *
    528  *	General sleep call for situations where a wake-up is not expected.
    529  */
    530 int
    531 sched_pause(const char *wmesg, boolean_t intr, int timo)
    532 {
    533 	struct lwp *l = curlwp;
    534 	sleepq_t *sq;
    535 
    536 	if (sleepq_dontsleep(l))
    537 		return sleepq_abort(NULL, 0);
    538 
    539 	sq = sleeptab_lookup(&sleeptab, l);
    540 	sleepq_enter(sq, l);
    541 	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
    542 	return sleepq_unblock(timo, intr);
    543 }
    544 
    545 void
    546 sa_awaken(struct lwp *l)
    547 {
    548 
    549 	LOCK_ASSERT(lwp_locked(l, NULL));
    550 
    551 	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
    552 		l->l_flag &= ~L_SA_IDLE;
    553 }
    554 
    555 /*
    556  * Make all processes sleeping on the specified identifier runnable.
    557  */
    558 void
    559 wakeup(wchan_t ident)
    560 {
    561 	sleepq_t *sq;
    562 
    563 	if (cold)
    564 		return;
    565 
    566 	sq = sleeptab_lookup(&sleeptab, ident);
    567 	sleepq_wake(sq, ident, (u_int)-1);
    568 }
    569 
    570 /*
    571  * Make the highest priority process first in line on the specified
    572  * identifier runnable.
    573  */
    574 void
    575 wakeup_one(wchan_t ident)
    576 {
    577 	sleepq_t *sq;
    578 
    579 	if (cold)
    580 		return;
    581 
    582 	sq = sleeptab_lookup(&sleeptab, ident);
    583 	sleepq_wake(sq, ident, 1);
    584 }
    585 
    586 
    587 /*
    588  * General yield call.  Puts the current process back on its run queue and
    589  * performs a voluntary context switch.  Should only be called when the
    590  * current process explicitly requests it (e.g. sched_yield(2) in compat code).
    591  */
    592 void
    593 yield(void)
    594 {
    595 	struct lwp *l = curlwp;
    596 
    597 	lwp_lock(l);
    598 	if (l->l_stat == LSONPROC) {
    599 		KASSERT(lwp_locked(l, &sched_mutex));
    600 		l->l_priority = l->l_usrpri;
    601 	}
    602 	l->l_nvcsw++;
    603 	mi_switch(l, NULL);
    604 }
    605 
    606 /*
    607  * General preemption call.  Puts the current process back on its run queue
    608  * and performs an involuntary context switch.
    609  * The 'more' ("more work to do") argument is boolean.  preempt() calls made
    610  * when returning to userspace pass 0; "voluntary" preemptions in e.g. uiomove() pass 1.
    611  * This will be used to indicate to the SA subsystem that the LWP is
    612  * not yet finished in the kernel.
    613  */
    614 void
    615 preempt(int more)
    616 {
    617 	struct lwp *l = curlwp;
    618 	int r;
    619 
    620 	lwp_lock(l);
    621 	if (l->l_stat == LSONPROC) {
    622 		KASSERT(lwp_locked(l, &sched_mutex));
    623 		l->l_priority = l->l_usrpri;
    624 	}
    625 	l->l_nivcsw++;
    626 	r = mi_switch(l, NULL);
    627 
    628 	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
    629 		sa_preempt(l);
    630 }
    631 
    632 /*
    633  * The machine independent parts of context switch.  Switch to "new"
    634  * if non-NULL, otherwise let cpu_switch choose the next lwp.
    635  *
    636  * Returns 1 if another process was actually run.
    637  */
    638 int
    639 mi_switch(struct lwp *l, struct lwp *newl)
    640 {
    641 	struct schedstate_percpu *spc;
    642 	struct timeval tv;
    643 	int hold_count;
    644 	int retval, oldspl;
    645 	long s, u;
    646 #if PERFCTRS
    647 	struct proc *p = l->l_proc;
    648 #endif
    649 
    650 	LOCK_ASSERT(lwp_locked(l, NULL));
    651 
    652 	/*
    653 	 * Release the kernel_lock, as we are about to yield the CPU.
    654 	 */
    655 	hold_count = KERNEL_UNLOCK(0, l);
    656 
    657 #ifdef LOCKDEBUG
    658 	spinlock_switchcheck();
    659 	simple_lock_switchcheck();
    660 #endif
    661 #ifdef KSTACK_CHECK_MAGIC
    662 	kstack_check_magic(l);
    663 #endif
    664 
    665 	/*
    666 	 * It's safe to read the per CPU schedstate unlocked here, as all we
    667 	 * are after is the run time and that's guaranteed to have been last
    668 	 * updated by this CPU.
    669 	 */
    670 	KDASSERT(l->l_cpu == curcpu());
    671 	spc = &l->l_cpu->ci_schedstate;
    672 
    673 	/*
    674 	 * Compute the amount of time during which the current
    675 	 * process was running.
    676 	 */
    677 	microtime(&tv);
    678 	u = l->l_rtime.tv_usec +
    679 	    (tv.tv_usec - spc->spc_runtime.tv_usec);
    680 	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
    681 	if (u < 0) {
    682 		u += 1000000;
    683 		s--;
    684 	} else if (u >= 1000000) {
    685 		u -= 1000000;
    686 		s++;
    687 	}
    688 	l->l_rtime.tv_usec = u;
    689 	l->l_rtime.tv_sec = s;
    690 
    691 	/*
    692 	 * XXXSMP If we are using h/w performance counters, save context.
    693 	 */
    694 #if PERFCTRS
    695 	if (PMC_ENABLED(p)) {
    696 		pmc_save_context(p);
    697 	}
    698 #endif
    699 
    700 	/*
    701 	 * Acquire the sched_mutex if necessary.  It will be released by
    702 	 * cpu_switch once it has decided to idle, or picked another LWP
    703 	 * to run.
    704 	 */
    705 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    706 	if (l->l_mutex != &sched_mutex) {
    707 		mutex_enter(&sched_mutex);
    708 		lwp_unlock(l);
    709 	}
    710 #endif
    711 
    712 	/*
    713 	 * If on the CPU and we have gotten this far, then we must yield.
    714 	 */
    715 	KASSERT(l->l_stat != LSRUN);
    716 	if (l->l_stat == LSONPROC) {
    717 		KASSERT(lwp_locked(l, &sched_mutex));
    718 		l->l_stat = LSRUN;
    719 		setrunqueue(l);
    720 	}
    721 	uvmexp.swtch++;
    722 
    723 	/*
    724 	 * Process is about to yield the CPU; clear the appropriate
    725 	 * scheduling flags.
    726 	 */
    727 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    728 
    729 	LOCKDEBUG_BARRIER(&sched_mutex, 1);
    730 
    731 	/*
    732 	 * Switch to the new current LWP.  When we run again, we'll
    733 	 * return back here.
    734 	 */
    735 	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
    736 
    737 	if (newl == NULL || newl->l_back == NULL)
    738 		retval = cpu_switch(l, NULL);
    739 	else {
    740 		KASSERT(lwp_locked(newl, &sched_mutex));
    741 		remrunqueue(newl);
    742 		cpu_switchto(l, newl);
    743 		retval = 0;
    744 	}
    745 
    746 	/*
    747 	 * XXXSMP If we are using h/w performance counters, restore context.
    748 	 */
    749 #if PERFCTRS
    750 	if (PMC_ENABLED(p)) {
    751 		pmc_restore_context(p);
    752 	}
    753 #endif
    754 
    755 	/*
    756 	 * We're running again; record our new start time.  We might
    757 	 * be running on a new CPU now, so don't use the cached
    758 	 * schedstate_percpu pointer.
    759 	 */
    760 	KDASSERT(l->l_cpu == curcpu());
    761 	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
    762 
    763 	/*
    764 	 * Reacquire the kernel_lock.
    765 	 */
    766 	splx(oldspl);
    767 	KERNEL_LOCK(hold_count, l);
    768 
    769 	return retval;
    770 }
    771 
    772 /*
    773  * Initialize the (doubly-linked) run queues
    774  * to be empty.
    775  */
    776 void
    777 rqinit()
    778 {
    779 	int i;
    780 
    781 	for (i = 0; i < RUNQUE_NQS; i++)
    782 		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
    783 		    (struct lwp *)&sched_qs[i];
    784 
    785 	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
    786 }
    787 
    788 static inline void
    789 resched_lwp(struct lwp *l, u_char pri)
    790 {
    791 	struct cpu_info *ci;
    792 
    793 	/*
    794 	 * XXXSMP
    795 	 * Since l->l_cpu persists across a context switch,
    796 	 * this gives us *very weak* processor affinity, in
    797 	 * that we notify the CPU on which the process last
    798 	 * ran that it should try to switch.
    799 	 *
    800 	 * This does not guarantee that the process will run on
    801 	 * that processor next, because another processor might
    802 	 * grab it the next time it performs a context switch.
    803 	 *
    804 	 * This also does not handle the case where its last
    805 	 * CPU is running a higher-priority process, but every
    806 	 * other CPU is running a lower-priority process.  There
    807 	 * are ways to handle this situation, but they're not
    808 	 * currently very pretty, and we also need to weigh the
    809 	 * cost of moving a process from one CPU to another.
    810 	 *
    811 	 * XXXSMP
    812 	 * There is also the issue of locking the other CPU's
    813 	 * sched state, which we currently do not do.
    814 	 */
    815 	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
    816 	if (pri < ci->ci_schedstate.spc_curpriority)
    817 		cpu_need_resched(ci);
    818 }
    819 
    820 /*
    821  * Change process state to be runnable, placing it on the run queue if it is
    822  * in memory, and awakening the swapper if it isn't in memory.
    823  *
    824  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    825  */
    826 void
    827 setrunnable(struct lwp *l)
    828 {
    829 	struct proc *p = l->l_proc;
    830 
    831 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    832 	LOCK_ASSERT(lwp_locked(l, NULL));
    833 
    834 	switch (l->l_stat) {
    835 	case LSSTOP:
    836 		/*
    837 		 * If we're being traced (possibly because someone attached us
    838 		 * while we were stopped), check for a signal from the debugger.
    839 		 */
    840 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    841 			sigaddset(&l->l_sigpend.sp_set, p->p_xstat);
    842 			signotify(l);
    843 		}
    844 		p->p_nrlwps++;
    845 		break;
    846 	case LSSUSPENDED:
    847 		l->l_flag &= ~L_WSUSPEND;
    848 		p->p_nrlwps++;
    849 		break;
    850 	case LSSLEEP:
    851 		KASSERT(l->l_wchan != NULL);
    852 		break;
    853 	default:
    854 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    855 	}
    856 
    857 	/*
    858 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    859 	 * again.  If not, mark it as still sleeping.
    860 	 */
    861 	if (l->l_wchan != NULL) {
    862 		l->l_stat = LSSLEEP;
    863 		if ((l->l_flag & L_SINTR) != 0)
    864 			lwp_unsleep(l);
    865 		else {
    866 			lwp_unlock(l);
    867 #ifdef DIAGNOSTIC
    868 			panic("setrunnable: !L_SINTR");
    869 #endif
    870 		}
    871 		return;
    872 	}
    873 
    874 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
    875 
    876 	if (l->l_proc->p_sa)
    877 		sa_awaken(l);
    878 
    879 	/*
    880 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    881 	 * about to call mi_switch(), in which case it will yield.
    882 	 *
    883 	 * XXXSMP Will need to change for preemption.
    884 	 */
    885 #ifdef MULTIPROCESSOR
    886 	if (l->l_cpu->ci_curlwp == l) {
    887 #else
    888 	if (l == curlwp) {
    889 #endif
    890 		l->l_stat = LSONPROC;
    891 		l->l_slptime = 0;
    892 		lwp_unlock(l);
    893 		return;
    894 	}
    895 
    896 	/*
    897 	 * Set the LWP runnable.  If it's swapped out, we need to wake the swapper
    898 	 * to bring it back in.  Otherwise, enter it into a run queue.
    899 	 */
    900 	if (l->l_slptime > 1)
    901 		updatepri(l);
    902 	l->l_stat = LSRUN;
    903 	l->l_slptime = 0;
    904 
    905 	if (l->l_flag & L_INMEM) {
    906 		setrunqueue(l);
    907 		resched_lwp(l, l->l_priority);
    908 		lwp_unlock(l);
    909 	} else {
    910 		lwp_unlock(l);
    911 		wakeup(&proc0);
    912 	}
    913 }
    914 
    915 /*
    916  * Compute the priority of a process when running in user mode.
    917  * Arrange to reschedule if the resulting priority is better
    918  * than that of the current process.
    919  */
    920 void
    921 resetpriority(struct lwp *l)
    922 {
    923 	unsigned int newpriority;
    924 	struct proc *p = l->l_proc;
    925 
    926 	LOCK_ASSERT(lwp_locked(l, NULL));
    927 
    928 	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
    929 			NICE_WEIGHT * (p->p_nice - NZERO);
    930 	newpriority = min(newpriority, MAXPRI);
    931 	l->l_usrpri = newpriority;
    932 	lwp_changepri(l, l->l_usrpri);
    933 }
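
        /*
         * Example of the calculation above (added for illustration, assuming
         * PUSER == 50, NZERO == 20 and MAXPRI == 127): an LWP whose process
         * has p_estcpu == 4 << ESTCPU_SHIFT and p_nice == 25 gets
         * newpriority = 50 + 4 + 2 * (25 - 20) == 64.  Larger p_estcpu or
         * p_nice values push the result toward MAXPRI, i.e. a weaker
         * scheduling priority.
         */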
    934 
    935 /*
    936  * Recompute priority for all LWPs in a process.
    937  */
    938 void
    939 resetprocpriority(struct proc *p)
    940 {
    941 	struct lwp *l;
    942 
    943 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    944 
    945 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    946 		lwp_lock(l);
    947 		resetpriority(l);
    948 		lwp_unlock(l);
    949 	}
    950 }
    951 
    952 /*
    953  * We adjust the priority of the current process.  The priority of a process
    954  * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
    955  * is increased here.  The formula for computing priorities (in kern_synch.c)
    956  * will compute a different value each time p_estcpu increases. This can
    957  * cause a switch, but unless the priority crosses a PPQ boundary the actual
    958  * queue will not change.  The CPU usage estimator ramps up quite quickly
    959  * when the process is running (linearly), and decays away exponentially, at
    960  * a rate which is proportionally slower when the system is busy.  The basic
    961  * principle is that the system will 90% forget that the process used a lot
    962  * of CPU time in 5 * loadav seconds.  This causes the system to favor
    963  * processes which haven't run much recently, and to round-robin among other
    964  * processes.
    965  */
    966 
    967 void
    968 schedclock(struct lwp *l)
    969 {
    970 	struct proc *p = l->l_proc;
    971 
    972 	mutex_enter(&p->p_smutex);
    973 	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
    974 	lwp_lock(l);
    975 	resetpriority(l);
    976 	mutex_exit(&p->p_smutex);
    977 	if (l->l_priority >= PUSER)
    978 		l->l_priority = l->l_usrpri;
    979 	lwp_unlock(l);
    980 }
    981 
    982 /*
    983  * suspendsched:
    984  *
    985  *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    986  */
    987 void
    988 suspendsched(void)
    989 {
    990 #ifdef MULTIPROCESSOR
    991 	CPU_INFO_ITERATOR cii;
    992 	struct cpu_info *ci;
    993 #endif
    994 	struct lwp *l;
    995 	struct proc *p;
    996 
    997 	/*
    998 	 * We do this by process in order not to violate the locking rules.
    999 	 */
   1000 	mutex_enter(&proclist_mutex);
   1001 	PROCLIST_FOREACH(p, &allproc) {
   1002 		mutex_enter(&p->p_smutex);
   1003 
   1004 		if ((p->p_flag & P_SYSTEM) != 0) {
   1005 			mutex_exit(&p->p_smutex);
   1006 			continue;
   1007 		}
   1008 
   1009 		p->p_stat = SSTOP;
   1010 
   1011 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1012 			if (l == curlwp)
   1013 				continue;
   1014 
   1015 			lwp_lock(l);
   1016 
   1017 			/*
   1018 			 * Set L_WREBOOT so that the LWP will suspend itself
   1019 			 * when it tries to return to user mode.  We want to
   1020 			 * try to get as many LWPs as possible to
   1021 			 * the user / kernel boundary, so that they will
   1022 			 * release any locks that they hold.
   1023 			 */
   1024 			l->l_flag |= (L_WREBOOT | L_WSUSPEND);
   1025 
   1026 			if (l->l_stat == LSSLEEP &&
   1027 			    (l->l_flag & L_SINTR) != 0) {
   1028 				/* setrunnable() will release the lock. */
   1029 				setrunnable(l);
   1030 				continue;
   1031 			}
   1032 
   1033 			lwp_unlock(l);
   1034 		}
   1035 
   1036 		mutex_exit(&p->p_smutex);
   1037 	}
   1038 	mutex_exit(&proclist_mutex);
   1039 
   1040 	/*
   1041 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
   1042 	 * They'll trap into the kernel and suspend themselves in userret().
   1043 	 */
   1044 	sched_lock(0);
   1045 #ifdef MULTIPROCESSOR
   1046 	for (CPU_INFO_FOREACH(cii, ci))
   1047 		cpu_need_resched(ci);
   1048 #else
   1049 	cpu_need_resched(curcpu());
   1050 #endif
   1051 	sched_unlock(0);
   1052 }
   1053 
   1054 /*
   1055  * scheduler_fork_hook:
   1056  *
   1057  *	Inherit the parent's scheduler history.
   1058  */
   1059 void
   1060 scheduler_fork_hook(struct proc *parent, struct proc *child)
   1061 {
   1062 
   1063 	LOCK_ASSERT(mutex_owned(&parent->p_smutex));
   1064 
   1065 	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
   1066 	child->p_forktime = schedcpu_ticks;
   1067 }
   1068 
   1069 /*
   1070  * scheduler_wait_hook:
   1071  *
   1072  *	Chargeback parents for the sins of their children.
   1073  */
   1074 void
   1075 scheduler_wait_hook(struct proc *parent, struct proc *child)
   1076 {
   1077 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
   1078 	fixpt_t estcpu;
   1079 
   1080 	/* XXX Only if parent != init?? */
   1081 
   1082 	mutex_enter(&parent->p_smutex);
   1083 	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
   1084 	    schedcpu_ticks - child->p_forktime);
   1085 	if (child->p_estcpu > estcpu)
   1086 		parent->p_estcpu =
   1087 		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
   1088 	mutex_exit(&parent->p_smutex);
   1089 }
   1090 
   1091 /*
   1092  * sched_kpri:
   1093  *
   1094  *	Give an LWP a priority boost before it sleeps.  Currently we scale
   1095  *	user priorities into the range 60 -> 40, and kernel priorities into
   1096  *	40 -> 0.
   1097  */
   1098 int
   1099 sched_kpri(struct lwp *l)
   1100 {
   1101 	static const uint8_t kpri_tab[] = {
   1102 		 0,   0,   1,   2,   3,   4,   4,   5,
   1103 		 6,   7,   8,   8,   9,  10,  11,  12,
   1104 		12,  13,  14,  15,  16,  16,  17,  18,
   1105 		19,  20,  20,  21,  22,  23,  24,  24,
   1106 		25,  26,  27,  28,  28,  29,  30,  31,
   1107 		32,  32,  33,  34,  35,  36,  36,  37,
   1108 		38,  39,  40,  40,  40,  40,  41,  41,
   1109 		41,  41,  42,  42,  42,  42,  43,  43,
   1110 		43,  43,  44,  44,  44,  44,  45,  45,
   1111 		45,  45,  46,  46,  46,  47,  47,  47,
   1112 		47,  48,  48,  48,  48,  49,  49,  49,
   1113 		49,  50,  50,  50,  50,  51,  51,  51,
   1114 		51,  52,  52,  52,  52,  53,  53,  53,
   1115 		54,  54,  54,  54,  55,  55,  55,  55,
   1116 		56,  56,  56,  56,  57,  57,  57,  57,
   1117 		58,  58,  58,  58,  59,  59,  59,  60,
   1118 	};
   1119 
   1120 	return kpri_tab[l->l_priority];
   1121 }
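
        /*
         * Illustrative readings of kpri_tab[] above: kpri_tab[0] == 0 and
         * kpri_tab[49] == 39, so kernel priorities map onto 0 -> 40, while
         * kpri_tab[50] == 40 and kpri_tab[127] == 60, compressing the user
         * range into 40 -> 60 as described in the comment above sched_kpri().
         */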
   1122 
   1123 /*
   1124  * sched_unsleep:
   1125  *
   1126  *	This is called when the LWP has not been awoken normally but instead
   1127  *	interrupted: for example, if the sleep timed out.  Because of this,
   1128  *	it's not a valid action for running or idle LWPs.
   1129  */
   1130 void
   1131 sched_unsleep(struct lwp *l)
   1132 {
   1133 
   1134 	lwp_unlock(l);
   1135 	panic("sched_unsleep");
   1136 }
   1137 
   1138 /*
   1139  * sched_changepri:
   1140  *
   1141  *	Adjust the priority of an LWP.
   1142  */
   1143 void
   1144 sched_changepri(struct lwp *l, int pri)
   1145 {
   1146 
   1147 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1148 
   1149 	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
   1150 	    (l->l_priority / PPQ) == (l->l_usrpri / PPQ)) {
   1151 		l->l_priority = pri;
   1152 		return;
   1153 	}
   1154 
   1155 	remrunqueue(l);
   1156 	l->l_priority = pri;
   1157 	setrunqueue(l);
   1158 	resched_lwp(l, pri);
   1159 }
   1160 
   1161 /*
   1162  * Low-level routines to access the run queue.  Optimised assembler
   1163  * routines can override these.
   1164  */
   1165 
   1166 #ifndef __HAVE_MD_RUNQUEUE
   1167 
   1168 /*
   1169  * On some architectures, it's faster to use an MSB ordering for the priorities
   1170  * than the traditional LSB ordering.
   1171  */
   1172 #ifdef __HAVE_BIGENDIAN_BITOPS
   1173 #define	RQMASK(n) (0x80000000 >> (n))
   1174 #else
   1175 #define	RQMASK(n) (0x00000001 << (n))
   1176 #endif
   1177 
   1178 /*
   1179  * The primitives that manipulate the run queues.  sched_whichqs tells which
   1180  * of the 32 queues sched_qs have LWPs in them.  setrunqueue() puts LWPs
   1181  * into queues, remrunqueue() removes them from queues.  The running LWP is
   1182  * on no queue; other runnable LWPs are on a queue indexed by l_priority
   1183  * divided by PPQ (i.e. by 4) to shrink the 0-127 range of priorities into
   1184  * the 32 available queues.
   1185  */
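
        /*
         * For example (added for illustration): an LWP with l_priority == 54
         * lands on sched_qs[54 / PPQ] == sched_qs[13], and setrunqueue()
         * records that the queue is non-empty by setting RQMASK(13) in
         * sched_whichqs.
         */
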
   1186 #ifdef RQDEBUG
   1187 static void
   1188 checkrunqueue(int whichq, struct lwp *l)
   1189 {
   1190 	const struct prochd * const rq = &sched_qs[whichq];
   1191 	struct lwp *l2;
   1192 	int found = 0;
   1193 	int die = 0;
   1194 	int empty = 1;
   1195 	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
   1196 		if (l2->l_stat != LSRUN) {
   1197 			printf("checkrunqueue[%d]: lwp %p state (%d) "
   1198 			    " != LSRUN\n", whichq, l2, l2->l_stat);
   1199 		}
   1200 		if (l2->l_back->l_forw != l2) {
   1201 			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
   1202 			    "corrupt %p\n", whichq, l2, l2->l_back,
   1203 			    l2->l_back->l_forw);
   1204 			die = 1;
   1205 		}
   1206 		if (l2->l_forw->l_back != l2) {
   1207 			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
   1208 			    "corrupt %p\n", whichq, l2, l2->l_forw,
   1209 			    l2->l_forw->l_back);
   1210 			die = 1;
   1211 		}
   1212 		if (l2 == l)
   1213 			found = 1;
   1214 		empty = 0;
   1215 	}
   1216 	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
   1217 		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
   1218 		    whichq, rq);
   1219 		die = 1;
   1220 	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
   1221 		printf("checkrunqueue[%d]: bit clear for non-empty "
   1222 		    "run-queue %p\n", whichq, rq);
   1223 		die = 1;
   1224 	}
   1225 	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
   1226 		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
   1227 		    whichq, l);
   1228 		die = 1;
   1229 	}
   1230 	if (l != NULL && empty) {
   1231 		printf("checkrunqueue[%d]: empty run-queue %p with "
   1232 		    "active lwp %p\n", whichq, rq, l);
   1233 		die = 1;
   1234 	}
   1235 	if (l != NULL && !found) {
   1236 		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
   1237 		    whichq, l, rq);
   1238 		die = 1;
   1239 	}
   1240 	if (die)
   1241 		panic("checkrunqueue: inconsistency found");
   1242 }
   1243 #endif /* RQDEBUG */
   1244 
   1245 void
   1246 setrunqueue(struct lwp *l)
   1247 {
   1248 	struct prochd *rq;
   1249 	struct lwp *prev;
   1250 	const int whichq = l->l_priority / PPQ;
   1251 
   1252 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1253 
   1254 #ifdef RQDEBUG
   1255 	checkrunqueue(whichq, NULL);
   1256 #endif
   1257 #ifdef DIAGNOSTIC
   1258 	if (l->l_back != NULL || l->l_stat != LSRUN)
   1259 		panic("setrunqueue");
   1260 #endif
   1261 	sched_whichqs |= RQMASK(whichq);
   1262 	rq = &sched_qs[whichq];
   1263 	prev = rq->ph_rlink;
   1264 	l->l_forw = (struct lwp *)rq;
   1265 	rq->ph_rlink = l;
   1266 	prev->l_forw = l;
   1267 	l->l_back = prev;
   1268 #ifdef RQDEBUG
   1269 	checkrunqueue(whichq, l);
   1270 #endif
   1271 }
   1272 
   1273 void
   1274 remrunqueue(struct lwp *l)
   1275 {
   1276 	struct lwp *prev, *next;
   1277 	const int whichq = l->l_priority / PPQ;
   1278 
   1279 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1280 
   1281 #ifdef RQDEBUG
   1282 	checkrunqueue(whichq, l);
   1283 #endif
   1284 
   1285 #if defined(DIAGNOSTIC)
   1286 	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
   1287 		/* Shouldn't happen - interrupts disabled. */
   1288 		panic("remrunqueue: bit %d not set", whichq);
   1289 	}
   1290 #endif
   1291 	prev = l->l_back;
   1292 	l->l_back = NULL;
   1293 	next = l->l_forw;
   1294 	prev->l_forw = next;
   1295 	next->l_back = prev;
   1296 	if (prev == next)
   1297 		sched_whichqs &= ~RQMASK(whichq);
   1298 #ifdef RQDEBUG
   1299 	checkrunqueue(whichq, NULL);
   1300 #endif
   1301 }
   1302 
   1303 #undef RQMASK
   1304 #endif /* !defined(__HAVE_MD_RUNQUEUE) */
   1305