/*	$NetBSD: kern_synch.c,v 1.166.2.10 2007/01/25 10:55:47 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.10 2007/01/25 10:55:47 yamt Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
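
/*
 * Hedged sketch, excluded from the build: schedcpu() multiplies p_pctcpu
 * by ccpu once per second, so after 60 seconds exp(-60/20) == exp(-3), or
 * roughly 5%, of the original value remains.  FSHIFT == 11 is an assumed
 * value matching the CCPU_SHIFT estimate above.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	DEMO_FSHIFT	11
#define	DEMO_FSCALE	(1 << DEMO_FSHIFT)

int
main(void)
{
	uint32_t demo_ccpu = (uint32_t)(0.95122942450071400909 * DEMO_FSCALE);
	uint32_t pct = DEMO_FSCALE;		/* start at "100%" */
	int i;

	for (i = 0; i < 60; i++)
		pct = ((uint64_t)pct * demo_ccpu) >> DEMO_FSHIFT;
	/* prints ~4, i.e. about 5% survives after a minute */
	printf("%u\n", (unsigned)((pct * 100) >> DEMO_FSHIFT));
	return 0;
}
#endif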

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		int sig = 0;
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1) {
			goto skip;
		}

		/*
		 * p_pctcpu is only for ps.
		 */
		mutex_enter(&p->p_stmutex);
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		mutex_exit(&p->p_stmutex);
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			if (l->l_slptime <= 1)
				resetpriority(l);
			lwp_unlock(l);
		}
skip:
		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}
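
/*
 * Hedged userland sketch, excluded from the build, of the RLIMIT_CPU
 * policy enforced above: SIGXCPU once accumulated CPU time reaches the
 * soft limit (which schedcpu() then bumps by 5 seconds), SIGKILL once
 * the bumped soft limit and the hard limit are both exceeded.
 */
#if 0
#include <sys/resource.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
xcpu_handler(int sig)
{

	(void)sig;
	write(STDERR_FILENO, "SIGXCPU\n", 8);
}

int
main(void)
{
	struct rlimit rl = { 2, 4 };	/* soft 2s, hard 4s */

	signal(SIGXCPU, xcpu_handler);
	if (setrlimit(RLIMIT_CPU, &rl) != 0) {
		perror("setrlimit");
		return 1;
	}
	/*
	 * Spin: SIGXCPU arrives at ~2s of CPU time; the kernel raises the
	 * soft limit to 7s, and SIGKILL follows once that is exceeded.
	 */
	for (;;)
		continue;
}
#endif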

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked. */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * ltsleep: see mtsleep() for comments.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be relocked before returning to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
mtsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, priority & PNORELOCK);

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (mtx != NULL) {
		LOCK_ASSERT(mutex_owned(mtx));
		mutex_exit(mtx);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (mtx != NULL && (priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
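
/*
 * A hypothetical consumer of mtsleep() (the "frob" names are invented for
 * illustration and excluded from the build).  The mutex is dropped while
 * asleep and reacquired before return, so the condition must be re-tested
 * in a loop.
 */
#if 0
static kmutex_t frob_mtx;
static int frob_ready;

static int
frob_wait(void)
{
	int error = 0;

	mutex_enter(&frob_mtx);
	while (!frob_ready && error == 0)
		error = mtsleep(&frob_ready, PWAIT | PCATCH, "frobwt",
		    hz, &frob_mtx);
	mutex_exit(&frob_mtx);
	return error;	/* 0, EWOULDBLOCK, EINTR or ERESTART */
}
#endif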

/*
 * sched_pause:
 *
 *	General sleep call for situations where a wake-up is not expected.
 */
int
sched_pause(const char *wmesg, boolean_t intr, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	return sleepq_unblock(timo, intr);
}

void
sa_awaken(struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, NULL));

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 *
 * The 'more' ("more work to do") argument is a boolean.  preempt() calls
 * made when returning to user space pass 0; "voluntary" preemptions in
 * e.g. uiomove() pass 1.  It is used to tell the SA subsystem that the
 * LWP is not yet finished in the kernel.
 */
void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	r = mi_switch(l, NULL);

	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	KERNEL_UNLOCK_ALL(l, &hold_count);

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If we're still on the CPU and have gotten this far, then we
	 * must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock.
	 */
	splx(oldspl);
	KERNEL_LOCK(hold_count, l);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&l->l_sigpend.sp_set, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_proc->p_sa)
		sa_awaken(l);

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(lwp_locked(l, NULL));

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	lwp_changepri(l, l->l_usrpri);
}
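
/*
 * A worked instance of the formula above, assuming PUSER == 50 and
 * NZERO == 20 as in the kernel headers of this era: p_estcpu of
 * (8 << ESTCPU_SHIFT) and nice == 25 yield
 *	50 + 8 + NICE_WEIGHT * (25 - 20) == 68,
 * which min() then clamps to MAXPRI (127) if necessary.  A standalone
 * restatement, excluded from the build:
 */
#if 0
static unsigned int
demo_userpri(unsigned int estcpu, int nice)
{
	unsigned int newpriority;

	newpriority = 50 /* PUSER */ + (estcpu >> 11 /* ESTCPU_SHIFT */) +
	    2 /* NICE_WEIGHT */ * (nice - 20 /* NZERO */);
	return newpriority < 127 ? newpriority : 127;	/* min(, MAXPRI) */
}
#endif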

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_exit(&p->p_smutex);
	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to the
			 * user/kernel boundary, so that they will release
			 * any locks that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_exit(&parent->p_smutex);
}

/*
 * sched_kpri:
 *
 *	Give an LWP a priority boost before it sleeps.  Currently we scale
 *	user priorities into the range 60 -> 40, and kernel priorities into
 *	40 -> 0.
 */
int
sched_kpri(struct lwp *l)
{
	static const uint8_t kpri_tab[] = {
		 0,   0,   1,   2,   3,   4,   4,   5,
		 6,   7,   8,   8,   9,  10,  11,  12,
		12,  13,  14,  15,  16,  16,  17,  18,
		19,  20,  20,  21,  22,  23,  24,  24,
		25,  26,  27,  28,  28,  29,  30,  31,
		32,  32,  33,  34,  35,  36,  36,  37,
		38,  39,  40,  40,  40,  40,  41,  41,
		41,  41,  42,  42,  42,  42,  43,  43,
		43,  43,  44,  44,  44,  44,  45,  45,
		45,  45,  46,  46,  46,  47,  47,  47,
		47,  48,  48,  48,  48,  49,  49,  49,
		49,  50,  50,  50,  50,  51,  51,  51,
		51,  52,  52,  52,  52,  53,  53,  53,
		54,  54,  54,  54,  55,  55,  55,  55,
		56,  56,  56,  56,  57,  57,  57,  57,
		58,  58,  58,  58,  59,  59,  59,  60,
	};

	return kpri_tab[l->l_priority];
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (l->l_usrpri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process
 * is on no queue; other processes are on a queue related to p->p_priority,
 * divided by PPQ (4) to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
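
/*
 * A sketch of the queue-selection step that cpu_switch() performs, under
 * the LSB ordering of RQMASK where the lowest set bit marks the best
 * priority.  Purely illustrative and excluded from the build; real MD
 * code typically uses a find-first-set instruction instead of this loop.
 */
#if 0
static int
demo_nextqueue(uint32_t whichqs)
{
	int q;

	if (whichqs == 0)
		return -1;		/* no runnable LWPs: idle */
	for (q = 0; (whichqs & (1U << q)) == 0; q++)
		continue;
	return q;			/* index into sched_qs[] */
}
#endif
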
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */