/*	$NetBSD: sched_4bsd.c,v 1.1.2.1 2007/02/20 21:48:46 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.2.1 2007/02/20 21:48:46 rmind Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

/*
 * Run queues.
 *
 * We have 32 run queues, indexed 0..31 in order of descending priority.
 * We maintain a bitmask of non-empty queues in order to speed up finding
 * the first runnable process.  The bitmask is maintained only by
 * machine-dependent code, allowing the most efficient instructions to be
 * used to find the first non-empty queue.
 */

#define	RUNQUE_NQS		32      /* number of runqueues */
#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
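
/*
 * Illustrative mapping, assuming the usual 0..127 priority range with
 * PUSER == 50 and MAXPRI == 127: priorities 0..3 map to queue 0, a
 * freshly reset user LWP at PUSER lands in queue 50 / PPQ == 12, and
 * the weakest priority, MAXPRI, lands in queue 127 / PPQ == 31.
 */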

struct prochd {
	struct lwp *ph_link;
	struct lwp *ph_rlink;
};

struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void schedcpu(void *);
void updatepri(struct lwp *);
void resetpriority(struct lwp *);
void resetprocpriority(struct proc *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

int rrticks; /* number of hardclock ticks per sched_tick() */

/*
 * Force a context switch among equal-priority processes every 100ms.
 * Called from hardclock() every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
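
/*
 * Worked example, assuming the usual PRIO_MAX == 20, PUSER == 50 and
 * MAXPRI == 127: ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11, so the
 * (p_estcpu >> ESTCPU_SHIFT) term in resetpriority() below never exceeds
 * 36, and even a maximally niced process stays within MAXPRI:
 * 50 + 36 + 2 * 20 == 126 <= 127.
 */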

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
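
/*
 * A quick sanity check of the first table entry: with loadav == 1,
 * b == 2 and decay == 2/3.  Solving (2/3) ** power == .1 exactly gives
 * power == ln(.1) / ln(2/3) =~ 2.30 / 0.405 =~ 5.68, matching the table
 * (and close to the 5 * loadav == 5 the design asks for).
 */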

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
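
/*
 * Illustrative numbers, assuming the common FSHIFT == 11 (FSCALE == 2048):
 * a load average of 1.00 is ldavg == FSCALE, so loadfac == 2 * FSCALE ==
 * 4096 and decay_cpu() returns estcpu * 4096 / 6144, i.e. estcpu scaled
 * by the decay factor b / (b + 1) == 2/3 derived above.
 */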

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
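
/*
 * For example, with a load average of 1.00 (loadfac == 2 * FSCALE), the
 * shortcut above fires once n << FSHIFT >= 14 * FSCALE, i.e. n >= 14:
 * an LWP that has slept for 14 seconds gets its estcpu zeroed outright
 * rather than decayed one second at a time.
 */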

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & L_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100)?
			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			    100 * (((fixpt_t) p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if ((l->l_flag & L_IDLE) != 0)
					continue;
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}
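
/*
 * Illustrative decay rate: ccpu is exp(-1/20) in FSCALE fixed point, and
 * schedcpu() reschedules itself every hz ticks, i.e. once per second.
 * Sixty applications of p_pctcpu = (p_pctcpu * ccpu) >> FSHIFT therefore
 * scale p_pctcpu by exp(-60/20) == exp(-3) =~ 0.05, which is the "decay
 * 95% of p_pctcpu in 60 seconds" promised above.
 */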

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

void
sched_setup(void)
{
	rrticks = hz / 10;

	schedcpu(NULL);
}

void
sched_setrunnable(struct lwp *l)
{
	if (l->l_slptime > 1)
		updatepri(l);
}

boolean_t
sched_curcpu_runnable_p(void)
{

	return sched_whichqs != 0;
}

void
sched_nice(struct proc *chgp, int n)
{
	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}
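
/*
 * Worked example, assuming the usual PUSER == 50, NZERO == 20 and
 * MAXPRI == 127: an LWP with p_estcpu == 16 << ESTCPU_SHIFT and
 * p_nice == 25 gets newpriority == 50 + 16 + 2 * (25 - 20) == 76.
 * Larger numbers are weaker priorities, so accumulated CPU time and a
 * raised nice value both push the LWP towards later run queues.
 */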

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */
void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}
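
/*
 * Rate sketch: each schedclock() tick adds 1 << ESTCPU_SHIFT to p_estcpu,
 * so the (p_estcpu >> ESTCPU_SHIFT) term of the priority formula grows by
 * one per tick.  With PPQ == 4, roughly four ticks of accumulated CPU time
 * move a running LWP down one run queue, until ESTCPULIM() caps the climb.
 */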

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Charge back parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}
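
/*
 * In other words: the estcpu snapshot taken at fork time is decayed over
 * the child's lifetime (schedcpu_ticks - p_forktime seconds), and only
 * what the child accumulated beyond that decayed inheritance is billed
 * to the parent.  A child that exits with less estcpu than its decayed
 * inheritance costs the parent nothing.
 */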

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;

	if (l->l_priority < PUSER)
		return;
	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l);
	resched_cpu(l, pri);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  sched_enqueue() puts
 * processes into queues, sched_dequeue() removes them from queues.  The
 * running process is on no queue; other processes are on a queue related
 * to p->p_priority, divided by PPQ (4) to shrink the 0-127 range of
 * priorities into the 32 available queues.
 */
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
sched_enqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("sched_enqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("sched_dequeue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

struct lwp *
sched_nextlwp(void)
{
	const struct prochd *rq;
	struct lwp *l;
	int whichq;

	if (sched_whichqs == 0) {
		return NULL;
	}
#ifdef __HAVE_BIGENDIAN_BITOPS
	for (whichq = 0; ; whichq++) {
		if ((sched_whichqs & RQMASK(whichq)) != 0) {
			break;
		}
	}
#else
	whichq = ffs(sched_whichqs) - 1;
#endif
	rq = &sched_qs[whichq];
	l = rq->ph_link;
	return l;
}
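
/*
 * For example, with sched_whichqs == 0x00000104 (queues 2 and 8 non-empty
 * under the LSB ordering), ffs() returns 3, so whichq == 2 and the head
 * of the highest-priority non-empty queue is chosen.
 */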

#endif /* !defined(__HAVE_MD_RUNQUEUE) */

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	struct prochd *ph;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		first = 1;
		ph = &sched_qs[i];
		for (l = ph->ph_link; l != (void *)ph; l = l->l_forw) {
			if (first) {
				(*pr)("%c%d",
				    (sched_whichqs & RQMASK(i))
				    ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */
#undef RQMASK