/*	$NetBSD: sched_4bsd.c,v 1.1.2.21 2007/03/24 15:11:20 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.2.21 2007/03/24 15:11:20 yamt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We have 32 run queues in descending priority of 0..31.  We maintain
 * a bitmask of non-empty queues in order to speed up finding the first
 * runnable process.  The bitmask is maintained only by machine-dependent
 * code, allowing the most efficient instructions to be used to find the
 * first non-empty queue.
 */

#define	RUNQUE_NQS		32      /* number of runqueues */
#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
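
/*
 * Example of the priority-to-queue folding implied by the defines above:
 * with RUNQUE_NQS == 32 and PPQ == 128 / 32 == 4, priorities 0..3 land
 * on queue 0, 4..7 on queue 1, and so on; e.g. an effective priority of
 * 50 selects queue 50 / PPQ == 12.
 */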

typedef struct subqueue {
	TAILQ_HEAD(, lwp) sq_queue;
} subqueue_t;
typedef struct runqueue {
	subqueue_t rq_subqueues[RUNQUE_NQS];	/* run queues */
	uint32_t rq_bitmap;	/* bitmap of non-empty queues */
} runqueue_t;
static runqueue_t global_queue;

static void schedcpu(void *);
static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

/*
 * Force a switch among equal-priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a round-robin
			 * interval without switching and may be hogging the
			 * CPU.  Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu(), 0);
}

#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
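
/*
 * Worked example, assuming the usual PRIO_MAX of 20 from <sys/resource.h>:
 * ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11, so after the
 * >> ESTCPU_SHIFT in resetpriority() below the estimator can worsen a
 * user priority by at most 36 points before ESTCPULIM() saturates it.
 */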

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
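
/*
 * Numeric check of the above for loadavg == 1: b == 2, so decay == 2/3,
 * and (2/3) ** 5.68 =~ 0.1, i.e. about 90% of p_estcpu is forgotten in
 * roughly 5 seconds of decay, matching the first column of the power
 * table.
 */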

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
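
/*
 * Fixed-point sketch of decay_cpu(), assuming FSHIFT == 11 so
 * FSCALE == 2048: a load average of 1.0 arrives as ldavg[0] == 2048,
 * loadfactor() doubles it to 4096, and the function computes
 * estcpu * 4096 / (4096 + 2048), i.e. estcpu * 2/3 -- exactly the
 * decay factor b / (b + 1) derived above.
 */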

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
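
/*
 * The early-exit threshold above compares seconds directly against the
 * load factor: since loadfac == 2 * ldavg and FSCALE == 1 << FSHIFT,
 * (n << FSHIFT) >= 7 * loadfac reduces to n >= 14 * loadavg.  So with a
 * load average of 1.0, an LWP that has slept 14 seconds or more has its
 * estimate zeroed outright rather than decayed iteratively.
 */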

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
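
/*
 * Sanity check of the 60-second figure: ccpu == exp(-1/20), and p_pctcpu
 * is multiplied by it once per second in schedcpu() below, so after 60
 * seconds a process accumulating no new ticks retains
 * exp(-60/20) == exp(-3) =~ 0.0498 of its old percentage -- about 95%
 * decayed.
 */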

/*
 * schedcpu:
 *
 *	Recompute process priorities once per second (every hz ticks).
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100)?
			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			    100 * (((fixpt_t) p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if ((l->l_flag & LW_IDLE) != 0)
					continue;
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup(&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif
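
/*
 * Example: with __HAVE_BIGENDIAN_BITOPS, queue 0 maps to bit 31
 * (0x80000000) rather than bit 0, so a machine whose native bit-scan
 * instruction searches from the most significant bit can still locate
 * the highest-priority (lowest-numbered) non-empty queue directly.
 */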

/*
 * The primitives that manipulate the run queues.  rq_bitmap tells which
 * of the 32 subqueues have LWPs in them.  sched_enqueue() puts LWPs onto
 * a queue, sched_dequeue() removes them.  The running LWP is on no queue;
 * other runnable LWPs are on the queue selected by their effective
 * priority divided by PPQ (4), which folds the 0-127 range of priorities
 * into the 32 available queues.
 */
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
	const subqueue_t * const sq = &rq->rq_subqueues[whichq];
	const uint32_t bitmap = rq->rq_bitmap;
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
		if (l2->l_stat != LSRUN) {
			printf("runqueue_check[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (bitmap & RQMASK(whichq)) != 0) {
		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("runqueue_check[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("runqueue_check[%d]: lwp %p not in run-queue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define	runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = lwp_eprio(l) / PPQ;

	LOCK_ASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, NULL);
	rq->rq_bitmap |= RQMASK(whichq);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
	runqueue_check(rq, whichq, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = lwp_eprio(l) / PPQ;

	LOCK_ASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, l);
	KASSERT((rq->rq_bitmap & RQMASK(whichq)) != 0);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
	if (TAILQ_EMPTY(&sq->sq_queue))
		rq->rq_bitmap &= ~RQMASK(whichq);
	runqueue_check(rq, whichq, NULL);
}

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	const uint32_t bitmap = rq->rq_bitmap;
	int whichq;

	if (bitmap == 0) {
		return NULL;
	}
#ifdef __HAVE_BIGENDIAN_BITOPS
	/* XXX should introduce a fast "fls" function. */
	for (whichq = 0; ; whichq++) {
		if ((bitmap & RQMASK(whichq)) != 0) {
			break;
		}
	}
#else
	whichq = ffs(bitmap) - 1;
#endif
	return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
}

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	const uint32_t bitmap = rq->rq_bitmap;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		const subqueue_t *sq;
		first = 1;
		sq = &rq->rq_subqueues[i];
		TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
			if (first) {
				(*pr)("%c%d",
				    (bitmap & RQMASK(i)) ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */
#undef RQMASK

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &sched_mutex;
}

void
sched_cpuattach(struct cpu_info *ci)
{

	ci->ci_schedstate.spc_mutex = &sched_mutex;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
	schedcpu(NULL);
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{

	return global_queue.rq_bitmap != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}
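
/*
 * Worked example for the formula above, assuming the traditional NetBSD
 * values PUSER == 50 and NZERO == 20: an LWP whose process has
 * p_estcpu == 8 << ESTCPU_SHIFT and p_nice == 30 (nice +10) gets
 * 50 + 8 + 2 * 10 == 78; a larger value is a *worse* priority, and the
 * result is clamped at MAXPRI.
 */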

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will forget 90% of the CPU time a process
 * used within 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
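
/*
 * Concretely: each statclock tick charged to a running LWP below adds
 * 1 << ESTCPU_SHIFT to p_estcpu, which after the >> ESTCPU_SHIFT in
 * resetpriority() worsens the computed user priority by one point per
 * tick; only every PPQ (4) points does the LWP actually migrate to a
 * different run queue.
 */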

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Charge back parents for the sins of their children.
 */
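/*
 * The arithmetic below: decaying p_estcpu_inherited over the
 * schedcpu_ticks elapsed since fork yields what remains of the estimate
 * the child started with, so (child->p_estcpu - estcpu) is the CPU usage
 * the child accumulated on its own, and that surplus is billed back to
 * the parent.
 */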
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(struct lwp *l)
{

	return runqueue_nextlwp(&global_queue);
}

/* Dummy */
void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

/* SysCtl */

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SCHED, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_KERN, KERN_SCHED, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "ccpu",
		SYSCTL_DESCR("Scheduler exponential decay value"),
		NULL, 0, &ccpu, 0,
		CTL_KERN, KERN_SCHED, CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */