/*	$NetBSD: sched_4bsd.c,v 1.1.6.7 2007/08/21 13:59:44 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.6.7 2007/08/21 13:59:44 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain a bitmask of non-empty queues in order to speed up finding
 * the first runnable process.
 */

#define	PPQ		4			/* priorities per queue */
#define	RUNQUE_NQS	(PRI_COUNT / PPQ)	/* number of runqueues */

typedef struct subqueue {
	TAILQ_HEAD(, lwp) sq_queue;
} subqueue_t;

typedef struct runqueue {
	subqueue_t	rq_subqueues[RUNQUE_NQS];	/* run queues */
	uint64_t	rq_bitmap;	/* bitmap of non-empty queues */
} runqueue_t;
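
/*
 * Illustrative note (added commentary, not from the original file): with
 * PPQ == 4 and, assuming PRI_COUNT == 256, RUNQUE_NQS == 64, so rq_bitmap
 * carries exactly one bit per sub-queue.  Bit n is set iff
 * rq_subqueues[n] is non-empty, which lets runqueue_nextlwp() below find
 * the best queue with a couple of ffs() calls instead of walking all of
 * the lists.
 */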

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = PPQ;

/*
 * Force a switch among equal-priority processes every 100ms.
 * Called from hardclock() every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	spc_lock(ci);
	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(ci, 0);
	spc_unlock(ci);
}

#define	NICE_WEIGHT	1			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
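
/*
 * Worked example (added commentary; assumes PRIO_MAX == 20 from
 * <sys/resource.h>): ESTCPU_MAX == (1 * 20 - 4) << 11 == 16 << 11, so
 * after the >> ESTCPU_SHIFT in resetpriority() the CPU usage estimate
 * can lower a user priority by at most 16 steps, i.e. four PPQ-sized
 * run queues.
 */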

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 1 + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1;
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
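
/*
 * Worked example (added commentary, not from the original file): with a
 * load average of 2.0, b == 4 and decay == 4/5.  The table above gives
 * power =~ 10.32, and indeed (4/5)**10.32 =~ 0.1: a process that stops
 * running forgets roughly 90% of its accumulated p_estcpu in about ten
 * seconds.
 */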

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* Avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
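
/*
 * Usage sketch (added commentary): updatepri() below calls this with
 * n == l_slptime.  With a load average of 1.0, loadfac == 2 * FSCALE,
 * so the early-exit test fires once n >= 14; that matches the comment
 * above, since seven times the load factor is 14 seconds at loadavg 1.
 */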

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct proc *p, int minslp)
{
	struct lwp *l;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	/*
	 * If the process has slept the entire second,
	 * stop recalculating its priority until it wakes up.
	 */
	if (minslp <= 1) {
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			if (l->l_slptime <= 1 && l->l_priority < PRI_KERNEL)
				resetpriority(l);
			lwp_unlock(l);
		}
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#define	RQMASK(n)	(1ULL << (n))
#define	WHICHQ(p)	(RUNQUE_NQS - 1 - ((p) / PPQ))
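
/*
 * Worked example (added commentary; assumes PRI_COUNT == 256, so
 * RUNQUE_NQS == 64): the best priority, 255, maps to WHICHQ(255) ==
 * 63 - 63 == 0, while priority 0 maps to queue 63.  Because better
 * priorities land on lower bit numbers, ffs() on rq_bitmap returns the
 * best non-empty queue directly; that is the MSB ordering the comment
 * above refers to.
 */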

/*
 * The primitives that manipulate the run queues.  rq_bitmap tells which
 * of the queues have LWPs in them.  sched_enqueue() puts LWPs onto the
 * queues, sched_dequeue() removes them.
 */
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
	const subqueue_t * const sq = &rq->rq_subqueues[whichq];
	/*
	 * Take a full 64-bit copy of the bitmap; a 32-bit copy would
	 * truncate the state of queues 32-63.
	 */
	const uint64_t bitmap = rq->rq_bitmap;
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
		if (l2->l_stat != LSRUN) {
			printf("runqueue_check[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (bitmap & RQMASK(whichq)) != 0) {
		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("runqueue_check[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("runqueue_check[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define	runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = WHICHQ(lwp_eprio(l));
	const uint64_t rqmask = RQMASK(whichq);

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, NULL);
	rq->rq_bitmap |= rqmask;
	sq = &rq->rq_subqueues[whichq];
	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
	runqueue_check(rq, whichq, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = WHICHQ(lwp_eprio(l));
	const uint64_t rqmask = RQMASK(whichq);

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, l);
	KASSERT((rq->rq_bitmap & rqmask) != 0);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
	if (TAILQ_EMPTY(&sq->sq_queue))
		rq->rq_bitmap &= ~rqmask;
	runqueue_check(rq, whichq, NULL);
}

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	const uint64_t bitmap = rq->rq_bitmap;
	int whichq;

	if (bitmap == 0) {
		return NULL;
	}
	whichq = ffs((uint32_t)bitmap) - 1;
	if (whichq != -1)
		return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
	whichq = ffs((uint32_t)(bitmap >> 32)) - 1;
	return TAILQ_FIRST(&rq->rq_subqueues[whichq + 32].sq_queue);
}
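
/*
 * Design note (added commentary): ffs() takes an int, so the 64-bit
 * bitmap is scanned one 32-bit half at a time.  The low half is checked
 * first; with the MSB ordering above it holds the best priorities, so
 * the second ffs() only runs when queues 32-63 alone are occupied.
 */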

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	const uint64_t bitmap = rq->rq_bitmap;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		const subqueue_t *sq;
		first = 1;
		sq = &rq->rq_subqueues[i];
		TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
			if (first) {
				(*pr)("%c%d",
				    (bitmap & RQMASK(i)) ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	/* KM_NOSLEEP can fail; catch that early rather than fault later. */
	KASSERT(rq != NULL);
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	struct schedstate_percpu *spc;
	runqueue_t *rq;

	spc = &curcpu()->ci_schedstate;
	rq = spc->spc_sched_info;

	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
		return (global_queue.rq_bitmap | rq->rq_bitmap) != 0;
	return rq->rq_bitmap != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
	int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP KASSERT(mutex_owned(&p->p_stmutex)); */
	KASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	/*
	 * Compute in a signed type so that the clamp below works; with
	 * an unsigned newpriority a negative result would wrap around
	 * and defeat the clamp.
	 */
	newpriority = PRI_KERNEL - 1 - (p->p_estcpu >> ESTCPU_SHIFT) -
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = imax(newpriority, 0);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */
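
/*
 * Worked example (added commentary): an LWP that runs continuously gains
 * (1 << ESTCPU_SHIFT) of p_estcpu per sched_schedclock() call, so
 * resetpriority() degrades its user priority by one step per call; only
 * every PPQ-th step actually moves it to a different run queue.
 */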

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority < PRI_KERNEL)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	KASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Charge back parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	struct schedstate_percpu *spc;
	lwp_t *l1, *l2;

	spc = &curcpu()->ci_schedstate;

	/* For now, just pick the highest priority LWP. */
	l1 = runqueue_nextlwp(spc->spc_sched_info);
	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0))
		return l1;
	l2 = runqueue_nextlwp(&global_queue);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) > lwp_eprio(l1))
		return l2;
	else
		return l1;
}

void
sched_lwp_fork(struct lwp *l)
{

	/* No per-LWP scheduler state to set up. */
}

void
sched_lwp_exit(struct lwp *l)
{

	/* No per-LWP scheduler state to tear down. */
}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	KASSERT(node != NULL);

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */