/*	$NetBSD: sched_4bsd.c,v 1.2 2007/05/17 14:51:41 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.2 2007/05/17 14:51:41 yamt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We have 32 run queues in descending priority of 0..31.  We maintain
 * a bitmask of non-empty queues in order to speed up finding the first
 * runnable process.  The bitmask is maintained only by machine-dependent
 * code, allowing the most efficient instructions to be used to find the
 * first non-empty queue.
 */

#define	RUNQUE_NQS		32      /* number of runqueues */
#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */

typedef struct subqueue {
	TAILQ_HEAD(, lwp) sq_queue;
} subqueue_t;
typedef struct runqueue {
	subqueue_t rq_subqueues[RUNQUE_NQS];	/* run queues */
	uint32_t rq_bitmap;	/* bitmap of non-empty queues */
} runqueue_t;
static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu(), 0);
}

#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
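
/*
 * For concreteness (assuming the usual PRIO_MAX of 20 from
 * <sys/resource.h>): ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11,
 * so the (p_estcpu >> ESTCPU_SHIFT) term in resetpriority() below is
 * bounded by 36, i.e. PPQ short of NICE_WEIGHT * PRIO_MAX == 40.
 */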

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 1 + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
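
/*
 * As a sanity check on the derivation above, the following standalone
 * userland sketch (an illustration only, not part of the kernel build,
 * hence guarded out with #if 0; link with -lm) solves
 * decay ** power == .1 for power with decay = b/(b+1) and
 * b = 2 * loadav.  It should reproduce the table above.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	int loadav;

	for (loadav = 1; loadav <= 4; loadav++) {
		double b = 2.0 * loadav;
		double decay = b / (b + 1.0);

		/* power = ln(.1) / ln(decay); expect 5.68, 10.32, ... */
		printf("loadav: %d power: %.2f\n",
		    loadav, log(0.1) / log(decay));
	}
	return 0;
}
#endif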

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
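
/*
 * To see where the "seven times the loadfactor" cutoff above comes
 * from, consider loadav = 1, i.e. loadfac = 2 * FSCALE: each decay step
 * multiplies estcpu by loadfac / (loadfac + FSCALE) = 2/3, and the
 * cutoff is n >= 14 steps.  The guarded userland sketch below (an
 * illustration only, not kernel code) decays the worst case of 255
 * units for 14 steps and lands below 1 unit, as the comment promises:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	double estcpu = 255.0;	/* worst case, in units of 1 << ESTCPU_SHIFT */
	int n;

	for (n = 0; n < 14; n++)	/* 7 * loadfactor(1) == 14 steps */
		estcpu *= 2.0 / 3.0;
	printf("estcpu after 14 steps: %.3f\n", estcpu);	/* =~ 0.873 */
	return 0;
}
#endif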

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct proc *p, int minslp)
{
	struct lwp *l;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	/*
	 * If the process has slept the entire second,
	 * stop recalculating its priority until it wakes up.
	 */
	if (minslp <= 1) {
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			if (l->l_slptime <= 1 && l->l_priority >= PUSER)
				resetpriority(l);
			lwp_unlock(l);
		}
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#define	RQMASK(n) (0x00000001 << (n))

/*
 * The primitives that manipulate the run queues.  rq_bitmap tells which
 * of the 32 subqueues have processes in them.  sched_enqueue() puts
 * processes into queues, sched_dequeue() removes them from queues.  The
 * running process is on no queue, other processes are on a queue related
 * to p->p_priority, divided by PPQ (4) to shrink the 0-127 range of
 * priorities into the 32 available queues.
 */
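
/*
 * A guarded userland sketch (an illustration only, not kernel code) of
 * the division described above: with PPQ == 4, each run queue covers
 * four adjacent priority values.
 */
#if 0
#include <stdio.h>

#define	PPQ	4	/* priorities per queue, as defined above */

int
main(void)
{
	int prio;

	/* Sample the 0-127 priority range and show the queue it maps to. */
	for (prio = 0; prio < 128; prio += 31)
		printf("priority %3d -> queue %2d\n", prio, prio / PPQ);
	return 0;
}
#endif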
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
	const subqueue_t * const sq = &rq->rq_subqueues[whichq];
	const uint32_t bitmap = rq->rq_bitmap;
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
		if (l2->l_stat != LSRUN) {
			printf("runqueue_check[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (bitmap & RQMASK(whichq)) != 0) {
		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("runqueue_check[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("runqueue_check[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define	runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = lwp_eprio(l) / PPQ;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, NULL);
	rq->rq_bitmap |= RQMASK(whichq);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
	runqueue_check(rq, whichq, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = lwp_eprio(l) / PPQ;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, l);
	KASSERT((rq->rq_bitmap & RQMASK(whichq)) != 0);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
	if (TAILQ_EMPTY(&sq->sq_queue))
		rq->rq_bitmap &= ~RQMASK(whichq);
	runqueue_check(rq, whichq, NULL);
}

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	const uint32_t bitmap = rq->rq_bitmap;
	int whichq;

	if (bitmap == 0) {
		return NULL;
	}
	whichq = ffs(bitmap) - 1;
	return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
}
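
/*
 * runqueue_nextlwp() above leans on ffs(), which returns the 1-based
 * index of the least significant set bit (0 when no bits are set); with
 * the LSB queue ordering, that bit marks the highest-priority non-empty
 * queue.  A guarded userland illustration (not kernel code):
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	uint32_t bitmap = 0x00000148;	/* queues 3, 6 and 8 non-empty */

	/* ffs() is 1-based, hence the "- 1" in runqueue_nextlwp(). */
	printf("first non-empty queue: %d\n", ffs(bitmap) - 1);	/* 3 */
	return 0;
}
#endif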

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	const uint32_t bitmap = rq->rq_bitmap;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		const subqueue_t *sq;
		first = 1;
		sq = &rq->rq_subqueues[i];
		TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
			if (first) {
				(*pr)("%c%d",
				    (bitmap & RQMASK(i)) ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */
#undef RQMASK

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	KASSERT(rq != NULL);	/* XXX KM_NOSLEEP allocation is unchecked */
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
	sched_pstats(NULL);
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	runqueue_t *rq = curcpu()->ci_schedstate.spc_sched_info;

	return (global_queue.rq_bitmap | rq->rq_bitmap) != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */
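
/*
 * A worked instance of the resetpriority() formula above, assuming the
 * traditional values PUSER == 50, NZERO == 20 and MAXPRI == 127 (the
 * authoritative definitions live in <sys/param.h>).  A guarded userland
 * illustration only, not kernel code:
 */
#if 0
#include <stdio.h>

#define	PUSER		50	/* assumed traditional value */
#define	NZERO		20	/* assumed traditional value */
#define	MAXPRI		127	/* assumed traditional value */
#define	NICE_WEIGHT	2
#define	ESTCPU_SHIFT	11

int
main(void)
{
	unsigned int estcpu = 10 << ESTCPU_SHIFT;	/* decayed CPU usage */
	int nice = NZERO + 5;				/* niced by +5 */
	unsigned int newpriority;

	newpriority = PUSER + (estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (nice - NZERO);
	if (newpriority > MAXPRI)
		newpriority = MAXPRI;
	printf("user priority: %u\n", newpriority);	/* 50 + 10 + 10 = 70 */
	return 0;
}
#endif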

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}
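
/*
 * For example (numbers purely illustrative): if the child inherited a
 * p_estcpu of 8 units at fork, that inheritance would have decayed to
 * 2 units by exit, and the child dies with a p_estcpu of 10, then the
 * parent is charged the 10 - 2 = 8 units the child is deemed to have
 * accumulated on its own.
 */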

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	lwp_t *l1, *l2;

	/* For now, just pick the highest priority LWP. */
	l1 = runqueue_nextlwp(curcpu()->ci_schedstate.spc_sched_info);
	l2 = runqueue_nextlwp(&global_queue);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) < lwp_eprio(l1))
		return l2;
	else
		return l1;
}

/*
 * Dummy routines: the 4BSD scheduler keeps no per-LWP state that needs
 * to be set up at fork time or torn down at exit.
 */
void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

/* SysCtl */

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node != NULL) {
		sysctl_createv(clog, 0, &node, NULL,
			CTLFLAG_PERMANENT,
			CTLTYPE_STRING, "name", NULL,
			NULL, 0, __UNCONST("4.4BSD"), 0,
			CTL_CREATE, CTL_EOL);
	}
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */