/*	$NetBSD: sched_4bsd.c,v 1.1.6.15 2007/11/05 16:51:52 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.6.15 2007/11/05 16:51:52 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain bitmasks of non-empty queues in order to speed up finding
 * the first runnable process.  Since there can be (by definition) few
 * real time LWPs in the system, we maintain them on a linked list,
 * sorted by priority.
 */

#define	PPB_SHIFT	5
#define	PPB_MASK	31

#define	NUM_Q		(NPRI_KERNEL + NPRI_USER)
#define	NUM_PPB		(1 << PPB_SHIFT)
#define	NUM_B		(NUM_Q / NUM_PPB)

typedef struct runqueue {
	TAILQ_HEAD(, lwp) rq_fixedpri;		/* realtime, kthread */
	u_int		rq_count;		/* total # jobs */
	uint32_t	rq_bitmap[NUM_B];	/* bitmap of queues */
	TAILQ_HEAD(, lwp) rq_queue[NUM_Q];	/* user+kernel */
} runqueue_t;
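
/*
 * Worked sizing example (added commentary, not from the original
 * source): with NPRI_KERNEL + NPRI_USER == 96 (enforced by the #error
 * check above runqueue_nextlwp below), NUM_PPB == 32 priorities per
 * bitmap word, so NUM_B == 3 words cover all 96 timeshared queues.
 * A priority p lives in word p >> PPB_SHIFT at bit
 * (0x80000000U >> (p & PPB_MASK)); e.g. p == 37 maps to rq_bitmap[1],
 * bit 0x04000000.
 */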

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

fixpt_t decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = 1;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P())
		return;

	if (spc->spc_flags & SPCF_SEENRR) {
		/*
		 * The process has already been through a roundrobin
		 * without switching and may be hogging the CPU.
		 * Indicate that the process should yield.
		 */
		spc->spc_flags |= SPCF_SHOULDYIELD;
	} else
		spc->spc_flags |= SPCF_SEENRR;

	cpu_need_resched(ci, 0);
}

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
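
/*
 * Worked example (added commentary, not from the original source;
 * assumes PRIO_MAX == 20): with ESTCPU_SHIFT == 11, each (1 << 11) of
 * l_estcpu costs one user priority level, ESTCPU_MAX ==
 * (PRIO_MAX - 2) << 11 == 18 << 11 == 36864, and ESTCPU_ACCUM ==
 * 1 << 10 == 1024, i.e. half a priority level per sched_schedclock()
 * call.
 */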

/*
 * Constants for digital decay and forget:
 *	90% of (l_estcpu) usage in 5 * loadav time
 *	95% of (l_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		l_estcpu *= decay;
 * will compute
 * 	l_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
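
/*
 * Sanity check of the table above (added commentary, not from the
 * original source): for loadav == 2, b == 4 and decay == 4/5.  Since
 * ln(0.8) =~ -0.223, solving 0.8**power == 0.1 gives power ==
 * -2.30 / -0.223 =~ 10.3, matching the 10.32 listed for loadav 2.
 */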

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
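
/*
 * Overflow-guard arithmetic (added commentary, not from the original
 * source; assumes a 32-bit fixpt_t and FSCALE == 2048): FIXPT_MAX /
 * ESTCPU_MAX =~ 0xffffffff / 36864 =~ 116500, so the 32-bit fast path
 * is taken unless loadfac == 2 * loadavg exceeds that, i.e. unless
 * the load average is roughly 116500 / 2 / 2048 =~ 28 -- effectively
 * always on sane systems.
 */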

/*
 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay l_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
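
/*
 * Early-exit example (added commentary, not from the original source;
 * assumes FSHIFT == 11): with a load average of 1, loadfac ==
 * 2 * FSCALE, so the batch decay short-circuits to zero once n >= 14
 * seconds of sleep (n << 11 >= 7 * 2 * 2048), consistent with the
 * "seven times the loadfactor" bound above.
 */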

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		l->l_slptime++;
		if (l->l_slptime <= 1) {
			loadfac = loadfactor(averunnable.ldavg[0]);
			l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
		}
	}
	if (l->l_slptime <= 1)
		resetpriority(l);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < NUM_Q; i++)
		TAILQ_INIT(&rq->rq_queue[i]);
	for (i = 0; i < NUM_B; i++)
		rq->rq_bitmap[i] = 0;
	TAILQ_INIT(&rq->rq_fixedpri);
	rq->rq_count = 0;
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;
	lwp_t *l2;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count++;

	if (pri >= PRI_KTHREAD) {
		TAILQ_FOREACH(l2, &rq->rq_fixedpri, l_runq) {
			if (lwp_eprio(l2) < pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_runq);
				return;
			}
		}
		TAILQ_INSERT_TAIL(&rq->rq_fixedpri, l, l_runq);
		return;
	}

	rq->rq_bitmap[pri >> PPB_SHIFT] |=
	    (0x80000000U >> (pri & PPB_MASK));
	TAILQ_INSERT_TAIL(&rq->rq_queue[pri], l, l_runq);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count--;

	if (pri >= PRI_KTHREAD) {
		TAILQ_REMOVE(&rq->rq_fixedpri, l, l_runq);
		return;
	}

	TAILQ_REMOVE(&rq->rq_queue[pri], l, l_runq);
	if (TAILQ_EMPTY(&rq->rq_queue[pri]))
		rq->rq_bitmap[pri >> PPB_SHIFT] ^=
		    (0x80000000U >> (pri & PPB_MASK));
}

#if (NUM_B != 3) || (NUM_Q != 96)
#error adjust runqueue_nextlwp
#endif

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	pri_t pri;

	KASSERT(rq->rq_count != 0);

	if (!TAILQ_EMPTY(&rq->rq_fixedpri))
		return TAILQ_FIRST(&rq->rq_fixedpri);

	if (rq->rq_bitmap[2] != 0)
		pri = 96 - ffs(rq->rq_bitmap[2]);
	else if (rq->rq_bitmap[1] != 0)
		pri = 64 - ffs(rq->rq_bitmap[1]);
	else
		pri = 32 - ffs(rq->rq_bitmap[0]);
	return TAILQ_FIRST(&rq->rq_queue[pri]);
}
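
/*
 * Bit-to-priority example (added commentary, not from the original
 * source): enqueueing at priority p sets bit (0x80000000U >> (p & 31))
 * in word p >> 5, so the least significant set bit of a word is its
 * highest occupied priority.  E.g. if only priority 95 is occupied,
 * rq_bitmap[2] == 0x1, ffs() returns 1, and 96 - 1 == 95; if only
 * priority 64 is occupied, rq_bitmap[2] == 0x80000000, ffs() returns
 * 32, and 96 - 32 == 64.
 */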

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lwp_t *l;
	int i;

	(*pr)("PID\tLID\tPRI\tIPRI\tEPRI\tLWP\t\t NAME\n");

	TAILQ_FOREACH(l, &rq->rq_fixedpri, l_runq) {
		(*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
		    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
		    (int)l->l_inheritedprio, lwp_eprio(l),
		    (long)l, l->l_proc->p_comm);
	}

	for (i = NUM_Q - 1; i >= 0; i--) {
		TAILQ_FOREACH(l, &rq->rq_queue[i], l_runq) {
			(*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
			    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
			    (int)l->l_inheritedprio, lwp_eprio(l),
			    (long)l, l->l_proc->p_comm);
		}
	}

	(*pr)("CPUIDX\tRESCHED\tCURPRI\tFLAGS\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		(*pr)("%d\t%d\t%d\t%04x\n", (int)ci->ci_index,
		    (int)ci->ci_want_resched,
		    (int)ci->ci_schedstate.spc_curpriority,
		    (int)ci->ci_schedstate.spc_flags);
	}

	(*pr)("NEXTLWP\n%016lx\n", (long)sched_nextlwp());
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit()
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup()
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;
	int bits;

	ci = curcpu();
	spc = &ci->ci_schedstate;
#ifndef __HAVE_FAST_SOFTINTS
	bits = ci->ci_data.cpu_softints;
	bits |= ((runqueue_t *)spc->spc_sched_info)->rq_count;
#else
	bits = ((runqueue_t *)spc->spc_sched_info)->rq_count;
#endif
	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
		bits |= global_queue.rq_count;
	return bits != 0;
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}
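
/*
 * Worked example (added commentary, not from the original source;
 * assumes user priorities span 0..PRI_KERNEL-1 and p_nice stores the
 * nice value biased by NZERO == 20): with l_estcpu == 0 and default
 * p_nice == 20, pri == (PRI_KERNEL - 1) - 20; each additional
 * (1 << ESTCPU_SHIFT) of l_estcpu, or each point of nice, lowers it
 * by one, and imax() clamps the result at 0.
 */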

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time l_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}
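
/*
 * Ramp example (added commentary, not from the original source): each
 * sched_schedclock() call adds ESTCPU_ACCUM == half of one priority
 * level to l_estcpu (clamped at ESTCPU_MAX), so two calls while the
 * LWP runs cost it one level; decay_cpu() then claws this back
 * exponentially while it sleeps, as described in the digital-decay
 * comment above.
 */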

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(&parent->p_smutex));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(&parent->p_smutex);
}
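
/*
 * Chargeback example (added commentary, not from the original source):
 * if a child forked with p_estcpu_inherited == 8192, which would have
 * decayed to, say, 2048 by exit time, and the child's LWP dies with
 * l_estcpu == 6144, the parent is charged the 4096 the child earned
 * beyond its decayed inheritance: 6144 - 2048.
 */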

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	struct schedstate_percpu *spc;
	runqueue_t *rq;
	lwp_t *l1, *l2;

	spc = &curcpu()->ci_schedstate;

	/* For now, just pick the highest priority LWP. */
	rq = spc->spc_sched_info;
	l1 = NULL;
	if (rq->rq_count != 0)
		l1 = runqueue_nextlwp(rq);

	rq = &global_queue;
	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0) ||
	    rq->rq_count == 0)
		return l1;
	l2 = runqueue_nextlwp(rq);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) > lwp_eprio(l1))
		return l2;
	else
		return l1;
}

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_exit(struct lwp *l)
{

}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	KASSERT(node != NULL);

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */