/*	$NetBSD: sched_4bsd.c,v 1.24.10.1.2.1 2010/04/21 00:28:17 matt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.24.10.1.2.1 2010/04/21 00:28:17 matt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* Number of hardclock ticks per sched_tick() */
static int rrticks;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	lwp_t *l;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P()) {
		cpu_need_resched(ci, 0);
		return;
	}
	l = ci->ci_data.cpu_onproc;
	if (l == NULL) {
		return;
	}
	switch (l->l_class) {
	case SCHED_FIFO:
		/* No timeslicing for FIFO jobs. */
		break;
	case SCHED_RR:
		/* Force it into mi_switch() to look for other jobs to run. */
		cpu_need_resched(ci, RESCHED_KPREEMPT);
		break;
	default:
		if (spc->spc_flags & SPCF_SHOULDYIELD) {
			/*
			 * Process is stuck in kernel somewhere, probably
			 * due to buggy or inefficient code.  Force a
			 * kernel preemption.
			 */
			cpu_need_resched(ci, RESCHED_KPREEMPT);
		} else if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
			cpu_need_resched(ci, 0);
		} else {
			spc->spc_flags |= SPCF_SEENRR;
		}
		break;
	}
}
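
/*
 * Illustrative numbers (not a new invariant): rrticks is set to hz / 10
 * in the sysctl setup at the bottom of this file, so with hz == 100 a
 * time slice is 10 hardclock ticks and with hz == 1000 it is 100 ticks.
 * Either way the round-robin quantum works out to roughly 100 ms.
 */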

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
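
/*
 * A worked example of the numbers above (illustration only): with
 * ESTCPU_SHIFT == 11, ESTCPU_MAX is (PRIO_MAX - 2) << 11 == 18 << 11
 * == 36864 and ESTCPU_ACCUM is 1 << 10 == 1024, i.e. half of one
 * priority level.  l_estcpu can therefore move a SCHED_OTHER LWP by at
 * most 18 priority levels, while nice keeps its usual 40.
 */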

/*
 * Constants for digital decay and forget:
 *	90% of (l_estcpu) usage in 5 * loadav time
 *	95% of (l_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		l_estcpu *= decay;
 * will compute
 * 	l_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
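
/*
 * A worked example of the decay above (illustration only): with a load
 * average of 1.0, averunnable.ldavg[0] == FSCALE, so loadfactor() yields
 * 2 * FSCALE and decay_cpu() below computes
 *
 *	estcpu * (2 * FSCALE) / (2 * FSCALE + FSCALE) == estcpu * 2 / 3
 *
 * Multiplying by 2/3 once per sched_pstats() pass forgets ~90% of
 * l_estcpu in about 5.68 passes, matching the first column of the
 * table above.
 */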

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* Avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay l_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
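
/*
 * For example (illustration only): with a load average of 1.0, loadfac
 * is 2 * FSCALE, so the shortcut above triggers once n >= 14.  An LWP
 * that has been asleep for 14 or more sched_pstats() ticks therefore
 * has its l_estcpu zeroed outright rather than decayed step by step.
 */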

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l, int batch)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	KASSERT(lwp_locked(l, NULL));
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		if (l->l_slptime > 1) {
			return;
		}
	}
	loadfac = loadfactor(averunnable.ldavg[0]);
	l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
	resetpriority(l);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

void
sched_rqinit(void)
{

}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}
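
/*
 * Reading the formula above (illustration only): starting from
 * PRI_KERNEL - 1, each unit of p_nice and each 1 << ESTCPU_SHIFT
 * (2048) units of l_estcpu cost one priority level, and imax()
 * prevents the result from going below zero, so the computed priority
 * always stays below the kernel priorities.
 */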

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities (resetpriority()
 * above) will compute a different value each time l_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}
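
/*
 * Roughly speaking (illustration only): each call adds ESTCPU_ACCUM,
 * i.e. half of 1 << ESTCPU_SHIFT, so two schedclock ticks' worth of CPU
 * time pushes l_estcpu up by one full priority level, until ESTCPULIM()
 * caps it at ESTCPU_MAX.
 */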

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(parent->p_lock));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(parent->p_lock);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(parent->p_lock);
}
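
/*
 * In other words (illustration only): the parent is charged only for the
 * l_estcpu the child accumulated beyond its (since decayed) inherited
 * amount; a child that used less CPU than it inherited does not change
 * the parent's estimate.
 */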

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

void
sched_oncpu(lwp_t *l)
{

}

void
sched_newts(lwp_t *l)
{

}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rrticks);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
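
/*
 * Usage sketch (illustrative output, not captured from a real system):
 * the handler above backs the kern.sched.rtts node created below, so
 *
 *	$ sysctl kern.sched.name kern.sched.rtts
 *	kern.sched.name = 4.4BSD
 *	kern.sched.rtts = 100
 *
 * since rrticks is hz / 10 and hztoms() converts it back to roughly
 * 100 ms (the exact value can vary slightly with hz due to integer
 * rounding).
 */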

SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	rrticks = hz / 10;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
}
    542