/*	$NetBSD: sched_4bsd.c,v 1.40 2019/12/01 15:34:46 ad Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.40 2019/12/01 15:34:46 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/lockdebug.h>
#include <sys/intr.h>
#include <sys/atomic.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* Number of hardclock ticks per sched_tick() */
static int rrticks __read_mostly;

/*
 * Force a switch among equal-priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	lwp_t *l;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P()) {
		atomic_or_uint(&ci->ci_want_resched,
		    RESCHED_IDLE | RESCHED_UPREEMPT);
		return;
	}
	l = ci->ci_onproc;
	if (l == NULL) {
		return;
	}
	/*
	 * This can only be spc_lwplock or a turnstile lock at this point
	 * (if we interrupted a priority-inheritance trylock dance).
	 */
	KASSERT(l->l_mutex != spc->spc_mutex);
	switch (l->l_class) {
	case SCHED_FIFO:
		/* No timeslicing for FIFO jobs. */
		break;
	case SCHED_RR:
		/* Force it into mi_switch() to look for other jobs to run. */
#ifdef __HAVE_PREEMPTION
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
#else
		atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
#endif
		break;
	default:
		if (spc->spc_flags & SPCF_SHOULDYIELD) {
			/*
			 * The process is stuck in the kernel somewhere,
			 * probably due to buggy or inefficient code.  Force
			 * a kernel preemption.
			 */
#ifdef __HAVE_PREEMPTION
			atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
			atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
#else
			atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
#endif
		} else if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a round-robin
			 * interval without switching and may be hogging the
			 * CPU.  Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
			atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
		} else {
			spc->spc_flags |= SPCF_SEENRR;
		}
		break;
	}
}

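/*
 * Illustrative sketch (not kernel code): the round-robin interval
 * arithmetic.  Assuming the common hz = 100, the assignment
 * rrticks = hz / 10 done in sysctl_sched_4bsd_setup() below gives
 * rrticks = 10, i.e. sched_tick() fires every 10 hardclock ticks =
 * 100ms.  hz is an assumption here; it varies by platform.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int hz = 100;			/* assumed hardclock frequency */
	int rrticks = hz / 10;		/* as in sysctl_sched_4bsd_setup() */

	/* Prints "10 ticks = 100 ms" for hz = 100. */
	printf("%d ticks = %d ms\n", rrticks, rrticks * 1000 / hz);
	return 0;
}
#endif
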
/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	uimin((e), ESTCPU_MAX)

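/*
 * Illustrative sketch (not kernel code): with PRIO_MAX = 20 (from
 * <sys/resource.h>; restated here as an assumption), the macros above
 * give ESTCPU_MAX = 18 << 11 = 36864, so l_estcpu >> ESTCPU_SHIFT spans
 * exactly the 18 priority levels mentioned in the comment, and each
 * ESTCPU_ACCUM increment is worth half a level.
 */
#if 0
#include <stdio.h>

#define	PRIO_MAX	20
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)

int
main(void)
{
	/* Prints "max estcpu 36864 -> 18 priority levels". */
	printf("max estcpu %d -> %d priority levels\n",
	    ESTCPU_MAX, ESTCPU_MAX >> ESTCPU_SHIFT);
	return 0;
}
#endif
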
/*
 * The main parameter used by this algorithm is 'l_estcpu'. It is an estimate
 * of the recent CPU utilization of the thread.
 *
 * l_estcpu is:
 *  - increased each time the hardclock ticks and the thread is found to
 *    be executing, in sched_schedclock() called from hardclock()
 *  - decreased (filtered) on each sched tick, in sched_pstats_hook()
 * If the lwp is sleeping for more than a second, we don't touch l_estcpu: it
 * will be updated in sched_setrunnable() when the lwp wakes up, in burst mode
 * (i.e., we apply the decay n times at once).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * -----------------------------------------------------------------------------
 *
 * Here we describe how l_estcpu is decreased.
 *
 * Constants for digital decay (filter):
 *     90% of l_estcpu usage in (5 * loadavg) seconds
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds. That is, we
 * want to compute a value of decay such that the following loop:
 *     for (i = 0; i < (5 * loadavg); i++)
 *         l_estcpu *= decay;
 * will result in
 *     l_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *     decay ** (5 * loadavg) ~= .1
 *
 * And finally, the corresponding value of decay we're using is:
 *     decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * -----------------------------------------------------------------------------
 *
 * Now, let's prove that the value of decay stated above will always fulfill
 * the equation:
 *     decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *     b = 2 * loadavg
 * then
 *     decay = b / (b + 1)
 *
 * We now need to prove two things:
 *     1) Given [factor ** (5 * loadavg) =~ .1], prove [factor == b/(b+1)].
 *     2) Given [(b/(b+1)) ** power =~ .1], prove [power == (5 * loadavg)].
 *
 * Facts:
 *   * For x real: exp(x) = x**0/0! + x**1/1! + x**2/2! + ...
 *     Therefore, for x close to zero, exp(x) =~ 1 + x.
 *     In turn, for b large enough, exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *
 *   * For b large enough, (b-1)/b =~ b/(b+1).
 *
 *   * For x in [-1, 1): ln(1-x) = - x - x**2/2 - x**3/3 - ...
 *     Therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *
 *   * ln(0.1) =~ -2.30
 *
 * Proof of (1):
 *     factor ** (5 * loadavg) =~ 0.1
 *  => ln(factor) =~ -2.30 / (5 * loadavg)
 *  => factor =~ exp(-1 / ((5 / 2.30) * loadavg))
 *            =~ exp(-1 / (2 * loadavg))
 *            =~ exp(-1 / b)
 *            =~ (b - 1) / b
 *            =~ b / (b + 1)
 *            =~ (2 * loadavg) / ((2 * loadavg) + 1)
 *
 * Proof of (2):
 *     (b / (b + 1)) ** power =~ .1
 *  => power * ln(b / (b + 1)) =~ -2.30
 *  => power * (-1 / (b + 1)) =~ -2.30
 *  => power =~ 2.30 * (b + 1)
 *  => power =~ 4.60 * loadavg + 2.30
 *  => power =~ 5 * loadavg
 *
 * Conclusion: decay = (2 * loadavg) / (2 * loadavg + 1)
 */

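/*
 * Illustrative sketch (not kernel code): a numeric check of the
 * derivation above.  With an assumed loadavg = 1.0, decay = 2/3;
 * applying it (5 * loadavg) = 5 times leaves about 13% of the original
 * value, reasonably close to the 10% target.  The approximation
 * tightens as loadavg (and hence b) grows.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	double loadavg = 1.0;	/* assumed 1-minute load average */
	double decay = (2 * loadavg) / (2 * loadavg + 1);
	double estcpu = 1.0;
	int i;

	for (i = 0; i < 5 * loadavg; i++)
		estcpu *= decay;
	/* Prints ~0.1317: decay ** (5 * loadavg) =~ 0.1. */
	printf("%f\n", estcpu);
	return 0;
}
#endif
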
/* See calculations above */
#define	loadfactor(loadavg)  (2 * (loadavg))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* Avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

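/*
 * Illustrative sketch (not kernel code): decay_cpu() in fixed point.
 * Load averages are scaled by FSCALE; the value 2048 below is an
 * assumption for the example (the real one comes from <sys/param.h>).
 * A load average of 1.0 then gives loadfac = 4096 and a decay
 * multiplier of 4096/6144 = 2/3, matching the derivation above.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	FSCALE	2048			/* assumed fixed-point scale */
typedef uint32_t fixpt_t;

int
main(void)
{
	fixpt_t loadfac = 2 * FSCALE;	/* loadfactor(1.0), scaled */
	fixpt_t estcpu = 3 << 11;	/* three priority levels worth */

	/* One decay step: 6144 * 2/3 = 4096, i.e. two levels left. */
	printf("%u\n", (uint32_t)((uint64_t)estcpu * loadfac /
	    (loadfac + FSCALE)));
	return 0;
}
#endif
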
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	/*
	 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
	 * if we slept for at least seven times the loadfactor, we will decay
	 * l_estcpu to less than (1 << ESTCPU_SHIFT), and therefore we can
	 * return zero directly.
	 *
	 * Note that our ESTCPU_MAX is actually much smaller than
	 * (255 << ESTCPU_SHIFT).
	 */
	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

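/*
 * Illustrative sketch (not kernel code): a check of the early-exit
 * bound above.  With loadavg = 1 (an assumption), the cutoff
 * (n << FSHIFT) >= 7 * loadfac reduces to n >= 14 seconds of sleep;
 * (2/3)**14 =~ 0.0034, so even the maximal (255 << ESTCPU_SHIFT)
 * decays below (1 << ESTCPU_SHIFT) and zero is a safe answer.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	double v = 255 * 2048;	/* max l_estcpu, ESTCPU_SHIFT = 11 */
	int n;

	for (n = 0; n < 14; n++)
		v *= 2.0 / 3.0;	/* one decay step at loadavg = 1 */
	/* Prints ~1789, below 1 << ESTCPU_SHIFT = 2048. */
	printf("%f\n", v);
	return 0;
}
#endif
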
/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l, int batch)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	KASSERT(lwp_locked(l, NULL));
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		if (l->l_slptime > 1) {
			return;
		}
	}

	loadfac = loadfactor(averunnable.ldavg[0]);
	l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
	resetpriority(l);
}

/*
 * Recalculate the priority of an LWP after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

void
sched_rqinit(void)
{

}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}

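/*
 * Illustrative sketch (not kernel code) of the formula above, assuming
 * PRI_KERNEL = 64 and p_nice = NZERO + nice with NZERO = 20 (both
 * values are assumptions for the example; the real definitions live in
 * <sys/param.h>).  A nice 0 thread with no recent CPU time gets
 * priority 63 - 0 - 20 = 43, and each full ESTCPU_SHIFT worth of
 * l_estcpu costs one level.
 */
#if 0
#include <stdio.h>

#define	ESTCPU_SHIFT	11

static int
example_pri(unsigned int estcpu, int p_nice)
{
	int pri = (64 - 1) - (int)(estcpu >> ESTCPU_SHIFT) - p_nice;

	return pri > 0 ? pri : 0;
}

int
main(void)
{
	/* Prints "43 then 40": three levels of estcpu cost three. */
	printf("%d then %d\n", example_pri(0, 20),
	    example_pri(3 << ESTCPU_SHIFT, 20));
	return 0;
}
#endif
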
/*
 * We adjust the priority of the current LWP.  The priority of an LWP
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities will compute a
 * different value each time l_estcpu increases.  This can cause a switch,
 * but unless the priority crosses a PPQ boundary the actual queue will not
 * change.  The CPU usage estimator ramps up quite quickly when the process
 * is running (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle is
 * that the system will 90% forget that the process used a lot of CPU time
 * in (5 * loadavg) seconds.  This causes the system to favor processes which
 * haven't run much recently, and to round-robin among other processes.
 */
void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}

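/*
 * Illustrative sketch (not kernel code): the linear ramp described
 * above.  Each sched_schedclock() call adds ESTCPU_ACCUM = 1 << 10,
 * half of one ESTCPU_SHIFT priority level, so a continuously running
 * thread loses one priority level per two schedclock ticks until
 * ESTCPULIM() caps it at ESTCPU_MAX.
 */
#if 0
#include <stdio.h>

#define	ESTCPU_SHIFT	11
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))

int
main(void)
{
	unsigned int estcpu = 0;
	int tick;

	for (tick = 1; tick <= 6; tick++) {
		estcpu += ESTCPU_ACCUM;
		/* Prints levels 0 1 1 2 2 3: one level per two ticks. */
		printf("tick %d: level %u\n", tick, estcpu >> ESTCPU_SHIFT);
	}
	return 0;
}
#endif
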
/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(parent->p_lock));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Charge back parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(parent->p_lock);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(parent->p_lock);
}

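/*
 * Illustrative sketch (not kernel code) of the charge-back above: the
 * parent absorbs only the child's estcpu in excess of what the value
 * inherited at fork would have decayed to over the child's lifetime.
 * All the numbers below are made up for the example.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int inherited = 4096;	/* child's estcpu at fork */
	unsigned int decayed = 1200;	/* decay_cpu_batch() of inherited */
	unsigned int child_now = 5000;	/* child's estcpu at exit */
	unsigned int parent = 2048;	/* parent's current estcpu */

	(void)inherited;
	if (child_now > decayed)
		parent += child_now - decayed;
	/* Prints 5848: the parent is charged the child's net usage. */
	printf("%u\n", parent);
	return 0;
}
#endif
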
void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

void
sched_oncpu(lwp_t *l)
{

}

void
sched_newts(lwp_t *l)
{

}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rrticks);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	rrticks = hz / 10;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
}