/*	$NetBSD: sched_4bsd.c,v 1.47 2025/01/17 04:11:33 mrg Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.47 2025/01/17 04:11:33 mrg Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/lockdebug.h>
#include <sys/intr.h>
#include <sys/atomic.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

/* Number of hardclock ticks per sched_tick() */
u_int sched_rrticks __read_mostly;

/*
 * Force a switch among equal-priority processes every 100ms.
 * Called from hardclock() every sched_rrticks (== hz/10) hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	pri_t pri = PRI_NONE;
	lwp_t *l;

	spc->spc_ticks = sched_rrticks;

	if (CURCPU_IDLE_P()) {
		spc_lock(ci);
		sched_resched_cpu(ci, MAXPRI_KTHREAD, true);
		/* spc now unlocked */
		return;
	}
	l = ci->ci_onproc;
	if (l == NULL) {
		return;
	}
	/*
	 * Can only be spc_lwplock or a turnstile lock at this point
	 * (the latter if we interrupted a priority inheritance trylock
	 * dance).
	 */
	KASSERT(l->l_mutex != spc->spc_mutex);
	switch (l->l_class) {
	case SCHED_FIFO:
		/* No timeslicing for FIFO jobs. */
		break;
	case SCHED_RR:
		/* Force it into mi_switch() to look for other jobs to run. */
		pri = MAXPRI_KERNEL_RT;
		break;
	default:
		if (spc->spc_flags & SPCF_SHOULDYIELD) {
			/*
			 * The LWP is stuck in the kernel somewhere, probably
			 * due to buggy or inefficient code.  Force a
			 * kernel preemption.
			 */
			pri = MAXPRI_KERNEL_RT;
		} else if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The LWP has already been through a round-robin
			 * interval without switching and may be hogging the
			 * CPU.  Indicate that it should yield.
			 */
			pri = MAXPRI_KTHREAD;
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else if (!cpu_is_1stclass(ci)) {
			/*
			 * For SMT or asymmetric systems push a little
			 * harder: if this is not a 1st class CPU, try to
			 * find a better one to run this LWP.
			 */
			pri = MAXPRI_KTHREAD;
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else {
			spc->spc_flags |= SPCF_SEENRR;
		}
		break;
	}

	if (pri != PRI_NONE) {
		spc_lock(ci);
		sched_resched_cpu(ci, pri, true);
		/* spc now unlocked */
	}
}
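
/*
 * Worked example (assuming the common hz = 100): sched_rrticks is
 * hz / 10 = 10 hardclock ticks, so sched_tick() runs every 100ms.  A
 * SCHED_OTHER LWP that is still on the CPU when the second consecutive
 * sched_tick() fires (SPCF_SEENRR already set) is marked
 * SPCF_SHOULDYIELD and a preemption at MAXPRI_KTHREAD is requested, so
 * equal-priority timeshared jobs alternate roughly every 100-200ms.
 */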

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	uimin((e), ESTCPU_MAX)
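
/*
 * Plugging in the actual numbers (PRIO_MAX is 20): ESTCPU_MAX is
 * (20 - 2) << 11 = 36864, covering the 18 priority levels mentioned
 * above, and ESTCPU_ACCUM is 1 << 10, i.e. each accumulation step is
 * worth half of one priority level (1 << ESTCPU_SHIFT).
 */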

/*
 * The main parameter used by this algorithm is 'l_estcpu'. It is an estimate
 * of the recent CPU utilization of the thread.
 *
 * l_estcpu is:
 *  - increased each time the hardclock ticks and the thread is found to
 *    be executing, in sched_schedclock() called from hardclock()
 *  - decreased (filtered) on each sched tick, in sched_pstats_hook()
 * If the lwp is sleeping for more than a second, we don't touch l_estcpu: it
 * will be updated in sched_setrunnable() when the lwp wakes up, in burst mode
 * (i.e., we decay it n times in one go).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * -----------------------------------------------------------------------------
 *
 * Here we describe how l_estcpu is decreased.
 *
 * Constants for digital decay (filter):
 *     90% of l_estcpu usage in (5 * loadavg) seconds
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds. That is, we
 * want to compute a value of decay such that the following loop:
 *     for (i = 0; i < (5 * loadavg); i++)
 *         l_estcpu *= decay;
 * will result in
 *     l_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *     decay ** (5 * loadavg) ~= .1
 *
 * And finally, the corresponding value of decay we're using is:
 *     decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * -----------------------------------------------------------------------------
 *
 * Now, let's prove that the value of decay stated above will always fulfill
 * the equation:
 *     decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *     b = 2 * loadavg
 * then
 *     decay = b / (b + 1)
 *
 * We now need to prove two things:
 *     1) Given [factor ** (5 * loadavg) =~ .1], prove [factor == b/(b+1)].
 *     2) Given [(b/(b+1)) ** power =~ .1], prove [power == (5 * loadavg)].
 *
 * Facts:
 *   * For x real: exp(x) = x**0/0! + x**1/1! + x**2/2! + ...
 *     Therefore, for x close to zero, exp(x) =~ 1 + x.
 *     In turn, for b large enough, exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *
 *   * For b large enough, (b-1)/b =~ b/(b+1).
 *
 *   * For x belonging to [-1,1), ln(1-x) = - x - x**2/2 - x**3/3 - ...
 *     Therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *
 *   * ln(0.1) =~ -2.30
 *
 * Proof of (1):
 *     factor ** (5 * loadavg) =~ 0.1
 *  => ln(factor) =~ -2.30 / (5 * loadavg)
 *  => factor =~ exp(-1 / ((5 / 2.30) * loadavg))
 *            =~ exp(-1 / (2 * loadavg))
 *            =~ exp(-1 / b)
 *            =~ (b - 1) / b
 *            =~ b / (b + 1)
 *            =~ (2 * loadavg) / ((2 * loadavg) + 1)
 *
 * Proof of (2):
 *     (b / (b + 1)) ** power =~ .1
 *  => power * ln(b / (b + 1)) =~ -2.30
 *  => power * (-1 / (b + 1)) =~ -2.30
 *  => power =~ 2.30 * (b + 1)
 *  => power =~ 4.60 * loadavg + 2.30
 *  => power =~ 5 * loadavg
 *
 * Conclusion: decay = (2 * loadavg) / (2 * loadavg + 1)
 */

/* See calculations above */
#define	loadfactor(loadavg)  (2 * (loadavg))
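
/*
 * The claim decay ** (5 * loadavg) =~ .1 is easy to check numerically.
 * A minimal userland sketch (illustrative only, not part of the
 * kernel):
 *
 *	#include <stdio.h>
 *	#include <math.h>
 *
 *	int
 *	main(void)
 *	{
 *		for (double ldavg = 1; ldavg <= 8; ldavg *= 2) {
 *			double decay = (2 * ldavg) / (2 * ldavg + 1);
 *			printf("loadavg %2g: decay^(5*loadavg) = %.3f\n",
 *			    ldavg, pow(decay, 5 * ldavg));
 *		}
 *		return 0;
 *	}
 *
 * This prints values from ~0.132 (loadavg 1) tending towards
 * exp(-2.5) =~ 0.082 as loadavg grows, which is the "=~ .1" used in
 * the proof above.
 */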

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* Avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
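
/*
 * For example, with a load average of 1, loadfac is 2 * FSCALE and
 * decay_cpu() returns estcpu * 2 / 3: each pstats second decays away a
 * third of the estimate.  A higher load pushes the factor towards 1,
 * making the decay proportionally slower, as described above.
 */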

static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	/*
	 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
	 * if we slept for at least seven times the loadfactor, we will decay
	 * l_estcpu to less than (1 << ESTCPU_SHIFT), and therefore we can
	 * return zero directly.
	 *
	 * Note that our ESTCPU_MAX is actually much smaller than
	 * (255 << ESTCPU_SHIFT).
	 */
	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
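
/*
 * Checking the shortcut for loadavg 1: loadfac is 2 << FSHIFT, so the
 * cutoff is n >= 14.  Indeed (2/3)^14 =~ 0.0034, and
 * (255 << ESTCPU_SHIFT) * 0.0034 is already below (1 << ESTCPU_SHIFT),
 * so after 14 seconds asleep the decayed estimate would round down to
 * zero anyway.
 */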

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l, int batch)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept for more than a second, stop recalculating
	 * its priority until it wakes up.
	 */
	KASSERT(lwp_locked(l, NULL));
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		if (l->l_slptime > 1) {
			return;
		}
	}

	loadfac = loadfactor(averunnable.ldavg[0]);
	l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
	resetpriority(l);
}

/*
 * Recalculate the priority of an LWP after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

void
sched_rqinit(void)
{

}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}
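
/*
 * Worked example, assuming the usual NetBSD values PRI_KERNEL = 64 and
 * a default p_nice of NZERO (20): a fresh LWP with l_estcpu == 0 gets
 * pri = 63 - 0 - 20 = 43, and the same LWP with l_estcpu saturated at
 * ESTCPU_MAX drops to 63 - 18 - 20 = 25: the 18 levels of estcpu
 * influence described above the ESTCPU_SHIFT definition.
 */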

/*
 * We adjust the priority of the current LWP.  The priority of an LWP
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities will compute a
 * different value each time l_estcpu increases.  This can cause a switch,
 * but unless the priority crosses a PPQ boundary the actual queue will not
 * change.  The CPU usage estimator ramps up quite quickly when the process
 * is running (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle is
 * that the system will 90% forget that the process used a lot of CPU time
 * in (5 * loadavg) seconds.  This causes the system to favor processes which
 * haven't run much recently, and to round-robin among other processes.
 */
void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}
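
/*
 * Rough numbers: each call here adds ESTCPU_ACCUM, half of one
 * priority level (see the ESTCPU_SHIFT comments), so a compute-bound
 * LWP's user priority worsens by one level roughly every two such
 * ticks, until the decay above, or ESTCPULIM, catches up with it.
 */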

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(parent->p_lock));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(parent->p_lock);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(parent->p_lock);
}
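
/*
 * Example: say the child inherited p_estcpu_inherited = 8192 at fork
 * and exits with cl->l_estcpu = 12288 while the inherited share has
 * decayed to 4096.  The parent is then charged the difference
 * 12288 - 4096 = 8192, so CPU time a child racked up on its own cannot
 * be shed through fork/exit cycles.
 */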

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

void
sched_oncpu(lwp_t *l)
{

}

void
sched_newts(lwp_t *l)
{

}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(sched_rrticks);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
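
/*
 * For example, with hz = 100 and the default sched_rrticks = hz / 10
 * set below, hztoms(10) yields 100, so "sysctl kern.sched.rtts"
 * reports the 100ms round-robin quantum.
 */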

SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sched_rrticks = hz / 10;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
}
    570