/*	$NetBSD: sched_4bsd.c,v 1.7 2007/10/10 21:24:53 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.7 2007/10/10 21:24:53 rmind Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We have 32 run queues in descending priority of 0..31.  We maintain
 * a bitmask of non-empty queues in order to speed up finding the first
 * runnable process.  The bitmask is maintained only by machine-dependent
 * code, allowing the most efficient instructions to be used to find the
 * first non-empty queue.
 */

#define	RUNQUE_NQS		32      /* number of runqueues */
#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
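
/*
 * Illustrative sketch only (not part of the scheduler): how a 0..127
 * priority maps to one of the 32 queue indices, and how a bitmap of
 * non-empty queues yields the best queue with a single find-first-set.
 * The helpers below are hypothetical; machine-dependent code may use a
 * different (e.g. MSB-first) encoding, as noted near RQMASK() below.
 */
#if 0
#include <stdint.h>
#include <strings.h>			/* ffs(3), for a userland sketch */

static int
pri_to_queue(int pri)			/* hypothetical helper */
{
	return pri / PPQ;		/* PPQ == 4: 0..127 -> 0..31 */
}

static int
best_queue(uint32_t bitmap)		/* hypothetical helper */
{
	/* lowest set bit == lowest queue index == best priority */
	return bitmap == 0 ? -1 : ffs(bitmap) - 1;
}
#endif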

typedef struct subqueue {
	TAILQ_HEAD(, lwp) sq_queue;
} subqueue_t;
typedef struct runqueue {
	subqueue_t rq_subqueues[RUNQUE_NQS];	/* run queues */
	uint32_t rq_bitmap;	/* bitmap of non-empty queues */
} runqueue_t;
static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

fixpt_t decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P())
		return;

	if (spc->spc_flags & SPCF_SEENRR) {
		/*
		 * The process has already been through a roundrobin
		 * without switching and may be hogging the CPU.
		 * Indicate that the process should yield.
		 */
		spc->spc_flags |= SPCF_SHOULDYIELD;
	} else
		spc->spc_flags |= SPCF_SEENRR;

	cpu_need_resched(ci, 0);
}

#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
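
/*
 * Worked numbers (assuming PRIO_MAX == 20 from <sys/param.h>; the other
 * values are defined above): ESTCPU_MAX works out to
 * (2 * 20 - 4) << 11 == 36 << 11 == 73728, i.e. once shifted down by
 * ESTCPU_SHIFT, p_estcpu can contribute at most 36 priority levels.
 */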

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
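
/*
 * Illustrative sketch only (not compiled into the kernel): checking the
 * decay factor numerically.  Applying decay = b/(b+1) (with b = 2 *
 * loadavg) 5*loadavg times leaves roughly 10% of the original estcpu,
 * matching the analysis above.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int loadav;

	for (loadav = 1; loadav <= 4; loadav++) {
		uint64_t estcpu = 1000000;	/* arbitrary starting value */
		int b = 2 * loadav, i;

		for (i = 0; i < 5 * loadav; i++)
			estcpu = estcpu * b / (b + 1);
		/* expect roughly 10% of the original value */
		printf("loadav %d: 1000000 -> %8llu (~%llu%%)\n", loadav,
		    (unsigned long long)estcpu,
		    (unsigned long long)(estcpu / 10000));
	}
	return 0;
}
#endif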

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT), since the per-second decay factor is
 * roughly exp(-1/(2 * loadavg)) and 255 * exp(-7) < 1.
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{

	if (l->l_slptime <= 1 && l->l_priority >= PUSER)
		resetpriority(l);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#define	RQMASK(n) (0x00000001 << (n))

/*
 * The primitives that manipulate the run queues.  The bitmap (rq_bitmap)
 * tells which of the 32 subqueues have LWPs in them.  sched_enqueue() puts
 * LWPs into queues, sched_dequeue() removes them from queues.  The running
 * LWP is on no queue; other runnable LWPs are on the queue given by their
 * effective priority divided by PPQ (4), shrinking the 0-127 range of
 * priorities into the 32 available queues.
 */
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
	const subqueue_t * const sq = &rq->rq_subqueues[whichq];
	const uint32_t bitmap = rq->rq_bitmap;
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
		if (l2->l_stat != LSRUN) {
			printf("runqueue_check[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (bitmap & RQMASK(whichq)) != 0) {
		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("runqueue_check[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("runqueue_check[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define	runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
}

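/*
 * runqueue_enqueue: link an LWP onto the tail of the subqueue matching
 * its effective priority and mark that subqueue non-empty in the bitmap.
 */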
static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = lwp_eprio(l) / PPQ;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, NULL);
	rq->rq_bitmap |= RQMASK(whichq);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
	runqueue_check(rq, whichq, l);
}

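/*
 * runqueue_dequeue: unlink an LWP from its subqueue, clearing the
 * bitmap bit if the subqueue becomes empty.
 */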
static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = lwp_eprio(l) / PPQ;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, l);
	KASSERT((rq->rq_bitmap & RQMASK(whichq)) != 0);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
	if (TAILQ_EMPTY(&sq->sq_queue))
		rq->rq_bitmap &= ~RQMASK(whichq);
	runqueue_check(rq, whichq, NULL);
}

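/*
 * runqueue_nextlwp: return the first LWP on the best-priority non-empty
 * subqueue, found via find-first-set on the bitmap, or NULL if the run
 * queue is entirely empty.  The LWP is not removed here.
 */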
static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	const uint32_t bitmap = rq->rq_bitmap;
	int whichq;

	if (bitmap == 0) {
		return NULL;
	}
	whichq = ffs(bitmap) - 1;
	return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
}

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	const uint32_t bitmap = rq->rq_bitmap;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		const subqueue_t *sq;
		first = 1;
		sq = &rq->rq_subqueues[i];
		TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
			if (first) {
				(*pr)("%c%d",
				    (bitmap & RQMASK(i)) ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */
#undef RQMASK

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

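/*
 * sched_curcpu_runnable_p: indicate whether this CPU has any work
 * available.  An offline CPU consults only its private run queue,
 * since it must not take LWPs from the global queue.
 */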
bool
sched_curcpu_runnable_p(void)
{
	struct schedstate_percpu *spc;
	runqueue_t *rq;

	spc = &curcpu()->ci_schedstate;
	rq = spc->spc_sched_info;

	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
		return (global_queue.rq_bitmap | rq->rq_bitmap) != 0;
	return rq->rq_bitmap != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
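/*
 * Worked example (assuming PUSER == 50 and NZERO == 20, as defined in
 * <sys/param.h>): an LWP whose process has p_estcpu == (20 << ESTCPU_SHIFT)
 * and nice +5 gets newpriority = 50 + 20 + 2 * 5 == 80.  Larger values
 * mean worse priority, so accumulated CPU time and positive nice both
 * push an LWP toward later scheduling.
 */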
static void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	KASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.  The estcpu
 *	the child inherited at fork is decayed as though the child had
 *	slept the whole time; any excess over that was accumulated by
 *	the child itself and is credited back to the parent.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

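/*
 * sched_enqueue: make an LWP runnable.  LWPs bound to a CPU (LW_BOUND)
 * go on that CPU's private run queue; all others go on the global queue.
 */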
void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * dropping the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	struct schedstate_percpu *spc;
	lwp_t *l1, *l2;

	spc = &curcpu()->ci_schedstate;

	/* For now, just pick the highest priority LWP. */
	l1 = runqueue_nextlwp(spc->spc_sched_info);
	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0))
		return l1;
	l2 = runqueue_nextlwp(&global_queue);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	/* a numerically lower effective priority is better */
	if (lwp_eprio(l2) < lwp_eprio(l1))
		return l2;
	else
		return l1;
}

/*
 * Dummy hooks: the 4BSD scheduler keeps no per-CPU or per-LWP state
 * for these events.
 */

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	KASSERT(node != NULL);

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */