sched_4bsd.c revision 1.1.6.13
/*	$NetBSD: sched_4bsd.c,v 1.1.6.13 2007/11/01 21:58:21 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.6.13 2007/11/01 21:58:21 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain bitmasks of non-empty queues in order to speed up finding
 * the first runnable process.  Since there can be (by definition) few
 * real-time LWPs in the system, we maintain them on a linked list,
 * sorted by priority.
 */

#define	PPB_SHIFT	5
#define	PPB_MASK	31

#define	NUM_Q		(NPRI_KERNEL + NPRI_USER)
#define	NUM_PPB		(1 << PPB_SHIFT)
#define	NUM_B		(NUM_Q / NUM_PPB)

typedef struct runqueue {
	TAILQ_HEAD(, lwp) rq_rt;		/* realtime */
	u_int		rq_count;		/* total # jobs */
	uint32_t	rq_bitmap[NUM_B];	/* bitmap of queues */
	TAILQ_HEAD(, lwp) rq_queue[NUM_Q];	/* user+kernel */
} runqueue_t;
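
/*
 * Illustrative sketch (comment only, not compiled): how a priority
 * value indexes the bitmap.  The high bit of each word corresponds to
 * the lowest priority that word covers, so for, e.g., pri == 37:
 *
 *	word = pri >> PPB_SHIFT;		   37 >> 5 == 1
 *	bit  = 0x80000000U >> (pri & PPB_MASK);	   37 & 31 == 5
 *	rq->rq_bitmap[word] |= bit;		   sets bit 26 of word 1
 *
 * runqueue_nextlwp() below relies on this MSB-first layout: ffs() on
 * a word then finds the highest-priority non-empty queue first.
 */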

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

fixpt_t decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = 1;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P())
		return;

	if (spc->spc_flags & SPCF_SEENRR) {
		/*
		 * The process has already been through a roundrobin
		 * without switching and may be hogging the CPU.
		 * Indicate that the process should yield.
		 */
		spc->spc_flags |= SPCF_SHOULDYIELD;
	} else
		spc->spc_flags |= SPCF_SEENRR;

	cpu_need_resched(ci, 0);
}

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (l_estcpu) usage in 5 * loadav time
 *	95% of (l_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		l_estcpu *= decay;
 * will compute
 * 	l_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
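
/*
 * Worked example (a sketch, assuming a steady load average of 1.0,
 * i.e. loadav == FSCALE): then loadfac == 2 * FSCALE, and each call
 * scales estcpu by
 *
 *	loadfac / (loadfac + FSCALE) == 2/3
 *
 * so five calls leave (2/3)^5 =~ 0.13 of the original value, close to
 * the 10% target derived in the comment above for 5 * loadavg seconds.
 */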

/*
 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay l_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
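
/*
 * Checking the early-out above (a sketch, again assuming loadavg 1.0,
 * so loadfac == 2 * FSCALE): the cutoff fires once n >= 14, and
 *
 *	255 * (2/3)^14 =~ 0.87 < 1
 *
 * so an estcpu of (255 << ESTCPU_SHIFT) would indeed have decayed
 * below (1 << ESTCPU_SHIFT) by then, and returning 0 is safe.
 */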

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		l->l_slptime++;
		if (l->l_slptime <= 1) {
			loadfac = loadfactor(averunnable.ldavg[0]);
			l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
		}
	}
	if (l->l_slptime <= 1)
		resetpriority(l);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < NUM_Q; i++)
		TAILQ_INIT(&rq->rq_queue[i]);
	for (i = 0; i < NUM_B; i++)
		rq->rq_bitmap[i] = 0;
	TAILQ_INIT(&rq->rq_rt);
	rq->rq_count = 0;
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;
	lwp_t *l2;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count++;

	if (pri >= PRI_USER_RT) {
		TAILQ_FOREACH(l2, &rq->rq_rt, l_runq) {
			if (lwp_eprio(l2) < pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_runq);
				return;
			}
		}
		TAILQ_INSERT_TAIL(&rq->rq_rt, l, l_runq);
		return;
	}

	rq->rq_bitmap[pri >> PPB_SHIFT] |=
	    (0x80000000U >> (pri & PPB_MASK));
	TAILQ_INSERT_TAIL(&rq->rq_queue[pri], l, l_runq);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count--;

	if (pri >= PRI_USER_RT) {
		TAILQ_REMOVE(&rq->rq_rt, l, l_runq);
		return;
	}

	TAILQ_REMOVE(&rq->rq_queue[pri], l, l_runq);
	if (TAILQ_EMPTY(&rq->rq_queue[pri]))
		rq->rq_bitmap[pri >> PPB_SHIFT] ^=
		    (0x80000000U >> (pri & PPB_MASK));
}

#if (NUM_B != 3) || (NUM_Q != 96)
#error adjust runqueue_nextlwp
#endif

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	pri_t pri;

	KASSERT(rq->rq_count != 0);

	if (!TAILQ_EMPTY(&rq->rq_rt))
		return TAILQ_FIRST(&rq->rq_rt);

	if (rq->rq_bitmap[2] != 0)
		pri = 96 - ffs(rq->rq_bitmap[2]);
	else if (rq->rq_bitmap[1] != 0)
		pri = 64 - ffs(rq->rq_bitmap[1]);
	else
		pri = 32 - ffs(rq->rq_bitmap[0]);
	return TAILQ_FIRST(&rq->rq_queue[pri]);
}
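
/*
 * Worked example (a sketch): with the MSB-first bit assignment in
 * runqueue_enqueue(), the least significant set bit in a word belongs
 * to the highest non-empty priority that word covers, so ffs() finds
 * it directly.  If only queue 95 is occupied, rq_bitmap[2] has bit 0
 * set (0x80000000U >> (95 & 31) == 0x00000001), ffs() returns 1, and
 * pri == 96 - 1 == 95, as expected.
 */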

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lwp_t *l;
	int i;

	(*pr)("PID\tLID\tPRI\tIPRI\tEPRI\tLWP\t\t NAME\n");

	TAILQ_FOREACH(l, &rq->rq_rt, l_runq) {
		(*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
		    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
		    (int)l->l_inheritedprio, lwp_eprio(l),
		    (long)l, l->l_proc->p_comm);
	}

	for (i = NUM_Q - 1; i >= 0; i--) {
		TAILQ_FOREACH(l, &rq->rq_queue[i], l_runq) {
			(*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
			    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
			    (int)l->l_inheritedprio, lwp_eprio(l),
			    (long)l, l->l_proc->p_comm);
		}
	}

	(*pr)("CPUIDX\tRESCHED\tCURPRI\tFLAGS\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		(*pr)("%d\t%d\t%d\t%04x\n", (int)ci->ci_index,
		    (int)ci->ci_want_resched,
		    (int)ci->ci_schedstate.spc_curpriority,
		    (int)ci->ci_schedstate.spc_flags);
	}

	(*pr)("NEXTLWP\n%016lx\n", (long)sched_nextlwp());
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;
	int bits;

	ci = curcpu();
	spc = &ci->ci_schedstate;
#ifndef __HAVE_FAST_SOFTINTS
	bits = ci->ci_data.cpu_softints;
	bits |= ((runqueue_t *)spc->spc_sched_info)->rq_count;
#else
	bits = ((runqueue_t *)spc->spc_sched_info)->rq_count;
#endif
	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
		bits |= global_queue.rq_count;
	return bits != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}
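
/*
 * Worked example (a sketch; the exact numbers depend on the priority
 * constants in <sys/sched.h>).  Assuming PRI_KERNEL == 64 and p_nice
 * stored as nice + 20 (so a default process has p_nice == 20), a
 * freshly started default LWP gets pri == 63 - 0 - 20 == 43, and a
 * fully loaded one (l_estcpu == ESTCPU_MAX == 18 << ESTCPU_SHIFT)
 * gets pri == 63 - 18 - 20 == 25: the 18 estcpu levels described
 * above the ESTCPU_SHIFT definition.
 */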

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time l_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	lwp_unlock(l);
}
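
/*
 * A quick sanity check on the ramp rate (a sketch): ESTCPU_ACCUM is
 * half of (1 << ESTCPU_SHIFT), so two schedclock ticks raise l_estcpu
 * by one full priority level, and it takes 2 * 18 == 36 ticks of pure
 * CPU burn for a default LWP to fall from its best to its worst
 * timesharing priority.
 */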

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(&parent->p_smutex));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(&parent->p_smutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	struct schedstate_percpu *spc;
	runqueue_t *rq;
	lwp_t *l1, *l2;

	spc = &curcpu()->ci_schedstate;

	/* For now, just pick the highest priority LWP. */
	rq = spc->spc_sched_info;
	l1 = NULL;
	if (rq->rq_count != 0)
		l1 = runqueue_nextlwp(rq);

	rq = &global_queue;
	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0) ||
	    rq->rq_count == 0)
		return l1;
	l2 = runqueue_nextlwp(rq);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) > lwp_eprio(l1))
		return l2;
	else
		return l1;
}

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	KASSERT(node != NULL);

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */