/*	$NetBSD: sched_4bsd.c,v 1.4.4.4 2007/11/15 11:44:47 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.4.4.4 2007/11/15 11:44:47 yamt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain bitmasks of non-empty queues in order to speed up finding
 * the first runnable process.  Since there can be (by definition) few
 * real-time LWPs in the system, we maintain them on a linked list,
 * sorted by priority.
 */

#define	PPB_SHIFT	5
#define	PPB_MASK	31

#define	NUM_Q		(NPRI_KERNEL + NPRI_USER)
#define	NUM_PPB		(1 << PPB_SHIFT)
#define	NUM_B		(NUM_Q / NUM_PPB)

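/*
 * Illustrative sketch only: these example_* helpers are hypothetical and
 * unused by the scheduler; they merely spell out how a priority maps onto
 * rq_bitmap below.  A priority selects the word (pri >> PPB_SHIFT) and,
 * within that word, a bit counted from the most significant end.
 */
static inline u_int
example_bitmap_word(pri_t pri)
{

	return pri >> PPB_SHIFT;		/* e.g. 37 >> 5 == 1 */
}

static inline uint32_t
example_bitmap_bit(pri_t pri)
{

	return 0x80000000U >> (pri & PPB_MASK);	/* e.g. 37 & 31 == 5 */
}
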
typedef struct runqueue {
	TAILQ_HEAD(, lwp) rq_fixedpri;		/* realtime, kthread */
	u_int		rq_count;		/* total # jobs */
	uint32_t	rq_bitmap[NUM_B];	/* bitmap of queues */
	TAILQ_HEAD(, lwp) rq_queue[NUM_Q];	/* user+kernel */
} runqueue_t;

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

fixpt_t decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = 1;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P())
		return;

	if (spc->spc_flags & SPCF_SEENRR) {
		/*
		 * The process has already been through a roundrobin
		 * without switching and may be hogging the CPU.
		 * Indicate that the process should yield.
		 */
		spc->spc_flags |= SPCF_SHOULDYIELD;
	} else
		spc->spc_flags |= SPCF_SEENRR;

	cpu_need_resched(ci, 0);
}

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

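/*
 * Worked example (illustrative, not authoritative): PRIO_MAX is 20, so
 * ESTCPU_MAX is (18 << ESTCPU_SHIFT) and a saturated l_estcpu shifts the
 * priority computed in resetpriority() below by the full 18 levels:
 *
 *	pri = (PRI_KERNEL - 1) - (ESTCPU_MAX >> ESTCPU_SHIFT) - p_nice
 *	    = (PRI_KERNEL - 1) - 18 - p_nice
 */
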
/*
 * Constants for digital decay and forget:
 *	90% of (l_estcpu) usage in 5 * loadav time
 *	95% of (l_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		l_estcpu *= decay;
 * will compute
 * 	l_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

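/*
 * A quick numeric check of the table above.  This is an illustrative
 * userland sketch only (hence the #if 0); it is not kernel code.  It
 * solves decay**power == 0.1 for power and reproduces the "power" row
 * within rounding.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	int loadav;

	for (loadav = 1; loadav <= 4; loadav++) {
		double b = 2.0 * loadav;	/* b = 2 * loadavg */
		double decay = b / (b + 1.0);

		/* power such that decay**power == 0.1 */
		printf("loadav %d: power %.2f\n", loadav,
		    log(0.1) / log(decay));
	}
	return 0;
}
#endif
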
/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay l_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

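/*
 * Worked example (illustrative only): loadfac is a fixed-point value, so
 * with a load average of 1.0, loadfac == 2 * FSCALE and the early cutoff
 * above fires once n >= 14 seconds of sleep, i.e. seven times the load
 * factor of 2.
 */
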
/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{
	fixpt_t loadfac;
	int sleeptm;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		l->l_slptime++;
		sleeptm = 1;
	} else {
		sleeptm = 0x7fffffff;
	}

	if (l->l_slptime <= sleeptm) {
		loadfac = loadfactor(averunnable.ldavg[0]);
		l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
		resetpriority(l);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < NUM_Q; i++)
		TAILQ_INIT(&rq->rq_queue[i]);
	for (i = 0; i < NUM_B; i++)
		rq->rq_bitmap[i] = 0;
	TAILQ_INIT(&rq->rq_fixedpri);
	rq->rq_count = 0;
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;
	lwp_t *l2;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count++;

	if (pri >= PRI_KTHREAD) {
		TAILQ_FOREACH(l2, &rq->rq_fixedpri, l_runq) {
			if (lwp_eprio(l2) < pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_runq);
				return;
			}
		}
		TAILQ_INSERT_TAIL(&rq->rq_fixedpri, l, l_runq);
		return;
	}

	rq->rq_bitmap[pri >> PPB_SHIFT] |=
	    (0x80000000U >> (pri & PPB_MASK));
	TAILQ_INSERT_TAIL(&rq->rq_queue[pri], l, l_runq);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count--;

	if (pri >= PRI_KTHREAD) {
		TAILQ_REMOVE(&rq->rq_fixedpri, l, l_runq);
		return;
	}

	TAILQ_REMOVE(&rq->rq_queue[pri], l, l_runq);
	if (TAILQ_EMPTY(&rq->rq_queue[pri]))
		rq->rq_bitmap[pri >> PPB_SHIFT] ^=
		    (0x80000000U >> (pri & PPB_MASK));
}

#if (NUM_B != 3) || (NUM_Q != 96)
#error adjust runqueue_nextlwp
#endif

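/*
 * Note on the lookup below (illustrative): within each bitmap word the
 * highest priority occupies the least significant set bit, so ffs(),
 * which returns the 1-based index of the lowest set bit, finds the best
 * queue directly.  For example, a runnable LWP at pri 95 sets bit 0x1
 * in rq_bitmap[2]; ffs() returns 1 and 96 - 1 == 95.
 */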
static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	pri_t pri;

	KASSERT(rq->rq_count != 0);

	if (!TAILQ_EMPTY(&rq->rq_fixedpri))
		return TAILQ_FIRST(&rq->rq_fixedpri);

	if (rq->rq_bitmap[2] != 0)
		pri = 96 - ffs(rq->rq_bitmap[2]);
	else if (rq->rq_bitmap[1] != 0)
		pri = 64 - ffs(rq->rq_bitmap[1]);
	else
		pri = 32 - ffs(rq->rq_bitmap[0]);
	return TAILQ_FIRST(&rq->rq_queue[pri]);
}

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lwp_t *l;
	int i;

	(*pr)("PID\tLID\tPRI\tIPRI\tEPRI\tLWP\t\t NAME\n");

	TAILQ_FOREACH(l, &rq->rq_fixedpri, l_runq) {
		(*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
		    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
		    (int)l->l_inheritedprio, lwp_eprio(l),
		    (long)l, l->l_proc->p_comm);
	}

	for (i = NUM_Q - 1; i >= 0; i--) {
		TAILQ_FOREACH(l, &rq->rq_queue[i], l_runq) {
			(*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
			    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
			    (int)l->l_inheritedprio, lwp_eprio(l),
			    (long)l, l->l_proc->p_comm);
		}
	}

	(*pr)("CPUIDX\tRESCHED\tCURPRI\tFLAGS\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		(*pr)("%d\t%d\t%d\t%04x\n", (int)ci->ci_index,
		    (int)ci->ci_want_resched,
		    (int)ci->ci_schedstate.spc_curpriority,
		    (int)ci->ci_schedstate.spc_flags);
	}

	(*pr)("NEXTLWP\n%016lx\n", (long)sched_nextlwp());
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	KASSERT(rq != NULL);	/* KM_NOSLEEP can fail */
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;
	int bits;

	ci = curcpu();
	spc = &ci->ci_schedstate;
#ifndef __HAVE_FAST_SOFTINTS
	bits = ci->ci_data.cpu_softints;
	bits |= ((runqueue_t *)spc->spc_sched_info)->rq_count;
#else
	bits = ((runqueue_t *)spc->spc_sched_info)->rq_count;
#endif
	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
		bits |= global_queue.rq_count;
	return bits != 0;
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

    546  1.4.4.2  yamt /*
    547  1.4.4.4  yamt  * Recompute the priority of an LWP.  Arrange to reschedule if
    548  1.4.4.4  yamt  * the resulting priority is better than that of the current LWP.
    549  1.4.4.2  yamt  */
    550  1.4.4.2  yamt static void
    551  1.4.4.2  yamt resetpriority(struct lwp *l)
    552  1.4.4.2  yamt {
    553  1.4.4.4  yamt 	pri_t pri;
    554  1.4.4.2  yamt 	struct proc *p = l->l_proc;
    555  1.4.4.2  yamt 
    556  1.4.4.4  yamt 	KASSERT(lwp_locked(l, NULL));
    557  1.4.4.2  yamt 
    558  1.4.4.4  yamt 	if (l->l_class != SCHED_OTHER)
    559  1.4.4.2  yamt 		return;
    560  1.4.4.2  yamt 
    561  1.4.4.4  yamt 	/* See comments above ESTCPU_SHIFT definition. */
    562  1.4.4.4  yamt 	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
    563  1.4.4.4  yamt 	pri = imax(pri, 0);
    564  1.4.4.4  yamt 	if (pri != l->l_priority)
    565  1.4.4.4  yamt 		lwp_changepri(l, pri);
    566  1.4.4.2  yamt }
    567  1.4.4.2  yamt 
/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time l_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will forget 90% of the CPU time a process
 * accumulated within 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

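/*
 * Worked example (illustrative only): ESTCPU_ACCUM is half of one
 * priority level (1 << (ESTCPU_SHIFT - 1)), so every two schedclock
 * ticks charged below lower the priority computed by resetpriority()
 * by one level, until l_estcpu saturates at ESTCPU_MAX.
 */
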
void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(&parent->p_smutex));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(&parent->p_smutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	struct schedstate_percpu *spc;
	runqueue_t *rq;
	lwp_t *l1, *l2;

	spc = &curcpu()->ci_schedstate;

	/* For now, just pick the highest priority LWP. */
	rq = spc->spc_sched_info;
	l1 = NULL;
	if (rq->rq_count != 0)
		l1 = runqueue_nextlwp(rq);

	rq = &global_queue;
	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0) ||
	    rq->rq_count == 0)
		return l1;
	l2 = runqueue_nextlwp(rq);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) > lwp_eprio(l1))
		return l2;
	else
		return l1;
}

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_exit(struct lwp *l)
{

}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP; clamp as elsewhere. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu = ESTCPULIM(l->l_estcpu + t->l_estcpu);
	lwp_unlock(l);
}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	KASSERT(node != NULL);

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */