      1  1.187        ad /*	$NetBSD: kern_synch.c,v 1.187 2007/03/11 21:36:49 ad Exp $	*/
      2   1.63   thorpej 
      3   1.63   thorpej /*-
      4  1.174        ad  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5   1.63   thorpej  * All rights reserved.
      6   1.63   thorpej  *
      7   1.63   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8   1.63   thorpej  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  1.174        ad  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10   1.63   thorpej  *
     11   1.63   thorpej  * Redistribution and use in source and binary forms, with or without
     12   1.63   thorpej  * modification, are permitted provided that the following conditions
     13   1.63   thorpej  * are met:
     14   1.63   thorpej  * 1. Redistributions of source code must retain the above copyright
     15   1.63   thorpej  *    notice, this list of conditions and the following disclaimer.
     16   1.63   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     17   1.63   thorpej  *    notice, this list of conditions and the following disclaimer in the
     18   1.63   thorpej  *    documentation and/or other materials provided with the distribution.
     19   1.63   thorpej  * 3. All advertising materials mentioning features or use of this software
     20   1.63   thorpej  *    must display the following acknowledgement:
     21   1.63   thorpej  *	This product includes software developed by the NetBSD
     22   1.63   thorpej  *	Foundation, Inc. and its contributors.
     23   1.63   thorpej  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24   1.63   thorpej  *    contributors may be used to endorse or promote products derived
     25   1.63   thorpej  *    from this software without specific prior written permission.
     26   1.63   thorpej  *
     27   1.63   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28   1.63   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29   1.63   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30   1.63   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31   1.63   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32   1.63   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33   1.63   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34   1.63   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35   1.63   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36   1.63   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37   1.63   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     38   1.63   thorpej  */
     39   1.26       cgd 
     40   1.26       cgd /*-
     41   1.26       cgd  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     42   1.26       cgd  *	The Regents of the University of California.  All rights reserved.
     43   1.26       cgd  * (c) UNIX System Laboratories, Inc.
     44   1.26       cgd  * All or some portions of this file are derived from material licensed
     45   1.26       cgd  * to the University of California by American Telephone and Telegraph
     46   1.26       cgd  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     47   1.26       cgd  * the permission of UNIX System Laboratories, Inc.
     48   1.26       cgd  *
     49   1.26       cgd  * Redistribution and use in source and binary forms, with or without
     50   1.26       cgd  * modification, are permitted provided that the following conditions
     51   1.26       cgd  * are met:
     52   1.26       cgd  * 1. Redistributions of source code must retain the above copyright
     53   1.26       cgd  *    notice, this list of conditions and the following disclaimer.
     54   1.26       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     55   1.26       cgd  *    notice, this list of conditions and the following disclaimer in the
     56   1.26       cgd  *    documentation and/or other materials provided with the distribution.
     57  1.136       agc  * 3. Neither the name of the University nor the names of its contributors
     58   1.26       cgd  *    may be used to endorse or promote products derived from this software
     59   1.26       cgd  *    without specific prior written permission.
     60   1.26       cgd  *
     61   1.26       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     62   1.26       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     63   1.26       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     64   1.26       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     65   1.26       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     66   1.26       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     67   1.26       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     68   1.26       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     69   1.26       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     70   1.26       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     71   1.26       cgd  * SUCH DAMAGE.
     72   1.26       cgd  *
     73   1.50      fvdl  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
     74   1.26       cgd  */
     75  1.106     lukem 
     76  1.106     lukem #include <sys/cdefs.h>
     77  1.187        ad __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.187 2007/03/11 21:36:49 ad Exp $");
     78   1.48       mrg 
     79   1.52  jonathan #include "opt_ddb.h"
     80  1.109      yamt #include "opt_kstack.h"
     81   1.82   thorpej #include "opt_lockdebug.h"
     82   1.83   thorpej #include "opt_multiprocessor.h"
     83  1.110    briggs #include "opt_perfctrs.h"
     84   1.26       cgd 
     85  1.174        ad #define	__MUTEX_PRIVATE
     86  1.174        ad 
     87   1.26       cgd #include <sys/param.h>
     88   1.26       cgd #include <sys/systm.h>
     89   1.68   thorpej #include <sys/callout.h>
     90   1.26       cgd #include <sys/proc.h>
     91   1.26       cgd #include <sys/kernel.h>
     92   1.26       cgd #include <sys/buf.h>
     93  1.111    briggs #if defined(PERFCTRS)
     94  1.110    briggs #include <sys/pmc.h>
     95  1.111    briggs #endif
     96   1.26       cgd #include <sys/signalvar.h>
     97   1.26       cgd #include <sys/resourcevar.h>
     98   1.55      ross #include <sys/sched.h>
     99  1.179       dsl #include <sys/syscall_stats.h>
    100  1.161      elad #include <sys/kauth.h>
    101  1.174        ad #include <sys/sleepq.h>
    102  1.174        ad #include <sys/lockdebug.h>
    103   1.47       mrg 
    104   1.47       mrg #include <uvm/uvm_extern.h>
    105   1.47       mrg 
    106   1.26       cgd #include <machine/cpu.h>
    107   1.34  christos 
    108   1.26       cgd int	lbolt;			/* once a second sleep address */
    109   1.88  sommerfe int	rrticks;		/* number of hardclock ticks per roundrobin() */
    110   1.26       cgd 
    111  1.152      yamt /*
    112   1.73   thorpej  * The global scheduler state.
    113   1.73   thorpej  */
    114  1.174        ad kmutex_t	sched_mutex;		/* global sched state mutex */
    115  1.174        ad struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
    116  1.159     perry volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */
    117   1.83   thorpej 
    118  1.174        ad void	schedcpu(void *);
    119  1.174        ad void	updatepri(struct lwp *);
    120   1.34  christos 
    121  1.174        ad void	sched_unsleep(struct lwp *);
    122  1.185      yamt void	sched_changepri(struct lwp *, pri_t);
    123  1.185      yamt void	sched_lendpri(struct lwp *, pri_t);
    124   1.63   thorpej 
    125  1.143      yamt struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
    126  1.157      yamt static unsigned int schedcpu_ticks;
    127  1.122   thorpej 
    128  1.174        ad syncobj_t sleep_syncobj = {
    129  1.174        ad 	SOBJ_SLEEPQ_SORTED,
    130  1.174        ad 	sleepq_unsleep,
    131  1.184      yamt 	sleepq_changepri,
    132  1.184      yamt 	sleepq_lendpri,
    133  1.184      yamt 	syncobj_noowner,
    134  1.174        ad };
    135  1.174        ad 
    136  1.174        ad syncobj_t sched_syncobj = {
    137  1.174        ad 	SOBJ_SLEEPQ_SORTED,
    138  1.174        ad 	sched_unsleep,
    139  1.184      yamt 	sched_changepri,
    140  1.184      yamt 	sched_lendpri,
    141  1.184      yamt 	syncobj_noowner,
    142  1.174        ad };
    143  1.122   thorpej 
    144   1.26       cgd /*
    145   1.26       cgd  * Force switch among equal priority processes every 100ms.
    146   1.88  sommerfe  * Called from hardclock every hz/10 == rrticks hardclock ticks.
    147   1.26       cgd  */
    148   1.26       cgd /* ARGSUSED */
    149   1.26       cgd void
    150   1.89  sommerfe roundrobin(struct cpu_info *ci)
    151   1.26       cgd {
    152   1.89  sommerfe 	struct schedstate_percpu *spc = &ci->ci_schedstate;
    153   1.26       cgd 
    154   1.88  sommerfe 	spc->spc_rrticks = rrticks;
    155  1.130   nathanw 
    156  1.122   thorpej 	if (curlwp != NULL) {
    157   1.73   thorpej 		if (spc->spc_flags & SPCF_SEENRR) {
    158   1.69   thorpej 			/*
    159   1.69   thorpej 			 * The process has already been through a roundrobin
    160   1.69   thorpej 			 * without switching and may be hogging the CPU.
    161   1.69   thorpej 			 * Indicate that the process should yield.
    162   1.69   thorpej 			 */
    163   1.73   thorpej 			spc->spc_flags |= SPCF_SHOULDYIELD;
    164   1.69   thorpej 		} else
    165   1.73   thorpej 			spc->spc_flags |= SPCF_SEENRR;
    166   1.69   thorpej 	}
    167  1.174        ad 	cpu_need_resched(curcpu());
    168   1.26       cgd }
    169   1.26       cgd 
    170  1.153      yamt #define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
    171  1.153      yamt #define	NICE_WEIGHT 2			/* priorities per nice level */
    172  1.153      yamt 
    173  1.153      yamt #define	ESTCPU_SHIFT	11
    174  1.153      yamt #define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
    175  1.153      yamt #define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
    176  1.153      yamt 
    177   1.26       cgd /*
    178   1.26       cgd  * Constants for digital decay and forget:
    179   1.26       cgd  *	90% of (p_estcpu) usage in 5 * loadav time
    180   1.26       cgd  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
    181   1.26       cgd  *          Note that, as ps(1) mentions, this can let percentages
    182   1.26       cgd  *          total over 100% (I've seen 137.9% for 3 processes).
    183   1.26       cgd  *
    184   1.26       cgd  * Note that hardclock updates p_estcpu and p_cpticks independently.
    185   1.26       cgd  *
    186   1.26       cgd  * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
    187   1.26       cgd  * That is, the system wants to compute a value of decay such
    188   1.26       cgd  * that the following for loop:
    189   1.26       cgd  * 	for (i = 0; i < (5 * loadavg); i++)
    190   1.26       cgd  * 		p_estcpu *= decay;
    191   1.26       cgd  * will compute
    192   1.26       cgd  * 	p_estcpu *= 0.1;
    193   1.26       cgd  * for all values of loadavg:
    194   1.26       cgd  *
    195   1.26       cgd  * Mathematically this loop can be expressed by saying:
    196   1.26       cgd  * 	decay ** (5 * loadavg) ~= .1
    197   1.26       cgd  *
    198   1.26       cgd  * The system computes decay as:
    199   1.26       cgd  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
    200   1.26       cgd  *
    201   1.26       cgd  * We wish to prove that the system's computation of decay
    202   1.26       cgd  * will always fulfill the equation:
    203   1.26       cgd  * 	decay ** (5 * loadavg) ~= .1
    204   1.26       cgd  *
    205   1.26       cgd  * If we compute b as:
    206   1.26       cgd  * 	b = 2 * loadavg
    207   1.26       cgd  * then
    208   1.26       cgd  * 	decay = b / (b + 1)
    209   1.26       cgd  *
    210   1.26       cgd  * We now need to prove two things:
    211   1.26       cgd  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
    212   1.26       cgd  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
    213  1.130   nathanw  *
    214   1.26       cgd  * Facts:
    215   1.26       cgd  *         For x close to zero, exp(x) =~ 1 + x, since
    216   1.26       cgd  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
    217   1.26       cgd  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
    218   1.26       cgd  *         For x close to zero, ln(1+x) =~ x, since
    219   1.26       cgd  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
    220   1.26       cgd  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
    221   1.26       cgd  *         ln(.1) =~ -2.30
    222   1.26       cgd  *
    223   1.26       cgd  * Proof of (1):
    224   1.26       cgd  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
    225   1.26       cgd  *	solving for factor,
    226   1.26       cgd  *      ln(factor) =~ (-2.30/5*loadav), or
    227   1.26       cgd  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
    228   1.26       cgd  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
    229   1.26       cgd  *
    230   1.26       cgd  * Proof of (2):
    231   1.26       cgd  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
    232   1.26       cgd  *	solving for power,
    233   1.26       cgd  *      power*ln(b/(b+1)) =~ -2.30, or
    234   1.26       cgd  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
    235   1.26       cgd  *
    236   1.26       cgd  * Actual power values for the implemented algorithm are as follows:
    237   1.26       cgd  *      loadav: 1       2       3       4
    238   1.26       cgd  *      power:  5.68    10.32   14.94   19.55
    239   1.26       cgd  */
    240   1.26       cgd 
    241   1.26       cgd /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
    242   1.26       cgd #define	loadfactor(loadav)	(2 * (loadav))
    243  1.153      yamt 
    244  1.153      yamt static fixpt_t
    245  1.153      yamt decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
    246  1.153      yamt {
    247  1.153      yamt 
    248  1.153      yamt 	if (estcpu == 0) {
    249  1.153      yamt 		return 0;
    250  1.153      yamt 	}
    251  1.153      yamt 
    252  1.153      yamt #if !defined(_LP64)
     253  1.153      yamt 	/* Avoid 64-bit arithmetic. */
    254  1.153      yamt #define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
    255  1.153      yamt 	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
    256  1.153      yamt 		return estcpu * loadfac / (loadfac + FSCALE);
    257  1.153      yamt 	}
    258  1.153      yamt #endif /* !defined(_LP64) */
    259  1.153      yamt 
    260  1.153      yamt 	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
    261  1.153      yamt }
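
/*
 * Worked example (an illustrative sketch, not used by the kernel; the
 * function name below is made up): with a load average of 1.0,
 * loadfactor() yields 2*FSCALE, so decay_cpu() multiplies the estimate
 * by 2*FSCALE / (2*FSCALE + FSCALE) = 2/3 on each call.  Applying it
 * ~5.68 times leaves (2/3)^5.68 =~ 0.1 of the original estcpu, which
 * matches the "power" table in the comment above.
 */
#if 0	/* illustrative only */
static fixpt_t
decay_cpu_example(fixpt_t estcpu)
{
	const fixpt_t loadfac = loadfactor(FSCALE);	/* loadavg == 1.0 */
	int i;

	/* Six applications decay estcpu to roughly a tenth of its value. */
	for (i = 0; i < 6; i++)
		estcpu = decay_cpu(loadfac, estcpu);
	return estcpu;
}
#endif	/* 0 */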
    262   1.26       cgd 
    263  1.157      yamt /*
    264  1.157      yamt  * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
    265  1.157      yamt  * sleeping for at least seven times the loadfactor will decay p_estcpu to
    266  1.157      yamt  * less than (1 << ESTCPU_SHIFT).
    267  1.157      yamt  *
    268  1.157      yamt  * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
    269  1.157      yamt  */
    270  1.157      yamt static fixpt_t
    271  1.157      yamt decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
    272  1.157      yamt {
    273  1.157      yamt 
    274  1.157      yamt 	if ((n << FSHIFT) >= 7 * loadfac) {
    275  1.157      yamt 		return 0;
    276  1.157      yamt 	}
    277  1.157      yamt 
    278  1.157      yamt 	while (estcpu != 0 && n > 1) {
    279  1.157      yamt 		estcpu = decay_cpu(loadfac, estcpu);
    280  1.157      yamt 		n--;
    281  1.157      yamt 	}
    282  1.157      yamt 
    283  1.157      yamt 	return estcpu;
    284  1.157      yamt }
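
/*
 * For example, with a load average of 1.0 (loadfac == 2*FSCALE) the
 * shortcut above returns 0 once n reaches 14; per the comment above, the
 * fully decayed estimate would already be below (1 << ESTCPU_SHIFT) by
 * that point, so the remaining iterations are not worth doing.
 */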
    285  1.157      yamt 
    286   1.26       cgd /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    287   1.26       cgd fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    288   1.26       cgd 
    289   1.26       cgd /*
    290   1.26       cgd  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    291   1.26       cgd  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    292   1.26       cgd  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    293   1.26       cgd  *
    294   1.26       cgd  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    295   1.26       cgd  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    296   1.26       cgd  *
     297   1.26       cgd  * If you don't want to bother with the faster/more-accurate formula, you
    298   1.26       cgd  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    299   1.26       cgd  * (more general) method of calculating the %age of CPU used by a process.
    300   1.26       cgd  */
    301   1.26       cgd #define	CCPU_SHIFT	11
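
/*
 * For example: schedcpu() multiplies p_pctcpu by ccpu == exp(-1/20) once
 * per second, so after 60 seconds an otherwise idle process retains
 * exp(-3) =~ 5% of its old p_pctcpu, i.e. about 95% has decayed away.
 */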
    302   1.26       cgd 
    303   1.26       cgd /*
    304  1.174        ad  * schedcpu:
    305  1.174        ad  *
    306  1.174        ad  *	Recompute process priorities, every hz ticks.
    307  1.174        ad  *
    308  1.174        ad  *	XXXSMP This needs to be reorganised in order to reduce the locking
    309  1.174        ad  *	burden.
    310   1.26       cgd  */
    311   1.26       cgd /* ARGSUSED */
    312   1.26       cgd void
    313  1.171      yamt schedcpu(void *arg)
    314   1.26       cgd {
    315   1.71  augustss 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
    316  1.174        ad 	struct rlimit *rlim;
    317  1.122   thorpej 	struct lwp *l;
    318   1.71  augustss 	struct proc *p;
    319  1.174        ad 	int minslp, clkhz, sig;
    320  1.174        ad 	long runtm;
    321   1.26       cgd 
    322  1.157      yamt 	schedcpu_ticks++;
    323  1.157      yamt 
    324  1.174        ad 	mutex_enter(&proclist_mutex);
    325  1.145      yamt 	PROCLIST_FOREACH(p, &allproc) {
    326   1.26       cgd 		/*
    327  1.174        ad 		 * Increment time in/out of memory and sleep time (if
    328  1.174        ad 		 * sleeping).  We ignore overflow; with 16-bit int's
    329   1.26       cgd 		 * (remember them?) overflow takes 45 days.
    330   1.26       cgd 		 */
    331  1.122   thorpej 		minslp = 2;
    332  1.174        ad 		mutex_enter(&p->p_smutex);
    333  1.174        ad 		runtm = p->p_rtime.tv_sec;
    334  1.122   thorpej 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    335  1.174        ad 			lwp_lock(l);
    336  1.174        ad 			runtm += l->l_rtime.tv_sec;
    337  1.122   thorpej 			l->l_swtime++;
    338  1.130   nathanw 			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
    339  1.122   thorpej 			    l->l_stat == LSSUSPENDED) {
    340  1.122   thorpej 				l->l_slptime++;
    341  1.122   thorpej 				minslp = min(minslp, l->l_slptime);
    342  1.122   thorpej 			} else
    343  1.122   thorpej 				minslp = 0;
    344  1.174        ad 			lwp_unlock(l);
    345  1.122   thorpej 		}
    346   1.26       cgd 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
    347  1.174        ad 
    348  1.174        ad 		/*
    349  1.174        ad 		 * Check if the process exceeds its CPU resource allocation.
    350  1.174        ad 		 * If over max, kill it.
    351  1.174        ad 		 */
    352  1.174        ad 		rlim = &p->p_rlimit[RLIMIT_CPU];
    353  1.174        ad 		sig = 0;
    354  1.174        ad 		if (runtm >= rlim->rlim_cur) {
    355  1.174        ad 			if (runtm >= rlim->rlim_max)
    356  1.174        ad 				sig = SIGKILL;
    357  1.174        ad 			else {
    358  1.174        ad 				sig = SIGXCPU;
    359  1.174        ad 				if (rlim->rlim_cur < rlim->rlim_max)
    360  1.174        ad 					rlim->rlim_cur += 5;
    361  1.174        ad 			}
    362  1.174        ad 		}
    363  1.174        ad 
    364  1.174        ad 		/*
    365  1.174        ad 		 * If the process has run for more than autonicetime, reduce
    366  1.174        ad 		 * priority to give others a chance.
    367  1.174        ad 		 */
    368  1.174        ad 		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
    369  1.174        ad 		    && kauth_cred_geteuid(p->p_cred)) {
    370  1.174        ad 			mutex_spin_enter(&p->p_stmutex);
    371  1.174        ad 			p->p_nice = autoniceval + NZERO;
    372  1.174        ad 			resetprocpriority(p);
    373  1.174        ad 			mutex_spin_exit(&p->p_stmutex);
    374  1.174        ad 		}
    375  1.174        ad 
    376   1.26       cgd 		/*
    377   1.26       cgd 		 * If the process has slept the entire second,
    378   1.26       cgd 		 * stop recalculating its priority until it wakes up.
    379   1.26       cgd 		 */
    380  1.174        ad 		if (minslp <= 1) {
    381  1.174        ad 			/*
    382  1.174        ad 			 * p_pctcpu is only for ps.
    383  1.174        ad 			 */
    384  1.174        ad 			mutex_spin_enter(&p->p_stmutex);
    385  1.174        ad 			clkhz = stathz != 0 ? stathz : hz;
    386   1.26       cgd #if	(FSHIFT >= CCPU_SHIFT)
    387  1.174        ad 			p->p_pctcpu += (clkhz == 100)?
    388  1.174        ad 			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
    389  1.174        ad 			    100 * (((fixpt_t) p->p_cpticks)
    390  1.174        ad 			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
    391   1.26       cgd #else
    392  1.174        ad 			p->p_pctcpu += ((FSCALE - ccpu) *
    393  1.174        ad 			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
    394   1.26       cgd #endif
    395  1.174        ad 			p->p_cpticks = 0;
    396  1.174        ad 			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
    397  1.174        ad 
    398  1.174        ad 			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    399  1.174        ad 				lwp_lock(l);
    400  1.174        ad 				if (l->l_slptime <= 1 &&
    401  1.174        ad 				    l->l_priority >= PUSER)
    402  1.174        ad 					resetpriority(l);
    403  1.174        ad 				lwp_unlock(l);
    404  1.122   thorpej 			}
    405  1.174        ad 			mutex_spin_exit(&p->p_stmutex);
    406  1.174        ad 		}
    407  1.174        ad 
    408  1.174        ad 		mutex_exit(&p->p_smutex);
    409  1.174        ad 		if (sig) {
    410  1.174        ad 			psignal(p, sig);
    411   1.26       cgd 		}
    412   1.26       cgd 	}
    413  1.174        ad 	mutex_exit(&proclist_mutex);
    414   1.47       mrg 	uvm_meter();
    415  1.186  christos 	wakeup((void *)&lbolt);
    416  1.143      yamt 	callout_schedule(&schedcpu_ch, hz);
    417   1.26       cgd }
    418   1.26       cgd 
    419   1.26       cgd /*
    420   1.26       cgd  * Recalculate the priority of a process after it has slept for a while.
    421   1.26       cgd  */
    422   1.26       cgd void
    423  1.122   thorpej updatepri(struct lwp *l)
    424   1.26       cgd {
    425  1.122   thorpej 	struct proc *p = l->l_proc;
    426   1.83   thorpej 	fixpt_t loadfac;
    427   1.83   thorpej 
    428  1.174        ad 	LOCK_ASSERT(lwp_locked(l, NULL));
    429  1.157      yamt 	KASSERT(l->l_slptime > 1);
    430   1.83   thorpej 
    431   1.83   thorpej 	loadfac = loadfactor(averunnable.ldavg[0]);
    432   1.26       cgd 
    433  1.157      yamt 	l->l_slptime--; /* the first time was done in schedcpu */
    434  1.157      yamt 	/* XXX NJWLWP */
    435  1.174        ad 	/* XXXSMP occasionally unlocked, should be per-LWP */
    436  1.157      yamt 	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
    437  1.122   thorpej 	resetpriority(l);
    438   1.26       cgd }
    439   1.26       cgd 
    440   1.26       cgd /*
    441  1.174        ad  * During autoconfiguration or after a panic, a sleep will simply lower the
    442  1.174        ad  * priority briefly to allow interrupts, then return.  The priority to be
    443  1.174        ad  * used (safepri) is machine-dependent, thus this value is initialized and
    444  1.174        ad  * maintained in the machine-dependent layers.  This priority will typically
    445  1.174        ad  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    446  1.174        ad  * it can be made higher to block network software interrupts after panics.
    447   1.26       cgd  */
    448  1.174        ad int	safepri;
    449   1.26       cgd 
    450   1.26       cgd /*
    451  1.174        ad  * OBSOLETE INTERFACE
    452  1.174        ad  *
    453   1.26       cgd  * General sleep call.  Suspends the current process until a wakeup is
    454   1.26       cgd  * performed on the specified identifier.  The process will then be made
    455  1.174        ad  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
     456  1.174        ad  * means no timeout).  If pri includes the PCATCH flag, signals are checked
    457   1.26       cgd  * before and after sleeping, else signals are not checked.  Returns 0 if
    458   1.26       cgd  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
    459   1.26       cgd  * signal needs to be delivered, ERESTART is returned if the current system
    460   1.26       cgd  * call should be restarted if possible, and EINTR is returned if the system
    461   1.26       cgd  * call should be interrupted by the signal (return EINTR).
    462   1.77   thorpej  *
    463  1.174        ad  * The interlock is held until we are on a sleep queue. The interlock will
    464  1.174        ad  * be locked before returning back to the caller unless the PNORELOCK flag
    465  1.174        ad  * is specified, in which case the interlock will always be unlocked upon
    466  1.174        ad  * return.
    467   1.26       cgd  */
    468   1.26       cgd int
    469  1.185      yamt ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    470  1.174        ad 	volatile struct simplelock *interlock)
    471   1.26       cgd {
    472  1.122   thorpej 	struct lwp *l = curlwp;
    473  1.174        ad 	sleepq_t *sq;
    474  1.174        ad 	int error, catch;
    475   1.26       cgd 
    476  1.174        ad 	if (sleepq_dontsleep(l)) {
    477  1.174        ad 		(void)sleepq_abort(NULL, 0);
    478  1.174        ad 		if ((priority & PNORELOCK) != 0)
    479   1.77   thorpej 			simple_unlock(interlock);
    480  1.174        ad 		return 0;
    481   1.26       cgd 	}
    482   1.78  sommerfe 
    483  1.174        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    484  1.174        ad 	sleepq_enter(sq, l);
    485   1.42       cgd 
    486  1.174        ad 	if (interlock != NULL) {
    487  1.174        ad 		LOCK_ASSERT(simple_lock_held(interlock));
    488  1.174        ad 		simple_unlock(interlock);
    489  1.150       chs 	}
    490  1.150       chs 
    491  1.174        ad 	catch = priority & PCATCH;
    492  1.174        ad 	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
    493  1.174        ad 	    &sleep_syncobj);
    494  1.174        ad 	error = sleepq_unblock(timo, catch);
    495  1.126        pk 
    496  1.174        ad 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    497  1.126        pk 		simple_lock(interlock);
    498  1.174        ad 
    499  1.174        ad 	return error;
    500   1.26       cgd }
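
/*
 * A minimal ltsleep() usage sketch (illustrative only; "struct
 * example_softc" and its members are hypothetical).  The interlock
 * protects the condition and is re-taken by ltsleep() before it returns,
 * so the condition can be re-checked safely in a loop.
 */
#if 0
static int
example_acquire(struct example_softc *sc)
{
	int error;

	simple_lock(&sc->sc_slock);
	while (sc->sc_busy) {
		error = ltsleep(&sc->sc_busy, PWAIT | PCATCH, "excwait", 0,
		    &sc->sc_slock);
		if (error != 0) {
			simple_unlock(&sc->sc_slock);
			return error;
		}
	}
	sc->sc_busy = 1;
	simple_unlock(&sc->sc_slock);
	return 0;
}
#endif	/* 0 */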
    501   1.26       cgd 
    502  1.187        ad int
    503  1.187        ad mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    504  1.187        ad 	kmutex_t *mtx)
    505  1.187        ad {
    506  1.187        ad 	struct lwp *l = curlwp;
    507  1.187        ad 	sleepq_t *sq;
    508  1.187        ad 	int error, catch;
    509  1.187        ad 
    510  1.187        ad 	if (sleepq_dontsleep(l)) {
    511  1.187        ad 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    512  1.187        ad 		return 0;
    513  1.187        ad 	}
    514  1.187        ad 
    515  1.187        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    516  1.187        ad 	sleepq_enter(sq, l);
    517  1.187        ad 	mutex_exit(mtx);
    518  1.187        ad 
    519  1.187        ad 	catch = priority & PCATCH;
    520  1.187        ad 	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
    521  1.187        ad 	    &sleep_syncobj);
    522  1.187        ad 	error = sleepq_unblock(timo, catch);
    523  1.187        ad 
    524  1.187        ad 	if ((priority & PNORELOCK) == 0)
    525  1.187        ad 		mutex_enter(mtx);
    526  1.187        ad 
    527  1.187        ad 	return error;
    528  1.187        ad }
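
/*
 * A minimal mtsleep() usage sketch (illustrative only; "struct
 * example_softc" and its members are hypothetical).  The kmutex is
 * dropped while asleep and re-acquired before mtsleep() returns (unless
 * PNORELOCK is given), so the usual pattern is a condition loop; the
 * waking side sets the condition and calls wakeup() on the same address.
 */
#if 0
static void
example_wait(struct example_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (!sc->sc_done)
		(void)mtsleep(&sc->sc_done, PWAIT, "donewait", 0,
		    &sc->sc_lock);
	mutex_exit(&sc->sc_lock);
}

static void
example_done(struct example_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_done = 1;
	wakeup(&sc->sc_done);
	mutex_exit(&sc->sc_lock);
}
#endif	/* 0 */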
    529  1.187        ad 
    530   1.26       cgd /*
    531  1.174        ad  * General sleep call for situations where a wake-up is not expected.
    532   1.26       cgd  */
    533  1.174        ad int
    534  1.182   thorpej kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    535   1.26       cgd {
    536  1.174        ad 	struct lwp *l = curlwp;
    537  1.174        ad 	sleepq_t *sq;
    538  1.174        ad 	int error;
    539   1.26       cgd 
    540  1.174        ad 	if (sleepq_dontsleep(l))
    541  1.174        ad 		return sleepq_abort(NULL, 0);
    542   1.26       cgd 
    543  1.174        ad 	if (mtx != NULL)
    544  1.174        ad 		mutex_exit(mtx);
    545  1.174        ad 	sq = sleeptab_lookup(&sleeptab, l);
    546  1.174        ad 	sleepq_enter(sq, l);
    547  1.174        ad 	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
    548  1.174        ad 	error = sleepq_unblock(timo, intr);
    549  1.174        ad 	if (mtx != NULL)
    550  1.174        ad 		mutex_enter(mtx);
    551   1.83   thorpej 
    552  1.174        ad 	return error;
    553  1.139        cl }
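
/*
 * For example, kpause("zzz", false, hz / 10, NULL) simply pauses the
 * calling LWP for roughly 100ms; no wakeup() is expected, since the LWP
 * sleeps on its own address.
 */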
    554  1.139        cl 
    555   1.26       cgd /*
    556  1.174        ad  * OBSOLETE INTERFACE
    557  1.174        ad  *
    558   1.26       cgd  * Make all processes sleeping on the specified identifier runnable.
    559   1.26       cgd  */
    560   1.26       cgd void
    561  1.174        ad wakeup(wchan_t ident)
    562   1.26       cgd {
    563  1.174        ad 	sleepq_t *sq;
    564   1.83   thorpej 
    565  1.174        ad 	if (cold)
    566  1.174        ad 		return;
    567   1.83   thorpej 
    568  1.174        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    569  1.174        ad 	sleepq_wake(sq, ident, (u_int)-1);
    570   1.63   thorpej }
    571   1.63   thorpej 
    572   1.63   thorpej /*
    573  1.174        ad  * OBSOLETE INTERFACE
    574  1.174        ad  *
    575   1.63   thorpej  * Make the highest priority process first in line on the specified
    576   1.63   thorpej  * identifier runnable.
    577   1.63   thorpej  */
    578  1.174        ad void
    579  1.174        ad wakeup_one(wchan_t ident)
    580   1.63   thorpej {
    581  1.174        ad 	sleepq_t *sq;
    582   1.63   thorpej 
    583  1.174        ad 	if (cold)
    584  1.174        ad 		return;
    585  1.174        ad 
    586  1.174        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    587  1.174        ad 	sleepq_wake(sq, ident, 1);
    588  1.174        ad }
    589   1.63   thorpej 
    590  1.117  gmcgarry 
    591  1.117  gmcgarry /*
    592  1.117  gmcgarry  * General yield call.  Puts the current process back on its run queue and
    593  1.117  gmcgarry  * performs a voluntary context switch.  Should only be called when the
     594  1.117  gmcgarry  * current process explicitly requests it (e.g. sched_yield(2) in compat code).
    595  1.117  gmcgarry  */
    596  1.117  gmcgarry void
    597  1.117  gmcgarry yield(void)
    598  1.117  gmcgarry {
    599  1.122   thorpej 	struct lwp *l = curlwp;
    600  1.117  gmcgarry 
    601  1.174        ad 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    602  1.174        ad 	lwp_lock(l);
    603  1.174        ad 	if (l->l_stat == LSONPROC) {
    604  1.174        ad 		KASSERT(lwp_locked(l, &sched_mutex));
    605  1.174        ad 		l->l_priority = l->l_usrpri;
    606  1.174        ad 	}
    607  1.174        ad 	l->l_nvcsw++;
    608  1.122   thorpej 	mi_switch(l, NULL);
    609  1.174        ad 	KERNEL_LOCK(l->l_biglocks, l);
    610   1.69   thorpej }
    611   1.69   thorpej 
    612   1.69   thorpej /*
    613   1.69   thorpej  * General preemption call.  Puts the current process back on its run queue
    614  1.156    rpaulo  * and performs an involuntary context switch.
    615   1.69   thorpej  */
    616   1.69   thorpej void
    617  1.174        ad preempt(void)
    618   1.69   thorpej {
    619  1.122   thorpej 	struct lwp *l = curlwp;
    620   1.69   thorpej 
    621  1.174        ad 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    622  1.174        ad 	lwp_lock(l);
    623  1.174        ad 	if (l->l_stat == LSONPROC) {
    624  1.174        ad 		KASSERT(lwp_locked(l, &sched_mutex));
    625  1.174        ad 		l->l_priority = l->l_usrpri;
    626  1.174        ad 	}
    627  1.174        ad 	l->l_nivcsw++;
    628  1.174        ad 	(void)mi_switch(l, NULL);
    629  1.174        ad 	KERNEL_LOCK(l->l_biglocks, l);
    630   1.69   thorpej }
    631   1.69   thorpej 
    632   1.69   thorpej /*
    633  1.174        ad  * The machine independent parts of context switch.  Switch to "new"
    634  1.174        ad  * if non-NULL, otherwise let cpu_switch choose the next lwp.
    635  1.130   nathanw  *
    636  1.122   thorpej  * Returns 1 if another process was actually run.
    637   1.26       cgd  */
    638  1.122   thorpej int
    639  1.122   thorpej mi_switch(struct lwp *l, struct lwp *newl)
    640   1.26       cgd {
    641   1.76   thorpej 	struct schedstate_percpu *spc;
    642  1.174        ad 	struct timeval tv;
    643  1.174        ad 	int retval, oldspl;
    644   1.71  augustss 	long s, u;
    645   1.26       cgd 
    646  1.174        ad 	LOCK_ASSERT(lwp_locked(l, NULL));
    647  1.174        ad 
    648  1.174        ad #ifdef LOCKDEBUG
    649  1.174        ad 	spinlock_switchcheck();
    650  1.174        ad 	simple_lock_switchcheck();
    651  1.174        ad #endif
    652  1.174        ad #ifdef KSTACK_CHECK_MAGIC
    653  1.174        ad 	kstack_check_magic(l);
    654  1.174        ad #endif
    655   1.83   thorpej 
    656   1.90  sommerfe 	/*
     657  1.174        ad 	 * It's safe to read the per-CPU schedstate unlocked here, as all we
     658  1.174        ad 	 * are after is the run time and that's guaranteed to have been last
    659  1.174        ad 	 * updated by this CPU.
    660   1.90  sommerfe 	 */
    661  1.122   thorpej 	KDASSERT(l->l_cpu == curcpu());
    662  1.122   thorpej 	spc = &l->l_cpu->ci_schedstate;
    663   1.76   thorpej 
    664   1.26       cgd 	/*
    665   1.26       cgd 	 * Compute the amount of time during which the current
    666  1.113  gmcgarry 	 * process was running.
    667   1.26       cgd 	 */
    668   1.26       cgd 	microtime(&tv);
    669  1.174        ad 	u = l->l_rtime.tv_usec +
    670  1.122   thorpej 	    (tv.tv_usec - spc->spc_runtime.tv_usec);
    671  1.174        ad 	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
    672  1.181       dsl 	if (u < 0) {
    673  1.181       dsl 		u += 1000000;
    674  1.181       dsl 		s--;
    675  1.181       dsl 	} else if (u >= 1000000) {
    676  1.181       dsl 		u -= 1000000;
    677  1.181       dsl 		s++;
    678   1.26       cgd 	}
    679  1.174        ad 	l->l_rtime.tv_usec = u;
    680  1.174        ad 	l->l_rtime.tv_sec = s;
    681   1.26       cgd 
    682  1.180       dsl 	/* Count time spent in current system call */
    683  1.180       dsl 	SYSCALL_TIME_SLEEP(l);
    684  1.180       dsl 
    685   1.26       cgd 	/*
    686  1.174        ad 	 * XXXSMP If we are using h/w performance counters, save context.
    687   1.69   thorpej 	 */
    688  1.174        ad #if PERFCTRS
    689  1.175  christos 	if (PMC_ENABLED(l->l_proc)) {
    690  1.175  christos 		pmc_save_context(l->l_proc);
    691  1.174        ad 	}
    692  1.109      yamt #endif
    693   1.26       cgd 
    694  1.113  gmcgarry 	/*
    695  1.174        ad 	 * Acquire the sched_mutex if necessary.  It will be released by
    696  1.174        ad 	 * cpu_switch once it has decided to idle, or picked another LWP
    697  1.174        ad 	 * to run.
    698  1.113  gmcgarry 	 */
    699  1.174        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    700  1.174        ad 	if (l->l_mutex != &sched_mutex) {
    701  1.174        ad 		mutex_spin_enter(&sched_mutex);
    702  1.174        ad 		lwp_unlock(l);
    703  1.166  christos 	}
    704  1.110    briggs #endif
    705  1.113  gmcgarry 
    706  1.113  gmcgarry 	/*
    707  1.174        ad 	 * If on the CPU and we have gotten this far, then we must yield.
    708  1.113  gmcgarry 	 */
    709  1.174        ad 	KASSERT(l->l_stat != LSRUN);
    710  1.174        ad 	if (l->l_stat == LSONPROC) {
    711  1.174        ad 		KASSERT(lwp_locked(l, &sched_mutex));
    712  1.174        ad 		l->l_stat = LSRUN;
    713  1.174        ad 		setrunqueue(l);
    714  1.174        ad 	}
    715  1.114  gmcgarry 	uvmexp.swtch++;
    716  1.174        ad 
    717  1.174        ad 	/*
    718  1.174        ad 	 * Process is about to yield the CPU; clear the appropriate
    719  1.174        ad 	 * scheduling flags.
    720  1.174        ad 	 */
    721  1.174        ad 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    722  1.174        ad 
    723  1.174        ad 	LOCKDEBUG_BARRIER(&sched_mutex, 1);
    724  1.174        ad 
    725  1.174        ad 	/*
    726  1.174        ad 	 * Switch to the new current LWP.  When we run again, we'll
    727  1.174        ad 	 * return back here.
    728  1.174        ad 	 */
    729  1.174        ad 	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
    730  1.174        ad 
    731  1.174        ad 	if (newl == NULL || newl->l_back == NULL)
    732  1.122   thorpej 		retval = cpu_switch(l, NULL);
    733  1.174        ad 	else {
    734  1.174        ad 		KASSERT(lwp_locked(newl, &sched_mutex));
    735  1.122   thorpej 		remrunqueue(newl);
    736  1.122   thorpej 		cpu_switchto(l, newl);
    737  1.122   thorpej 		retval = 0;
    738  1.122   thorpej 	}
    739  1.110    briggs 
    740  1.110    briggs 	/*
    741  1.174        ad 	 * XXXSMP If we are using h/w performance counters, restore context.
    742   1.26       cgd 	 */
    743  1.114  gmcgarry #if PERFCTRS
    744  1.175  christos 	if (PMC_ENABLED(l->l_proc)) {
    745  1.175  christos 		pmc_restore_context(l->l_proc);
    746  1.166  christos 	}
    747  1.114  gmcgarry #endif
    748  1.110    briggs 
    749  1.110    briggs 	/*
    750   1.76   thorpej 	 * We're running again; record our new start time.  We might
    751  1.174        ad 	 * be running on a new CPU now, so don't use the cached
    752   1.76   thorpej 	 * schedstate_percpu pointer.
    753   1.76   thorpej 	 */
    754  1.180       dsl 	SYSCALL_TIME_WAKEUP(l);
    755  1.122   thorpej 	KDASSERT(l->l_cpu == curcpu());
    756  1.122   thorpej 	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
    757  1.174        ad 	splx(oldspl);
    758  1.169      yamt 
    759  1.122   thorpej 	return retval;
    760   1.26       cgd }
    761   1.26       cgd 
    762   1.26       cgd /*
    763   1.26       cgd  * Initialize the (doubly-linked) run queues
    764   1.26       cgd  * to be empty.
    765   1.26       cgd  */
    766   1.26       cgd void
    767   1.26       cgd rqinit()
    768   1.26       cgd {
    769   1.71  augustss 	int i;
    770   1.26       cgd 
    771   1.73   thorpej 	for (i = 0; i < RUNQUE_NQS; i++)
    772   1.73   thorpej 		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
    773  1.122   thorpej 		    (struct lwp *)&sched_qs[i];
    774  1.174        ad 
    775  1.174        ad 	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
    776   1.26       cgd }
    777   1.26       cgd 
    778  1.158     perry static inline void
    779  1.184      yamt resched_lwp(struct lwp *l)
    780  1.119   thorpej {
    781  1.119   thorpej 	struct cpu_info *ci;
    782  1.185      yamt 	const pri_t pri = lwp_eprio(l);
    783  1.119   thorpej 
    784  1.119   thorpej 	/*
    785  1.119   thorpej 	 * XXXSMP
    786  1.122   thorpej 	 * Since l->l_cpu persists across a context switch,
    787  1.119   thorpej 	 * this gives us *very weak* processor affinity, in
    788  1.119   thorpej 	 * that we notify the CPU on which the process last
    789  1.119   thorpej 	 * ran that it should try to switch.
    790  1.119   thorpej 	 *
    791  1.119   thorpej 	 * This does not guarantee that the process will run on
    792  1.119   thorpej 	 * that processor next, because another processor might
    793  1.119   thorpej 	 * grab it the next time it performs a context switch.
    794  1.119   thorpej 	 *
    795  1.119   thorpej 	 * This also does not handle the case where its last
    796  1.119   thorpej 	 * CPU is running a higher-priority process, but every
    797  1.119   thorpej 	 * other CPU is running a lower-priority process.  There
    798  1.119   thorpej 	 * are ways to handle this situation, but they're not
    799  1.119   thorpej 	 * currently very pretty, and we also need to weigh the
    800  1.119   thorpej 	 * cost of moving a process from one CPU to another.
    801  1.119   thorpej 	 *
    802  1.119   thorpej 	 * XXXSMP
    803  1.119   thorpej 	 * There is also the issue of locking the other CPU's
    804  1.119   thorpej 	 * sched state, which we currently do not do.
    805  1.119   thorpej 	 */
    806  1.122   thorpej 	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
    807  1.121   thorpej 	if (pri < ci->ci_schedstate.spc_curpriority)
    808  1.174        ad 		cpu_need_resched(ci);
    809  1.119   thorpej }
    810  1.119   thorpej 
    811   1.26       cgd /*
    812  1.174        ad  * Change process state to be runnable, placing it on the run queue if it is
    813  1.174        ad  * in memory, and awakening the swapper if it isn't in memory.
    814  1.174        ad  *
    815  1.174        ad  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    816   1.26       cgd  */
    817   1.26       cgd void
    818  1.122   thorpej setrunnable(struct lwp *l)
    819   1.26       cgd {
    820  1.122   thorpej 	struct proc *p = l->l_proc;
    821  1.174        ad 	sigset_t *ss;
    822   1.26       cgd 
    823  1.183        ad 	KASSERT(mutex_owned(&p->p_smutex));
    824  1.183        ad 	KASSERT(lwp_locked(l, NULL));
    825   1.83   thorpej 
    826  1.122   thorpej 	switch (l->l_stat) {
    827  1.122   thorpej 	case LSSTOP:
    828   1.33   mycroft 		/*
    829   1.33   mycroft 		 * If we're being traced (possibly because someone attached us
    830   1.33   mycroft 		 * while we were stopped), check for a signal from the debugger.
    831   1.33   mycroft 		 */
    832  1.174        ad 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    833  1.174        ad 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    834  1.174        ad 				ss = &l->l_sigpend.sp_set;
    835  1.174        ad 			else
    836  1.174        ad 				ss = &p->p_sigpend.sp_set;
    837  1.174        ad 			sigaddset(ss, p->p_xstat);
    838  1.174        ad 			signotify(l);
    839   1.53   mycroft 		}
    840  1.174        ad 		p->p_nrlwps++;
    841   1.26       cgd 		break;
    842  1.174        ad 	case LSSUSPENDED:
    843  1.178     pavel 		l->l_flag &= ~LW_WSUSPEND;
    844  1.174        ad 		p->p_nrlwps++;
    845  1.122   thorpej 		break;
    846  1.174        ad 	case LSSLEEP:
    847  1.174        ad 		KASSERT(l->l_wchan != NULL);
    848   1.26       cgd 		break;
    849  1.174        ad 	default:
    850  1.174        ad 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    851   1.26       cgd 	}
    852  1.139        cl 
    853  1.174        ad 	/*
     854  1.174        ad 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    855  1.174        ad 	 * again.  If not, mark it as still sleeping.
    856  1.174        ad 	 */
    857  1.174        ad 	if (l->l_wchan != NULL) {
    858  1.174        ad 		l->l_stat = LSSLEEP;
    859  1.183        ad 		/* lwp_unsleep() will release the lock. */
    860  1.183        ad 		lwp_unsleep(l);
    861  1.174        ad 		return;
    862  1.174        ad 	}
    863  1.139        cl 
    864  1.174        ad 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
    865  1.122   thorpej 
    866  1.174        ad 	/*
    867  1.174        ad 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    868  1.174        ad 	 * about to call mi_switch(), in which case it will yield.
    869  1.174        ad 	 *
    870  1.174        ad 	 * XXXSMP Will need to change for preemption.
    871  1.174        ad 	 */
    872  1.174        ad #ifdef MULTIPROCESSOR
    873  1.174        ad 	if (l->l_cpu->ci_curlwp == l) {
    874  1.174        ad #else
    875  1.174        ad 	if (l == curlwp) {
    876  1.174        ad #endif
    877  1.174        ad 		l->l_stat = LSONPROC;
    878  1.174        ad 		l->l_slptime = 0;
    879  1.174        ad 		lwp_unlock(l);
    880  1.174        ad 		return;
    881  1.174        ad 	}
    882  1.122   thorpej 
    883  1.174        ad 	/*
    884  1.174        ad 	 * Set the LWP runnable.  If it's swapped out, we need to wake the swapper
    885  1.174        ad 	 * to bring it back in.  Otherwise, enter it into a run queue.
    886  1.174        ad 	 */
    887  1.122   thorpej 	if (l->l_slptime > 1)
    888  1.122   thorpej 		updatepri(l);
    889  1.174        ad 	l->l_stat = LSRUN;
    890  1.122   thorpej 	l->l_slptime = 0;
    891  1.174        ad 
    892  1.178     pavel 	if (l->l_flag & LW_INMEM) {
    893  1.174        ad 		setrunqueue(l);
    894  1.184      yamt 		resched_lwp(l);
    895  1.174        ad 		lwp_unlock(l);
    896  1.174        ad 	} else {
    897  1.174        ad 		lwp_unlock(l);
    898  1.177        ad 		uvm_kick_scheduler();
    899  1.174        ad 	}
    900   1.26       cgd }
    901   1.26       cgd 
    902   1.26       cgd /*
    903   1.26       cgd  * Compute the priority of a process when running in user mode.
    904   1.26       cgd  * Arrange to reschedule if the resulting priority is better
    905   1.26       cgd  * than that of the current process.
    906   1.26       cgd  */
    907   1.26       cgd void
    908  1.122   thorpej resetpriority(struct lwp *l)
    909   1.26       cgd {
    910  1.185      yamt 	pri_t newpriority;
    911  1.122   thorpej 	struct proc *p = l->l_proc;
    912   1.26       cgd 
    913  1.174        ad 	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
    914  1.174        ad 	LOCK_ASSERT(lwp_locked(l, NULL));
    915  1.174        ad 
    916  1.178     pavel 	if ((l->l_flag & LW_SYSTEM) != 0)
    917  1.174        ad 		return;
    918   1.83   thorpej 
    919  1.153      yamt 	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
    920  1.174        ad 	    NICE_WEIGHT * (p->p_nice - NZERO);
    921   1.26       cgd 	newpriority = min(newpriority, MAXPRI);
    922  1.174        ad 	lwp_changepri(l, newpriority);
    923  1.122   thorpej }
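
/*
 * For example, with p_estcpu == (16 << ESTCPU_SHIFT) and p_nice == NZERO,
 * resetpriority() computes PUSER + 16; each nice level above NZERO adds
 * another NICE_WEIGHT (2) to the value, and the result is clamped to
 * MAXPRI.
 */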
    924  1.122   thorpej 
    925  1.130   nathanw /*
    926  1.122   thorpej  * Recompute priority for all LWPs in a process.
    927  1.122   thorpej  */
    928  1.122   thorpej void
    929  1.122   thorpej resetprocpriority(struct proc *p)
    930  1.122   thorpej {
    931  1.122   thorpej 	struct lwp *l;
    932  1.122   thorpej 
    933  1.174        ad 	LOCK_ASSERT(mutex_owned(&p->p_stmutex));
    934  1.174        ad 
    935  1.174        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    936  1.174        ad 		lwp_lock(l);
    937  1.174        ad 		resetpriority(l);
    938  1.174        ad 		lwp_unlock(l);
    939  1.174        ad 	}
    940   1.55      ross }
    941   1.55      ross 
    942   1.55      ross /*
    943   1.56      ross  * We adjust the priority of the current process.  The priority of a process
    944  1.141       wiz  * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
    945   1.56      ross  * is increased here.  The formula for computing priorities (in kern_synch.c)
    946   1.56      ross  * will compute a different value each time p_estcpu increases. This can
    947   1.56      ross  * cause a switch, but unless the priority crosses a PPQ boundary the actual
    948  1.141       wiz  * queue will not change.  The CPU usage estimator ramps up quite quickly
    949   1.56      ross  * when the process is running (linearly), and decays away exponentially, at
    950   1.56      ross  * a rate which is proportionally slower when the system is busy.  The basic
    951   1.80   nathanw  * principle is that the system will 90% forget that the process used a lot
    952   1.56      ross  * of CPU time in 5 * loadav seconds.  This causes the system to favor
    953   1.56      ross  * processes which haven't run much recently, and to round-robin among other
    954   1.56      ross  * processes.
    955   1.55      ross  */
    956   1.55      ross 
    957   1.55      ross void
    958  1.122   thorpej schedclock(struct lwp *l)
    959   1.55      ross {
    960  1.122   thorpej 	struct proc *p = l->l_proc;
    961   1.77   thorpej 
    962  1.174        ad 	mutex_spin_enter(&p->p_stmutex);
    963  1.153      yamt 	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
    964  1.174        ad 	lwp_lock(l);
    965  1.122   thorpej 	resetpriority(l);
    966  1.174        ad 	mutex_spin_exit(&p->p_stmutex);
    967  1.178     pavel 	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
    968  1.122   thorpej 		l->l_priority = l->l_usrpri;
    969  1.174        ad 	lwp_unlock(l);
    970   1.26       cgd }
    971   1.94    bouyer 
    972  1.174        ad /*
    973  1.174        ad  * suspendsched:
    974  1.174        ad  *
     975  1.174        ad  *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    976  1.174        ad  */
    977   1.94    bouyer void
    978  1.174        ad suspendsched(void)
    979   1.94    bouyer {
    980  1.174        ad #ifdef MULTIPROCESSOR
    981  1.174        ad 	CPU_INFO_ITERATOR cii;
    982  1.174        ad 	struct cpu_info *ci;
    983  1.174        ad #endif
    984  1.122   thorpej 	struct lwp *l;
    985  1.174        ad 	struct proc *p;
    986   1.94    bouyer 
    987   1.94    bouyer 	/*
    988  1.174        ad 	 * We do this by process in order not to violate the locking rules.
    989   1.94    bouyer 	 */
    990  1.174        ad 	mutex_enter(&proclist_mutex);
    991  1.174        ad 	PROCLIST_FOREACH(p, &allproc) {
    992  1.174        ad 		mutex_enter(&p->p_smutex);
    993  1.174        ad 
    994  1.178     pavel 		if ((p->p_flag & PK_SYSTEM) != 0) {
    995  1.174        ad 			mutex_exit(&p->p_smutex);
    996   1.94    bouyer 			continue;
    997  1.174        ad 		}
    998  1.174        ad 
    999  1.174        ad 		p->p_stat = SSTOP;
   1000  1.174        ad 
   1001  1.174        ad 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1002  1.174        ad 			if (l == curlwp)
   1003  1.174        ad 				continue;
   1004  1.174        ad 
   1005  1.174        ad 			lwp_lock(l);
   1006  1.122   thorpej 
   1007   1.97     enami 			/*
    1008  1.174        ad 			 * Set LW_WREBOOT so that the LWP will suspend itself
    1009  1.174        ad 			 * when it tries to return to user mode.  We want to
    1010  1.174        ad 			 * get as many LWPs as possible to the user / kernel
    1011  1.174        ad 			 * boundary, so that they will release any locks
    1012  1.174        ad 			 * that they hold.
   1013   1.97     enami 			 */
   1014  1.178     pavel 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
   1015  1.174        ad 
   1016  1.174        ad 			if (l->l_stat == LSSLEEP &&
   1017  1.178     pavel 			    (l->l_flag & LW_SINTR) != 0) {
   1018  1.174        ad 				/* setrunnable() will release the lock. */
   1019  1.174        ad 				setrunnable(l);
   1020  1.174        ad 				continue;
   1021  1.174        ad 			}
   1022  1.174        ad 
   1023  1.174        ad 			lwp_unlock(l);
   1024   1.94    bouyer 		}
   1025  1.174        ad 
   1026  1.174        ad 		mutex_exit(&p->p_smutex);
   1027   1.94    bouyer 	}
   1028  1.174        ad 	mutex_exit(&proclist_mutex);
   1029  1.174        ad 
   1030  1.174        ad 	/*
   1031  1.174        ad 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
   1032  1.174        ad 	 * They'll trap into the kernel and suspend themselves in userret().
   1033  1.174        ad 	 */
   1034  1.174        ad 	sched_lock(0);
   1035  1.174        ad #ifdef MULTIPROCESSOR
   1036  1.174        ad 	for (CPU_INFO_FOREACH(cii, ci))
   1037  1.174        ad 		cpu_need_resched(ci);
   1038  1.174        ad #else
   1039  1.174        ad 	cpu_need_resched(curcpu());
   1040  1.174        ad #endif
   1041  1.174        ad 	sched_unlock(0);
   1042   1.94    bouyer }
   1043  1.113  gmcgarry 
   1044  1.113  gmcgarry /*
   1045  1.151      yamt  * scheduler_fork_hook:
   1046  1.151      yamt  *
   1047  1.151      yamt  *	Inherit the parent's scheduler history.
   1048  1.151      yamt  */
   1049  1.151      yamt void
   1050  1.151      yamt scheduler_fork_hook(struct proc *parent, struct proc *child)
   1051  1.151      yamt {
   1052  1.151      yamt 
   1053  1.174        ad 	LOCK_ASSERT(mutex_owned(&parent->p_smutex));
   1054  1.174        ad 
   1055  1.157      yamt 	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
   1056  1.157      yamt 	child->p_forktime = schedcpu_ticks;
   1057  1.151      yamt }
   1058  1.151      yamt 
   1059  1.151      yamt /*
   1060  1.151      yamt  * scheduler_wait_hook:
   1061  1.151      yamt  *
   1062  1.151      yamt  *	Chargeback parents for the sins of their children.
   1063  1.151      yamt  */
   1064  1.151      yamt void
   1065  1.151      yamt scheduler_wait_hook(struct proc *parent, struct proc *child)
   1066  1.151      yamt {
   1067  1.157      yamt 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
   1068  1.157      yamt 	fixpt_t estcpu;
   1069  1.151      yamt 
   1070  1.151      yamt 	/* XXX Only if parent != init?? */
   1071  1.157      yamt 
   1072  1.174        ad 	mutex_spin_enter(&parent->p_stmutex);
   1073  1.157      yamt 	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
   1074  1.157      yamt 	    schedcpu_ticks - child->p_forktime);
   1075  1.174        ad 	if (child->p_estcpu > estcpu)
   1076  1.157      yamt 		parent->p_estcpu =
   1077  1.157      yamt 		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
   1078  1.174        ad 	mutex_spin_exit(&parent->p_stmutex);
   1079  1.174        ad }
   1080  1.174        ad 
   1081  1.174        ad /*
   1082  1.174        ad  * sched_kpri:
   1083  1.174        ad  *
   1084  1.174        ad  *	Scale a priority level to a kernel priority level, usually
   1085  1.174        ad  *	for an LWP that is about to sleep.
   1086  1.174        ad  */
   1087  1.185      yamt pri_t
   1088  1.174        ad sched_kpri(struct lwp *l)
   1089  1.174        ad {
   1090  1.174        ad 	/*
   1091  1.174        ad 	 * Scale user priorities (127 -> 50) up to kernel priorities
   1092  1.174        ad 	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
   1093  1.174        ad 	 * for high priority kthreads.  Kernel priorities passed in
   1094  1.174        ad 	 * are left "as is".  XXX This is somewhat arbitrary.
   1095  1.174        ad 	 */
   1096  1.174        ad 	static const uint8_t kpri_tab[] = {
   1097  1.174        ad 		 0,   1,   2,   3,   4,   5,   6,   7,
   1098  1.174        ad 		 8,   9,  10,  11,  12,  13,  14,  15,
   1099  1.174        ad 		16,  17,  18,  19,  20,  21,  22,  23,
   1100  1.174        ad 		24,  25,  26,  27,  28,  29,  30,  31,
   1101  1.174        ad 		32,  33,  34,  35,  36,  37,  38,  39,
   1102  1.174        ad 		40,  41,  42,  43,  44,  45,  46,  47,
   1103  1.174        ad 		48,  49,   8,   8,   9,   9,  10,  10,
   1104  1.174        ad 		11,  11,  12,  12,  13,  14,  14,  15,
   1105  1.174        ad 		15,  16,  16,  17,  17,  18,  18,  19,
   1106  1.174        ad 		20,  20,  21,  21,  22,  22,  23,  23,
   1107  1.174        ad 		24,  24,  25,  26,  26,  27,  27,  28,
   1108  1.174        ad 		28,  29,  29,  30,  30,  31,  32,  32,
   1109  1.174        ad 		33,  33,  34,  34,  35,  35,  36,  36,
   1110  1.174        ad 		37,  38,  38,  39,  39,  40,  40,  41,
   1111  1.174        ad 		41,  42,  42,  43,  44,  44,  45,  45,
   1112  1.174        ad 		46,  46,  47,  47,  48,  48,  49,  49,
   1113  1.174        ad 	};
   1114  1.174        ad 
   1115  1.185      yamt 	return (pri_t)kpri_tab[l->l_usrpri];
   1116  1.174        ad }
   1117  1.174        ad 
   1118  1.174        ad /*
   1119  1.174        ad  * sched_unsleep:
   1120  1.174        ad  *
    1121  1.174        ad  *	This is called when the LWP has not been awoken normally but instead
   1122  1.174        ad  *	interrupted: for example, if the sleep timed out.  Because of this,
   1123  1.174        ad  *	it's not a valid action for running or idle LWPs.
   1124  1.174        ad  */
   1125  1.174        ad void
   1126  1.174        ad sched_unsleep(struct lwp *l)
   1127  1.174        ad {
   1128  1.174        ad 
   1129  1.174        ad 	lwp_unlock(l);
   1130  1.174        ad 	panic("sched_unsleep");
   1131  1.174        ad }
   1132  1.174        ad 
   1133  1.174        ad /*
   1134  1.174        ad  * sched_changepri:
   1135  1.174        ad  *
   1136  1.174        ad  *	Adjust the priority of an LWP.
   1137  1.174        ad  */
   1138  1.174        ad void
   1139  1.185      yamt sched_changepri(struct lwp *l, pri_t pri)
   1140  1.174        ad {
   1141  1.174        ad 
   1142  1.174        ad 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1143  1.174        ad 
   1144  1.174        ad 	l->l_usrpri = pri;
   1145  1.174        ad 	if (l->l_priority < PUSER)
   1146  1.174        ad 		return;
   1147  1.184      yamt 
   1148  1.184      yamt 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
   1149  1.174        ad 		l->l_priority = pri;
   1150  1.174        ad 		return;
   1151  1.157      yamt 	}
   1152  1.174        ad 
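                          	/*
                          	 * The LWP is on a run queue and the queue index depends on its
                          	 * priority, so remove it, update the priority, and re-insert it.
                          	 */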
   1153  1.174        ad 	remrunqueue(l);
   1154  1.174        ad 	l->l_priority = pri;
   1155  1.174        ad 	setrunqueue(l);
   1156  1.184      yamt 	resched_lwp(l);
   1157  1.184      yamt }
   1158  1.184      yamt 
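                          /*
                           * sched_lendpri:
                           *
                           *	Temporarily lend a priority to an LWP (recorded in
                           *	l_inheritedprio), moving it to the matching run queue if it
                           *	is currently queued.
                           */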
   1159  1.184      yamt void
   1160  1.185      yamt sched_lendpri(struct lwp *l, pri_t pri)
   1161  1.184      yamt {
   1162  1.184      yamt 
   1163  1.184      yamt 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1164  1.184      yamt 
   1165  1.184      yamt 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
   1166  1.184      yamt 		l->l_inheritedprio = pri;
   1167  1.184      yamt 		return;
   1168  1.184      yamt 	}
   1169  1.184      yamt 
   1170  1.184      yamt 	remrunqueue(l);
   1171  1.184      yamt 	l->l_inheritedprio = pri;
   1172  1.184      yamt 	setrunqueue(l);
   1173  1.184      yamt 	resched_lwp(l);
   1174  1.184      yamt }
   1175  1.184      yamt 
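                          /*
                           * syncobj_noowner:
                           *
                           *	Owner lookup for synchronization objects that have no owning
                           *	LWP; always returns NULL.
                           */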
   1176  1.184      yamt struct lwp *
   1177  1.184      yamt syncobj_noowner(wchan_t wchan)
   1178  1.184      yamt {
   1179  1.184      yamt 
   1180  1.184      yamt 	return NULL;
   1181  1.151      yamt }
   1182  1.151      yamt 
   1183  1.151      yamt /*
   1184  1.113  gmcgarry  * Low-level routines to access the run queue.  Optimised assembler
   1185  1.113  gmcgarry  * routines can override these.
   1186  1.113  gmcgarry  */
   1187  1.113  gmcgarry 
   1188  1.113  gmcgarry #ifndef __HAVE_MD_RUNQUEUE
   1189  1.115  nisimura 
   1190  1.130   nathanw /*
    1191  1.134      matt  * On some architectures, it's faster to use an MSB ordering for the priorities
   1192  1.134      matt  * than the traditional LSB ordering.
   1193  1.134      matt  */
   1194  1.134      matt #ifdef __HAVE_BIGENDIAN_BITOPS
   1195  1.134      matt #define	RQMASK(n) (0x80000000 >> (n))
   1196  1.134      matt #else
   1197  1.134      matt #define	RQMASK(n) (0x00000001 << (n))
   1198  1.134      matt #endif
   1199  1.134      matt 
   1200  1.134      matt /*
    1201  1.115  nisimura  * The primitives that manipulate the run queues.  sched_whichqs tells which
    1202  1.115  nisimura  * of the 32 queues sched_qs have LWPs in them.  setrunqueue() puts LWPs
    1203  1.115  nisimura  * into queues, remrunqueue() removes them from queues.  The running LWP is
    1204  1.115  nisimura  * on no queue; other runnable LWPs are on the queue indexed by their
    1205  1.115  nisimura  * effective priority divided by PPQ (4), which maps the 0-127 priority
    1206  1.115  nisimura  * range onto the 32 available queues.
   1207  1.130   nathanw  */
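                          /*
                           * For example, an LWP with an effective priority of 50 is placed on
                           * sched_qs[50 / PPQ] == sched_qs[12], and RQMASK(12) is set in
                           * sched_whichqs while that queue is non-empty.
                           */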
   1208  1.146      matt #ifdef RQDEBUG
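                          /*
                           * checkrunqueue:
                           *
                           *	Sanity check one run queue: every LWP on it must be LSRUN with
                           *	consistent forward/back links, the queue's bit in sched_whichqs
                           *	must reflect whether the queue is empty, and if l is non-NULL it
                           *	must be present on the queue.
                           */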
   1209  1.146      matt static void
   1210  1.146      matt checkrunqueue(int whichq, struct lwp *l)
   1211  1.146      matt {
   1212  1.146      matt 	const struct prochd * const rq = &sched_qs[whichq];
   1213  1.146      matt 	struct lwp *l2;
   1214  1.146      matt 	int found = 0;
   1215  1.146      matt 	int die = 0;
   1216  1.146      matt 	int empty = 1;
   1217  1.164  christos 	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
   1218  1.146      matt 		if (l2->l_stat != LSRUN) {
    1219  1.146      matt 			printf("checkrunqueue[%d]: lwp %p state (%d)"
   1220  1.146      matt 			    " != LSRUN\n", whichq, l2, l2->l_stat);
   1221  1.146      matt 		}
   1222  1.146      matt 		if (l2->l_back->l_forw != l2) {
   1223  1.146      matt 			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
   1224  1.146      matt 			    "corrupt %p\n", whichq, l2, l2->l_back,
   1225  1.146      matt 			    l2->l_back->l_forw);
   1226  1.146      matt 			die = 1;
   1227  1.146      matt 		}
   1228  1.146      matt 		if (l2->l_forw->l_back != l2) {
   1229  1.146      matt 			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
   1230  1.146      matt 			    "corrupt %p\n", whichq, l2, l2->l_forw,
   1231  1.146      matt 			    l2->l_forw->l_back);
   1232  1.146      matt 			die = 1;
   1233  1.146      matt 		}
   1234  1.146      matt 		if (l2 == l)
   1235  1.146      matt 			found = 1;
   1236  1.146      matt 		empty = 0;
   1237  1.146      matt 	}
   1238  1.146      matt 	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
   1239  1.146      matt 		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
   1240  1.146      matt 		    whichq, rq);
   1241  1.146      matt 		die = 1;
   1242  1.146      matt 	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
   1243  1.146      matt 		printf("checkrunqueue[%d]: bit clear for non-empty "
   1244  1.146      matt 		    "run-queue %p\n", whichq, rq);
   1245  1.146      matt 		die = 1;
   1246  1.146      matt 	}
   1247  1.146      matt 	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
   1248  1.146      matt 		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
   1249  1.146      matt 		    whichq, l);
   1250  1.146      matt 		die = 1;
   1251  1.146      matt 	}
   1252  1.146      matt 	if (l != NULL && empty) {
   1253  1.146      matt 		printf("checkrunqueue[%d]: empty run-queue %p with "
   1254  1.146      matt 		    "active lwp %p\n", whichq, rq, l);
   1255  1.146      matt 		die = 1;
   1256  1.146      matt 	}
   1257  1.146      matt 	if (l != NULL && !found) {
    1258  1.146      matt 		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
   1259  1.146      matt 		    whichq, l, rq);
   1260  1.146      matt 		die = 1;
   1261  1.146      matt 	}
   1262  1.146      matt 	if (die)
   1263  1.146      matt 		panic("checkrunqueue: inconsistency found");
   1264  1.146      matt }
   1265  1.146      matt #endif /* RQDEBUG */
   1266  1.146      matt 
   1267  1.113  gmcgarry void
   1268  1.122   thorpej setrunqueue(struct lwp *l)
   1269  1.113  gmcgarry {
   1270  1.113  gmcgarry 	struct prochd *rq;
   1271  1.122   thorpej 	struct lwp *prev;
   1272  1.184      yamt 	const int whichq = lwp_eprio(l) / PPQ;
   1273  1.113  gmcgarry 
   1274  1.174        ad 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1275  1.174        ad 
   1276  1.146      matt #ifdef RQDEBUG
   1277  1.146      matt 	checkrunqueue(whichq, NULL);
   1278  1.146      matt #endif
   1279  1.113  gmcgarry #ifdef DIAGNOSTIC
   1280  1.174        ad 	if (l->l_back != NULL || l->l_stat != LSRUN)
   1281  1.113  gmcgarry 		panic("setrunqueue");
   1282  1.113  gmcgarry #endif
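                          	/* Mark the queue non-empty and link the LWP onto its tail. */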
   1283  1.134      matt 	sched_whichqs |= RQMASK(whichq);
   1284  1.113  gmcgarry 	rq = &sched_qs[whichq];
   1285  1.113  gmcgarry 	prev = rq->ph_rlink;
   1286  1.122   thorpej 	l->l_forw = (struct lwp *)rq;
   1287  1.122   thorpej 	rq->ph_rlink = l;
   1288  1.122   thorpej 	prev->l_forw = l;
   1289  1.122   thorpej 	l->l_back = prev;
   1290  1.146      matt #ifdef RQDEBUG
   1291  1.146      matt 	checkrunqueue(whichq, l);
   1292  1.146      matt #endif
   1293  1.113  gmcgarry }
   1294  1.113  gmcgarry 
   1295  1.174        ad /*
   1296  1.174        ad  * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
    1297  1.174        ad  * the drop of the effective priority level from kernel to user needs to be
   1298  1.174        ad  * moved here from userret().  The assignment in userret() is currently
   1299  1.174        ad  * done unlocked.
   1300  1.174        ad  */
   1301  1.113  gmcgarry void
   1302  1.122   thorpej remrunqueue(struct lwp *l)
   1303  1.113  gmcgarry {
   1304  1.122   thorpej 	struct lwp *prev, *next;
   1305  1.184      yamt 	const int whichq = lwp_eprio(l) / PPQ;
   1306  1.174        ad 
   1307  1.174        ad 	LOCK_ASSERT(lwp_locked(l, &sched_mutex));
   1308  1.174        ad 
   1309  1.146      matt #ifdef RQDEBUG
   1310  1.146      matt 	checkrunqueue(whichq, l);
   1311  1.146      matt #endif
   1312  1.174        ad 
   1313  1.174        ad #if defined(DIAGNOSTIC)
   1314  1.174        ad 	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
   1315  1.174        ad 		/* Shouldn't happen - interrupts disabled. */
   1316  1.146      matt 		panic("remrunqueue: bit %d not set", whichq);
   1317  1.174        ad 	}
   1318  1.113  gmcgarry #endif
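                          	/*
                          	 * Unlink the LWP from the queue; if prev == next they are both
                          	 * the queue head and the queue is now empty, so clear its bit
                          	 * in sched_whichqs.
                          	 */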
   1319  1.122   thorpej 	prev = l->l_back;
   1320  1.122   thorpej 	l->l_back = NULL;
   1321  1.122   thorpej 	next = l->l_forw;
   1322  1.122   thorpej 	prev->l_forw = next;
   1323  1.122   thorpej 	next->l_back = prev;
   1324  1.113  gmcgarry 	if (prev == next)
   1325  1.134      matt 		sched_whichqs &= ~RQMASK(whichq);
   1326  1.146      matt #ifdef RQDEBUG
   1327  1.146      matt 	checkrunqueue(whichq, NULL);
   1328  1.146      matt #endif
   1329  1.113  gmcgarry }
   1330  1.113  gmcgarry 
   1331  1.134      matt #undef RQMASK
   1332  1.134      matt #endif /* !defined(__HAVE_MD_RUNQUEUE) */
   1333