/*	$NetBSD: kern_synch.c,v 1.101.2.30 2003/01/06 17:29:47 nathanw Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.101.2.30 2003/01/06 17:29:47 nathanw Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
__volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

__inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

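/*
 * For reference, rrticks itself is expected to be initialized from hz
 * by the MI clock start-up code (an assumption about where the
 * assignment lives, not something done in this file), conceptually:
 *
 *	rrticks = hz / 10;
 *
 * which gives the 100ms round-robin quantum described above.
 */
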
/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

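/*
 * A worked example of the fixed-point decay (a sketch assuming the
 * standard FSHIFT == 11, i.e. FSCALE == 2048, from <sys/param.h>):
 *
 *	load average 2.00  ->  ldavg[0] == 2 * FSCALE == 4096
 *	loadfac == loadfactor(4096) == 8192
 *	decay_cpu(8192, 100) == (8192 * 100) / (8192 + 2048) == 80
 *
 * i.e. with a load average of 2, p_estcpu decays to 80% of its previous
 * value each second, matching decay = (2*2) / (2*2 + 1) = 0.8.
 */
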
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11

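/*
 * For reference, a consumer such as ps(1) would convert the fixed-point
 * p_pctcpu to a display percentage along these lines (a sketch, not the
 * actual ps(1) source):
 *
 *	double pct = 100.0 * (double)p->p_pctcpu / FSCALE;
 */
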
/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, s1, minslp;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
			<< (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		SCHED_LOCK(s1);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_slptime > 1)
				continue;
			resetpriority(l);
			if (l->l_priority >= PUSER) {
				if (l->l_stat == LSRUN &&
				    (l->l_flag & L_INMEM) &&
				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
					remrunqueue(l);
					l->l_priority = l->l_usrpri;
					setrunqueue(l);
				} else
					l->l_priority = l->l_usrpri;
			}
		}
		SCHED_UNLOCK(s1);
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	unsigned int newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if (l->l_slptime > 5 * loadfac)
		p->p_estcpu = 0; /* XXX NJWLWP */
	else {
		l->l_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --l->l_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until sched_lock is acquired.  The interlock
 * will be locked before returning back to the caller unless the
 * PNORELOCK flag is specified, in which case the interlock will always
 * be unlocked upon return.
 */
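/*
 * A sketch of typical usage (sc, sc_lock, sc_flags and SC_READY are
 * hypothetical names, not part of this interface):
 *
 *	int error;
 *
 *	simple_lock(&sc->sc_lock);
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = ltsleep(&sc->sc_flags, PRIBIO | PCATCH,
 *		    "scwait", hz, &sc->sc_lock);
 *		if (error)	/* EINTR, ERESTART or EWOULDBLOCK */
 *			break;
 *	}
 *	simple_unlock(&sc->sc_lock);
 *
 * The interlock is dropped while asleep and, since PNORELOCK is not
 * given, reacquired before ltsleep() returns.
 */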
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	struct proc *p;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curlwp == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	/* Don't look up l->l_proc until we know l is non-NULL. */
	KASSERT(l != NULL);
	p = l->l_proc;
	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else
		*qp->sq_tailp = l;
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; sched_lock is held, so a
	 * thread can't get in to do wakeup() before we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as LSSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, l->l_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if ((sig = CURSIG(l)) != 0) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#if	defined(DDB) && !defined(GPROF)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep ; bpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~L_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(l)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
	/*
	 * XXXNJW: this is very much a kluge; revisit.  A better way of
	 * preventing looping/hanging syscalls like wait4() and
	 * _lwp_wait() from wedging an exiting process would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && exiterr))
		return (EINTR);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If the LWP hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If the LWP
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove an LWP from its wait queue.
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, l->l_priority
	 * is always better than curpriority on the last CPU on
	 * which it ran.
	 *
	 * XXXSMP See affinity comment in resched_proc().
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		if (l->l_flag & L_SA)
			l->l_proc->p_sa->sa_woken = l;
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

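/*
 * sched_unlock_idle() and sched_lock_idle() exist for the MD idle loop /
 * cpu_switch() path, which (presumably; this is an assumption about the
 * MD callers, not something visible in this file) must drop and retake
 * sched_lock while spinning for work.
 */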
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all LWPs sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make the highest priority LWP first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any LSSLEEP LWP higher than the highest priority
	 * LSSTOP LWP.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.  If an LWP is supplied,
 * we switch to that LWP.  Otherwise, we use the normal LWP selection
 * criteria.
 */
void
preempt(struct lwp *newl)
{
	struct lwp *l = curlwp;
	int r, s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nivcsw++;
	r = mi_switch(l, newl);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/*
	 * If we are a scheduler-activations LWP and actually yielded the
	 * CPU (mi_switch() returned non-zero), notify the userland
	 * scheduler of the preemption.
	 */
	if ((l->l_flag & L_SA) != 0 && r != 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "newl" if non-NULL, otherwise let cpu_switch()
 * choose the next LWP.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
#if defined(MULTIPROCESSOR)
	int hold_count;
#endif
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

#if defined(MULTIPROCESSOR)
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new LWP and removes it from the run queue.
	 */
	if (l->l_flag & L_BIGLOCK)
		hold_count = spinlock_release_all(&kernel_lock);
#endif

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(p);
#endif

	/*
	 * If we are using h/w performance counters, save context.
	 */
#if defined(PERFCTRS)
	if (PMC_ENABLED(p))
		pmc_save_context(p);
#endif

	/*
	 * Switch to the new current LWP.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (newl == NULL) {
		retval = cpu_switch(l, NULL);
	} else {
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * If we are using h/w performance counters, restore context.
	 */
#if defined(PERFCTRS)
	if (PMC_ENABLED(p))
		pmc_restore_context(p);
#endif

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

#if defined(MULTIPROCESSOR)
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	if (l->l_flag & L_BIGLOCK)
		spinlock_acquire_count(&kernel_lock, hold_count);
#endif

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}

static __inline void
resched_proc(struct lwp *l)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (l->l_priority < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change an LWP's state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable");
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
		/* FALLTHROUGH */
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}
	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else
		resched_proc(l);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	resched_proc(l);
}

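/*
 * For example, assuming NICE_WEIGHT == 2 and PUSER == 50 (the usual
 * values in <sys/sched.h> and <sys/param.h>; check before relying on
 * them), a process with p_estcpu == 12 and p_nice == NZERO + 4 gets
 *
 *	l_usrpri = 50 + 12 + 2 * 4 == 70
 *
 * clamped to MAXPRI; larger numeric values mean weaker priority.
 */
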
/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (see
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}

void
suspendsched(void)
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN LWPs to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	LIST_FOREACH(l, &alllwp, l_list) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with LWPs running
			 * on other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues in qs have processes in them.  Setrunqueue puts
 * processes into queues, remrunqueue removes them from queues.  The
 * running process is on no queue; other processes are on a queue
 * related to l->l_priority, divided by 4 to shrink the 0-127 range of
 * priorities into the 32 available queues.
 */

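/*
 * For example (with the usual 0-127 priority range):
 *
 *	l_priority   0..3    -> queue  0 (best)
 *	l_priority  48..51   -> queue 12
 *	l_priority 124..127  -> queue 31 (worst)
 *
 * A bit in sched_whichqs is set exactly when the corresponding queue is
 * non-empty, so cpu_switch() can locate the best non-empty queue with a
 * find-first-set style instruction.
 */
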
void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	int whichq;

#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	whichq = l->l_priority / 4;
	sched_whichqs |= (1 << whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	int whichq;

	whichq = l->l_priority / 4;
#ifdef DIAGNOSTIC
	if ((sched_whichqs & (1 << whichq)) == 0)
		panic("remrunqueue");
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~(1 << whichq);
}

#endif /* !__HAVE_MD_RUNQUEUE */