/*	$NetBSD: kern_synch.c,v 1.77 2000/06/08 05:50:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once-a-second sleep address */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
__volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

void roundrobin(void *);
void schedcpu(void *);
void updatepri(struct proc *);
void endtsleep(void *);

__inline void awaken(struct proc *);

struct callout roundrobin_ch = CALLOUT_INITIALIZER;
struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force a context switch among equal-priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(void *arg)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	int s;

	if (curproc != NULL) {
		s = splstatclock();
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
		splx(s);
	}
	need_resched();
	callout_reset(&roundrobin_ch, hz / 10, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ... for -1 < x < 1;
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
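
#if 0
/*
 * Illustrative sanity check of the decay constant above; a minimal
 * userland sketch (never compiled into the kernel), not kernel code.
 * With loadavg == 1, loadfactor() yields 2*FSCALE, so decay_cpu()
 * multiplies by 2/3 per second; after 5 iterations roughly 13% of the
 * original p_estcpu remains, and per the power table above it takes
 * about 5.68 iterations to reach the 10% mark.  The SK_FSHIFT/SK_FSCALE
 * values mirror <sys/param.h> but are local assumptions of this sketch.
 */
#include <stdio.h>

#define	SK_FSHIFT	11
#define	SK_FSCALE	(1 << SK_FSHIFT)

int
main(void)
{
	long loadfac = 2 * SK_FSCALE;	/* loadfactor() of loadavg 1.0 */
	unsigned int cpu = 1000;	/* hypothetical starting p_estcpu x10 */
	int i;

	for (i = 0; i < 5; i++)		/* 5 * loadavg iterations */
		cpu = (loadfac * cpu) / (loadfac + SK_FSCALE);
	printf("%u\n", cpu);		/* prints 131, i.e. ~13% remains */
	return (0);
}
#endif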

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
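
#if 0
/*
 * Illustrative check of the "95% in 60 seconds" claim; a minimal
 * userland sketch, not kernel code.  Multiplying by ccpu == exp(-1/20)
 * once per second for 60 seconds leaves exp(-3) =~ 4.98% of the
 * original p_pctcpu, i.e. roughly 95% has decayed away.
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double pctcpu = 100.0;		/* hypothetical starting %CPU */
	int i;

	for (i = 0; i < 60; i++)
		pctcpu *= exp(-1.0 / 20.0);
	printf("%.2f\n", pctcpu);	/* prints 4.98 */
	return (0);
}
#endif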

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t) p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler_slock is held.  The
 * interlock will be locked before returning to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct proc *p = curproc;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
#if 0 /* XXXSMP */
	int dobiglock = (p->p_flag & P_BIGLOCK) != 0;
#endif

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	s = splhigh();			/* XXXSMP: SCHED_LOCK(s) */

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (p->p_stat != SONPROC)
		panic("ltsleep: p_stat %d != SONPROC", p->p_stat);
	if (p->p_back != NULL)
		panic("ltsleep: p_back != NULL");
#endif

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;

	if (timo)
		callout_reset(&p->p_tsleep_ch, timo, endtsleep, p);

	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan != NULL)
				unsleep(p);
			p->p_stat = SONPROC;
#if 0 /* XXXSMP */
			/*
			 * We're going to skip the unlock, so
			 * we don't need to relock after resume.
			 */
			dobiglock = 0;
#endif
			goto resume;
		}
		if (p->p_wchan == NULL) {
			catch = 0;
#if 0 /* XXXSMP */
			/* See above. */
			dobiglock = 0;
#endif
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;

#if 0 /* XXXSMP */
	if (dobiglock) {
		/*
		 * Release the kernel_lock, as we are about to
		 * yield the CPU.  The scheduler_slock is still
		 * held until cpu_switch() selects a new process
		 * and removes it from the run queue.
		 */
		kernel_lock_release();
	}
#endif

	/* scheduler_slock held */
	mi_switch(p);
	/* scheduler_slock held */
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif

 resume:
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	splx(s);			/* XXXSMP: SCHED_UNLOCK(s) */
#if 0 /* XXXSMP */
	if (dobiglock) {
		/*
		 * Reacquire the kernel_lock now.  We do this after
		 * we've released scheduler_slock to avoid deadlock.
		 */
		kernel_lock_acquire(LK_EXCLUSIVE);
	}
#endif
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&p->p_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((p->p_sigacts->ps_sigact[sig].sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}
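
#if 0
/*
 * Illustrative caller pattern for ltsleep() (not compiled): a typical
 * interlocked wait loop.  The "sc" softc, its sc_slock/sc_busy members
 * and the "xxbusy" wait message are hypothetical names for this sketch,
 * not part of this file.  Because PNORELOCK is not passed, the interlock
 * is reacquired before ltsleep() returns, so the loop can safely retest
 * the condition.
 */
	int error;

	simple_lock(&sc->sc_slock);
	while (sc->sc_busy) {
		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "xxbusy",
		    0, &sc->sc_slock);
		if (error != 0) {
			/* EINTR or ERESTART; interlock is held here. */
			simple_unlock(&sc->sc_slock);
			return (error);
		}
	}
	sc->sc_busy = 1;
	simple_unlock(&sc->sc_slock);
#endif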

/*
 * Implement the timeout for tsleep.
 * If the process hasn't been awakened (wchan non-zero),
 * set the timeout flag and undo the sleep.  If the process
 * is stopped, just unsleep it so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct proc *p)
{
	struct slpque *qp;
	struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = SLPQUE(p->p_wchan))->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct proc *p)
{

	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	p->p_stat = SRUN;

	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority.
	 */
	if (p->p_flag & P_INMEM) {
		setrunqueue(p);
		need_resched();
	} else
		wakeup(&proc0);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;
	int s;

	s = splhigh();			/* XXXSMP: SCHED_LOCK(s) */

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				awaken(p);
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
	splx(s);			/* XXXSMP: SCHED_UNLOCK(s) */
}

/*
 * Make the highest-priority process sleeping on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;
	struct proc *best_sleepp, **best_sleepq;
	struct proc *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	s = splhigh();		/* XXXSMP: SCHED_LOCK(s) */

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (p = *q) != NULL; q = &p->p_forw) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			if (p->p_stat == SSLEEP) {
				if (best_sleepp == NULL ||
				    p->p_priority < best_sleepp->p_priority) {
					best_sleepp = p;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    p->p_priority < best_stopp->p_priority) {
					best_stopp = p;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher priority than the
	 * highest-priority SSTOP process.
	 */
	if (best_sleepp != NULL) {
		p = best_sleepp;
		q = best_sleepq;
	} else {
		p = best_stopp;
		q = best_stopq;
	}

	if (p != NULL) {
		p->p_wchan = NULL;
		*q = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = q;
		if (p->p_stat == SSLEEP)
			awaken(p);
	}
	splx(s);		/* XXXSMP: SCHED_UNLOCK(s) */
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	s = splstatclock();
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch(p);
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	/*
	 * XXX Switching to a specific process is not supported yet.
	 */
	if (newp != NULL)
		panic("preempt: cpu_preempt not yet implemented");

	s = splstatclock();
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch(p);
	splx(s);
}

/*
 * The machine-independent parts of context switch.
 * Must be called at splstatclock() or higher.
 */
void
mi_switch(struct proc *p)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;

	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());

	spc = &p->p_cpu->ci_schedstate;

#ifdef DEBUG
	if (p->p_simple_locks) {
		printf("p->p_simple_locks %d\n", p->p_simple_locks);
#ifdef LOCKDEBUG
		simple_lock_dump();
#endif
		panic("sleep: holding simple lock");
	}
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over the maximum, kill it.  In any case, if it has run
	 * for longer than the autonice time, reduce its priority to
	 * give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max)
			psignal(p, SIGKILL);
		else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(p);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	/*
	 * Pick a new current process and switch to it.  When we
	 * run again, we'll return here.
	 */
	uvmexp.swtch++;
	cpu_switch(p);

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	microtime(&p->p_cpu->ci_schedstate.spc_runtime);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct proc *)&sched_qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SZOMB:
	case SDEAD:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_siglist, p->p_xstat);
			p->p_sigcheck = 1;
		}
		/* FALLTHROUGH */
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_priority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * This is wrong.  It will work, but what really
		 * needs to happen is:
		 *
		 *	- Need to check if p is higher priority
		 *	  than the process currently running on
		 *	  the CPU p last ran on (let p_cpu persist
		 *	  after a context switch?), and preempt
		 *	  that one (or, if there is no process
		 *	  there, simply need_resched() that CPU).
		 *
		 *	- Failing that, traverse a list of
		 *	  available CPUs and need_resched() the
		 *	  CPU with the lowest priority that's
		 *	  lower than p's.
		 */
		need_resched();
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * Same applies as in setrunnable() above.
		 */
		need_resched();
	}
}

/*
 * We adjust the priority of the current process.  The priority of a
 * process gets worse as it accumulates CPU time.  The CPU usage
 * estimator (p_estcpu) is increased here.  The formula for computing
 * priorities (in resetpriority() above) will compute a different value
 * each time p_estcpu increases.  This can cause a switch, but unless
 * the priority crosses a PPQ boundary the actual queue will not change.
 * The CPU usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among
 * other processes.
 */

void
schedclock(struct proc *p)
{

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
}
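
#if 0
/*
 * Illustrative walk-through of the priority arithmetic above; a minimal
 * userland sketch, not kernel code.  The SK_* constants below are
 * assumptions mirroring the historical <sys/param.h>/<sys/sched.h>
 * definitions; the authoritative values live in those headers.
 */
#include <stdio.h>

#define	SK_PUSER	50	/* base user priority */
#define	SK_NZERO	20	/* default nice value */
#define	SK_NICE_WEIGHT	2	/* priorities per nice step */
#define	SK_MAXPRI	127
#define	SK_PPQ		4	/* priorities per run queue */

int
main(void)
{
	unsigned int estcpu, newpriority;
	int nice = SK_NZERO;	/* an un-niced process */

	/*
	 * As p_estcpu accumulates (one unit per schedclock() call),
	 * the user priority degrades linearly until clamped at
	 * SK_MAXPRI; only every SK_PPQ steps does it cross into a
	 * new run queue, which is when schedcpu() actually requeues.
	 */
	for (estcpu = 0; estcpu <= 16; estcpu += 4) {
		newpriority = SK_PUSER + estcpu +
		    SK_NICE_WEIGHT * (nice - SK_NZERO);
		if (newpriority > SK_MAXPRI)
			newpriority = SK_MAXPRI;
		printf("estcpu %2u -> priority %3u (queue %u)\n",
		    estcpu, newpriority, newpriority / SK_PPQ);
	}
	return (0);
}
#endif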