      1 /*	$NetBSD: kern_synch.c,v 1.205 2007/11/06 17:57:46 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  * Daniel Sieger.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the NetBSD
     23  *	Foundation, Inc. and its contributors.
     24  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25  *    contributors may be used to endorse or promote products derived
     26  *    from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38  * POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 
     41 /*-
     42  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     43  *	The Regents of the University of California.  All rights reserved.
     44  * (c) UNIX System Laboratories, Inc.
     45  * All or some portions of this file are derived from material licensed
     46  * to the University of California by American Telephone and Telegraph
     47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     48  * the permission of UNIX System Laboratories, Inc.
     49  *
     50  * Redistribution and use in source and binary forms, with or without
     51  * modification, are permitted provided that the following conditions
     52  * are met:
     53  * 1. Redistributions of source code must retain the above copyright
     54  *    notice, this list of conditions and the following disclaimer.
     55  * 2. Redistributions in binary form must reproduce the above copyright
     56  *    notice, this list of conditions and the following disclaimer in the
     57  *    documentation and/or other materials provided with the distribution.
     58  * 3. Neither the name of the University nor the names of its contributors
     59  *    may be used to endorse or promote products derived from this software
     60  *    without specific prior written permission.
     61  *
     62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     72  * SUCH DAMAGE.
     73  *
     74  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
     75  */
     76 
     77 #include <sys/cdefs.h>
     78 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.205 2007/11/06 17:57:46 ad Exp $");
     79 
     80 #include "opt_kstack.h"
     81 #include "opt_lockdebug.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #define	__MUTEX_PRIVATE
     86 
     87 #include <sys/param.h>
     88 #include <sys/systm.h>
     89 #include <sys/proc.h>
     90 #include <sys/kernel.h>
     91 #if defined(PERFCTRS)
     92 #include <sys/pmc.h>
     93 #endif
     94 #include <sys/cpu.h>
     95 #include <sys/resourcevar.h>
     96 #include <sys/sched.h>
     97 #include <sys/syscall_stats.h>
     98 #include <sys/sleepq.h>
     99 #include <sys/lockdebug.h>
    100 #include <sys/evcnt.h>
    101 #include <sys/intr.h>
    102 
    103 #include <uvm/uvm_extern.h>
    104 
    105 callout_t sched_pstats_ch;
    106 unsigned int sched_pstats_ticks;
    107 
    108 kcondvar_t	lbolt;			/* once a second sleep address */
    109 
    110 static void	sched_unsleep(struct lwp *);
    111 static void	sched_changepri(struct lwp *, pri_t);
    112 static void	sched_lendpri(struct lwp *, pri_t);
    113 
    114 syncobj_t sleep_syncobj = {
    115 	SOBJ_SLEEPQ_SORTED,
    116 	sleepq_unsleep,
    117 	sleepq_changepri,
    118 	sleepq_lendpri,
    119 	syncobj_noowner,
    120 };
    121 
    122 syncobj_t sched_syncobj = {
    123 	SOBJ_SLEEPQ_SORTED,
    124 	sched_unsleep,
    125 	sched_changepri,
    126 	sched_lendpri,
    127 	syncobj_noowner,
    128 };
    129 
    130 /*
    131  * During autoconfiguration or after a panic, a sleep will simply lower the
    132  * priority briefly to allow interrupts, then return.  The priority to be
    133  * used (safepri) is machine-dependent, thus this value is initialized and
    134  * maintained in the machine-dependent layers.  This priority will typically
    135  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    136  * it can be made higher to block network software interrupts after panics.
    137  */
    138 int	safepri;
    139 
    140 /*
    141  * OBSOLETE INTERFACE
    142  *
    143  * General sleep call.  Suspends the current process until a wakeup is
    144  * performed on the specified identifier.  The process will then be made
    145  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
    146  * means no timeout).  If pri includes PCATCH flag, signals are checked
    147  * before and after sleeping, else signals are not checked.  Returns 0 if
    148  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
     149  * signal needs to be delivered: ERESTART is returned if the current system
     150  * call should be restarted if possible, and EINTR is returned if the
     151  * system call should be interrupted by the signal.
    152  *
     153  * The interlock is held until we are on a sleep queue.  It will be
     154  * reacquired before returning to the caller unless the PNORELOCK flag
     155  * is specified, in which case the interlock is always left unlocked
     156  * upon return.
    157  */
    158 int
    159 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    160 	volatile struct simplelock *interlock)
    161 {
    162 	struct lwp *l = curlwp;
    163 	sleepq_t *sq;
    164 	int error;
    165 
    166 	KASSERT((l->l_pflag & LP_INTR) == 0);
    167 
    168 	if (sleepq_dontsleep(l)) {
    169 		(void)sleepq_abort(NULL, 0);
    170 		if ((priority & PNORELOCK) != 0)
    171 			simple_unlock(interlock);
    172 		return 0;
    173 	}
    174 
    175 	l->l_kpriority = true;
    176 	sq = sleeptab_lookup(&sleeptab, ident);
    177 	sleepq_enter(sq, l);
    178 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    179 
    180 	if (interlock != NULL) {
    181 		KASSERT(simple_lock_held(interlock));
    182 		simple_unlock(interlock);
    183 	}
    184 
    185 	error = sleepq_block(timo, priority & PCATCH);
    186 
    187 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    188 		simple_lock(interlock);
    189 
    190 	return error;
    191 }
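
         /*
          * Example (illustrative): a typical caller of the obsolete
          * ltsleep()/wakeup() pair waits for a flag protected by a
          * simplelock; "sc", "sc_slock" and "sc_ready" are hypothetical
          * driver state, not part of this file.
          *
          *	simple_lock(&sc->sc_slock);
          *	while (!sc->sc_ready) {
          *		error = ltsleep(&sc->sc_ready, PCATCH, "scrdy", hz,
          *		    &sc->sc_slock);
          *		if (error != 0 && error != EWOULDBLOCK)
          *			break;		(interrupted by a signal)
          *	}
          *	simple_unlock(&sc->sc_slock);
          *
          * The producer sets sc_ready under the same simplelock and then
          * calls wakeup(&sc->sc_ready).  With EWOULDBLOCK treated as "keep
          * waiting", the hz timeout only bounds each individual sleep.
          */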
    192 
    193 int
    194 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    195 	kmutex_t *mtx)
    196 {
    197 	struct lwp *l = curlwp;
    198 	sleepq_t *sq;
    199 	int error;
    200 
    201 	KASSERT((l->l_pflag & LP_INTR) == 0);
    202 
    203 	if (sleepq_dontsleep(l)) {
    204 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    205 		return 0;
    206 	}
    207 
    208 	l->l_kpriority = true;
    209 	sq = sleeptab_lookup(&sleeptab, ident);
    210 	sleepq_enter(sq, l);
    211 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    212 	mutex_exit(mtx);
    213 	error = sleepq_block(timo, priority & PCATCH);
    214 
    215 	if ((priority & PNORELOCK) == 0)
    216 		mutex_enter(mtx);
    217 
    218 	return error;
    219 }
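
         /*
          * Example (illustrative): the usual mtsleep() pattern re-checks a
          * condition in a loop while holding the kmutex that protects it;
          * "sc", "sc_lock" and "sc_busy" are hypothetical driver state.
          *
          *	mutex_enter(&sc->sc_lock);
          *	while (sc->sc_busy) {
          *		error = mtsleep(&sc->sc_busy, PCATCH, "scbusy", 0,
          *		    &sc->sc_lock);
          *		if (error != 0)
          *			break;
          *	}
          *	mutex_exit(&sc->sc_lock);
          *
          * The mutex is dropped while the LWP sleeps and is reacquired
          * before mtsleep() returns, unless PNORELOCK is passed in the
          * priority argument.
          */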
    220 
    221 /*
    222  * General sleep call for situations where a wake-up is not expected.
    223  */
    224 int
    225 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    226 {
    227 	struct lwp *l = curlwp;
    228 	sleepq_t *sq;
    229 	int error;
    230 
    231 	if (sleepq_dontsleep(l))
    232 		return sleepq_abort(NULL, 0);
    233 
    234 	if (mtx != NULL)
    235 		mutex_exit(mtx);
    236 	l->l_kpriority = true;
    237 	sq = sleeptab_lookup(&sleeptab, l);
    238 	sleepq_enter(sq, l);
    239 	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
    240 	error = sleepq_block(timo, intr);
    241 	if (mtx != NULL)
    242 		mutex_enter(mtx);
    243 
    244 	return error;
    245 }
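
         /*
          * Example (illustrative): because no wakeup() is expected, kpause()
          * sleeps on the LWP itself and is used purely as a delay, e.g.
          *
          *	kpause("pause", false, hz / 10, NULL);
          *
          * sleeps for roughly 100ms without catching signals.  Passing a
          * kmutex_t as the last argument makes kpause() drop and reacquire
          * it around the sleep.
          */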
    246 
    247 /*
    248  * OBSOLETE INTERFACE
    249  *
    250  * Make all processes sleeping on the specified identifier runnable.
    251  */
    252 void
    253 wakeup(wchan_t ident)
    254 {
    255 	sleepq_t *sq;
    256 
    257 	if (cold)
    258 		return;
    259 
    260 	sq = sleeptab_lookup(&sleeptab, ident);
    261 	sleepq_wake(sq, ident, (u_int)-1);
    262 }
    263 
    264 /*
    265  * OBSOLETE INTERFACE
    266  *
     267  * Make the highest-priority process sleeping on the specified
     268  * identifier runnable.
    269  */
    270 void
    271 wakeup_one(wchan_t ident)
    272 {
    273 	sleepq_t *sq;
    274 
    275 	if (cold)
    276 		return;
    277 
    278 	sq = sleeptab_lookup(&sleeptab, ident);
    279 	sleepq_wake(sq, ident, 1);
    280 }
    281 
    282 
    283 /*
    284  * General yield call.  Puts the current process back on its run queue and
    285  * performs a voluntary context switch.  Should only be called when the
     286  * current process explicitly requests it (e.g. sched_yield(2)).
    287  */
    288 void
    289 yield(void)
    290 {
    291 	struct lwp *l = curlwp;
    292 
    293 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    294 	lwp_lock(l);
    295 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    296 	KASSERT(l->l_stat == LSONPROC);
    297 	l->l_kpriority = false;
    298 	if (l->l_class == SCHED_OTHER) {
    299 		/*
    300 		 * Only for timeshared threads.  It will be reset
    301 		 * by the scheduler in due course.
    302 		 */
    303 		l->l_priority = 0;
    304 	}
    305 	(void)mi_switch(l);
    306 	KERNEL_LOCK(l->l_biglocks, l);
    307 }
    308 
    309 /*
    310  * General preemption call.  Puts the current process back on its run queue
    311  * and performs an involuntary context switch.
    312  */
    313 void
    314 preempt(void)
    315 {
    316 	struct lwp *l = curlwp;
    317 
    318 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    319 	lwp_lock(l);
    320 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    321 	KASSERT(l->l_stat == LSONPROC);
    322 	l->l_kpriority = false;
    323 	l->l_nivcsw++;
    324 	(void)mi_switch(l);
    325 	KERNEL_LOCK(l->l_biglocks, l);
    326 }
    327 
    328 /*
    329  * Compute the amount of time during which the current lwp was running.
    330  *
    331  * - update l_rtime unless it's an idle lwp.
    332  */
    333 
    334 void
    335 updatertime(lwp_t *l, const struct timeval *tv)
    336 {
    337 	long s, u;
    338 
    339 	if ((l->l_flag & LW_IDLE) != 0)
    340 		return;
    341 
    342 	u = l->l_rtime.tv_usec + (tv->tv_usec - l->l_stime.tv_usec);
    343 	s = l->l_rtime.tv_sec + (tv->tv_sec - l->l_stime.tv_sec);
    344 	if (u < 0) {
    345 		u += 1000000;
    346 		s--;
    347 	} else if (u >= 1000000) {
    348 		u -= 1000000;
    349 		s++;
    350 	}
    351 	l->l_rtime.tv_usec = u;
    352 	l->l_rtime.tv_sec = s;
    353 }
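
         /*
          * Worked example (illustrative): if l_rtime is { 2, 900000 } and the
          * interval since l_stime is 0.300000s, then u = 900000 + 300000 =
          * 1200000 >= 1000000, so the microseconds carry into the seconds
          * field and l_rtime becomes { 3, 200000 }.
          */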
    354 
    355 /*
    356  * The machine independent parts of context switch.
    357  *
    358  * Returns 1 if another LWP was actually run.
    359  */
    360 int
    361 mi_switch(lwp_t *l)
    362 {
    363 	struct schedstate_percpu *spc;
    364 	struct lwp *newl;
    365 	int retval, oldspl;
    366 	struct cpu_info *ci;
    367 	struct timeval tv;
    368 	bool returning;
    369 
    370 	KASSERT(lwp_locked(l, NULL));
    371 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    372 
    373 #ifdef KSTACK_CHECK_MAGIC
    374 	kstack_check_magic(l);
    375 #endif
    376 
    377 	microtime(&tv);
    378 
    379 	/*
     380 	 * It's safe to read the per-CPU schedstate unlocked here, as all we
     381 	 * are after is the run time, and that's guaranteed to have been last
    382 	 * updated by this CPU.
    383 	 */
    384 	ci = l->l_cpu;
    385 	KDASSERT(ci == curcpu());
    386 
    387 	/*
    388 	 * Process is about to yield the CPU; clear the appropriate
    389 	 * scheduling flags.
    390 	 */
    391 	spc = &ci->ci_schedstate;
    392 	returning = false;
    393 	newl = NULL;
    394 
    395 	/*
    396 	 * If we have been asked to switch to a specific LWP, then there
    397 	 * is no need to inspect the run queues.  If a soft interrupt is
    398 	 * blocking, then return to the interrupted thread without adjusting
    399 	 * VM context or its start time: neither have been changed in order
    400 	 * to take the interrupt.
    401 	 */
    402 	if (l->l_switchto != NULL) {
    403 		if ((l->l_pflag & LP_INTR) != 0) {
    404 			returning = true;
    405 			softint_block(l);
    406 			if ((l->l_flag & LW_TIMEINTR) != 0)
    407 				updatertime(l, &tv);
    408 		}
    409 		newl = l->l_switchto;
    410 		l->l_switchto = NULL;
    411 	}
    412 #ifndef __HAVE_FAST_SOFTINTS
    413 	else if (ci->ci_data.cpu_softints != 0) {
    414 		/* There are pending soft interrupts, so pick one. */
    415 		newl = softint_picklwp();
    416 		newl->l_stat = LSONPROC;
    417 		newl->l_flag |= LW_RUNNING;
    418 	}
    419 #endif	/* !__HAVE_FAST_SOFTINTS */
    420 
    421 	/* Count time spent in current system call */
    422 	if (!returning) {
    423 		SYSCALL_TIME_SLEEP(l);
    424 
    425 		/*
    426 		 * XXXSMP If we are using h/w performance counters,
    427 		 * save context.
    428 		 */
    429 #if PERFCTRS
    430 		if (PMC_ENABLED(l->l_proc)) {
    431 			pmc_save_context(l->l_proc);
    432 		}
    433 #endif
    434 		updatertime(l, &tv);
    435 	}
    436 
    437 	/*
     438 	 * If we are still on the CPU and have gotten this far, then we must yield.
    439 	 */
    440 	mutex_spin_enter(spc->spc_mutex);
    441 	KASSERT(l->l_stat != LSRUN);
    442 	if (l->l_stat == LSONPROC && l != newl) {
    443 		KASSERT(lwp_locked(l, &spc->spc_lwplock));
    444 		if ((l->l_flag & LW_IDLE) == 0) {
    445 			l->l_stat = LSRUN;
    446 			lwp_setlock(l, spc->spc_mutex);
    447 			sched_enqueue(l, true);
    448 		} else
    449 			l->l_stat = LSIDL;
    450 	}
    451 
    452 	/*
     453 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
     454 	 * If no LWP is runnable, switch to the idle LWP.
     455 	 * Note that spc_lwplock might not necessarily be held.
    456 	 */
    457 	if (newl == NULL) {
    458 		newl = sched_nextlwp();
    459 		if (newl != NULL) {
    460 			sched_dequeue(newl);
    461 			KASSERT(lwp_locked(newl, spc->spc_mutex));
    462 			newl->l_stat = LSONPROC;
    463 			newl->l_cpu = ci;
    464 			newl->l_flag |= LW_RUNNING;
    465 			lwp_setlock(newl, &spc->spc_lwplock);
    466 		} else {
    467 			newl = ci->ci_data.cpu_idlelwp;
    468 			newl->l_stat = LSONPROC;
    469 			newl->l_flag |= LW_RUNNING;
    470 		}
    471 		/*
    472 		 * Only clear want_resched if there are no
    473 		 * pending (slow) software interrupts.
    474 		 */
    475 		ci->ci_want_resched = ci->ci_data.cpu_softints;
    476 		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    477 		spc->spc_curpriority = lwp_eprio(newl);
    478 	}
    479 
    480 	/* Items that must be updated with the CPU locked. */
    481 	if (!returning) {
    482 		/* Update the new LWP's start time. */
    483 		newl->l_stime = tv;
    484 
    485 		/*
    486 		 * ci_curlwp changes when a fast soft interrupt occurs.
    487 		 * We use cpu_onproc to keep track of which kernel or
    488 		 * user thread is running 'underneath' the software
    489 		 * interrupt.  This is important for time accounting,
    490 		 * itimers and forcing user threads to preempt (aston).
    491 		 */
    492 		ci->ci_data.cpu_onproc = newl;
    493 	}
    494 
    495 	if (l != newl) {
    496 		struct lwp *prevlwp;
    497 
    498 		/*
    499 		 * If the old LWP has been moved to a run queue above,
    500 		 * drop the general purpose LWP lock: it's now locked
    501 		 * by the scheduler lock.
    502 		 *
    503 		 * Otherwise, drop the scheduler lock.  We're done with
    504 		 * the run queues for now.
    505 		 */
    506 		if (l->l_mutex == spc->spc_mutex) {
    507 			mutex_spin_exit(&spc->spc_lwplock);
    508 		} else {
    509 			mutex_spin_exit(spc->spc_mutex);
    510 		}
    511 
    512 		/* Unlocked, but for statistics only. */
    513 		uvmexp.swtch++;
    514 
    515 		/*
    516 		 * Save old VM context, unless a soft interrupt
    517 		 * handler is blocking.
    518 		 */
    519 		if (!returning)
    520 			pmap_deactivate(l);
    521 
    522 		/* Switch to the new LWP.. */
    523 		l->l_ncsw++;
    524 		l->l_flag &= ~LW_RUNNING;
    525 		oldspl = MUTEX_SPIN_OLDSPL(ci);
    526 		prevlwp = cpu_switchto(l, newl, returning);
    527 		/*
    528 		 * .. we have switched away and are now back so we must
    529 		 * be the new curlwp.  prevlwp is who we replaced.
    530 		 */
    531 		if (prevlwp != NULL) {
    532 			curcpu()->ci_mtx_oldspl = oldspl;
    533 			lwp_unlock(prevlwp);
    534 		} else {
    535 			splx(oldspl);
    536 		}
    537 
    538 		/* Restore VM context. */
    539 		pmap_activate(l);
    540 		retval = 1;
    541 	} else {
    542 		/* Nothing to do - just unlock and return. */
    543 		mutex_spin_exit(spc->spc_mutex);
    544 		lwp_unlock(l);
    545 		retval = 0;
    546 	}
    547 
    548 	KASSERT(l == curlwp);
    549 	KASSERT(l->l_stat == LSONPROC);
    550 	KASSERT(l->l_cpu == curcpu());
    551 
    552 	/*
    553 	 * XXXSMP If we are using h/w performance counters, restore context.
    554 	 */
    555 #if PERFCTRS
    556 	if (PMC_ENABLED(l->l_proc)) {
    557 		pmc_restore_context(l->l_proc);
    558 	}
    559 #endif
    560 
    561 	/*
    562 	 * We're running again; record our new start time.  We might
    563 	 * be running on a new CPU now, so don't use the cached
    564 	 * schedstate_percpu pointer.
    565 	 */
    566 	SYSCALL_TIME_WAKEUP(l);
    567 	KASSERT(curlwp == l);
    568 	KDASSERT(l->l_cpu == curcpu());
    569 	LOCKDEBUG_BARRIER(NULL, 1);
    570 
    571 	return retval;
    572 }
    573 
    574 /*
    575  * Change process state to be runnable, placing it on the run queue if it is
    576  * in memory, and awakening the swapper if it isn't in memory.
    577  *
    578  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    579  */
    580 void
    581 setrunnable(struct lwp *l)
    582 {
    583 	struct proc *p = l->l_proc;
    584 	struct cpu_info *ci;
    585 	sigset_t *ss;
    586 
    587 	KASSERT((l->l_flag & LW_IDLE) == 0);
    588 	KASSERT(mutex_owned(&p->p_smutex));
    589 	KASSERT(lwp_locked(l, NULL));
    590 	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
    591 
    592 	switch (l->l_stat) {
    593 	case LSSTOP:
    594 		/*
    595 		 * If we're being traced (possibly because someone attached us
    596 		 * while we were stopped), check for a signal from the debugger.
    597 		 */
    598 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    599 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    600 				ss = &l->l_sigpend.sp_set;
    601 			else
    602 				ss = &p->p_sigpend.sp_set;
    603 			sigaddset(ss, p->p_xstat);
    604 			signotify(l);
    605 		}
    606 		p->p_nrlwps++;
    607 		break;
    608 	case LSSUSPENDED:
    609 		l->l_flag &= ~LW_WSUSPEND;
    610 		p->p_nrlwps++;
    611 		cv_broadcast(&p->p_lwpcv);
    612 		break;
    613 	case LSSLEEP:
    614 		KASSERT(l->l_wchan != NULL);
    615 		break;
    616 	default:
    617 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    618 	}
    619 
    620 	/*
     621 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    622 	 * again.  If not, mark it as still sleeping.
    623 	 */
    624 	if (l->l_wchan != NULL) {
    625 		l->l_stat = LSSLEEP;
    626 		/* lwp_unsleep() will release the lock. */
    627 		lwp_unsleep(l);
    628 		return;
    629 	}
    630 
    631 	/*
    632 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    633 	 * about to call mi_switch(), in which case it will yield.
    634 	 */
    635 	if ((l->l_flag & LW_RUNNING) != 0) {
    636 		l->l_stat = LSONPROC;
    637 		l->l_slptime = 0;
    638 		lwp_unlock(l);
    639 		return;
    640 	}
    641 
    642 	/*
    643 	 * Look for a CPU to run.
    644 	 * Set the LWP runnable.
    645 	 */
    646 	ci = sched_takecpu(l);
    647 	ci = l->l_cpu;
    648 	spc_lock(ci);
    649 	l->l_cpu = ci;
    650 	lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
    651 
    652 	sched_setrunnable(l);
    653 	l->l_stat = LSRUN;
    654 	l->l_slptime = 0;
    655 
    656 	/*
     657 	 * If the thread is swapped out, wake the swapper to bring it back in.
    658 	 * Otherwise, enter it into a run queue.
    659 	 */
    660 	if (l->l_flag & LW_INMEM) {
    661 		sched_enqueue(l, false);
    662 		resched_cpu(l);
    663 		lwp_unlock(l);
    664 	} else {
    665 		lwp_unlock(l);
    666 		uvm_kick_scheduler();
    667 	}
    668 }
    669 
    670 /*
    671  * suspendsched:
    672  *
    673  *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    674  */
    675 void
    676 suspendsched(void)
    677 {
    678 	CPU_INFO_ITERATOR cii;
    679 	struct cpu_info *ci;
    680 	struct lwp *l;
    681 	struct proc *p;
    682 
    683 	/*
    684 	 * We do this by process in order not to violate the locking rules.
    685 	 */
    686 	mutex_enter(&proclist_lock);
    687 	PROCLIST_FOREACH(p, &allproc) {
    688 		mutex_enter(&p->p_smutex);
    689 
    690 		if ((p->p_flag & PK_SYSTEM) != 0) {
    691 			mutex_exit(&p->p_smutex);
    692 			continue;
    693 		}
    694 
    695 		p->p_stat = SSTOP;
    696 
    697 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    698 			if (l == curlwp)
    699 				continue;
    700 
    701 			lwp_lock(l);
    702 
    703 			/*
     704 			 * Set LW_WREBOOT so that the LWP will suspend itself
     705 			 * when it tries to return to user mode.  We want to
     706 			 * get as many LWPs as possible to
    707 			 * the user / kernel boundary, so that they will
    708 			 * release any locks that they hold.
    709 			 */
    710 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
    711 
    712 			if (l->l_stat == LSSLEEP &&
    713 			    (l->l_flag & LW_SINTR) != 0) {
    714 				/* setrunnable() will release the lock. */
    715 				setrunnable(l);
    716 				continue;
    717 			}
    718 
    719 			lwp_unlock(l);
    720 		}
    721 
    722 		mutex_exit(&p->p_smutex);
    723 	}
    724 	mutex_exit(&proclist_lock);
    725 
    726 	/*
    727 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
    728 	 * They'll trap into the kernel and suspend themselves in userret().
    729 	 */
    730 	for (CPU_INFO_FOREACH(cii, ci)) {
    731 		spc_lock(ci);
    732 		cpu_need_resched(ci, RESCHED_IMMED);
    733 		spc_unlock(ci);
    734 	}
    735 }
    736 
    737 /*
    738  * sched_kpri:
    739  *
    740  *	Scale a priority level to a kernel priority level, usually
    741  *	for an LWP that is about to sleep.
    742  */
    743 pri_t
    744 sched_kpri(struct lwp *l)
    745 {
    746 	pri_t pri;
    747 
    748 #ifndef __HAVE_FAST_SOFTINTS
    749 	/*
    750 	 * Hack: if a user thread is being used to run a soft
    751 	 * interrupt, we need to boost the priority here.
    752 	 */
    753 	if ((l->l_pflag & LP_INTR) != 0 && l->l_priority < PRI_KERNEL_RT)
    754 		return softint_kpri(l);
    755 #endif
    756 
    757 	/*
    758 	 * Scale user priorities (0 -> 63) up to kernel priorities
    759 	 * in the range (64 -> 95).  This makes assumptions about
    760 	 * the priority space and so should be kept in sync with
    761 	 * param.h.
    762 	 */
    763 	if ((pri = l->l_priority) >= PRI_KERNEL)
    764 		return pri;
    765 	return (pri >> 1) + PRI_KERNEL;
    766 }
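
         /*
          * Worked example: assuming PRI_KERNEL is 64, as the comment above
          * implies, a user priority of 0 maps to (0 >> 1) + 64 == 64 and a
          * user priority of 63 maps to (63 >> 1) + 64 == 95, compressing the
          * user range 0..63 into the kernel range 64..95.
          */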
    767 
    768 /*
    769  * sched_unsleep:
    770  *
     771  *	This is called when the LWP has not been awoken normally but instead
    772  *	interrupted: for example, if the sleep timed out.  Because of this,
    773  *	it's not a valid action for running or idle LWPs.
    774  */
    775 static void
    776 sched_unsleep(struct lwp *l)
    777 {
    778 
    779 	lwp_unlock(l);
    780 	panic("sched_unsleep");
    781 }
    782 
    783 void
    784 resched_cpu(struct lwp *l)
    785 {
    786 	struct cpu_info *ci;
    787 
    788 	/*
    789 	 * XXXSMP
    790 	 * Since l->l_cpu persists across a context switch,
    791 	 * this gives us *very weak* processor affinity, in
    792 	 * that we notify the CPU on which the process last
    793 	 * ran that it should try to switch.
    794 	 *
    795 	 * This does not guarantee that the process will run on
    796 	 * that processor next, because another processor might
    797 	 * grab it the next time it performs a context switch.
    798 	 *
    799 	 * This also does not handle the case where its last
    800 	 * CPU is running a higher-priority process, but every
    801 	 * other CPU is running a lower-priority process.  There
    802 	 * are ways to handle this situation, but they're not
    803 	 * currently very pretty, and we also need to weigh the
    804 	 * cost of moving a process from one CPU to another.
    805 	 */
    806 	ci = l->l_cpu;
    807 	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
    808 		cpu_need_resched(ci, 0);
    809 }
    810 
    811 static void
    812 sched_changepri(struct lwp *l, pri_t pri)
    813 {
    814 
    815 	KASSERT(lwp_locked(l, NULL));
    816 
    817 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
    818 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    819 		sched_dequeue(l);
    820 		l->l_priority = pri;
    821 		sched_enqueue(l, false);
    822 	} else {
    823 		l->l_priority = pri;
    824 	}
    825 	resched_cpu(l);
    826 }
    827 
    828 static void
    829 sched_lendpri(struct lwp *l, pri_t pri)
    830 {
    831 
    832 	KASSERT(lwp_locked(l, NULL));
    833 
    834 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
    835 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    836 		sched_dequeue(l);
    837 		l->l_inheritedprio = pri;
    838 		sched_enqueue(l, false);
    839 	} else {
    840 		l->l_inheritedprio = pri;
    841 	}
    842 	resched_cpu(l);
    843 }
    844 
    845 struct lwp *
    846 syncobj_noowner(wchan_t wchan)
    847 {
    848 
    849 	return NULL;
    850 }
    851 
    852 
    853 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    854 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    855 
    856 /*
    857  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    858  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    859  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    860  *
    861  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    862  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    863  *
     864  * If you don't want to bother with the faster/more-accurate formula, you
    865  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    866  * (more general) method of calculating the %age of CPU used by a process.
    867  */
    868 #define	CCPU_SHIFT	(FSHIFT + 1)
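
         /*
          * Worked example: sched_pstats() reschedules itself once per second
          * (callout_schedule(&sched_pstats_ch, hz) below), so after 60 runs
          * the surviving fraction of an old estimate is ccpu^60 =
          * exp(-60/20) = exp(-3) ~= 0.05, i.e. about 95% of p_pctcpu decays
          * over 60 seconds as noted above.
          */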
    869 
    870 /*
    871  * sched_pstats:
    872  *
    873  * Update process statistics and check CPU resource allocation.
    874  * Call scheduler-specific hook to eventually adjust process/LWP
    875  * priorities.
    876  */
    877 /* ARGSUSED */
    878 void
    879 sched_pstats(void *arg)
    880 {
    881 	struct rlimit *rlim;
    882 	struct lwp *l;
    883 	struct proc *p;
    884 	int sig, clkhz;
    885 	long runtm;
    886 
    887 	sched_pstats_ticks++;
    888 
    889 	mutex_enter(&proclist_mutex);
    890 	PROCLIST_FOREACH(p, &allproc) {
    891 		/*
    892 		 * Increment time in/out of memory and sleep time (if
    893 		 * sleeping).  We ignore overflow; with 16-bit int's
    894 		 * (remember them?) overflow takes 45 days.
    895 		 */
    896 		mutex_enter(&p->p_smutex);
    897 		mutex_spin_enter(&p->p_stmutex);
    898 		runtm = p->p_rtime.tv_sec;
    899 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    900 			if ((l->l_flag & LW_IDLE) != 0)
    901 				continue;
    902 			lwp_lock(l);
    903 			runtm += l->l_rtime.tv_sec;
    904 			l->l_swtime++;
    905 			sched_pstats_hook(l);
    906 			lwp_unlock(l);
    907 
    908 			/*
    909 			 * p_pctcpu is only for ps.
    910 			 */
    911 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
    912 			if (l->l_slptime < 1) {
    913 				clkhz = stathz != 0 ? stathz : hz;
    914 #if	(FSHIFT >= CCPU_SHIFT)
    915 				l->l_pctcpu += (clkhz == 100) ?
    916 				    ((fixpt_t)l->l_cpticks) <<
    917 				        (FSHIFT - CCPU_SHIFT) :
     918 				    100 * (((fixpt_t) l->l_cpticks)
    919 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
    920 #else
    921 				l->l_pctcpu += ((FSCALE - ccpu) *
    922 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
    923 #endif
    924 				l->l_cpticks = 0;
    925 			}
    926 		}
    927 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
    928 		mutex_spin_exit(&p->p_stmutex);
    929 
    930 		/*
    931 		 * Check if the process exceeds its CPU resource allocation.
    932 		 * If over max, kill it.
    933 		 */
    934 		rlim = &p->p_rlimit[RLIMIT_CPU];
    935 		sig = 0;
    936 		if (runtm >= rlim->rlim_cur) {
    937 			if (runtm >= rlim->rlim_max)
    938 				sig = SIGKILL;
    939 			else {
    940 				sig = SIGXCPU;
    941 				if (rlim->rlim_cur < rlim->rlim_max)
    942 					rlim->rlim_cur += 5;
    943 			}
    944 		}
    945 		mutex_exit(&p->p_smutex);
    946 		if (sig) {
    947 			psignal(p, sig);
    948 		}
    949 	}
    950 	mutex_exit(&proclist_mutex);
    951 	uvm_meter();
    952 	cv_wakeup(&lbolt);
    953 	callout_schedule(&sched_pstats_ch, hz);
    954 }
    955 
    956 void
    957 sched_init(void)
    958 {
    959 
    960 	callout_init(&sched_pstats_ch, 0);
    961 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
    962 	sched_setup();
    963 	sched_pstats(NULL);
    964 }
    965