      1 /*	$NetBSD: kern_synch.c,v 1.192.2.4 2007/10/04 15:44:52 joerg Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  * Daniel Sieger.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the NetBSD
     23  *	Foundation, Inc. and its contributors.
     24  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25  *    contributors may be used to endorse or promote products derived
     26  *    from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38  * POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 
     41 /*-
     42  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     43  *	The Regents of the University of California.  All rights reserved.
     44  * (c) UNIX System Laboratories, Inc.
     45  * All or some portions of this file are derived from material licensed
     46  * to the University of California by American Telephone and Telegraph
     47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     48  * the permission of UNIX System Laboratories, Inc.
     49  *
     50  * Redistribution and use in source and binary forms, with or without
     51  * modification, are permitted provided that the following conditions
     52  * are met:
     53  * 1. Redistributions of source code must retain the above copyright
     54  *    notice, this list of conditions and the following disclaimer.
     55  * 2. Redistributions in binary form must reproduce the above copyright
     56  *    notice, this list of conditions and the following disclaimer in the
     57  *    documentation and/or other materials provided with the distribution.
     58  * 3. Neither the name of the University nor the names of its contributors
     59  *    may be used to endorse or promote products derived from this software
     60  *    without specific prior written permission.
     61  *
     62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     72  * SUCH DAMAGE.
     73  *
     74  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
     75  */
     76 
     77 #include <sys/cdefs.h>
     78 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.192.2.4 2007/10/04 15:44:52 joerg Exp $");
     79 
     80 #include "opt_kstack.h"
     81 #include "opt_lockdebug.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #define	__MUTEX_PRIVATE
     86 
     87 #include <sys/param.h>
     88 #include <sys/systm.h>
     89 #include <sys/proc.h>
     90 #include <sys/kernel.h>
     91 #if defined(PERFCTRS)
     92 #include <sys/pmc.h>
     93 #endif
     94 #include <sys/cpu.h>
     95 #include <sys/resourcevar.h>
     96 #include <sys/sched.h>
     97 #include <sys/syscall_stats.h>
     98 #include <sys/sleepq.h>
     99 #include <sys/lockdebug.h>
    100 #include <sys/evcnt.h>
    101 
    102 #include <uvm/uvm_extern.h>
    103 
    104 callout_t sched_pstats_ch;
    105 unsigned int sched_pstats_ticks;
    106 
    107 kcondvar_t	lbolt;			/* once a second sleep address */
    108 
    109 static void	sched_unsleep(struct lwp *);
    110 static void	sched_changepri(struct lwp *, pri_t);
    111 static void	sched_lendpri(struct lwp *, pri_t);
    112 
    113 syncobj_t sleep_syncobj = {
    114 	SOBJ_SLEEPQ_SORTED,
    115 	sleepq_unsleep,
    116 	sleepq_changepri,
    117 	sleepq_lendpri,
    118 	syncobj_noowner,
    119 };
    120 
    121 syncobj_t sched_syncobj = {
    122 	SOBJ_SLEEPQ_SORTED,
    123 	sched_unsleep,
    124 	sched_changepri,
    125 	sched_lendpri,
    126 	syncobj_noowner,
    127 };
    128 
    129 /*
    130  * During autoconfiguration or after a panic, a sleep will simply lower the
    131  * priority briefly to allow interrupts, then return.  The priority to be
    132  * used (safepri) is machine-dependent, thus this value is initialized and
    133  * maintained in the machine-dependent layers.  This priority will typically
    134  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    135  * it can be made higher to block network software interrupts after panics.
    136  */
    137 int	safepri;
    138 
    139 /*
    140  * OBSOLETE INTERFACE
    141  *
    142  * General sleep call.  Suspends the current process until a wakeup is
    143  * performed on the specified identifier.  The process will then be made
    144  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
    145  * means no timeout).  If pri includes PCATCH flag, signals are checked
    146  * before and after sleeping, else signals are not checked.  Returns 0 if
    147  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
    148  * signal needs to be delivered, ERESTART is returned if the current system
    149  * call should be restarted if possible, and EINTR is returned if the system
    150  * call should be interrupted by the signal (return EINTR).
    151  *
    152  * The interlock is held until we are on a sleep queue. The interlock will
     153  * be locked before returning to the caller unless the PNORELOCK flag
    154  * is specified, in which case the interlock will always be unlocked upon
    155  * return.
    156  */
    157 int
    158 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    159 	volatile struct simplelock *interlock)
    160 {
    161 	struct lwp *l = curlwp;
    162 	sleepq_t *sq;
    163 	int error;
    164 
    165 	if (sleepq_dontsleep(l)) {
    166 		(void)sleepq_abort(NULL, 0);
    167 		if ((priority & PNORELOCK) != 0)
    168 			simple_unlock(interlock);
    169 		return 0;
    170 	}
    171 
    172 	sq = sleeptab_lookup(&sleeptab, ident);
    173 	sleepq_enter(sq, l);
    174 	sleepq_enqueue(sq, priority & PRIMASK, ident, wmesg, &sleep_syncobj);
    175 
    176 	if (interlock != NULL) {
    177 		LOCK_ASSERT(simple_lock_held(interlock));
    178 		simple_unlock(interlock);
    179 	}
    180 
    181 	error = sleepq_block(timo, priority & PCATCH);
    182 
    183 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    184 		simple_lock(interlock);
    185 
    186 	return error;
    187 }
    188 
    189 int
    190 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    191 	kmutex_t *mtx)
    192 {
    193 	struct lwp *l = curlwp;
    194 	sleepq_t *sq;
    195 	int error;
    196 
    197 	if (sleepq_dontsleep(l)) {
    198 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    199 		return 0;
    200 	}
    201 
    202 	sq = sleeptab_lookup(&sleeptab, ident);
    203 	sleepq_enter(sq, l);
    204 	sleepq_enqueue(sq, priority & PRIMASK, ident, wmesg, &sleep_syncobj);
    205 	mutex_exit(mtx);
    206 	error = sleepq_block(timo, priority & PCATCH);
    207 
    208 	if ((priority & PNORELOCK) == 0)
    209 		mutex_enter(mtx);
    210 
    211 	return error;
    212 }
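
         /*
          * A minimal usage sketch for mtsleep() (illustrative only; "sc",
          * its fields and the wait message are hypothetical, not part of
          * this file).  The caller holds the kmutex, re-tests its condition
          * in a loop to guard against spurious or unrelated wakeups, and
          * lets mtsleep() drop and re-take the mutex around the sleep:
          *
          *	mutex_enter(&sc->sc_lock);
          *	while (sc->sc_busy) {
          *		error = mtsleep(&sc->sc_busy, PWAIT, "scbusy", hz,
          *		    &sc->sc_lock);
          *		if (error != 0 && error != EWOULDBLOCK)
          *			break;
          *	}
          *	mutex_exit(&sc->sc_lock);
          *
          * On return the mutex is held again (PNORELOCK was not passed),
          * so the while-loop re-test is performed under the lock.
          */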
    213 
    214 /*
    215  * General sleep call for situations where a wake-up is not expected.
    216  */
    217 int
    218 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    219 {
    220 	struct lwp *l = curlwp;
    221 	sleepq_t *sq;
    222 	int error;
    223 
    224 	if (sleepq_dontsleep(l))
    225 		return sleepq_abort(NULL, 0);
    226 
    227 	if (mtx != NULL)
    228 		mutex_exit(mtx);
    229 	sq = sleeptab_lookup(&sleeptab, l);
    230 	sleepq_enter(sq, l);
    231 	sleepq_enqueue(sq, sched_kpri(l), l, wmesg, &sleep_syncobj);
    232 	error = sleepq_block(timo, intr);
    233 	if (mtx != NULL)
    234 		mutex_enter(mtx);
    235 
    236 	return error;
    237 }
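
         /*
          * A minimal kpause() sketch (illustrative, not from this file):
          * pause for roughly a tenth of a second, not interruptible by
          * signals, with no mutex to drop.  mstohz() is assumed here for
          * the tick conversion; note that a timeout of 0 means "no
          * timeout" and, with no wakeup expected, would sleep forever:
          *
          *	(void)kpause("pause", false, max(1, mstohz(100)), NULL);
          */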
    238 
    239 /*
    240  * OBSOLETE INTERFACE
    241  *
    242  * Make all processes sleeping on the specified identifier runnable.
    243  */
    244 void
    245 wakeup(wchan_t ident)
    246 {
    247 	sleepq_t *sq;
    248 
    249 	if (cold)
    250 		return;
    251 
    252 	sq = sleeptab_lookup(&sleeptab, ident);
    253 	sleepq_wake(sq, ident, (u_int)-1);
    254 }
    255 
    256 /*
    257  * OBSOLETE INTERFACE
    258  *
     259  * Make the first (i.e. highest-priority) process sleeping on the
     260  * specified identifier runnable.
    261  */
    262 void
    263 wakeup_one(wchan_t ident)
    264 {
    265 	sleepq_t *sq;
    266 
    267 	if (cold)
    268 		return;
    269 
    270 	sq = sleeptab_lookup(&sleeptab, ident);
    271 	sleepq_wake(sq, ident, 1);
    272 }
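
         /*
          * Illustrative producer-side pairing for the sleep/wakeup calls
          * above (the "pendingjobs" datum and "job_lock" mutex are
          * hypothetical): update the datum while holding the same lock the
          * sleeper passes to mtsleep(), then wake every LWP sleeping on
          * the datum's address:
          *
          *	mutex_enter(&job_lock);
          *	pendingjobs++;
          *	mutex_exit(&job_lock);
          *	wakeup(&pendingjobs);
          *
          * Holding the lock across the update closes the window in which a
          * sleeper could test the datum and enqueue itself after wakeup()
          * has already run; wakeup_one() may be used instead when only the
          * first (highest-priority) sleeper needs to run.
          */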
    273 
    274 
    275 /*
    276  * General yield call.  Puts the current process back on its run queue and
    277  * performs a voluntary context switch.  Should only be called when the
     278  * current process explicitly requests it (e.g. sched_yield(2)).
    279  */
    280 void
    281 yield(void)
    282 {
    283 	struct lwp *l = curlwp;
    284 
    285 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    286 	lwp_lock(l);
    287 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    288 	KASSERT(l->l_stat == LSONPROC);
    289 	/* XXX Only do this for timeshared threads. */
    290 	l->l_priority = MAXPRI;
    291 	(void)mi_switch(l);
    292 	KERNEL_LOCK(l->l_biglocks, l);
    293 }
    294 
    295 /*
    296  * General preemption call.  Puts the current process back on its run queue
    297  * and performs an involuntary context switch.
    298  */
    299 void
    300 preempt(void)
    301 {
    302 	struct lwp *l = curlwp;
    303 
    304 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    305 	lwp_lock(l);
    306 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    307 	KASSERT(l->l_stat == LSONPROC);
    308 	l->l_priority = l->l_usrpri;
    309 	l->l_nivcsw++;
    310 	(void)mi_switch(l);
    311 	KERNEL_LOCK(l->l_biglocks, l);
    312 }
    313 
    314 /*
    315  * Compute the amount of time during which the current lwp was running.
    316  *
    317  * - update l_rtime unless it's an idle lwp.
    318  * - update spc_runtime for the next lwp.
    319  */
    320 
    321 static inline void
    322 updatertime(struct lwp *l, struct schedstate_percpu *spc)
    323 {
    324 	struct timeval tv;
    325 	long s, u;
    326 
    327 	if ((l->l_flag & LW_IDLE) != 0) {
    328 		microtime(&spc->spc_runtime);
    329 		return;
    330 	}
    331 
    332 	microtime(&tv);
    333 	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
    334 	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
    335 	if (u < 0) {
    336 		u += 1000000;
    337 		s--;
    338 	} else if (u >= 1000000) {
    339 		u -= 1000000;
    340 		s++;
    341 	}
    342 	l->l_rtime.tv_usec = u;
    343 	l->l_rtime.tv_sec = s;
    344 
    345 	spc->spc_runtime = tv;
    346 }
    347 
    348 /*
    349  * The machine independent parts of context switch.
    350  *
    351  * Returns 1 if another LWP was actually run.
    352  */
    353 int
    354 mi_switch(struct lwp *l)
    355 {
    356 	struct schedstate_percpu *spc;
    357 	struct lwp *newl;
    358 	int retval, oldspl;
    359 	struct cpu_info *ci;
    360 
    361 	KASSERT(lwp_locked(l, NULL));
    362 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    363 
    364 #ifdef KSTACK_CHECK_MAGIC
    365 	kstack_check_magic(l);
    366 #endif
    367 
    368 	/*
    369 	 * It's safe to read the per CPU schedstate unlocked here, as all we
     370 	 * are after is the run time and that's guaranteed to have been last
    371 	 * updated by this CPU.
    372 	 */
    373 	ci = l->l_cpu;
    374 	KDASSERT(ci == curcpu());
    375 
    376 	/*
    377 	 * Process is about to yield the CPU; clear the appropriate
    378 	 * scheduling flags.
    379 	 */
    380 	spc = &ci->ci_schedstate;
    381 	newl = NULL;
    382 
    383 	if (l->l_switchto != NULL) {
    384 		newl = l->l_switchto;
    385 		l->l_switchto = NULL;
    386 	}
    387 
    388 	/* Count time spent in current system call */
    389 	SYSCALL_TIME_SLEEP(l);
    390 
    391 	/*
    392 	 * XXXSMP If we are using h/w performance counters,
    393 	 * save context.
    394 	 */
    395 #if PERFCTRS
    396 	if (PMC_ENABLED(l->l_proc)) {
    397 		pmc_save_context(l->l_proc);
    398 	}
    399 #endif
    400 	updatertime(l, spc);
    401 
    402 	/*
    403 	 * If on the CPU and we have gotten this far, then we must yield.
    404 	 */
    405 	mutex_spin_enter(spc->spc_mutex);
    406 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    407 	KASSERT(l->l_stat != LSRUN);
    408 	if (l->l_stat == LSONPROC) {
    409 		KASSERT(lwp_locked(l, &spc->spc_lwplock));
    410 		if ((l->l_flag & LW_IDLE) == 0) {
    411 			l->l_stat = LSRUN;
    412 			lwp_setlock(l, spc->spc_mutex);
    413 			sched_enqueue(l, true);
    414 		} else
    415 			l->l_stat = LSIDL;
    416 	}
    417 
    418 	/*
     419 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
    420 	 * If no LWP is runnable, switch to the idle LWP.
    421 	 */
    422 	if (newl == NULL) {
    423 		newl = sched_nextlwp();
    424 		if (newl != NULL) {
    425 			sched_dequeue(newl);
    426 			KASSERT(lwp_locked(newl, spc->spc_mutex));
    427 			newl->l_stat = LSONPROC;
    428 			newl->l_cpu = ci;
    429 			newl->l_flag |= LW_RUNNING;
    430 			lwp_setlock(newl, &spc->spc_lwplock);
    431 		} else {
    432 			newl = ci->ci_data.cpu_idlelwp;
    433 			newl->l_stat = LSONPROC;
    434 			newl->l_flag |= LW_RUNNING;
    435 		}
    436 		ci->ci_want_resched = 0;
    437 	}
    438 
    439 	spc->spc_curpriority = newl->l_usrpri;
    440 	/* XXX The following may be done unlocked if newl != NULL above. */
    441 	newl->l_priority = newl->l_usrpri;
    442 
    443 	if (l != newl) {
    444 		struct lwp *prevlwp;
    445 
    446 		/*
    447 		 * If the old LWP has been moved to a run queue above,
    448 		 * drop the general purpose LWP lock: it's now locked
    449 		 * by the scheduler lock.
    450 		 *
    451 		 * Otherwise, drop the scheduler lock.  We're done with
    452 		 * the run queues for now.
    453 		 */
    454 		if (l->l_mutex == spc->spc_mutex) {
    455 			mutex_spin_exit(&spc->spc_lwplock);
    456 		} else {
    457 			mutex_spin_exit(spc->spc_mutex);
    458 		}
    459 
    460 		/* Unlocked, but for statistics only. */
    461 		uvmexp.swtch++;
    462 
    463 		/* Save old VM context. */
    464 		pmap_deactivate(l);
    465 
    466 		/* Switch to the new LWP.. */
    467 		l->l_ncsw++;
    468 		l->l_flag &= ~LW_RUNNING;
    469 		oldspl = MUTEX_SPIN_OLDSPL(ci);
    470 		prevlwp = cpu_switchto(l, newl);
    471 
    472 		/*
    473 		 * .. we have switched away and are now back so we must
    474 		 * be the new curlwp.  prevlwp is who we replaced.
    475 		 */
    476 		if (prevlwp != NULL) {
    477 			curcpu()->ci_mtx_oldspl = oldspl;
    478 			lwp_unlock(prevlwp);
    479 		} else {
    480 			splx(oldspl);
    481 		}
    482 
    483 		/* Restore VM context. */
    484 		pmap_activate(l);
    485 		retval = 1;
    486 	} else {
    487 		/* Nothing to do - just unlock and return. */
    488 		mutex_spin_exit(spc->spc_mutex);
    489 		lwp_unlock(l);
    490 		retval = 0;
    491 	}
    492 
    493 	KASSERT(l == curlwp);
    494 	KASSERT(l->l_stat == LSONPROC);
    495 
    496 	/*
    497 	 * XXXSMP If we are using h/w performance counters, restore context.
    498 	 */
    499 #if PERFCTRS
    500 	if (PMC_ENABLED(l->l_proc)) {
    501 		pmc_restore_context(l->l_proc);
    502 	}
    503 #endif
    504 
    505 	/*
    506 	 * We're running again; record our new start time.  We might
    507 	 * be running on a new CPU now, so don't use the cached
    508 	 * schedstate_percpu pointer.
    509 	 */
    510 	SYSCALL_TIME_WAKEUP(l);
    511 	KASSERT(curlwp == l);
    512 	KDASSERT(l->l_cpu == curcpu());
    513 	LOCKDEBUG_BARRIER(NULL, 1);
    514 
    515 	return retval;
    516 }
    517 
    518 /*
    519  * Change process state to be runnable, placing it on the run queue if it is
    520  * in memory, and awakening the swapper if it isn't in memory.
    521  *
    522  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    523  */
    524 void
    525 setrunnable(struct lwp *l)
    526 {
    527 	struct proc *p = l->l_proc;
    528 	sigset_t *ss;
    529 
    530 	KASSERT((l->l_flag & LW_IDLE) == 0);
    531 	KASSERT(mutex_owned(&p->p_smutex));
    532 	KASSERT(lwp_locked(l, NULL));
    533 
    534 	switch (l->l_stat) {
    535 	case LSSTOP:
    536 		/*
    537 		 * If we're being traced (possibly because someone attached us
    538 		 * while we were stopped), check for a signal from the debugger.
    539 		 */
    540 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    541 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    542 				ss = &l->l_sigpend.sp_set;
    543 			else
    544 				ss = &p->p_sigpend.sp_set;
    545 			sigaddset(ss, p->p_xstat);
    546 			signotify(l);
    547 		}
    548 		p->p_nrlwps++;
    549 		break;
    550 	case LSSUSPENDED:
    551 		l->l_flag &= ~LW_WSUSPEND;
    552 		p->p_nrlwps++;
    553 		cv_broadcast(&p->p_lwpcv);
    554 		break;
    555 	case LSSLEEP:
    556 		KASSERT(l->l_wchan != NULL);
    557 		break;
    558 	default:
    559 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    560 	}
    561 
    562 	/*
     563 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    564 	 * again.  If not, mark it as still sleeping.
    565 	 */
    566 	if (l->l_wchan != NULL) {
    567 		l->l_stat = LSSLEEP;
    568 		/* lwp_unsleep() will release the lock. */
    569 		lwp_unsleep(l);
    570 		return;
    571 	}
    572 
    573 	/*
    574 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    575 	 * about to call mi_switch(), in which case it will yield.
    576 	 */
    577 	if ((l->l_flag & LW_RUNNING) != 0) {
    578 		l->l_stat = LSONPROC;
    579 		l->l_slptime = 0;
    580 		lwp_unlock(l);
    581 		return;
    582 	}
    583 
    584 	/*
    585 	 * Set the LWP runnable.  If it's swapped out, we need to wake the swapper
    586 	 * to bring it back in.  Otherwise, enter it into a run queue.
    587 	 */
    588 	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
    589 		spc_lock(l->l_cpu);
    590 		lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
    591 	}
    592 
    593 	sched_setrunnable(l);
    594 	l->l_stat = LSRUN;
    595 	l->l_slptime = 0;
    596 
    597 	if (l->l_flag & LW_INMEM) {
    598 		sched_enqueue(l, false);
    599 		resched_cpu(l);
    600 		lwp_unlock(l);
    601 	} else {
    602 		lwp_unlock(l);
    603 		uvm_kick_scheduler();
    604 	}
    605 }
    606 
    607 /*
    608  * suspendsched:
    609  *
     610  *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    611  */
    612 void
    613 suspendsched(void)
    614 {
    615 	CPU_INFO_ITERATOR cii;
    616 	struct cpu_info *ci;
    617 	struct lwp *l;
    618 	struct proc *p;
    619 
    620 	/*
    621 	 * We do this by process in order not to violate the locking rules.
    622 	 */
    623 	mutex_enter(&proclist_mutex);
    624 	PROCLIST_FOREACH(p, &allproc) {
    625 		mutex_enter(&p->p_smutex);
    626 
    627 		if ((p->p_flag & PK_SYSTEM) != 0) {
    628 			mutex_exit(&p->p_smutex);
    629 			continue;
    630 		}
    631 
    632 		p->p_stat = SSTOP;
    633 
    634 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    635 			if (l == curlwp)
    636 				continue;
    637 
    638 			lwp_lock(l);
    639 
    640 			/*
     641 			 * Set LW_WREBOOT so that the LWP will suspend itself
     642 			 * when it tries to return to user mode.  We want to
     643 			 * get as many LWPs as possible to the user/kernel
     644 			 * boundary, so that they will release any locks
     645 			 * that they hold.
    646 			 */
    647 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
    648 
    649 			if (l->l_stat == LSSLEEP &&
    650 			    (l->l_flag & LW_SINTR) != 0) {
    651 				/* setrunnable() will release the lock. */
    652 				setrunnable(l);
    653 				continue;
    654 			}
    655 
    656 			lwp_unlock(l);
    657 		}
    658 
    659 		mutex_exit(&p->p_smutex);
    660 	}
    661 	mutex_exit(&proclist_mutex);
    662 
    663 	/*
    664 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
    665 	 * They'll trap into the kernel and suspend themselves in userret().
    666 	 */
    667 	for (CPU_INFO_FOREACH(cii, ci))
    668 		cpu_need_resched(ci, 0);
    669 }
    670 
    671 /*
    672  * sched_kpri:
    673  *
    674  *	Scale a priority level to a kernel priority level, usually
    675  *	for an LWP that is about to sleep.
    676  */
    677 pri_t
    678 sched_kpri(struct lwp *l)
    679 {
    680 	/*
    681 	 * Scale user priorities (127 -> 50) up to kernel priorities
    682 	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
    683 	 * for high priority kthreads.  Kernel priorities passed in
    684 	 * are left "as is".  XXX This is somewhat arbitrary.
    685 	 */
    686 	static const uint8_t kpri_tab[] = {
    687 		 0,   1,   2,   3,   4,   5,   6,   7,
    688 		 8,   9,  10,  11,  12,  13,  14,  15,
    689 		16,  17,  18,  19,  20,  21,  22,  23,
    690 		24,  25,  26,  27,  28,  29,  30,  31,
    691 		32,  33,  34,  35,  36,  37,  38,  39,
    692 		40,  41,  42,  43,  44,  45,  46,  47,
    693 		48,  49,   8,   8,   9,   9,  10,  10,
    694 		11,  11,  12,  12,  13,  14,  14,  15,
    695 		15,  16,  16,  17,  17,  18,  18,  19,
    696 		20,  20,  21,  21,  22,  22,  23,  23,
    697 		24,  24,  25,  26,  26,  27,  27,  28,
    698 		28,  29,  29,  30,  30,  31,  32,  32,
    699 		33,  33,  34,  34,  35,  35,  36,  36,
    700 		37,  38,  38,  39,  39,  40,  40,  41,
    701 		41,  42,  42,  43,  44,  44,  45,  45,
    702 		46,  46,  47,  47,  48,  48,  49,  49,
    703 	};
    704 
    705 	return (pri_t)kpri_tab[l->l_usrpri];
    706 }
    707 
    708 /*
    709  * sched_unsleep:
    710  *
     711  *	This is called when the LWP has not been awoken normally but instead
    712  *	interrupted: for example, if the sleep timed out.  Because of this,
    713  *	it's not a valid action for running or idle LWPs.
    714  */
    715 static void
    716 sched_unsleep(struct lwp *l)
    717 {
    718 
    719 	lwp_unlock(l);
    720 	panic("sched_unsleep");
    721 }
    722 
    723 inline void
    724 resched_cpu(struct lwp *l)
    725 {
    726 	struct cpu_info *ci;
    727 	const pri_t pri = lwp_eprio(l);
    728 
    729 	/*
    730 	 * XXXSMP
    731 	 * Since l->l_cpu persists across a context switch,
    732 	 * this gives us *very weak* processor affinity, in
    733 	 * that we notify the CPU on which the process last
    734 	 * ran that it should try to switch.
    735 	 *
    736 	 * This does not guarantee that the process will run on
    737 	 * that processor next, because another processor might
    738 	 * grab it the next time it performs a context switch.
    739 	 *
    740 	 * This also does not handle the case where its last
    741 	 * CPU is running a higher-priority process, but every
    742 	 * other CPU is running a lower-priority process.  There
    743 	 * are ways to handle this situation, but they're not
    744 	 * currently very pretty, and we also need to weigh the
    745 	 * cost of moving a process from one CPU to another.
    746 	 */
    747 	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
    748 	if (pri < ci->ci_schedstate.spc_curpriority)
    749 		cpu_need_resched(ci, 0);
    750 }
    751 
    752 static void
    753 sched_changepri(struct lwp *l, pri_t pri)
    754 {
    755 
    756 	KASSERT(lwp_locked(l, NULL));
    757 
    758 	l->l_usrpri = pri;
    759 	if (l->l_priority < PUSER)
    760 		return;
    761 
    762 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
    763 		l->l_priority = pri;
    764 		return;
    765 	}
    766 
    767 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    768 
    769 	sched_dequeue(l);
    770 	l->l_priority = pri;
    771 	sched_enqueue(l, false);
    772 	resched_cpu(l);
    773 }
    774 
    775 static void
    776 sched_lendpri(struct lwp *l, pri_t pri)
    777 {
    778 
    779 	KASSERT(lwp_locked(l, NULL));
    780 
    781 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
    782 		l->l_inheritedprio = pri;
    783 		return;
    784 	}
    785 
    786 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    787 
    788 	sched_dequeue(l);
    789 	l->l_inheritedprio = pri;
    790 	sched_enqueue(l, false);
    791 	resched_cpu(l);
    792 }
    793 
    794 struct lwp *
    795 syncobj_noowner(wchan_t wchan)
    796 {
    797 
    798 	return NULL;
    799 }
    800 
    801 
    802 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    803 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    804 
    805 /*
    806  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    807  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    808  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    809  *
    810  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    811  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    812  *
     813  * If you don't want to bother with the faster/more-accurate formula, you
    814  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
     815  * (more general) method of calculating the percentage of CPU used by a process.
    816  */
    817 #define	CCPU_SHIFT	(FSHIFT + 1)
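
         /*
          * Worked check of the decay constant above, assuming sched_pstats()
          * runs once per second (it is rescheduled with
          * callout_schedule(&sched_pstats_ch, hz) at the bottom of this
          * file): after 60 updates the accumulated factor is
          * ccpu^60 = exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of
          * the old p_pctcpu has decayed away, matching the comment before
          * `ccpu' above.
          */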
    818 
    819 /*
    820  * sched_pstats:
    821  *
     822  *	Update process statistics and check CPU resource allocation.
     823  *	Call scheduler-specific hook to eventually adjust process/LWP
     824  *	priorities.
    825  *
    826  *	XXXSMP This needs to be reorganised in order to reduce the locking
    827  *	burden.
    828  */
    829 /* ARGSUSED */
    830 void
    831 sched_pstats(void *arg)
    832 {
    833 	struct rlimit *rlim;
    834 	struct lwp *l;
    835 	struct proc *p;
    836 	int minslp, sig, clkhz;
    837 	long runtm;
    838 
    839 	sched_pstats_ticks++;
    840 
    841 	mutex_enter(&proclist_mutex);
    842 	PROCLIST_FOREACH(p, &allproc) {
    843 		/*
    844 		 * Increment time in/out of memory and sleep time (if
    845 		 * sleeping).  We ignore overflow; with 16-bit int's
    846 		 * (remember them?) overflow takes 45 days.
    847 		 */
    848 		minslp = 2;
    849 		mutex_enter(&p->p_smutex);
    850 		mutex_spin_enter(&p->p_stmutex);
    851 		runtm = p->p_rtime.tv_sec;
    852 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    853 			if ((l->l_flag & LW_IDLE) != 0)
    854 				continue;
    855 			lwp_lock(l);
    856 			runtm += l->l_rtime.tv_sec;
    857 			l->l_swtime++;
    858 			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
    859 			    l->l_stat == LSSUSPENDED) {
    860 				l->l_slptime++;
    861 				minslp = min(minslp, l->l_slptime);
    862 			} else
    863 				minslp = 0;
    864 			lwp_unlock(l);
    865 
    866 			/*
    867 			 * p_pctcpu is only for ps.
    868 			 */
    869 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
    870 			if (l->l_slptime < 1) {
    871 				clkhz = stathz != 0 ? stathz : hz;
    872 #if	(FSHIFT >= CCPU_SHIFT)
    873 				l->l_pctcpu += (clkhz == 100) ?
    874 				    ((fixpt_t)l->l_cpticks) <<
    875 				        (FSHIFT - CCPU_SHIFT) :
     876 				    100 * (((fixpt_t) l->l_cpticks)
    877 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
    878 #else
    879 				l->l_pctcpu += ((FSCALE - ccpu) *
    880 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
    881 #endif
    882 				l->l_cpticks = 0;
    883 			}
    884 		}
    885 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
    886 		sched_pstats_hook(p, minslp);
    887 		mutex_spin_exit(&p->p_stmutex);
    888 
    889 		/*
    890 		 * Check if the process exceeds its CPU resource allocation.
    891 		 * If over max, kill it.
    892 		 */
    893 		rlim = &p->p_rlimit[RLIMIT_CPU];
    894 		sig = 0;
    895 		if (runtm >= rlim->rlim_cur) {
    896 			if (runtm >= rlim->rlim_max)
    897 				sig = SIGKILL;
    898 			else {
    899 				sig = SIGXCPU;
    900 				if (rlim->rlim_cur < rlim->rlim_max)
    901 					rlim->rlim_cur += 5;
    902 			}
    903 		}
    904 		mutex_exit(&p->p_smutex);
    905 		if (sig) {
    906 			psignal(p, sig);
    907 		}
    908 	}
    909 	mutex_exit(&proclist_mutex);
    910 	uvm_meter();
    911 	cv_wakeup(&lbolt);
    912 	callout_schedule(&sched_pstats_ch, hz);
    913 }
    914 
    915 void
    916 sched_init(void)
    917 {
    918 
    919 	cv_init(&lbolt, "lbolt");
    920 	callout_init(&sched_pstats_ch, 0);
    921 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
    922 	sched_setup();
    923 	sched_pstats(NULL);
    924 }
    925