      1 /*	$NetBSD: kern_synch.c,v 1.186.2.6 2007/06/08 14:17:22 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  * Daniel Sieger.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the NetBSD
     23  *	Foundation, Inc. and its contributors.
     24  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25  *    contributors may be used to endorse or promote products derived
     26  *    from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38  * POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 
     41 /*-
     42  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     43  *	The Regents of the University of California.  All rights reserved.
     44  * (c) UNIX System Laboratories, Inc.
     45  * All or some portions of this file are derived from material licensed
     46  * to the University of California by American Telephone and Telegraph
     47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     48  * the permission of UNIX System Laboratories, Inc.
     49  *
     50  * Redistribution and use in source and binary forms, with or without
     51  * modification, are permitted provided that the following conditions
     52  * are met:
     53  * 1. Redistributions of source code must retain the above copyright
     54  *    notice, this list of conditions and the following disclaimer.
     55  * 2. Redistributions in binary form must reproduce the above copyright
     56  *    notice, this list of conditions and the following disclaimer in the
     57  *    documentation and/or other materials provided with the distribution.
     58  * 3. Neither the name of the University nor the names of its contributors
     59  *    may be used to endorse or promote products derived from this software
     60  *    without specific prior written permission.
     61  *
     62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     72  * SUCH DAMAGE.
     73  *
     74  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
     75  */
     76 
     77 #include <sys/cdefs.h>
     78 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.186.2.6 2007/06/08 14:17:22 ad Exp $");
     79 
     80 #include "opt_kstack.h"
     81 #include "opt_lockdebug.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_perfctrs.h"
     84 
     85 #define	__MUTEX_PRIVATE
     86 
     87 #include <sys/param.h>
     88 #include <sys/systm.h>
     89 #include <sys/proc.h>
     90 #include <sys/kernel.h>
     91 #if defined(PERFCTRS)
     92 #include <sys/pmc.h>
     93 #endif
     94 #include <sys/cpu.h>
     95 #include <sys/resourcevar.h>
     96 #include <sys/sched.h>
     97 #include <sys/syscall_stats.h>
     98 #include <sys/sleepq.h>
     99 #include <sys/lockdebug.h>
    100 
    101 #include <uvm/uvm_extern.h>
    102 
    103 struct callout sched_pstats_ch = CALLOUT_INITIALIZER_SETFUNC(sched_pstats, NULL);
    104 unsigned int sched_pstats_ticks;
    105 
    106 kcondvar_t	lbolt;			/* once a second sleep address */
    107 
    108 static void	sched_unsleep(struct lwp *);
    109 static void	sched_changepri(struct lwp *, pri_t);
    110 static void	sched_lendpri(struct lwp *, pri_t);
    111 
    112 syncobj_t sleep_syncobj = {
    113 	SOBJ_SLEEPQ_SORTED,
    114 	sleepq_unsleep,
    115 	sleepq_changepri,
    116 	sleepq_lendpri,
    117 	syncobj_noowner,
    118 };
    119 
    120 syncobj_t sched_syncobj = {
    121 	SOBJ_SLEEPQ_SORTED,
    122 	sched_unsleep,
    123 	sched_changepri,
    124 	sched_lendpri,
    125 	syncobj_noowner,
    126 };
    127 
    128 /*
    129  * During autoconfiguration or after a panic, a sleep will simply lower the
    130  * priority briefly to allow interrupts, then return.  The priority to be
    131  * used (safepri) is machine-dependent, thus this value is initialized and
    132  * maintained in the machine-dependent layers.  This priority will typically
    133  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    134  * it can be made higher to block network software interrupts after panics.
    135  */
    136 int	safepri;
    137 
    138 /*
    139  * OBSOLETE INTERFACE
    140  *
    141  * General sleep call.  Suspends the current process until a wakeup is
    142  * performed on the specified identifier.  The process will then be made
    143  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
     144  * means no timeout).  If pri includes the PCATCH flag, signals are checked
     145  * before and after sleeping, else signals are not checked.  Returns 0 if
     146  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
     147  * signal needs to be delivered, ERESTART is returned if the current system
     148  * call should be restarted where possible, and EINTR is returned if the
     149  * system call should simply be interrupted by the signal.
    150  *
     151  * The interlock is held until we are on a sleep queue.  The interlock will
     152  * be locked before returning to the caller unless the PNORELOCK flag
    153  * is specified, in which case the interlock will always be unlocked upon
    154  * return.
    155  */
    156 int
    157 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    158 	volatile struct simplelock *interlock)
    159 {
    160 	struct lwp *l = curlwp;
    161 	sleepq_t *sq;
    162 	int error;
    163 
    164 	if (sleepq_dontsleep(l)) {
    165 		(void)sleepq_abort(NULL, 0);
    166 		if ((priority & PNORELOCK) != 0)
    167 			simple_unlock(interlock);
    168 		return 0;
    169 	}
    170 
    171 	sq = sleeptab_lookup(&sleeptab, ident);
    172 	sleepq_enter(sq, l);
    173 	sleepq_enqueue(sq, priority & PRIMASK, ident, wmesg, &sleep_syncobj);
    174 
    175 	if (interlock != NULL) {
    176 		KASSERT(simple_lock_held(interlock));
    177 		simple_unlock(interlock);
    178 	}
    179 
    180 	error = sleepq_block(timo, priority & PCATCH);
    181 
    182 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    183 		simple_lock(interlock);
    184 
    185 	return error;
    186 }
    187 
    188 int
    189 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    190 	kmutex_t *mtx)
    191 {
    192 	struct lwp *l = curlwp;
    193 	sleepq_t *sq;
    194 	int error;
    195 
    196 	if (sleepq_dontsleep(l)) {
    197 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    198 		return 0;
    199 	}
    200 
    201 	sq = sleeptab_lookup(&sleeptab, ident);
    202 	sleepq_enter(sq, l);
    203 	sleepq_enqueue(sq, priority & PRIMASK, ident, wmesg, &sleep_syncobj);
    204 	mutex_exit(mtx);
    205 	error = sleepq_block(timo, priority & PCATCH);
    206 
    207 	if ((priority & PNORELOCK) == 0)
    208 		mutex_enter(mtx);
    209 
    210 	return error;
    211 }
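
/*
 * Illustration only (not part of the original file): a minimal sketch of
 * the usual caller-side pattern for mtsleep()/wakeup().  The names
 * example_mtx and example_done are hypothetical; the same shape applies
 * to ltsleep() with a simple_lock interlock.
 */
#if 0
	/* Waiter: block until the condition becomes true. */
	mutex_enter(&example_mtx);
	while (!example_done) {
		error = mtsleep(&example_done, PRIBIO | PCATCH, "exwait", 0,
		    &example_mtx);
		if (error != 0)
			break;	/* e.g. EINTR/ERESTART on a signal */
	}
	mutex_exit(&example_mtx);

	/* Waker: set the condition and wake everyone sleeping on it. */
	mutex_enter(&example_mtx);
	example_done = true;
	wakeup(&example_done);
	mutex_exit(&example_mtx);
#endif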
    212 
    213 /*
    214  * General sleep call for situations where a wake-up is not expected.
    215  */
    216 int
    217 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    218 {
    219 	struct lwp *l = curlwp;
    220 	sleepq_t *sq;
    221 	int error;
    222 
    223 	if (sleepq_dontsleep(l))
    224 		return sleepq_abort(NULL, 0);
    225 
    226 	if (mtx != NULL)
    227 		mutex_exit(mtx);
    228 	sq = sleeptab_lookup(&sleeptab, l);
    229 	sleepq_enter(sq, l);
    230 	sleepq_enqueue(sq, sched_kpri(l), l, wmesg, &sleep_syncobj);
    231 	error = sleepq_block(timo, intr);
    232 	if (mtx != NULL)
    233 		mutex_enter(mtx);
    234 
    235 	return error;
    236 }
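
/*
 * Illustration only (not part of the original file): kpause() used as a
 * simple bounded delay where no wakeup() is expected.  Assumes mstohz()
 * is available for the millisecond-to-tick conversion and that no mutex
 * needs to be dropped across the sleep.
 */
#if 0
	/* Sleep for roughly 100ms, not interruptible by signals. */
	(void)kpause("exdelay", false, mstohz(100), NULL);
#endif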
    237 
    238 /*
    239  * OBSOLETE INTERFACE
    240  *
    241  * Make all processes sleeping on the specified identifier runnable.
    242  */
    243 void
    244 wakeup(wchan_t ident)
    245 {
    246 	sleepq_t *sq;
    247 
    248 	if (cold)
    249 		return;
    250 
    251 	sq = sleeptab_lookup(&sleeptab, ident);
    252 	sleepq_wake(sq, ident, (u_int)-1);
    253 }
    254 
    255 /*
    256  * OBSOLETE INTERFACE
    257  *
    258  * Make the highest priority process first in line on the specified
    259  * identifier runnable.
    260  */
    261 void
    262 wakeup_one(wchan_t ident)
    263 {
    264 	sleepq_t *sq;
    265 
    266 	if (cold)
    267 		return;
    268 
    269 	sq = sleeptab_lookup(&sleeptab, ident);
    270 	sleepq_wake(sq, ident, 1);
    271 }
    272 
    273 
    274 /*
    275  * General yield call.  Puts the current process back on its run queue and
    276  * performs a voluntary context switch.  Should only be called when the
     277  * current process explicitly requests it (e.g. sched_yield(2) in compat code).
    278  */
    279 void
    280 yield(void)
    281 {
    282 	struct lwp *l = curlwp;
    283 
    284 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    285 	lwp_lock(l);
    286 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    287 	KASSERT(l->l_stat == LSONPROC);
    288 	l->l_priority = l->l_usrpri;
    289 	(void)mi_switch(l);
    290 	KERNEL_LOCK(l->l_biglocks, l);
    291 }
    292 
    293 /*
    294  * General preemption call.  Puts the current process back on its run queue
    295  * and performs an involuntary context switch.
    296  */
    297 void
    298 preempt(void)
    299 {
    300 	struct lwp *l = curlwp;
    301 
    302 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    303 	lwp_lock(l);
    304 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    305 	KASSERT(l->l_stat == LSONPROC);
    306 	l->l_priority = l->l_usrpri;
    307 	l->l_nivcsw++;
    308 	(void)mi_switch(l);
    309 	KERNEL_LOCK(l->l_biglocks, l);
    310 }
    311 
    312 /*
    313  * Compute the amount of time during which the current lwp was running.
    314  *
    315  * - update l_rtime unless it's an idle lwp.
    316  * - update spc_runtime for the next lwp.
    317  */
    318 
    319 static inline void
    320 updatertime(struct lwp *l, struct schedstate_percpu *spc)
    321 {
    322 	struct timeval tv;
    323 	long s, u;
    324 
    325 	if ((l->l_flag & LW_IDLE) != 0) {
    326 		microtime(&spc->spc_runtime);
    327 		return;
    328 	}
    329 
    330 	microtime(&tv);
    331 	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
    332 	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
    333 	if (u < 0) {
    334 		u += 1000000;
    335 		s--;
    336 	} else if (u >= 1000000) {
    337 		u -= 1000000;
    338 		s++;
    339 	}
    340 	l->l_rtime.tv_usec = u;
    341 	l->l_rtime.tv_sec = s;
    342 
    343 	spc->spc_runtime = tv;
    344 }
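
/*
 * Illustration only (not part of the original file): a worked instance of
 * the carry handling above.  If l_rtime is {3, 500000}, spc_runtime is
 * {10, 900000} and microtime() returns {12, 100000}, then
 * u = 500000 + (100000 - 900000) = -300000 and s = 3 + (12 - 10) = 5;
 * since u < 0, u becomes 700000 and s becomes 4, so l_rtime grows by the
 * 1.2 seconds the LWP actually ran.
 */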
    345 
    346 /*
     347  * The machine-independent parts of a context switch.
    348  *
    349  * Returns 1 if another LWP was actually run.
    350  */
    351 int
    352 mi_switch(struct lwp *l)
    353 {
    354 	struct schedstate_percpu *spc;
    355 	struct lwp *newl;
    356 	int retval, oldspl;
    357 
    358 	KASSERT(lwp_locked(l, NULL));
    359 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    360 
    361 #ifdef KSTACK_CHECK_MAGIC
    362 	kstack_check_magic(l);
    363 #endif
    364 
    365 	/*
     366 	 * It's safe to read the per-CPU schedstate unlocked here, as all we
     367 	 * are after is the run time and that's guaranteed to have been last
    368 	 * updated by this CPU.
    369 	 */
    370 	KDASSERT(l->l_cpu == curcpu());
    371 
    372 	/* Count time spent in current system call */
    373 	SYSCALL_TIME_SLEEP(l);
    374 
    375 	/*
    376 	 * XXXSMP If we are using h/w performance counters, save context.
    377 	 */
    378 #if PERFCTRS
    379 	if (PMC_ENABLED(l->l_proc)) {
    380 		pmc_save_context(l->l_proc);
    381 	}
    382 #endif
    383 	/*
    384 	 * Process is about to yield the CPU; clear the appropriate
    385 	 * scheduling flags.
    386 	 */
    387 	spc = &l->l_cpu->ci_schedstate;
    388 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    389 	updatertime(l, spc);
    390 
    391 	/*
     392 	 * If we are on the CPU and have gotten this far, then we must yield.
    393 	 */
    394 	mutex_spin_enter(spc->spc_mutex);
    395 	KASSERT(l->l_stat != LSRUN);
    396 	if (l->l_stat == LSONPROC) {
    397 		KASSERT(lwp_locked(l, &spc->spc_lwplock));
    398 		if ((l->l_flag & LW_IDLE) == 0) {
    399 			l->l_stat = LSRUN;
    400 			lwp_setlock(l, spc->spc_mutex);
    401 			sched_enqueue(l, true);
    402 		} else
    403 			l->l_stat = LSIDL;
    404 	}
    405 
    406 	/*
     407 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
    408 	 * If no LWP is runnable, switch to the idle LWP.
    409 	 */
    410 	newl = sched_nextlwp();
    411 	if (newl) {
    412 		sched_dequeue(newl);
    413 		KASSERT(lwp_locked(newl, spc->spc_mutex));
    414 		newl->l_stat = LSONPROC;
    415 		newl->l_cpu = l->l_cpu;
    416 		newl->l_flag |= LW_RUNNING;
    417 		lwp_setlock(newl, &spc->spc_lwplock);
    418 	} else {
    419 		newl = l->l_cpu->ci_data.cpu_idlelwp;
    420 		newl->l_stat = LSONPROC;
    421 		newl->l_flag |= LW_RUNNING;
    422 	}
    423 	spc->spc_curpriority = newl->l_usrpri;
    424 	cpu_did_resched();
    425 
    426 	if (l != newl) {
    427 		struct lwp *prevlwp;
    428 
    429 		/*
    430 		 * If the old LWP has been moved to a run queue above,
    431 		 * drop the general purpose LWP lock: it's now locked
    432 		 * by the scheduler lock.
    433 		 *
    434 		 * Otherwise, drop the scheduler lock.  We're done with
    435 		 * the run queues for now.
    436 		 */
    437 		if (l->l_mutex == spc->spc_mutex) {
    438 			mutex_spin_exit(&spc->spc_lwplock);
    439 		} else {
    440 			mutex_spin_exit(spc->spc_mutex);
    441 		}
    442 
    443 		/* Unlocked, but for statistics only. */
    444 		uvmexp.swtch++;
    445 
    446 		/* Save old VM context. */
    447 		pmap_deactivate(l);
    448 
    449 		/* Switch to the new LWP.. */
    450 		l->l_ncsw++;
    451 		l->l_flag &= ~LW_RUNNING;
    452 		oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
    453 		prevlwp = cpu_switchto(l, newl);
    454 
    455 		/*
    456 		 * .. we have switched away and are now back so we must
    457 		 * be the new curlwp.  prevlwp is who we replaced.
    458 		 */
    459 		curlwp = l;
    460 		if (prevlwp != NULL) {
    461 			curcpu()->ci_mtx_oldspl = oldspl;
    462 			lwp_unlock(prevlwp);
    463 		} else {
    464 			splx(oldspl);
    465 		}
    466 
    467 		/* Restore VM context. */
    468 		pmap_activate(l);
    469 		retval = 1;
    470 	} else {
    471 		/* Nothing to do - just unlock and return. */
    472 		mutex_spin_exit(spc->spc_mutex);
    473 		lwp_unlock(l);
    474 		retval = 0;
    475 	}
    476 
    477 	KASSERT(l == curlwp);
    478 	KASSERT(l->l_stat == LSONPROC);
    479 
    480 	/*
    481 	 * XXXSMP If we are using h/w performance counters, restore context.
    482 	 */
    483 #if PERFCTRS
    484 	if (PMC_ENABLED(l->l_proc)) {
    485 		pmc_restore_context(l->l_proc);
    486 	}
    487 #endif
    488 
    489 	/*
    490 	 * We're running again; record our new start time.  We might
    491 	 * be running on a new CPU now, so don't use the cached
    492 	 * schedstate_percpu pointer.
    493 	 */
    494 	SYSCALL_TIME_WAKEUP(l);
    495 	KDASSERT(l->l_cpu == curcpu());
    496 	LOCKDEBUG_BARRIER(NULL, 1);
    497 
    498 	return retval;
    499 }
    500 
    501 /*
    502  * Change process state to be runnable, placing it on the run queue if it is
    503  * in memory, and awakening the swapper if it isn't in memory.
    504  *
    505  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    506  */
    507 void
    508 setrunnable(struct lwp *l)
    509 {
    510 	struct proc *p = l->l_proc;
    511 	sigset_t *ss;
    512 
    513 	KASSERT((l->l_flag & LW_IDLE) == 0);
    514 	KASSERT(mutex_owned(&p->p_smutex));
    515 	KASSERT(lwp_locked(l, NULL));
    516 
    517 	switch (l->l_stat) {
    518 	case LSSTOP:
    519 		/*
    520 		 * If we're being traced (possibly because someone attached us
    521 		 * while we were stopped), check for a signal from the debugger.
    522 		 */
    523 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    524 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    525 				ss = &l->l_sigpend.sp_set;
    526 			else
    527 				ss = &p->p_sigpend.sp_set;
    528 			sigaddset(ss, p->p_xstat);
    529 			signotify(l);
    530 		}
    531 		p->p_nrlwps++;
    532 		break;
    533 	case LSSUSPENDED:
    534 		l->l_flag &= ~LW_WSUSPEND;
    535 		p->p_nrlwps++;
    536 		break;
    537 	case LSSLEEP:
    538 		KASSERT(l->l_wchan != NULL);
    539 		break;
    540 	default:
    541 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    542 	}
    543 
    544 	/*
     545 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    546 	 * again.  If not, mark it as still sleeping.
    547 	 */
    548 	if (l->l_wchan != NULL) {
    549 		l->l_stat = LSSLEEP;
    550 		/* lwp_unsleep() will release the lock. */
    551 		lwp_unsleep(l);
    552 		return;
    553 	}
    554 
    555 	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
    556 
    557 	/*
    558 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    559 	 * about to call mi_switch(), in which case it will yield.
    560 	 */
    561 	if ((l->l_flag & LW_RUNNING) != 0) {
    562 		l->l_stat = LSONPROC;
    563 		l->l_slptime = 0;
    564 		lwp_unlock(l);
    565 		return;
    566 	}
    567 
    568 	/*
    569 	 * Set the LWP runnable.  If it's swapped out, we need to wake the swapper
    570 	 * to bring it back in.  Otherwise, enter it into a run queue.
    571 	 */
    572 	spc_lock(l->l_cpu);
    573 	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
    574 	sched_setrunnable(l);
    575 	l->l_stat = LSRUN;
    576 	l->l_slptime = 0;
    577 
    578 	if (l->l_flag & LW_INMEM) {
    579 		sched_enqueue(l, false);
    580 		resched_cpu(l);
    581 		lwp_unlock(l);
    582 	} else {
    583 		lwp_unlock(l);
    584 		uvm_kick_scheduler();
    585 	}
    586 }
    587 
    588 /*
    589  * suspendsched:
    590  *
     591  *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    592  */
    593 void
    594 suspendsched(void)
    595 {
    596 #ifdef MULTIPROCESSOR
    597 	CPU_INFO_ITERATOR cii;
    598 	struct cpu_info *ci;
    599 #endif
    600 	struct lwp *l;
    601 	struct proc *p;
    602 
    603 	/*
    604 	 * We do this by process in order not to violate the locking rules.
    605 	 */
    606 	mutex_enter(&proclist_mutex);
    607 	PROCLIST_FOREACH(p, &allproc) {
    608 		mutex_enter(&p->p_smutex);
    609 
    610 		if ((p->p_flag & PK_SYSTEM) != 0) {
    611 			mutex_exit(&p->p_smutex);
    612 			continue;
    613 		}
    614 
    615 		p->p_stat = SSTOP;
    616 
    617 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    618 			if (l == curlwp)
    619 				continue;
    620 
    621 			lwp_lock(l);
    622 
    623 			/*
     624 			 * Set LW_WREBOOT so that the LWP will suspend itself
     625 			 * when it tries to return to user mode.  We want to
     626 			 * get as many LWPs as possible to the user/kernel
     627 			 * boundary, so that they will release any locks that
     628 			 * they hold.
    629 			 */
    630 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
    631 
    632 			if (l->l_stat == LSSLEEP &&
    633 			    (l->l_flag & LW_SINTR) != 0) {
    634 				/* setrunnable() will release the lock. */
    635 				setrunnable(l);
    636 				continue;
    637 			}
    638 
    639 			lwp_unlock(l);
    640 		}
    641 
    642 		mutex_exit(&p->p_smutex);
    643 	}
    644 	mutex_exit(&proclist_mutex);
    645 
    646 	/*
    647 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
    648 	 * They'll trap into the kernel and suspend themselves in userret().
    649 	 */
    650 #ifdef MULTIPROCESSOR
    651 	for (CPU_INFO_FOREACH(cii, ci))
    652 		cpu_need_resched(ci, 0);
    653 #else
    654 	cpu_need_resched(curcpu(), 0);
    655 #endif
    656 }
    657 
    658 /*
    659  * sched_kpri:
    660  *
    661  *	Scale a priority level to a kernel priority level, usually
    662  *	for an LWP that is about to sleep.
    663  */
    664 pri_t
    665 sched_kpri(struct lwp *l)
    666 {
    667 	/*
    668 	 * Scale user priorities (127 -> 50) up to kernel priorities
    669 	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
    670 	 * for high priority kthreads.  Kernel priorities passed in
    671 	 * are left "as is".  XXX This is somewhat arbitrary.
    672 	 */
    673 	static const uint8_t kpri_tab[] = {
    674 		 0,   1,   2,   3,   4,   5,   6,   7,
    675 		 8,   9,  10,  11,  12,  13,  14,  15,
    676 		16,  17,  18,  19,  20,  21,  22,  23,
    677 		24,  25,  26,  27,  28,  29,  30,  31,
    678 		32,  33,  34,  35,  36,  37,  38,  39,
    679 		40,  41,  42,  43,  44,  45,  46,  47,
    680 		48,  49,   8,   8,   9,   9,  10,  10,
    681 		11,  11,  12,  12,  13,  14,  14,  15,
    682 		15,  16,  16,  17,  17,  18,  18,  19,
    683 		20,  20,  21,  21,  22,  22,  23,  23,
    684 		24,  24,  25,  26,  26,  27,  27,  28,
    685 		28,  29,  29,  30,  30,  31,  32,  32,
    686 		33,  33,  34,  34,  35,  35,  36,  36,
    687 		37,  38,  38,  39,  39,  40,  40,  41,
    688 		41,  42,  42,  43,  44,  44,  45,  45,
    689 		46,  46,  47,  47,  48,  48,  49,  49,
    690 	};
    691 
    692 	return (pri_t)kpri_tab[l->l_usrpri];
    693 }
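
/*
 * Illustration only (not part of the original file): concrete values from
 * kpri_tab above, assuming the traditional scheme where PUSER is 50 and
 * smaller numbers mean higher priority.
 *
 *	l_usrpri 127 (weakest user priority)   -> kernel priority 49
 *	l_usrpri  50 (PUSER, strongest user)   -> kernel priority  8
 *	l_usrpri  13 (already a kernel level)  -> 13, left "as is"
 */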
    694 
    695 /*
    696  * sched_unsleep:
    697  *
     698  *	This is called when the LWP has not been awoken normally but instead
    699  *	interrupted: for example, if the sleep timed out.  Because of this,
    700  *	it's not a valid action for running or idle LWPs.
    701  */
    702 static void
    703 sched_unsleep(struct lwp *l)
    704 {
    705 
    706 	lwp_unlock(l);
    707 	panic("sched_unsleep");
    708 }
    709 
    710 inline void
    711 resched_cpu(struct lwp *l)
    712 {
    713 	struct cpu_info *ci;
    714 	const pri_t pri = lwp_eprio(l);
    715 
    716 	/*
    717 	 * XXXSMP
    718 	 * Since l->l_cpu persists across a context switch,
    719 	 * this gives us *very weak* processor affinity, in
    720 	 * that we notify the CPU on which the process last
    721 	 * ran that it should try to switch.
    722 	 *
    723 	 * This does not guarantee that the process will run on
    724 	 * that processor next, because another processor might
    725 	 * grab it the next time it performs a context switch.
    726 	 *
    727 	 * This also does not handle the case where its last
    728 	 * CPU is running a higher-priority process, but every
    729 	 * other CPU is running a lower-priority process.  There
    730 	 * are ways to handle this situation, but they're not
    731 	 * currently very pretty, and we also need to weigh the
    732 	 * cost of moving a process from one CPU to another.
    733 	 */
    734 	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
    735 	if (pri < ci->ci_schedstate.spc_curpriority)
    736 		cpu_need_resched(ci, 0);
    737 }
    738 
    739 static void
    740 sched_changepri(struct lwp *l, pri_t pri)
    741 {
    742 
    743 	KASSERT(lwp_locked(l, NULL));
    744 
    745 	l->l_usrpri = pri;
    746 	if (l->l_priority < PUSER)
    747 		return;
    748 
    749 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
    750 		l->l_priority = pri;
    751 		return;
    752 	}
    753 
    754 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    755 
    756 	sched_dequeue(l);
    757 	l->l_priority = pri;
    758 	sched_enqueue(l, false);
    759 	resched_cpu(l);
    760 }
    761 
    762 static void
    763 sched_lendpri(struct lwp *l, pri_t pri)
    764 {
    765 
    766 	KASSERT(lwp_locked(l, NULL));
    767 
    768 	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
    769 		l->l_inheritedprio = pri;
    770 		return;
    771 	}
    772 
    773 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    774 
    775 	sched_dequeue(l);
    776 	l->l_inheritedprio = pri;
    777 	sched_enqueue(l, false);
    778 	resched_cpu(l);
    779 }
    780 
    781 struct lwp *
    782 syncobj_noowner(wchan_t wchan)
    783 {
    784 
    785 	return NULL;
    786 }
    787 
    788 
    789 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    790 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    791 
    792 /*
    793  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    794  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    795  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    796  *
    797  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    798  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    799  *
     800  * If you don't want to bother with the faster/more-accurate formula, you
    801  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    802  * (more general) method of calculating the %age of CPU used by a process.
    803  */
    804 #define	CCPU_SHIFT	(FSHIFT + 1)
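
/*
 * Illustration only (not part of the original file): with sched_pstats()
 * running once per second, each pass multiplies p_pctcpu by
 * ccpu = exp(-1/20), so after 60 passes the retained fraction is
 * exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% has decayed, matching
 * the "decay 95% in 60 seconds" comment above.  With the usual FSHIFT of
 * 11, FSCALE is 2048 and ccpu rounds to 0.95122942 * 2048 ~= 1948.
 */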
    805 
    806 /*
    807  * sched_pstats:
    808  *
     809  *	Update process statistics and check CPU resource allocation.
     810  *	Call the scheduler-specific hook to eventually adjust process/LWP
     811  *	priorities.
    812  *
    813  *	XXXSMP This needs to be reorganised in order to reduce the locking
    814  *	burden.
    815  */
    816 /* ARGSUSED */
    817 void
    818 sched_pstats(void *arg)
    819 {
    820 	struct rlimit *rlim;
    821 	struct lwp *l;
    822 	struct proc *p;
    823 	int minslp, sig, clkhz;
    824 	long runtm;
    825 
    826 	sched_pstats_ticks++;
    827 
    828 	mutex_enter(&proclist_mutex);
    829 	PROCLIST_FOREACH(p, &allproc) {
    830 		/*
    831 		 * Increment time in/out of memory and sleep time (if
    832 		 * sleeping).  We ignore overflow; with 16-bit int's
    833 		 * (remember them?) overflow takes 45 days.
    834 		 */
    835 		minslp = 2;
    836 		mutex_enter(&p->p_smutex);
    837 		mutex_spin_enter(&p->p_stmutex);
    838 		runtm = p->p_rtime.tv_sec;
    839 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    840 			if ((l->l_flag & LW_IDLE) != 0)
    841 				continue;
    842 			lwp_lock(l);
    843 			runtm += l->l_rtime.tv_sec;
    844 			l->l_swtime++;
    845 			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
    846 			    l->l_stat == LSSUSPENDED) {
    847 				l->l_slptime++;
    848 				minslp = min(minslp, l->l_slptime);
    849 			} else
    850 				minslp = 0;
    851 			lwp_unlock(l);
    852 
    853 			/*
     854 			 * l_pctcpu is only for ps.
    855 			 */
    856 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
    857 			if (l->l_slptime < 1) {
    858 				clkhz = stathz != 0 ? stathz : hz;
    859 #if	(FSHIFT >= CCPU_SHIFT)
    860 				l->l_pctcpu += (clkhz == 100) ?
    861 				    ((fixpt_t)l->l_cpticks) <<
    862 				        (FSHIFT - CCPU_SHIFT) :
     863 				    100 * (((fixpt_t) l->l_cpticks)
    864 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
    865 #else
    866 				l->l_pctcpu += ((FSCALE - ccpu) *
    867 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
    868 #endif
    869 				l->l_cpticks = 0;
    870 			}
    871 		}
    872 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
    873 		sched_pstats_hook(p, minslp);
    874 		mutex_spin_exit(&p->p_stmutex);
    875 
    876 		/*
    877 		 * Check if the process exceeds its CPU resource allocation.
    878 		 * If over max, kill it.
    879 		 */
    880 		rlim = &p->p_rlimit[RLIMIT_CPU];
    881 		sig = 0;
    882 		if (runtm >= rlim->rlim_cur) {
    883 			if (runtm >= rlim->rlim_max)
    884 				sig = SIGKILL;
    885 			else {
    886 				sig = SIGXCPU;
    887 				if (rlim->rlim_cur < rlim->rlim_max)
    888 					rlim->rlim_cur += 5;
    889 			}
    890 		}
    891 		mutex_exit(&p->p_smutex);
    892 		if (sig) {
    893 			psignal(p, sig);
    894 		}
    895 	}
    896 	mutex_exit(&proclist_mutex);
    897 	uvm_meter();
    898 	wakeup(&lbolt);
    899 	callout_schedule(&sched_pstats_ch, hz);
    900 }
    901