      1 /*	$NetBSD: kern_synch.c,v 1.232 2008/04/28 15:38:03 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  * Daniel Sieger.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the NetBSD
     23  *	Foundation, Inc. and its contributors.
     24  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25  *    contributors may be used to endorse or promote products derived
     26  *    from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38  * POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 
     41 /*
     42  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
     43  * All rights reserved.
     44  *
     45  * Redistribution and use in source and binary forms, with or without
     46  * modification, are permitted provided that the following conditions
     47  * are met:
     48  * 1. Redistributions of source code must retain the above copyright
     49  *    notice, this list of conditions and the following disclaimer.
     50  * 2. Redistributions in binary form must reproduce the above copyright
     51  *    notice, this list of conditions and the following disclaimer in the
     52  *    documentation and/or other materials provided with the distribution.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  */
     66 
     67 /*-
     68  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     69  *	The Regents of the University of California.  All rights reserved.
     70  * (c) UNIX System Laboratories, Inc.
     71  * All or some portions of this file are derived from material licensed
     72  * to the University of California by American Telephone and Telegraph
     73  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     74  * the permission of UNIX System Laboratories, Inc.
     75  *
     76  * Redistribution and use in source and binary forms, with or without
     77  * modification, are permitted provided that the following conditions
     78  * are met:
     79  * 1. Redistributions of source code must retain the above copyright
     80  *    notice, this list of conditions and the following disclaimer.
     81  * 2. Redistributions in binary form must reproduce the above copyright
     82  *    notice, this list of conditions and the following disclaimer in the
     83  *    documentation and/or other materials provided with the distribution.
     84  * 3. Neither the name of the University nor the names of its contributors
     85  *    may be used to endorse or promote products derived from this software
     86  *    without specific prior written permission.
     87  *
     88  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     89  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     90  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     91  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     92  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     93  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     94  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     95  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     96  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     97  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     98  * SUCH DAMAGE.
     99  *
    100  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
    101  */
    102 
    103 #include <sys/cdefs.h>
    104 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.232 2008/04/28 15:38:03 ad Exp $");
    105 
    106 #include "opt_kstack.h"
    107 #include "opt_lockdebug.h"
    108 #include "opt_multiprocessor.h"
    109 #include "opt_perfctrs.h"
    110 #include "opt_preemption.h"
    111 
    112 #define	__MUTEX_PRIVATE
    113 
    114 #include <sys/param.h>
    115 #include <sys/systm.h>
    116 #include <sys/proc.h>
    117 #include <sys/kernel.h>
    118 #if defined(PERFCTRS)
    119 #include <sys/pmc.h>
    120 #endif
    121 #include <sys/cpu.h>
    122 #include <sys/resourcevar.h>
    123 #include <sys/sched.h>
    124 #include <sys/syscall_stats.h>
    125 #include <sys/sleepq.h>
    126 #include <sys/lockdebug.h>
    127 #include <sys/evcnt.h>
    128 #include <sys/intr.h>
    129 #include <sys/lwpctl.h>
    130 #include <sys/atomic.h>
    131 #include <sys/simplelock.h>
    132 #include <sys/bitops.h>
    133 #include <sys/kmem.h>
    134 #include <sys/sysctl.h>
    135 #include <sys/idle.h>
    136 
    137 #include <uvm/uvm_extern.h>
    138 
    139 #include <dev/lockstat.h>
    140 
    141 /*
     142  * Priority-related definitions.
    143  */
    144 #define	PRI_TS_COUNT	(NPRI_USER)
    145 #define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
    146 #define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)
    147 
    148 #define	PRI_HIGHEST_TS	(MAXPRI_USER)
    149 
    150 /*
    151  * Bits per map.
    152  */
    153 #define	BITMAP_BITS	(32)
    154 #define	BITMAP_SHIFT	(5)
    155 #define	BITMAP_MSB	(0x80000000U)
    156 #define	BITMAP_MASK	(BITMAP_BITS - 1)
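
/*
 * Illustrative note: with the definitions above, the queue-occupancy bit for
 * priority "pri" lives in word (pri >> BITMAP_SHIFT) of r_bitmap below, at
 * mask (BITMAP_MSB >> (pri & BITMAP_MASK)); see sched_getrq(), sched_enqueue()
 * and sched_dequeue().  For example, pri 37 maps to word 1 and mask
 * 0x80000000 >> 5 == 0x04000000.  Higher priorities occupy less significant
 * bits within each word, so ffs() on a word yields its highest occupied
 * priority.
 */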
    157 
    158 /*
    159  * Structures, runqueue.
    160  */
    161 
    162 typedef struct {
    163 	TAILQ_HEAD(, lwp) q_head;
    164 } queue_t;
    165 
    166 typedef struct {
     167 	/* Bitmap */
    168 	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
    169 	/* Counters */
    170 	u_int		r_count;	/* Count of the threads */
    171 	u_int		r_avgcount;	/* Average count of threads */
    172 	u_int		r_mcount;	/* Count of migratable threads */
    173 	/* Runqueues */
    174 	queue_t		r_rt_queue[PRI_RT_COUNT];
    175 	queue_t		r_ts_queue[PRI_TS_COUNT];
    176 } runqueue_t;
    177 
    178 static u_int	sched_unsleep(struct lwp *, bool);
    179 static void	sched_changepri(struct lwp *, pri_t);
    180 static void	sched_lendpri(struct lwp *, pri_t);
    181 static void	*sched_getrq(runqueue_t *, const pri_t);
    182 #ifdef MULTIPROCESSOR
    183 static lwp_t	*sched_catchlwp(void);
    184 static void	sched_balance(void *);
    185 #endif
    186 
    187 syncobj_t sleep_syncobj = {
    188 	SOBJ_SLEEPQ_SORTED,
    189 	sleepq_unsleep,
    190 	sleepq_changepri,
    191 	sleepq_lendpri,
    192 	syncobj_noowner,
    193 };
    194 
    195 syncobj_t sched_syncobj = {
    196 	SOBJ_SLEEPQ_SORTED,
    197 	sched_unsleep,
    198 	sched_changepri,
    199 	sched_lendpri,
    200 	syncobj_noowner,
    201 };
    202 
    203 const int 	schedppq = 1;
    204 callout_t 	sched_pstats_ch;
    205 unsigned	sched_pstats_ticks;
    206 kcondvar_t	lbolt;			/* once a second sleep address */
    207 
    208 /*
    209  * Kernel preemption.
    210  */
    211 #ifdef PREEMPTION
    212 #if 0
    213 int		sched_kpreempt_pri = PRI_USER_RT;
    214 #else
    215 /* XXX disable for now until any bugs are worked out. */
    216 int		sched_kpreempt_pri = 1000;
    217 #endif
    218 
    219 static struct evcnt kpreempt_ev_crit;
    220 static struct evcnt kpreempt_ev_klock;
    221 static struct evcnt kpreempt_ev_ipl;
    222 static struct evcnt kpreempt_ev_immed;
    223 #else
    224 int		sched_kpreempt_pri = INT_MAX;
    225 #endif
    226 int		sched_upreempt_pri = PRI_KERNEL;
    227 
    228 /*
    229  * Migration and balancing.
    230  */
    231 static u_int	cacheht_time;		/* Cache hotness time */
    232 static u_int	min_catch;		/* Minimal LWP count for catching */
    233 static u_int	balance_period;		/* Balance period */
    234 static struct cpu_info *worker_ci;	/* Victim CPU */
    235 #ifdef MULTIPROCESSOR
    236 static struct callout balance_ch;	/* Callout of balancer */
    237 #endif
    238 
    239 /*
    240  * During autoconfiguration or after a panic, a sleep will simply lower the
    241  * priority briefly to allow interrupts, then return.  The priority to be
    242  * used (safepri) is machine-dependent, thus this value is initialized and
    243  * maintained in the machine-dependent layers.  This priority will typically
    244  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    245  * it can be made higher to block network software interrupts after panics.
    246  */
    247 int	safepri;
    248 
    249 /*
    250  * OBSOLETE INTERFACE
    251  *
    252  * General sleep call.  Suspends the current process until a wakeup is
    253  * performed on the specified identifier.  The process will then be made
    254  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
     255  * means no timeout).  If pri includes the PCATCH flag, signals are checked
     256  * before and after sleeping; otherwise signals are not checked.  Returns 0
     257  * if awakened, or EWOULDBLOCK if the timeout expires.  If PCATCH is set and
     258  * a signal needs to be delivered, ERESTART is returned if the current
     259  * system call should be restarted if possible, and EINTR is returned if the
     260  * system call should be interrupted by the signal.
    261  *
     262  * The interlock is held until we are on a sleep queue.  The interlock will
     263  * be locked again before returning to the caller unless the PNORELOCK flag
     264  * is specified, in which case the interlock is always left unlocked upon
     265  * return.
    266  */
    267 int
    268 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    269 	volatile struct simplelock *interlock)
    270 {
    271 	struct lwp *l = curlwp;
    272 	sleepq_t *sq;
    273 	int error;
    274 
    275 	KASSERT((l->l_pflag & LP_INTR) == 0);
    276 
    277 	if (sleepq_dontsleep(l)) {
    278 		(void)sleepq_abort(NULL, 0);
    279 		if ((priority & PNORELOCK) != 0)
    280 			simple_unlock(interlock);
    281 		return 0;
    282 	}
    283 
    284 	l->l_kpriority = true;
    285 	sq = sleeptab_lookup(&sleeptab, ident);
    286 	sleepq_enter(sq, l);
    287 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    288 
    289 	if (interlock != NULL) {
    290 		KASSERT(simple_lock_held(interlock));
    291 		simple_unlock(interlock);
    292 	}
    293 
    294 	error = sleepq_block(timo, priority & PCATCH);
    295 
    296 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    297 		simple_lock(interlock);
    298 
    299 	return error;
    300 }
    301 
    302 int
    303 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    304 	kmutex_t *mtx)
    305 {
    306 	struct lwp *l = curlwp;
    307 	sleepq_t *sq;
    308 	int error;
    309 
    310 	KASSERT((l->l_pflag & LP_INTR) == 0);
    311 
    312 	if (sleepq_dontsleep(l)) {
    313 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    314 		return 0;
    315 	}
    316 
    317 	l->l_kpriority = true;
    318 	sq = sleeptab_lookup(&sleeptab, ident);
    319 	sleepq_enter(sq, l);
    320 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    321 	mutex_exit(mtx);
    322 	error = sleepq_block(timo, priority & PCATCH);
    323 
    324 	if ((priority & PNORELOCK) == 0)
    325 		mutex_enter(mtx);
    326 
    327 	return error;
    328 }
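
/*
 * An illustrative sketch (kept under "#if 0", so not compiled) of the
 * mtsleep()/wakeup() pattern above with a kernel mutex.  The softc type,
 * its members and the wait message "exwait" are made up for the example.
 */
#if 0
struct example_softc {
	kmutex_t	sc_lock;	/* protects sc_ready */
	bool		sc_ready;	/* condition being waited for */
};

/* Waiter: sleep until sc_ready is set, rechecking at least once per second. */
static int
example_wait(struct example_softc *sc)
{
	int error = 0;

	mutex_enter(&sc->sc_lock);
	while (!sc->sc_ready && error == 0) {
		/* mtsleep() drops sc_lock while asleep and re-takes it. */
		error = mtsleep(&sc->sc_ready, PCATCH, "exwait", hz,
		    &sc->sc_lock);
		if (error == EWOULDBLOCK)
			error = 0;	/* timeout - just recheck */
	}
	mutex_exit(&sc->sc_lock);
	return error;
}

/* Waker: set the condition and wake everyone sleeping on the channel. */
static void
example_signal(struct example_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_ready = true;
	mutex_exit(&sc->sc_lock);
	wakeup(&sc->sc_ready);
}
#endif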
    329 
    330 /*
    331  * General sleep call for situations where a wake-up is not expected.
    332  */
    333 int
    334 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    335 {
    336 	struct lwp *l = curlwp;
    337 	sleepq_t *sq;
    338 	int error;
    339 
    340 	if (sleepq_dontsleep(l))
    341 		return sleepq_abort(NULL, 0);
    342 
    343 	if (mtx != NULL)
    344 		mutex_exit(mtx);
    345 	l->l_kpriority = true;
    346 	sq = sleeptab_lookup(&sleeptab, l);
    347 	sleepq_enter(sq, l);
    348 	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
    349 	error = sleepq_block(timo, intr);
    350 	if (mtx != NULL)
    351 		mutex_enter(mtx);
    352 
    353 	return error;
    354 }
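
/*
 * Illustrative sketch: to simply back off for roughly 10 milliseconds with
 * no wakeup expected, a caller could use kpause() as below.  The wait
 * message "exbackoff" is made up; mstohz() is used elsewhere in this file.
 *
 *	(void)kpause("exbackoff", false, mstohz(10), NULL);
 *
 * Passing a non-NULL mutex instead makes kpause() drop it across the sleep
 * and re-take it afterwards, as the code above shows.
 */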
    355 
    356 /*
    357  * OBSOLETE INTERFACE
    358  *
    359  * Make all processes sleeping on the specified identifier runnable.
    360  */
    361 void
    362 wakeup(wchan_t ident)
    363 {
    364 	sleepq_t *sq;
    365 
    366 	if (cold)
    367 		return;
    368 
    369 	sq = sleeptab_lookup(&sleeptab, ident);
    370 	sleepq_wake(sq, ident, (u_int)-1);
    371 }
    372 
    373 /*
    374  * OBSOLETE INTERFACE
    375  *
    376  * Make the highest priority process first in line on the specified
    377  * identifier runnable.
    378  */
    379 void
    380 wakeup_one(wchan_t ident)
    381 {
    382 	sleepq_t *sq;
    383 
    384 	if (cold)
    385 		return;
    386 
    387 	sq = sleeptab_lookup(&sleeptab, ident);
    388 	sleepq_wake(sq, ident, 1);
    389 }
    390 
    391 
    392 /*
    393  * General yield call.  Puts the current process back on its run queue and
    394  * performs a voluntary context switch.  Should only be called when the
     395  * current process explicitly requests it (e.g. sched_yield(2)).
    396  */
    397 void
    398 yield(void)
    399 {
    400 	struct lwp *l = curlwp;
    401 
    402 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    403 	lwp_lock(l);
    404 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    405 	KASSERT(l->l_stat == LSONPROC);
    406 	l->l_kpriority = false;
    407 	(void)mi_switch(l);
    408 	KERNEL_LOCK(l->l_biglocks, l);
    409 }
    410 
    411 /*
    412  * General preemption call.  Puts the current process back on its run queue
    413  * and performs an involuntary context switch.
    414  */
    415 void
    416 preempt(void)
    417 {
    418 	struct lwp *l = curlwp;
    419 
    420 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    421 	lwp_lock(l);
    422 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    423 	KASSERT(l->l_stat == LSONPROC);
    424 	l->l_kpriority = false;
    425 	l->l_nivcsw++;
    426 	(void)mi_switch(l);
    427 	KERNEL_LOCK(l->l_biglocks, l);
    428 }
    429 
    430 #ifdef PREEMPTION
    431 /* XXX Yuck, for lockstat. */
    432 static char	in_critical_section;
    433 static char	kernel_lock_held;
    434 static char	spl_raised;
    435 static char	is_softint;
    436 
    437 /*
    438  * Handle a request made by another agent to preempt the current LWP
    439  * in-kernel.  Usually called when l_dopreempt may be non-zero.
    440  */
    441 bool
    442 kpreempt(uintptr_t where)
    443 {
    444 	uintptr_t failed;
    445 	lwp_t *l;
    446 	int s, dop;
    447 
    448 	l = curlwp;
    449 	failed = 0;
    450 	while ((dop = l->l_dopreempt) != 0) {
    451 		if (l->l_stat != LSONPROC) {
    452 			/*
    453 			 * About to block (or die), let it happen.
    454 			 * Doesn't really count as "preemption has
    455 			 * been blocked", since we're going to
    456 			 * context switch.
    457 			 */
    458 			l->l_dopreempt = 0;
    459 			return true;
    460 		}
    461 		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
    462 			/* Can't preempt idle loop, don't count as failure. */
    463 		    	l->l_dopreempt = 0;
    464 		    	return true;
    465 		}
    466 		if (__predict_false(l->l_nopreempt != 0)) {
    467 			/* LWP holds preemption disabled, explicitly. */
    468 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    469 				atomic_inc_64(&kpreempt_ev_crit.ev_count);
    470 			}
    471 			failed = (uintptr_t)&in_critical_section;
    472 			break;
    473 		}
    474 		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
    475 		    	/* Can't preempt soft interrupts yet. */
    476 		    	l->l_dopreempt = 0;
    477 		    	failed = (uintptr_t)&is_softint;
    478 		    	break;
    479 		}
    480 		s = splsched();
    481 		if (__predict_false(l->l_blcnt != 0 ||
    482 		    curcpu()->ci_biglock_wanted != NULL)) {
    483 			/* Hold or want kernel_lock, code is not MT safe. */
    484 			splx(s);
    485 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    486 				atomic_inc_64(&kpreempt_ev_klock.ev_count);
    487 			}
    488 			failed = (uintptr_t)&kernel_lock_held;
    489 			break;
    490 		}
    491 		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
    492 			/*
    493 			 * It may be that the IPL is too high.
    494 			 * kpreempt_enter() can schedule an
    495 			 * interrupt to retry later.
    496 			 */
    497 			splx(s);
    498 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    499 				atomic_inc_64(&kpreempt_ev_ipl.ev_count);
    500 			}
    501 			failed = (uintptr_t)&spl_raised;
    502 			break;
    503 		}
    504 		/* Do it! */
    505 		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
    506 			atomic_inc_64(&kpreempt_ev_immed.ev_count);
    507 		}
    508 		lwp_lock(l);
    509 		mi_switch(l);
    510 		l->l_nopreempt++;
    511 		splx(s);
    512 
    513 		/* Take care of any MD cleanup. */
    514 		cpu_kpreempt_exit(where);
    515 		l->l_nopreempt--;
    516 	}
    517 
    518 	/* Record preemption failure for reporting via lockstat. */
    519 	if (__predict_false(failed)) {
    520 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
    521 		int lsflag = 0;
    522 		LOCKSTAT_ENTER(lsflag);
    523 		/* Might recurse, make it atomic. */
    524 		if (__predict_false(lsflag)) {
    525 			if (where == 0) {
    526 				where = (uintptr_t)__builtin_return_address(0);
    527 			}
    528 			if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr,
    529 			    NULL, (void *)where) == NULL) {
    530 				LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
    531 				l->l_pfaillock = failed;
    532 			}
    533 		}
    534 		LOCKSTAT_EXIT(lsflag);
    535 	}
    536 
    537 	return failed;
    538 }
    539 
    540 /*
    541  * Return true if preemption is explicitly disabled.
    542  */
    543 bool
    544 kpreempt_disabled(void)
    545 {
    546 	lwp_t *l;
    547 
    548 	l = curlwp;
    549 
    550 	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
    551 	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
    552 }
    553 #else
    554 bool
    555 kpreempt(uintptr_t where)
    556 {
    557 
    558 	panic("kpreempt");
    559 	return true;
    560 }
    561 
    562 bool
    563 kpreempt_disabled(void)
    564 {
    565 
    566 	return true;
    567 }
    568 #endif
    569 
    570 /*
    571  * Disable kernel preemption.
    572  */
    573 void
    574 kpreempt_disable(void)
    575 {
    576 
    577 	KPREEMPT_DISABLE(curlwp);
    578 }
    579 
    580 /*
    581  * Reenable kernel preemption.
    582  */
    583 void
    584 kpreempt_enable(void)
    585 {
    586 
    587 	KPREEMPT_ENABLE(curlwp);
    588 }
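
/*
 * Illustrative sketch: code that must not migrate to another CPU (for
 * example, while it works on curcpu()-local data) brackets the region with
 * these calls, as sched_curcpu_runnable_p() later in this file does:
 *
 *	kpreempt_disable();
 *	... use per-CPU state ...
 *	kpreempt_enable();
 */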
    589 
    590 /*
    591  * Compute the amount of time during which the current lwp was running.
    592  *
    593  * - update l_rtime unless it's an idle lwp.
    594  */
    595 
    596 void
    597 updatertime(lwp_t *l, const struct bintime *now)
    598 {
    599 
    600 	if ((l->l_flag & LW_IDLE) != 0)
    601 		return;
    602 
    603 	/* rtime += now - stime */
    604 	bintime_add(&l->l_rtime, now);
    605 	bintime_sub(&l->l_rtime, &l->l_stime);
    606 }
    607 
    608 /*
    609  * The machine independent parts of context switch.
    610  *
    611  * Returns 1 if another LWP was actually run.
    612  */
    613 int
    614 mi_switch(lwp_t *l)
    615 {
    616 	struct cpu_info *ci, *tci = NULL;
    617 	struct schedstate_percpu *spc;
    618 	struct lwp *newl;
    619 	int retval, oldspl;
    620 	struct bintime bt;
    621 	bool returning;
    622 
    623 	KASSERT(lwp_locked(l, NULL));
    624 	KASSERT(kpreempt_disabled());
    625 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    626 
    627 #ifdef KSTACK_CHECK_MAGIC
    628 	kstack_check_magic(l);
    629 #endif
    630 
    631 	binuptime(&bt);
    632 
    633 	KASSERT(l->l_cpu == curcpu());
    634 	ci = l->l_cpu;
    635 	spc = &ci->ci_schedstate;
    636 	returning = false;
    637 	newl = NULL;
    638 
    639 	/*
    640 	 * If we have been asked to switch to a specific LWP, then there
    641 	 * is no need to inspect the run queues.  If a soft interrupt is
    642 	 * blocking, then return to the interrupted thread without adjusting
    643 	 * VM context or its start time: neither have been changed in order
    644 	 * to take the interrupt.
    645 	 */
    646 	if (l->l_switchto != NULL) {
    647 		if ((l->l_pflag & LP_INTR) != 0) {
    648 			returning = true;
    649 			softint_block(l);
    650 			if ((l->l_flag & LW_TIMEINTR) != 0)
    651 				updatertime(l, &bt);
    652 		}
    653 		newl = l->l_switchto;
    654 		l->l_switchto = NULL;
    655 	}
    656 #ifndef __HAVE_FAST_SOFTINTS
    657 	else if (ci->ci_data.cpu_softints != 0) {
    658 		/* There are pending soft interrupts, so pick one. */
    659 		newl = softint_picklwp();
    660 		newl->l_stat = LSONPROC;
    661 		newl->l_flag |= LW_RUNNING;
    662 	}
    663 #endif	/* !__HAVE_FAST_SOFTINTS */
    664 
    665 	/* Count time spent in current system call */
    666 	if (!returning) {
    667 		SYSCALL_TIME_SLEEP(l);
    668 
    669 		/*
    670 		 * XXXSMP If we are using h/w performance counters,
    671 		 * save context.
    672 		 */
    673 #if PERFCTRS
    674 		if (PMC_ENABLED(l->l_proc)) {
    675 			pmc_save_context(l->l_proc);
    676 		}
    677 #endif
    678 		updatertime(l, &bt);
    679 	}
    680 
    681 	/*
    682 	 * If on the CPU and we have gotten this far, then we must yield.
    683 	 */
    684 	KASSERT(l->l_stat != LSRUN);
    685 	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
    686 		KASSERT(lwp_locked(l, spc->spc_lwplock));
    687 
    688 		if (l->l_target_cpu == l->l_cpu) {
    689 			l->l_target_cpu = NULL;
    690 		} else {
    691 			tci = l->l_target_cpu;
    692 		}
    693 
    694 		if (__predict_false(tci != NULL)) {
    695 			/* Double-lock the runqueues */
    696 			spc_dlock(ci, tci);
    697 		} else {
    698 			/* Lock the runqueue */
    699 			spc_lock(ci);
    700 		}
    701 
    702 		if ((l->l_flag & LW_IDLE) == 0) {
    703 			l->l_stat = LSRUN;
    704 			if (__predict_false(tci != NULL)) {
    705 				/*
     706 				 * Set the new CPU and its lock, and clear
     707 				 * l_target_cpu - the thread will be enqueued
     708 				 * on the run queue of the target CPU.
    709 				 */
    710 				l->l_cpu = tci;
    711 				lwp_setlock(l, tci->ci_schedstate.spc_mutex);
    712 				l->l_target_cpu = NULL;
    713 			} else {
    714 				lwp_setlock(l, spc->spc_mutex);
    715 			}
    716 			sched_enqueue(l, true);
    717 		} else {
    718 			KASSERT(tci == NULL);
    719 			l->l_stat = LSIDL;
    720 		}
    721 	} else {
    722 		/* Lock the runqueue */
    723 		spc_lock(ci);
    724 	}
    725 
    726 	/*
     727 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
     728 	 * If no LWP is runnable, select the idle LWP.
     729 	 *
     730 	 * Note that spc_lwplock might not necessarily be held, and that the
     731 	 * new thread will be unlocked after its LWP lock is set.
    732 	 */
    733 	if (newl == NULL) {
    734 		newl = sched_nextlwp();
    735 		if (newl != NULL) {
    736 			sched_dequeue(newl);
    737 			KASSERT(lwp_locked(newl, spc->spc_mutex));
    738 			newl->l_stat = LSONPROC;
    739 			newl->l_cpu = ci;
    740 			newl->l_flag |= LW_RUNNING;
    741 			lwp_setlock(newl, spc->spc_lwplock);
    742 		} else {
    743 			newl = ci->ci_data.cpu_idlelwp;
    744 			newl->l_stat = LSONPROC;
    745 			newl->l_flag |= LW_RUNNING;
    746 		}
    747 		/*
    748 		 * Only clear want_resched if there are no
    749 		 * pending (slow) software interrupts.
    750 		 */
    751 		ci->ci_want_resched = ci->ci_data.cpu_softints;
    752 		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    753 		spc->spc_curpriority = lwp_eprio(newl);
    754 	}
    755 
    756 	/* Items that must be updated with the CPU locked. */
    757 	if (!returning) {
    758 		/* Update the new LWP's start time. */
    759 		newl->l_stime = bt;
    760 
    761 		/*
    762 		 * ci_curlwp changes when a fast soft interrupt occurs.
    763 		 * We use cpu_onproc to keep track of which kernel or
    764 		 * user thread is running 'underneath' the software
    765 		 * interrupt.  This is important for time accounting,
    766 		 * itimers and forcing user threads to preempt (aston).
    767 		 */
    768 		ci->ci_data.cpu_onproc = newl;
    769 	}
    770 
    771 	/* Kernel preemption related tasks. */
    772 	l->l_dopreempt = 0;
    773 	if (__predict_false(l->l_pfailaddr != 0)) {
    774 		LOCKSTAT_FLAG(lsflag);
    775 		LOCKSTAT_ENTER(lsflag);
    776 		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
    777 		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
    778 		    1, l->l_pfailtime, l->l_pfailaddr);
    779 		LOCKSTAT_EXIT(lsflag);
    780 		l->l_pfailtime = 0;
    781 		l->l_pfaillock = 0;
    782 		l->l_pfailaddr = 0;
    783 	}
    784 
    785 	if (l != newl) {
    786 		struct lwp *prevlwp;
    787 
    788 		/* Release all locks, but leave the current LWP locked */
    789 		if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
    790 			/*
    791 			 * In case of migration, drop the local runqueue
     792 			 * lock - the thread is now on another runqueue.
    793 			 */
    794 			if (__predict_false(tci != NULL))
    795 				spc_unlock(ci);
    796 			/*
    797 			 * Drop spc_lwplock, if the current LWP has been moved
    798 			 * to the run queue (it is now locked by spc_mutex).
    799 			 */
    800 			mutex_spin_exit(spc->spc_lwplock);
    801 		} else {
    802 			/*
    803 			 * Otherwise, drop the spc_mutex, we are done with the
    804 			 * run queues.
    805 			 */
    806 			mutex_spin_exit(spc->spc_mutex);
    807 			KASSERT(tci == NULL);
    808 		}
    809 
    810 		/*
     811 		 * Mark that a context switch is going to be performed
    812 		 * for this LWP, to protect it from being switched
    813 		 * to on another CPU.
    814 		 */
    815 		KASSERT(l->l_ctxswtch == 0);
    816 		l->l_ctxswtch = 1;
    817 		l->l_ncsw++;
    818 		l->l_flag &= ~LW_RUNNING;
    819 
    820 		/*
    821 		 * Increase the count of spin-mutexes before the release
    822 		 * of the last lock - we must remain at IPL_SCHED during
    823 		 * the context switch.
    824 		 */
    825 		oldspl = MUTEX_SPIN_OLDSPL(ci);
    826 		ci->ci_mtx_count--;
    827 		lwp_unlock(l);
    828 
    829 		/* Count the context switch on this CPU. */
    830 		ci->ci_data.cpu_nswtch++;
    831 
    832 		/* Update status for lwpctl, if present. */
    833 		if (l->l_lwpctl != NULL)
    834 			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
    835 
    836 		/*
    837 		 * Save old VM context, unless a soft interrupt
    838 		 * handler is blocking.
    839 		 */
    840 		if (!returning)
    841 			pmap_deactivate(l);
    842 
    843 		/*
     844 		 * We may need to spin-wait if 'newl' is still
    845 		 * context switching on another CPU.
    846 		 */
    847 		if (newl->l_ctxswtch != 0) {
    848 			u_int count;
    849 			count = SPINLOCK_BACKOFF_MIN;
    850 			while (newl->l_ctxswtch)
    851 				SPINLOCK_BACKOFF(count);
    852 		}
    853 
     854 		/* Switch to the new LWP. */
    855 		prevlwp = cpu_switchto(l, newl, returning);
    856 		ci = curcpu();
    857 
    858 		/*
    859 		 * Switched away - we have new curlwp.
    860 		 * Restore VM context and IPL.
    861 		 */
    862 		pmap_activate(l);
    863 		if (prevlwp != NULL) {
    864 			/* Normalize the count of the spin-mutexes */
    865 			ci->ci_mtx_count++;
    866 			/* Unmark the state of context switch */
    867 			membar_exit();
    868 			prevlwp->l_ctxswtch = 0;
    869 		}
    870 
    871 		/* Update status for lwpctl, if present. */
    872 		if (l->l_lwpctl != NULL) {
    873 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
    874 			l->l_lwpctl->lc_pctr++;
    875 		}
    876 
    877 		KASSERT(l->l_cpu == ci);
    878 		splx(oldspl);
    879 		retval = 1;
    880 	} else {
    881 		/* Nothing to do - just unlock and return. */
    882 		KASSERT(tci == NULL);
    883 		spc_unlock(ci);
    884 		lwp_unlock(l);
    885 		retval = 0;
    886 	}
    887 
    888 	KASSERT(l == curlwp);
    889 	KASSERT(l->l_stat == LSONPROC);
    890 
    891 	/*
    892 	 * XXXSMP If we are using h/w performance counters, restore context.
    893 	 * XXXSMP preemption problem.
    894 	 */
    895 #if PERFCTRS
    896 	if (PMC_ENABLED(l->l_proc)) {
    897 		pmc_restore_context(l->l_proc);
    898 	}
    899 #endif
    900 	SYSCALL_TIME_WAKEUP(l);
    901 	LOCKDEBUG_BARRIER(NULL, 1);
    902 
    903 	return retval;
    904 }
    905 
    906 /*
    907  * Change process state to be runnable, placing it on the run queue if it is
    908  * in memory, and awakening the swapper if it isn't in memory.
    909  *
    910  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    911  */
    912 void
    913 setrunnable(struct lwp *l)
    914 {
    915 	struct proc *p = l->l_proc;
    916 	struct cpu_info *ci;
    917 	sigset_t *ss;
    918 
    919 	KASSERT((l->l_flag & LW_IDLE) == 0);
    920 	KASSERT(mutex_owned(p->p_lock));
    921 	KASSERT(lwp_locked(l, NULL));
    922 	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
    923 
    924 	switch (l->l_stat) {
    925 	case LSSTOP:
    926 		/*
    927 		 * If we're being traced (possibly because someone attached us
    928 		 * while we were stopped), check for a signal from the debugger.
    929 		 */
    930 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    931 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    932 				ss = &l->l_sigpend.sp_set;
    933 			else
    934 				ss = &p->p_sigpend.sp_set;
    935 			sigaddset(ss, p->p_xstat);
    936 			signotify(l);
    937 		}
    938 		p->p_nrlwps++;
    939 		break;
    940 	case LSSUSPENDED:
    941 		l->l_flag &= ~LW_WSUSPEND;
    942 		p->p_nrlwps++;
    943 		cv_broadcast(&p->p_lwpcv);
    944 		break;
    945 	case LSSLEEP:
    946 		KASSERT(l->l_wchan != NULL);
    947 		break;
    948 	default:
    949 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    950 	}
    951 
    952 	/*
     953 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    954 	 * again.  If not, mark it as still sleeping.
    955 	 */
    956 	if (l->l_wchan != NULL) {
    957 		l->l_stat = LSSLEEP;
    958 		/* lwp_unsleep() will release the lock. */
    959 		lwp_unsleep(l, true);
    960 		return;
    961 	}
    962 
    963 	/*
    964 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    965 	 * about to call mi_switch(), in which case it will yield.
    966 	 */
    967 	if ((l->l_flag & LW_RUNNING) != 0) {
    968 		l->l_stat = LSONPROC;
    969 		l->l_slptime = 0;
    970 		lwp_unlock(l);
    971 		return;
    972 	}
    973 
    974 	/*
     975 	 * Look for a CPU to run on.
    976 	 * Set the LWP runnable.
    977 	 */
    978 	ci = sched_takecpu(l);
    979 	l->l_cpu = ci;
    980 	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
    981 		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
    982 		lwp_lock(l);
    983 	}
    984 	sched_setrunnable(l);
    985 	l->l_stat = LSRUN;
    986 	l->l_slptime = 0;
    987 
    988 	/*
     989 	 * If the thread is swapped out, wake the swapper to bring it back in.
    990 	 * Otherwise, enter it into a run queue.
    991 	 */
    992 	if (l->l_flag & LW_INMEM) {
    993 		sched_enqueue(l, false);
    994 		resched_cpu(l);
    995 		lwp_unlock(l);
    996 	} else {
    997 		lwp_unlock(l);
    998 		uvm_kick_scheduler();
    999 	}
   1000 }
   1001 
   1002 /*
   1003  * suspendsched:
   1004  *
   1005  *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
   1006  */
   1007 void
   1008 suspendsched(void)
   1009 {
   1010 	CPU_INFO_ITERATOR cii;
   1011 	struct cpu_info *ci;
   1012 	struct lwp *l;
   1013 	struct proc *p;
   1014 
   1015 	/*
   1016 	 * We do this by process in order not to violate the locking rules.
   1017 	 */
   1018 	mutex_enter(proc_lock);
   1019 	PROCLIST_FOREACH(p, &allproc) {
   1020 		mutex_enter(p->p_lock);
   1021 
   1022 		if ((p->p_flag & PK_SYSTEM) != 0) {
   1023 			mutex_exit(p->p_lock);
   1024 			continue;
   1025 		}
   1026 
   1027 		p->p_stat = SSTOP;
   1028 
   1029 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1030 			if (l == curlwp)
   1031 				continue;
   1032 
   1033 			lwp_lock(l);
   1034 
   1035 			/*
    1036 			 * Set LW_WREBOOT so that the LWP will suspend itself
   1037 			 * when it tries to return to user mode.  We want to
    1038 			 * try to get as many LWPs as possible to
   1039 			 * the user / kernel boundary, so that they will
   1040 			 * release any locks that they hold.
   1041 			 */
   1042 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
   1043 
   1044 			if (l->l_stat == LSSLEEP &&
   1045 			    (l->l_flag & LW_SINTR) != 0) {
   1046 				/* setrunnable() will release the lock. */
   1047 				setrunnable(l);
   1048 				continue;
   1049 			}
   1050 
   1051 			lwp_unlock(l);
   1052 		}
   1053 
   1054 		mutex_exit(p->p_lock);
   1055 	}
   1056 	mutex_exit(proc_lock);
   1057 
   1058 	/*
   1059 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
   1060 	 * They'll trap into the kernel and suspend themselves in userret().
   1061 	 */
   1062 	for (CPU_INFO_FOREACH(cii, ci)) {
   1063 		spc_lock(ci);
   1064 		cpu_need_resched(ci, RESCHED_IMMED);
   1065 		spc_unlock(ci);
   1066 	}
   1067 }
   1068 
   1069 /*
   1070  * sched_unsleep:
   1071  *
    1072  *	This is called when the LWP has not been awoken normally but instead
   1073  *	interrupted: for example, if the sleep timed out.  Because of this,
   1074  *	it's not a valid action for running or idle LWPs.
   1075  */
   1076 static u_int
   1077 sched_unsleep(struct lwp *l, bool cleanup)
   1078 {
   1079 
   1080 	lwp_unlock(l);
   1081 	panic("sched_unsleep");
   1082 }
   1083 
   1084 void
   1085 resched_cpu(struct lwp *l)
   1086 {
   1087 	struct cpu_info *ci;
   1088 
   1089 	/*
   1090 	 * XXXSMP
   1091 	 * Since l->l_cpu persists across a context switch,
   1092 	 * this gives us *very weak* processor affinity, in
   1093 	 * that we notify the CPU on which the process last
   1094 	 * ran that it should try to switch.
   1095 	 *
   1096 	 * This does not guarantee that the process will run on
   1097 	 * that processor next, because another processor might
   1098 	 * grab it the next time it performs a context switch.
   1099 	 *
   1100 	 * This also does not handle the case where its last
   1101 	 * CPU is running a higher-priority process, but every
   1102 	 * other CPU is running a lower-priority process.  There
   1103 	 * are ways to handle this situation, but they're not
   1104 	 * currently very pretty, and we also need to weigh the
   1105 	 * cost of moving a process from one CPU to another.
   1106 	 */
   1107 	ci = l->l_cpu;
   1108 	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
   1109 		cpu_need_resched(ci, 0);
   1110 }
   1111 
   1112 static void
   1113 sched_changepri(struct lwp *l, pri_t pri)
   1114 {
   1115 
   1116 	KASSERT(lwp_locked(l, NULL));
   1117 
   1118 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
   1119 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1120 		sched_dequeue(l);
   1121 		l->l_priority = pri;
   1122 		sched_enqueue(l, false);
   1123 	} else {
   1124 		l->l_priority = pri;
   1125 	}
   1126 	resched_cpu(l);
   1127 }
   1128 
   1129 static void
   1130 sched_lendpri(struct lwp *l, pri_t pri)
   1131 {
   1132 
   1133 	KASSERT(lwp_locked(l, NULL));
   1134 
   1135 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
   1136 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1137 		sched_dequeue(l);
   1138 		l->l_inheritedprio = pri;
   1139 		sched_enqueue(l, false);
   1140 	} else {
   1141 		l->l_inheritedprio = pri;
   1142 	}
   1143 	resched_cpu(l);
   1144 }
   1145 
   1146 struct lwp *
   1147 syncobj_noowner(wchan_t wchan)
   1148 {
   1149 
   1150 	return NULL;
   1151 }
   1152 
   1153 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
   1154 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
   1155 
   1156 /*
   1157  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
   1158  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
   1159  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
   1160  *
   1161  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
   1162  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
   1163  *
    1164  * If you don't want to bother with the faster/more-accurate formula, you
   1165  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
   1166  * (more general) method of calculating the %age of CPU used by a process.
   1167  */
   1168 #define	CCPU_SHIFT	(FSHIFT + 1)
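
/*
 * Illustrative arithmetic: sched_pstats() below runs once per second, and
 * each pass multiplies p_pctcpu by ccpu = exp(-1/20) ~= 0.95123.  After 60
 * passes the surviving fraction is exp(-60/20) = exp(-3) ~= 0.0498, which
 * matches the "decay 95% ... in 60 seconds" note above.
 */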
   1169 
   1170 /*
   1171  * sched_pstats:
   1172  *
   1173  * Update process statistics and check CPU resource allocation.
   1174  * Call scheduler-specific hook to eventually adjust process/LWP
   1175  * priorities.
   1176  */
   1177 /* ARGSUSED */
   1178 void
   1179 sched_pstats(void *arg)
   1180 {
   1181 	struct rlimit *rlim;
   1182 	struct lwp *l;
   1183 	struct proc *p;
   1184 	int sig, clkhz;
   1185 	long runtm;
   1186 
   1187 	sched_pstats_ticks++;
   1188 
   1189 	mutex_enter(proc_lock);
   1190 	PROCLIST_FOREACH(p, &allproc) {
   1191 		/*
   1192 		 * Increment time in/out of memory and sleep time (if
   1193 		 * sleeping).  We ignore overflow; with 16-bit int's
   1194 		 * (remember them?) overflow takes 45 days.
   1195 		 */
   1196 		mutex_enter(p->p_lock);
   1197 		mutex_spin_enter(&p->p_stmutex);
   1198 		runtm = p->p_rtime.sec;
   1199 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1200 			if ((l->l_flag & LW_IDLE) != 0)
   1201 				continue;
   1202 			lwp_lock(l);
   1203 			runtm += l->l_rtime.sec;
   1204 			l->l_swtime++;
   1205 			sched_pstats_hook(l);
   1206 			lwp_unlock(l);
   1207 
   1208 			/*
   1209 			 * p_pctcpu is only for ps.
   1210 			 */
   1211 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
   1212 			if (l->l_slptime < 1) {
   1213 				clkhz = stathz != 0 ? stathz : hz;
   1214 #if	(FSHIFT >= CCPU_SHIFT)
   1215 				l->l_pctcpu += (clkhz == 100) ?
   1216 				    ((fixpt_t)l->l_cpticks) <<
   1217 				        (FSHIFT - CCPU_SHIFT) :
    1218 				    100 * (((fixpt_t)l->l_cpticks)
   1219 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
   1220 #else
   1221 				l->l_pctcpu += ((FSCALE - ccpu) *
   1222 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
   1223 #endif
   1224 				l->l_cpticks = 0;
   1225 			}
   1226 		}
   1227 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
   1228 		mutex_spin_exit(&p->p_stmutex);
   1229 
   1230 		/*
   1231 		 * Check if the process exceeds its CPU resource allocation.
   1232 		 * If over max, kill it.
   1233 		 */
   1234 		rlim = &p->p_rlimit[RLIMIT_CPU];
   1235 		sig = 0;
   1236 		if (runtm >= rlim->rlim_cur) {
   1237 			if (runtm >= rlim->rlim_max)
   1238 				sig = SIGKILL;
   1239 			else {
   1240 				sig = SIGXCPU;
   1241 				if (rlim->rlim_cur < rlim->rlim_max)
   1242 					rlim->rlim_cur += 5;
   1243 			}
   1244 		}
   1245 		mutex_exit(p->p_lock);
   1246 		if (sig)
   1247 			psignal(p, sig);
   1248 	}
   1249 	mutex_exit(proc_lock);
   1250 	uvm_meter();
   1251 	cv_wakeup(&lbolt);
   1252 	callout_schedule(&sched_pstats_ch, hz);
   1253 }
   1254 
   1255 void
   1256 sched_init(void)
   1257 {
   1258 
   1259 	cv_init(&lbolt, "lbolt");
   1260 	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
   1261 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
   1262 
   1263 	/* Balancing */
   1264 	worker_ci = curcpu();
   1265 	cacheht_time = mstohz(5);		/* ~5 ms  */
   1266 	balance_period = mstohz(300);		/* ~300ms */
   1267 
   1268 	/* Minimal count of LWPs for catching: log2(count of CPUs) */
   1269 	min_catch = min(ilog2(ncpu), 4);
   1270 
   1271 #ifdef PREEMPTION
   1272 	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_INTR, NULL,
   1273 	   "kpreempt", "defer: critical section");
   1274 	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_INTR, NULL,
   1275 	   "kpreempt", "defer: kernel_lock");
   1276 	evcnt_attach_dynamic(&kpreempt_ev_ipl, EVCNT_TYPE_INTR, NULL,
   1277 	   "kpreempt", "defer: IPL");
   1278 	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_INTR, NULL,
   1279 	   "kpreempt", "immediate");
   1280 #endif
   1281 
   1282 	/* Initialize balancing callout and run it */
   1283 #ifdef MULTIPROCESSOR
   1284 	callout_init(&balance_ch, CALLOUT_MPSAFE);
   1285 	callout_setfunc(&balance_ch, sched_balance, NULL);
   1286 	callout_schedule(&balance_ch, balance_period);
   1287 #endif
   1288 	sched_pstats(NULL);
   1289 }
   1290 
   1291 SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
   1292 {
   1293 	const struct sysctlnode *node = NULL;
   1294 
   1295 	sysctl_createv(clog, 0, NULL, NULL,
   1296 		CTLFLAG_PERMANENT,
   1297 		CTLTYPE_NODE, "kern", NULL,
   1298 		NULL, 0, NULL, 0,
   1299 		CTL_KERN, CTL_EOL);
   1300 	sysctl_createv(clog, 0, NULL, &node,
   1301 		CTLFLAG_PERMANENT,
   1302 		CTLTYPE_NODE, "sched",
   1303 		SYSCTL_DESCR("Scheduler options"),
   1304 		NULL, 0, NULL, 0,
   1305 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1306 
   1307 	if (node == NULL)
   1308 		return;
   1309 
   1310 	sysctl_createv(clog, 0, &node, NULL,
   1311 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1312 		CTLTYPE_INT, "cacheht_time",
   1313 		SYSCTL_DESCR("Cache hotness time (in ticks)"),
   1314 		NULL, 0, &cacheht_time, 0,
   1315 		CTL_CREATE, CTL_EOL);
   1316 	sysctl_createv(clog, 0, &node, NULL,
   1317 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1318 		CTLTYPE_INT, "balance_period",
   1319 		SYSCTL_DESCR("Balance period (in ticks)"),
   1320 		NULL, 0, &balance_period, 0,
   1321 		CTL_CREATE, CTL_EOL);
   1322 	sysctl_createv(clog, 0, &node, NULL,
   1323 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1324 		CTLTYPE_INT, "min_catch",
   1325 		SYSCTL_DESCR("Minimal count of threads for catching"),
   1326 		NULL, 0, &min_catch, 0,
   1327 		CTL_CREATE, CTL_EOL);
   1328 	sysctl_createv(clog, 0, &node, NULL,
   1329 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1330 		CTLTYPE_INT, "timesoftints",
   1331 		SYSCTL_DESCR("Track CPU time for soft interrupts"),
   1332 		NULL, 0, &softint_timing, 0,
   1333 		CTL_CREATE, CTL_EOL);
   1334 	sysctl_createv(clog, 0, &node, NULL,
   1335 #ifdef PREEMPTION
   1336 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1337 #else
   1338 		CTLFLAG_PERMANENT,
   1339 #endif
   1340 		CTLTYPE_INT, "kpreempt_pri",
   1341 		SYSCTL_DESCR("Minimum priority to trigger kernel preemption"),
   1342 		NULL, 0, &sched_kpreempt_pri, 0,
   1343 		CTL_CREATE, CTL_EOL);
   1344 	sysctl_createv(clog, 0, &node, NULL,
   1345 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1346 		CTLTYPE_INT, "upreempt_pri",
   1347 		SYSCTL_DESCR("Minimum priority to trigger user preemption"),
   1348 		NULL, 0, &sched_upreempt_pri, 0,
   1349 		CTL_CREATE, CTL_EOL);
   1350 }
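
/*
 * Illustrative usage: assuming the node names created above, the knobs
 * appear to userland under "kern.sched" and can be inspected or tuned with
 * sysctl(8), for example:
 *
 *	# sysctl kern.sched.balance_period
 *	# sysctl -w kern.sched.cacheht_time=10
 */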
   1351 
   1352 void
   1353 sched_cpuattach(struct cpu_info *ci)
   1354 {
   1355 	runqueue_t *ci_rq;
   1356 	void *rq_ptr;
   1357 	u_int i, size;
   1358 
   1359 	if (ci->ci_schedstate.spc_lwplock == NULL) {
   1360 		ci->ci_schedstate.spc_lwplock =
   1361 		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1362 	}
   1363 	if (ci == lwp0.l_cpu) {
   1364 		/* Initialize the scheduler structure of the primary LWP */
   1365 		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
   1366 	}
   1367 	if (ci->ci_schedstate.spc_mutex != NULL) {
   1368 		/* Already initialized. */
   1369 		return;
   1370 	}
   1371 
   1372 	/* Allocate the run queue */
   1373 	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
   1374 	rq_ptr = kmem_zalloc(size, KM_SLEEP);
   1375 	if (rq_ptr == NULL) {
   1376 		panic("sched_cpuattach: could not allocate the runqueue");
   1377 	}
   1378 	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));
   1379 
   1380 	/* Initialize run queues */
   1381 	ci->ci_schedstate.spc_mutex =
   1382 	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1383 	for (i = 0; i < PRI_RT_COUNT; i++)
   1384 		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
   1385 	for (i = 0; i < PRI_TS_COUNT; i++)
   1386 		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
   1387 
   1388 	ci->ci_schedstate.spc_sched_info = ci_rq;
   1389 }
   1390 
   1391 /*
   1392  * Control of the runqueue.
   1393  */
   1394 
   1395 static void *
   1396 sched_getrq(runqueue_t *ci_rq, const pri_t prio)
   1397 {
   1398 
   1399 	KASSERT(prio < PRI_COUNT);
   1400 	return (prio <= PRI_HIGHEST_TS) ?
   1401 	    &ci_rq->r_ts_queue[prio].q_head :
   1402 	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
   1403 }
   1404 
   1405 void
   1406 sched_enqueue(struct lwp *l, bool swtch)
   1407 {
   1408 	runqueue_t *ci_rq;
   1409 	struct schedstate_percpu *spc;
   1410 	TAILQ_HEAD(, lwp) *q_head;
   1411 	const pri_t eprio = lwp_eprio(l);
   1412 	struct cpu_info *ci;
   1413 	int type;
   1414 
   1415 	ci = l->l_cpu;
   1416 	spc = &ci->ci_schedstate;
   1417 	ci_rq = spc->spc_sched_info;
   1418 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1419 
   1420 	/* Update the last run time on switch */
   1421 	if (__predict_true(swtch == true)) {
    1422 		l->l_rticksum += (hardclock_ticks - l->l_rticks);
    1423 		l->l_rticks = hardclock_ticks;
   1424 	} else if (l->l_rticks == 0)
   1425 		l->l_rticks = hardclock_ticks;
   1426 
   1427 	/* Enqueue the thread */
   1428 	q_head = sched_getrq(ci_rq, eprio);
   1429 	if (TAILQ_EMPTY(q_head)) {
   1430 		u_int i;
   1431 		uint32_t q;
   1432 
   1433 		/* Mark bit */
   1434 		i = eprio >> BITMAP_SHIFT;
   1435 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1436 		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
   1437 		ci_rq->r_bitmap[i] |= q;
   1438 	}
   1439 	TAILQ_INSERT_TAIL(q_head, l, l_runq);
   1440 	ci_rq->r_count++;
   1441 	if ((l->l_pflag & LP_BOUND) == 0)
   1442 		ci_rq->r_mcount++;
   1443 
   1444 	/*
   1445 	 * Update the value of highest priority in the runqueue,
   1446 	 * if priority of this thread is higher.
   1447 	 */
   1448 	if (eprio > spc->spc_maxpriority)
   1449 		spc->spc_maxpriority = eprio;
   1450 
   1451 	sched_newts(l);
   1452 
   1453 	/*
   1454 	 * Wake the chosen CPU or cause a preemption if the newly
   1455 	 * enqueued thread has higher priority.  Don't cause a
   1456 	 * preemption if the thread is yielding (swtch).
   1457 	 */
   1458 	if (!swtch && eprio > spc->spc_curpriority) {
   1459 		if (eprio >= sched_kpreempt_pri)
   1460 			type = RESCHED_KPREEMPT;
   1461 		else if (eprio >= sched_upreempt_pri)
   1462 			type = RESCHED_IMMED;
   1463 		else
   1464 			type = 0;
   1465 		cpu_need_resched(ci, type);
   1466 	}
   1467 }
   1468 
   1469 void
   1470 sched_dequeue(struct lwp *l)
   1471 {
   1472 	runqueue_t *ci_rq;
   1473 	TAILQ_HEAD(, lwp) *q_head;
   1474 	struct schedstate_percpu *spc;
   1475 	const pri_t eprio = lwp_eprio(l);
   1476 
    1477 	spc = &l->l_cpu->ci_schedstate;
   1478 	ci_rq = spc->spc_sched_info;
   1479 	KASSERT(lwp_locked(l, spc->spc_mutex));
   1480 
   1481 	KASSERT(eprio <= spc->spc_maxpriority);
   1482 	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
   1483 	KASSERT(ci_rq->r_count > 0);
   1484 
   1485 	ci_rq->r_count--;
   1486 	if ((l->l_pflag & LP_BOUND) == 0)
   1487 		ci_rq->r_mcount--;
   1488 
   1489 	q_head = sched_getrq(ci_rq, eprio);
   1490 	TAILQ_REMOVE(q_head, l, l_runq);
   1491 	if (TAILQ_EMPTY(q_head)) {
   1492 		u_int i;
   1493 		uint32_t q;
   1494 
   1495 		/* Unmark bit */
   1496 		i = eprio >> BITMAP_SHIFT;
   1497 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1498 		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
   1499 		ci_rq->r_bitmap[i] &= ~q;
   1500 
   1501 		/*
    1502 		 * Update the value of the highest priority in the runqueue, in
    1503 		 * case this was the last thread in the highest-priority queue.
   1504 		 */
   1505 		if (eprio != spc->spc_maxpriority)
   1506 			return;
   1507 
   1508 		do {
   1509 			if (ci_rq->r_bitmap[i] != 0) {
   1510 				q = ffs(ci_rq->r_bitmap[i]);
   1511 				spc->spc_maxpriority =
   1512 				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
   1513 				return;
   1514 			}
   1515 		} while (i--);
   1516 
   1517 		/* If not found - set the lowest value */
   1518 		spc->spc_maxpriority = 0;
   1519 	}
   1520 }
   1521 
   1522 /*
   1523  * Migration and balancing.
   1524  */
   1525 
   1526 #ifdef MULTIPROCESSOR
   1527 
   1528 /* Estimate if LWP is cache-hot */
   1529 static inline bool
   1530 lwp_cache_hot(const struct lwp *l)
   1531 {
   1532 
   1533 	if (l->l_slptime || l->l_rticks == 0)
   1534 		return false;
   1535 
   1536 	return (hardclock_ticks - l->l_rticks <= cacheht_time);
   1537 }
   1538 
   1539 /* Check if LWP can migrate to the chosen CPU */
   1540 static inline bool
   1541 sched_migratable(const struct lwp *l, struct cpu_info *ci)
   1542 {
   1543 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
   1544 
   1545 	/* CPU is offline */
   1546 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
   1547 		return false;
   1548 
   1549 	/* Affinity bind */
   1550 	if (__predict_false(l->l_flag & LW_AFFINITY))
   1551 		return CPU_ISSET(cpu_index(ci), &l->l_affinity);
   1552 
   1553 	/* Processor-set */
   1554 	return (spc->spc_psid == l->l_psid);
   1555 }
   1556 
   1557 /*
    1558  * Estimate whether the LWP should migrate to another CPU and, if so,
    1559  * choose that CPU; return the CPU the LWP should run on.
   1560  */
   1561 struct cpu_info *
   1562 sched_takecpu(struct lwp *l)
   1563 {
   1564 	struct cpu_info *ci, *tci, *first, *next;
   1565 	struct schedstate_percpu *spc;
   1566 	runqueue_t *ci_rq, *ici_rq;
   1567 	pri_t eprio, lpri, pri;
   1568 
   1569 	KASSERT(lwp_locked(l, NULL));
   1570 
   1571 	ci = l->l_cpu;
   1572 	spc = &ci->ci_schedstate;
   1573 	ci_rq = spc->spc_sched_info;
   1574 
   1575 	/* If thread is strictly bound, do not estimate other CPUs */
   1576 	if (l->l_pflag & LP_BOUND)
   1577 		return ci;
   1578 
   1579 	/* CPU of this thread is idling - run there */
   1580 	if (ci_rq->r_count == 0)
   1581 		return ci;
   1582 
   1583 	eprio = lwp_eprio(l);
   1584 
   1585 	/* Stay if thread is cache-hot */
   1586 	if (__predict_true(l->l_stat != LSIDL) &&
   1587 	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
   1588 		return ci;
   1589 
   1590 	/* Run on current CPU if priority of thread is higher */
   1591 	ci = curcpu();
   1592 	spc = &ci->ci_schedstate;
   1593 	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
   1594 		return ci;
   1595 
   1596 	/*
    1597 	 * Look for the CPU with the lowest-priority thread.  In case of
    1598 	 * equal priority, choose the CPU with the fewest threads.
   1599 	 */
   1600 	first = l->l_cpu;
   1601 	ci = first;
   1602 	tci = first;
   1603 	lpri = PRI_COUNT;
   1604 	do {
   1605 		next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
   1606 		spc = &ci->ci_schedstate;
   1607 		ici_rq = spc->spc_sched_info;
   1608 		pri = max(spc->spc_curpriority, spc->spc_maxpriority);
   1609 		if (pri > lpri)
   1610 			continue;
   1611 
   1612 		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
   1613 			continue;
   1614 
   1615 		if (!sched_migratable(l, ci))
   1616 			continue;
   1617 
   1618 		lpri = pri;
   1619 		tci = ci;
   1620 		ci_rq = ici_rq;
   1621 	} while (ci = next, ci != first);
   1622 
   1623 	return tci;
   1624 }
   1625 
   1626 /*
    1627  * Try to catch an LWP from the run queue of another CPU.
   1628  */
   1629 static struct lwp *
   1630 sched_catchlwp(void)
   1631 {
   1632 	struct cpu_info *curci = curcpu(), *ci = worker_ci;
   1633 	struct schedstate_percpu *spc;
   1634 	TAILQ_HEAD(, lwp) *q_head;
   1635 	runqueue_t *ci_rq;
   1636 	struct lwp *l;
   1637 
   1638 	if (curci == ci)
   1639 		return NULL;
   1640 
   1641 	/* Lockless check */
   1642 	spc = &ci->ci_schedstate;
   1643 	ci_rq = spc->spc_sched_info;
   1644 	if (ci_rq->r_mcount < min_catch)
   1645 		return NULL;
   1646 
   1647 	/*
   1648 	 * Double-lock the runqueues.
   1649 	 */
   1650 	if (curci < ci) {
   1651 		spc_lock(ci);
   1652 	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
   1653 		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;
   1654 
   1655 		spc_unlock(curci);
   1656 		spc_lock(ci);
   1657 		spc_lock(curci);
   1658 
   1659 		if (cur_rq->r_count) {
   1660 			spc_unlock(ci);
   1661 			return NULL;
   1662 		}
   1663 	}
   1664 
   1665 	if (ci_rq->r_mcount < min_catch) {
   1666 		spc_unlock(ci);
   1667 		return NULL;
   1668 	}
   1669 
   1670 	/* Take the highest priority thread */
   1671 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1672 	l = TAILQ_FIRST(q_head);
   1673 
   1674 	for (;;) {
   1675 		/* Check the first and next result from the queue */
   1676 		if (l == NULL)
   1677 			break;
   1678 		KASSERT(l->l_stat == LSRUN);
   1679 		KASSERT(l->l_flag & LW_INMEM);
   1680 
    1681 		/* Look for threads that are allowed to migrate */
   1682 		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
   1683 		    !sched_migratable(l, curci)) {
   1684 			l = TAILQ_NEXT(l, l_runq);
   1685 			continue;
   1686 		}
   1687 
   1688 		/* Grab the thread, and move to the local run queue */
   1689 		sched_dequeue(l);
   1690 		l->l_cpu = curci;
   1691 		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
   1692 		sched_enqueue(l, false);
   1693 		return l;
   1694 	}
   1695 	spc_unlock(ci);
   1696 
   1697 	return l;
   1698 }
   1699 
   1700 /*
    1701  * Periodic calculations for balancing.
   1702  */
   1703 static void
   1704 sched_balance(void *nocallout)
   1705 {
   1706 	struct cpu_info *ci, *hci;
   1707 	runqueue_t *ci_rq;
   1708 	CPU_INFO_ITERATOR cii;
   1709 	u_int highest;
   1710 
   1711 	hci = curcpu();
   1712 	highest = 0;
   1713 
    1714 	/* Do the counting locklessly */
   1715 	for (CPU_INFO_FOREACH(cii, ci)) {
   1716 		ci_rq = ci->ci_schedstate.spc_sched_info;
   1717 
   1718 		/* Average count of the threads */
   1719 		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;
   1720 
   1721 		/* Look for CPU with the highest average */
   1722 		if (ci_rq->r_avgcount > highest) {
   1723 			hci = ci;
   1724 			highest = ci_rq->r_avgcount;
   1725 		}
   1726 	}
   1727 
   1728 	/* Update the worker */
   1729 	worker_ci = hci;
   1730 
   1731 	if (nocallout == NULL)
   1732 		callout_schedule(&balance_ch, balance_period);
   1733 }
   1734 
   1735 #else
   1736 
   1737 struct cpu_info *
   1738 sched_takecpu(struct lwp *l)
   1739 {
   1740 
   1741 	return l->l_cpu;
   1742 }
   1743 
   1744 #endif	/* MULTIPROCESSOR */
   1745 
   1746 /*
   1747  * Scheduler mill.
   1748  */
   1749 struct lwp *
   1750 sched_nextlwp(void)
   1751 {
   1752 	struct cpu_info *ci = curcpu();
   1753 	struct schedstate_percpu *spc;
   1754 	TAILQ_HEAD(, lwp) *q_head;
   1755 	runqueue_t *ci_rq;
   1756 	struct lwp *l;
   1757 
   1758 	spc = &ci->ci_schedstate;
   1759 	ci_rq = spc->spc_sched_info;
   1760 
   1761 #ifdef MULTIPROCESSOR
    1762 	/* If the runqueue is empty, try to catch a thread from another CPU */
   1763 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
   1764 		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
   1765 			return NULL;
   1766 	} else if (ci_rq->r_count == 0) {
   1767 		/* Reset the counter, and call the balancer */
   1768 		ci_rq->r_avgcount = 0;
   1769 		sched_balance(ci);
   1770 
   1771 		/* The re-locking will be done inside */
   1772 		return sched_catchlwp();
   1773 	}
   1774 #else
   1775 	if (ci_rq->r_count == 0)
   1776 		return NULL;
   1777 #endif
   1778 
   1779 	/* Take the highest priority thread */
   1780 	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
   1781 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1782 	l = TAILQ_FIRST(q_head);
   1783 	KASSERT(l != NULL);
   1784 
   1785 	sched_oncpu(l);
   1786 	l->l_rticks = hardclock_ticks;
   1787 
   1788 	return l;
   1789 }
   1790 
   1791 bool
   1792 sched_curcpu_runnable_p(void)
   1793 {
   1794 	const struct cpu_info *ci;
   1795 	const runqueue_t *ci_rq;
   1796 	bool rv;
   1797 
   1798 	kpreempt_disable();
   1799 	ci = curcpu();
   1800 	ci_rq = ci->ci_schedstate.spc_sched_info;
   1801 
   1802 #ifndef __HAVE_FAST_SOFTINTS
   1803 	if (ci->ci_data.cpu_softints) {
   1804 		kpreempt_enable();
   1805 		return true;
   1806 	}
   1807 #endif
   1808 
   1809 	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
   1810 		rv = (ci_rq->r_count - ci_rq->r_mcount);
   1811 	else
   1812 		rv = ci_rq->r_count != 0;
   1813 	kpreempt_enable();
   1814 
   1815 	return rv;
   1816 }
   1817 
   1818 /*
   1819  * Debugging.
   1820  */
   1821 
   1822 #ifdef DDB
   1823 
   1824 void
   1825 sched_print_runqueue(void (*pr)(const char *, ...)
   1826     __attribute__((__format__(__printf__,1,2))))
   1827 {
   1828 	runqueue_t *ci_rq;
   1829 	struct schedstate_percpu *spc;
   1830 	struct lwp *l;
   1831 	struct proc *p;
   1832 	int i;
   1833 	struct cpu_info *ci;
   1834 	CPU_INFO_ITERATOR cii;
   1835 
   1836 	for (CPU_INFO_FOREACH(cii, ci)) {
   1837 		spc = &ci->ci_schedstate;
   1838 		ci_rq = spc->spc_sched_info;
   1839 
   1840 		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
   1841 		(*pr)(" pid.lid = %d.%d, threads count = %u, "
   1842 		    "avgcount = %u, highest pri = %d\n",
   1843 #ifdef MULTIPROCESSOR
   1844 		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
   1845 #else
   1846 		    curlwp->l_proc->p_pid, curlwp->l_lid,
   1847 #endif
   1848 		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority);
   1849 		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
   1850 		do {
   1851 			uint32_t q;
   1852 			q = ci_rq->r_bitmap[i];
   1853 			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
   1854 		} while (i--);
   1855 	}
   1856 
   1857 	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %s\n",
   1858 	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "LRTIME");
   1859 
   1860 	PROCLIST_FOREACH(p, &allproc) {
   1861 		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
   1862 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1863 			ci = l->l_cpu;
   1864 			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %u\n",
   1865 			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
   1866 			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
   1867 			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
   1868 			    l, ci->ci_index,
   1869 			    (u_int)(hardclock_ticks - l->l_rticks));
   1870 		}
   1871 	}
   1872 }
   1873 
   1874 #endif
   1875