      1 /*	$NetBSD: kern_synch.c,v 1.231 2008/04/28 15:36:01 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  * Daniel Sieger.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the NetBSD
     23  *	Foundation, Inc. and its contributors.
     24  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25  *    contributors may be used to endorse or promote products derived
     26  *    from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38  * POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 
     41 /*
     42  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
     43  * All rights reserved.
     44  *
     45  * Redistribution and use in source and binary forms, with or without
     46  * modification, are permitted provided that the following conditions
     47  * are met:
     48  * 1. Redistributions of source code must retain the above copyright
     49  *    notice, this list of conditions and the following disclaimer.
     50  * 2. Redistributions in binary form must reproduce the above copyright
     51  *    notice, this list of conditions and the following disclaimer in the
     52  *    documentation and/or other materials provided with the distribution.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  */
     66 
     67 /*-
     68  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     69  *	The Regents of the University of California.  All rights reserved.
     70  * (c) UNIX System Laboratories, Inc.
     71  * All or some portions of this file are derived from material licensed
     72  * to the University of California by American Telephone and Telegraph
     73  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     74  * the permission of UNIX System Laboratories, Inc.
     75  *
     76  * Redistribution and use in source and binary forms, with or without
     77  * modification, are permitted provided that the following conditions
     78  * are met:
     79  * 1. Redistributions of source code must retain the above copyright
     80  *    notice, this list of conditions and the following disclaimer.
     81  * 2. Redistributions in binary form must reproduce the above copyright
     82  *    notice, this list of conditions and the following disclaimer in the
     83  *    documentation and/or other materials provided with the distribution.
     84  * 3. Neither the name of the University nor the names of its contributors
     85  *    may be used to endorse or promote products derived from this software
     86  *    without specific prior written permission.
     87  *
     88  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     89  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     90  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     91  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     92  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     93  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     94  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     95  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     96  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     97  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     98  * SUCH DAMAGE.
     99  *
    100  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
    101  */
    102 
    103 #include <sys/cdefs.h>
    104 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.231 2008/04/28 15:36:01 ad Exp $");
    105 
    106 #include "opt_kstack.h"
    107 #include "opt_lockdebug.h"
    108 #include "opt_multiprocessor.h"
    109 #include "opt_perfctrs.h"
    110 #include "opt_preemption.h"
    111 
    112 #define	__MUTEX_PRIVATE
    113 
    114 #include <sys/param.h>
    115 #include <sys/systm.h>
    116 #include <sys/proc.h>
    117 #include <sys/kernel.h>
    118 #if defined(PERFCTRS)
    119 #include <sys/pmc.h>
    120 #endif
    121 #include <sys/cpu.h>
    122 #include <sys/resourcevar.h>
    123 #include <sys/sched.h>
    124 #include <sys/syscall_stats.h>
    125 #include <sys/sleepq.h>
    126 #include <sys/lockdebug.h>
    127 #include <sys/evcnt.h>
    128 #include <sys/intr.h>
    129 #include <sys/lwpctl.h>
    130 #include <sys/atomic.h>
    131 #include <sys/simplelock.h>
    132 #include <sys/bitops.h>
    133 #include <sys/kmem.h>
    134 #include <sys/sysctl.h>
    135 #include <sys/idle.h>
    136 
    137 #include <uvm/uvm_extern.h>
    138 
    139 #include <dev/lockstat.h>
    140 
    141 /*
     142  * Priority-related definitions.
    143  */
    144 #define	PRI_TS_COUNT	(NPRI_USER)
    145 #define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
    146 #define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)
    147 
    148 #define	PRI_HIGHEST_TS	(MAXPRI_USER)
    149 
    150 /*
    151  * Bits per map.
    152  */
    153 #define	BITMAP_BITS	(32)
    154 #define	BITMAP_SHIFT	(5)
    155 #define	BITMAP_MSB	(0x80000000U)
    156 #define	BITMAP_MASK	(BITMAP_BITS - 1)
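         /*
          * Editorial note (illustrative only, not part of the original file):
          * each priority maps to one bit.  The word index is
          * (prio >> BITMAP_SHIFT) and the bit is
          * (BITMAP_MSB >> (prio & BITMAP_MASK)), i.e. priorities are stored
          * MSB-first within a word.  For example, priority 35 lands in
          * r_bitmap[1] as 0x80000000U >> 3 == 0x10000000.  Storing bits
          * MSB-first lets sched_dequeue() recover the highest priority in a
          * word with ffs(), which finds the lowest set bit.
          */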
    157 
    158 /*
    159  * Structures, runqueue.
    160  */
    161 
    162 typedef struct {
    163 	TAILQ_HEAD(, lwp) q_head;
    164 } queue_t;
    165 
    166 typedef struct {
    167 	/* Lock and bitmap */
    168 	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
    169 	/* Counters */
    170 	u_int		r_count;	/* Count of the threads */
    171 	u_int		r_avgcount;	/* Average count of threads */
    172 	u_int		r_mcount;	/* Count of migratable threads */
    173 	/* Runqueues */
    174 	queue_t		r_rt_queue[PRI_RT_COUNT];
    175 	queue_t		r_ts_queue[PRI_TS_COUNT];
    176 } runqueue_t;
    177 
    178 static u_int	sched_unsleep(struct lwp *, bool);
    179 static void	sched_changepri(struct lwp *, pri_t);
    180 static void	sched_lendpri(struct lwp *, pri_t);
    181 static void	*sched_getrq(runqueue_t *, const pri_t);
    182 #ifdef MULTIPROCESSOR
    183 static lwp_t	*sched_catchlwp(void);
    184 static void	sched_balance(void *);
    185 #endif
    186 
    187 syncobj_t sleep_syncobj = {
    188 	SOBJ_SLEEPQ_SORTED,
    189 	sleepq_unsleep,
    190 	sleepq_changepri,
    191 	sleepq_lendpri,
    192 	syncobj_noowner,
    193 };
    194 
    195 syncobj_t sched_syncobj = {
    196 	SOBJ_SLEEPQ_SORTED,
    197 	sched_unsleep,
    198 	sched_changepri,
    199 	sched_lendpri,
    200 	syncobj_noowner,
    201 };
    202 
    203 const int 	schedppq = 1;
    204 callout_t 	sched_pstats_ch;
    205 unsigned	sched_pstats_ticks;
    206 kcondvar_t	lbolt;			/* once a second sleep address */
    207 
    208 /*
    209  * Kernel preemption.
    210  */
    211 #ifdef PREEMPTION
    212 int		sched_kpreempt_pri = PRI_USER_RT;
    213 
    214 static struct evcnt kpreempt_ev_crit;
    215 static struct evcnt kpreempt_ev_klock;
    216 static struct evcnt kpreempt_ev_ipl;
    217 static struct evcnt kpreempt_ev_immed;
    218 #else
    219 int		sched_kpreempt_pri = INT_MAX;
    220 #endif
    221 int		sched_upreempt_pri = PRI_KERNEL;
    222 
    223 /*
    224  * Migration and balancing.
    225  */
    226 static u_int	cacheht_time;		/* Cache hotness time */
    227 static u_int	min_catch;		/* Minimal LWP count for catching */
    228 static u_int	balance_period;		/* Balance period */
    229 static struct cpu_info *worker_ci;	/* Victim CPU */
    230 #ifdef MULTIPROCESSOR
    231 static struct callout balance_ch;	/* Callout of balancer */
    232 #endif
    233 
    234 /*
    235  * During autoconfiguration or after a panic, a sleep will simply lower the
    236  * priority briefly to allow interrupts, then return.  The priority to be
    237  * used (safepri) is machine-dependent, thus this value is initialized and
    238  * maintained in the machine-dependent layers.  This priority will typically
    239  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    240  * it can be made higher to block network software interrupts after panics.
    241  */
    242 int	safepri;
    243 
    244 /*
    245  * OBSOLETE INTERFACE
    246  *
    247  * General sleep call.  Suspends the current process until a wakeup is
    248  * performed on the specified identifier.  The process will then be made
    249  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
     250  * means no timeout).  If pri includes the PCATCH flag, signals are checked
     251  * before and after sleeping; otherwise signals are not checked.  Returns 0
     252  * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
     253  * signal needs to be delivered, ERESTART is returned if the current system
     254  * call should be restarted if possible, and EINTR is returned if the system
     255  * call should be interrupted by the signal.
    256  *
    257  * The interlock is held until we are on a sleep queue. The interlock will
     258  * be locked before returning to the caller unless the PNORELOCK flag
    259  * is specified, in which case the interlock will always be unlocked upon
    260  * return.
    261  */
    262 int
    263 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    264 	volatile struct simplelock *interlock)
    265 {
    266 	struct lwp *l = curlwp;
    267 	sleepq_t *sq;
    268 	int error;
    269 
    270 	KASSERT((l->l_pflag & LP_INTR) == 0);
    271 
    272 	if (sleepq_dontsleep(l)) {
    273 		(void)sleepq_abort(NULL, 0);
    274 		if ((priority & PNORELOCK) != 0)
    275 			simple_unlock(interlock);
    276 		return 0;
    277 	}
    278 
    279 	l->l_kpriority = true;
    280 	sq = sleeptab_lookup(&sleeptab, ident);
    281 	sleepq_enter(sq, l);
    282 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    283 
    284 	if (interlock != NULL) {
    285 		KASSERT(simple_lock_held(interlock));
    286 		simple_unlock(interlock);
    287 	}
    288 
    289 	error = sleepq_block(timo, priority & PCATCH);
    290 
    291 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    292 		simple_lock(interlock);
    293 
    294 	return error;
    295 }
    296 
    297 int
    298 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    299 	kmutex_t *mtx)
    300 {
    301 	struct lwp *l = curlwp;
    302 	sleepq_t *sq;
    303 	int error;
    304 
    305 	KASSERT((l->l_pflag & LP_INTR) == 0);
    306 
    307 	if (sleepq_dontsleep(l)) {
    308 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    309 		return 0;
    310 	}
    311 
    312 	l->l_kpriority = true;
    313 	sq = sleeptab_lookup(&sleeptab, ident);
    314 	sleepq_enter(sq, l);
    315 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    316 	mutex_exit(mtx);
    317 	error = sleepq_block(timo, priority & PCATCH);
    318 
    319 	if ((priority & PNORELOCK) == 0)
    320 		mutex_enter(mtx);
    321 
    322 	return error;
    323 }
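         /*
          * Illustrative usage sketch (editorial addition, not part of this
          * file): a typical caller holds the mutex, re-checks its condition in
          * a loop, and relies on mtsleep() re-taking the mutex before it
          * returns, since PNORELOCK is not passed.  The names sc, sc_lock and
          * sc_busy below are hypothetical.
          *
          *	mutex_enter(&sc->sc_lock);
          *	while (sc->sc_busy) {
          *		error = mtsleep(&sc->sc_busy, PCATCH, "scbusy", 0,
          *		    &sc->sc_lock);
          *		if (error)
          *			break;
          *	}
          *	mutex_exit(&sc->sc_lock);
          */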
    324 
    325 /*
    326  * General sleep call for situations where a wake-up is not expected.
    327  */
    328 int
    329 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    330 {
    331 	struct lwp *l = curlwp;
    332 	sleepq_t *sq;
    333 	int error;
    334 
    335 	if (sleepq_dontsleep(l))
    336 		return sleepq_abort(NULL, 0);
    337 
    338 	if (mtx != NULL)
    339 		mutex_exit(mtx);
    340 	l->l_kpriority = true;
    341 	sq = sleeptab_lookup(&sleeptab, l);
    342 	sleepq_enter(sq, l);
    343 	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
    344 	error = sleepq_block(timo, intr);
    345 	if (mtx != NULL)
    346 		mutex_enter(mtx);
    347 
    348 	return error;
    349 }
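         /*
          * Illustrative usage sketch (editorial addition, not part of this
          * file): kpause() is the usual way to sleep for a fixed interval when
          * no wakeup will be posted, e.g. waiting roughly 100ms for hardware
          * to settle:
          *
          *	(void)kpause("hwsettle", false, mstohz(100), NULL);
          *
          * The wmesg "hwsettle" is hypothetical; false means the sleep is not
          * interruptible by signals, and NULL means no mutex is dropped.
          */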
    350 
    351 /*
    352  * OBSOLETE INTERFACE
    353  *
    354  * Make all processes sleeping on the specified identifier runnable.
    355  */
    356 void
    357 wakeup(wchan_t ident)
    358 {
    359 	sleepq_t *sq;
    360 
    361 	if (cold)
    362 		return;
    363 
    364 	sq = sleeptab_lookup(&sleeptab, ident);
    365 	sleepq_wake(sq, ident, (u_int)-1);
    366 }
    367 
    368 /*
    369  * OBSOLETE INTERFACE
    370  *
     371  * Make the highest-priority LWP sleeping on the specified identifier
     372  * runnable.
    373  */
    374 void
    375 wakeup_one(wchan_t ident)
    376 {
    377 	sleepq_t *sq;
    378 
    379 	if (cold)
    380 		return;
    381 
    382 	sq = sleeptab_lookup(&sleeptab, ident);
    383 	sleepq_wake(sq, ident, 1);
    384 }
    385 
    386 
    387 /*
    388  * General yield call.  Puts the current process back on its run queue and
    389  * performs a voluntary context switch.  Should only be called when the
     390  * current process explicitly requests it (e.g. sched_yield(2)).
    391  */
    392 void
    393 yield(void)
    394 {
    395 	struct lwp *l = curlwp;
    396 
    397 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    398 	lwp_lock(l);
    399 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    400 	KASSERT(l->l_stat == LSONPROC);
    401 	l->l_kpriority = false;
    402 	(void)mi_switch(l);
    403 	KERNEL_LOCK(l->l_biglocks, l);
    404 }
    405 
    406 /*
    407  * General preemption call.  Puts the current process back on its run queue
    408  * and performs an involuntary context switch.
    409  */
    410 void
    411 preempt(void)
    412 {
    413 	struct lwp *l = curlwp;
    414 
    415 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    416 	lwp_lock(l);
    417 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    418 	KASSERT(l->l_stat == LSONPROC);
    419 	l->l_kpriority = false;
    420 	l->l_nivcsw++;
    421 	(void)mi_switch(l);
    422 	KERNEL_LOCK(l->l_biglocks, l);
    423 }
    424 
    425 #ifdef PREEMPTION
    426 /* XXX Yuck, for lockstat. */
    427 static char	in_critical_section;
    428 static char	kernel_lock_held;
    429 static char	spl_raised;
    430 static char	is_softint;
    431 
    432 /*
    433  * Handle a request made by another agent to preempt the current LWP
    434  * in-kernel.  Usually called when l_dopreempt may be non-zero.
    435  */
    436 bool
    437 kpreempt(uintptr_t where)
    438 {
    439 	uintptr_t failed;
    440 	lwp_t *l;
    441 	int s, dop;
    442 
    443 	l = curlwp;
    444 	failed = 0;
    445 	while ((dop = l->l_dopreempt) != 0) {
    446 		if (l->l_stat != LSONPROC) {
    447 			/*
    448 			 * About to block (or die), let it happen.
    449 			 * Doesn't really count as "preemption has
    450 			 * been blocked", since we're going to
    451 			 * context switch.
    452 			 */
    453 			l->l_dopreempt = 0;
    454 			return true;
    455 		}
    456 		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
    457 			/* Can't preempt idle loop, don't count as failure. */
    458 		    	l->l_dopreempt = 0;
    459 		    	return true;
    460 		}
    461 		if (__predict_false(l->l_nopreempt != 0)) {
    462 			/* LWP holds preemption disabled, explicitly. */
    463 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    464 				atomic_inc_64(&kpreempt_ev_crit.ev_count);
    465 			}
    466 			failed = (uintptr_t)&in_critical_section;
    467 			break;
    468 		}
    469 		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
    470 		    	/* Can't preempt soft interrupts yet. */
    471 		    	l->l_dopreempt = 0;
    472 		    	failed = (uintptr_t)&is_softint;
    473 		    	break;
    474 		}
    475 		s = splsched();
    476 		if (__predict_false(l->l_blcnt != 0 ||
    477 		    curcpu()->ci_biglock_wanted != NULL)) {
    478 			/* Hold or want kernel_lock, code is not MT safe. */
    479 			splx(s);
    480 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    481 				atomic_inc_64(&kpreempt_ev_klock.ev_count);
    482 			}
    483 			failed = (uintptr_t)&kernel_lock_held;
    484 			break;
    485 		}
    486 		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
    487 			/*
    488 			 * It may be that the IPL is too high.
     489 			 * cpu_kpreempt_enter() can schedule an
    490 			 * interrupt to retry later.
    491 			 */
    492 			splx(s);
    493 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    494 				atomic_inc_64(&kpreempt_ev_ipl.ev_count);
    495 			}
    496 			failed = (uintptr_t)&spl_raised;
    497 			break;
    498 		}
    499 		/* Do it! */
    500 		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
    501 			atomic_inc_64(&kpreempt_ev_immed.ev_count);
    502 		}
    503 		lwp_lock(l);
    504 		mi_switch(l);
    505 		l->l_nopreempt++;
    506 		splx(s);
    507 
    508 		/* Take care of any MD cleanup. */
    509 		cpu_kpreempt_exit(where);
    510 		l->l_nopreempt--;
    511 	}
    512 
    513 	/* Record preemption failure for reporting via lockstat. */
    514 	if (__predict_false(failed)) {
    515 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
    516 		int lsflag = 0;
    517 		LOCKSTAT_ENTER(lsflag);
    518 		/* Might recurse, make it atomic. */
    519 		if (__predict_false(lsflag)) {
    520 			if (where == 0) {
    521 				where = (uintptr_t)__builtin_return_address(0);
    522 			}
    523 			if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr,
    524 			    NULL, (void *)where) == NULL) {
    525 				LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
    526 				l->l_pfaillock = failed;
    527 			}
    528 		}
    529 		LOCKSTAT_EXIT(lsflag);
    530 	}
    531 
    532 	return failed;
    533 }
    534 
    535 /*
    536  * Return true if preemption is explicitly disabled.
    537  */
    538 bool
    539 kpreempt_disabled(void)
    540 {
    541 	lwp_t *l;
    542 
    543 	l = curlwp;
    544 
    545 	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
    546 	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
    547 }
    548 #else
    549 bool
    550 kpreempt(uintptr_t where)
    551 {
    552 
    553 	panic("kpreempt");
    554 	return true;
    555 }
    556 
    557 bool
    558 kpreempt_disabled(void)
    559 {
    560 
    561 	return true;
    562 }
    563 #endif
    564 
    565 /*
    566  * Disable kernel preemption.
    567  */
    568 void
    569 kpreempt_disable(void)
    570 {
    571 
    572 	KPREEMPT_DISABLE(curlwp);
    573 }
    574 
    575 /*
    576  * Reenable kernel preemption.
    577  */
    578 void
    579 kpreempt_enable(void)
    580 {
    581 
    582 	KPREEMPT_ENABLE(curlwp);
    583 }
    584 
    585 /*
    586  * Compute the amount of time during which the current lwp was running.
    587  *
    588  * - update l_rtime unless it's an idle lwp.
    589  */
    590 
    591 void
    592 updatertime(lwp_t *l, const struct bintime *now)
    593 {
    594 
    595 	if ((l->l_flag & LW_IDLE) != 0)
    596 		return;
    597 
    598 	/* rtime += now - stime */
    599 	bintime_add(&l->l_rtime, now);
    600 	bintime_sub(&l->l_rtime, &l->l_stime);
    601 }
    602 
    603 /*
     604  * The machine-independent part of a context switch.
    605  *
    606  * Returns 1 if another LWP was actually run.
    607  */
    608 int
    609 mi_switch(lwp_t *l)
    610 {
    611 	struct cpu_info *ci, *tci = NULL;
    612 	struct schedstate_percpu *spc;
    613 	struct lwp *newl;
    614 	int retval, oldspl;
    615 	struct bintime bt;
    616 	bool returning;
    617 
    618 	KASSERT(lwp_locked(l, NULL));
    619 	KASSERT(kpreempt_disabled());
    620 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    621 
    622 #ifdef KSTACK_CHECK_MAGIC
    623 	kstack_check_magic(l);
    624 #endif
    625 
    626 	binuptime(&bt);
    627 
    628 	KASSERT(l->l_cpu == curcpu());
    629 	ci = l->l_cpu;
    630 	spc = &ci->ci_schedstate;
    631 	returning = false;
    632 	newl = NULL;
    633 
    634 	/*
    635 	 * If we have been asked to switch to a specific LWP, then there
    636 	 * is no need to inspect the run queues.  If a soft interrupt is
    637 	 * blocking, then return to the interrupted thread without adjusting
     638 	 * VM context or its start time: neither has been changed in order
    639 	 * to take the interrupt.
    640 	 */
    641 	if (l->l_switchto != NULL) {
    642 		if ((l->l_pflag & LP_INTR) != 0) {
    643 			returning = true;
    644 			softint_block(l);
    645 			if ((l->l_flag & LW_TIMEINTR) != 0)
    646 				updatertime(l, &bt);
    647 		}
    648 		newl = l->l_switchto;
    649 		l->l_switchto = NULL;
    650 	}
    651 #ifndef __HAVE_FAST_SOFTINTS
    652 	else if (ci->ci_data.cpu_softints != 0) {
    653 		/* There are pending soft interrupts, so pick one. */
    654 		newl = softint_picklwp();
    655 		newl->l_stat = LSONPROC;
    656 		newl->l_flag |= LW_RUNNING;
    657 	}
    658 #endif	/* !__HAVE_FAST_SOFTINTS */
    659 
    660 	/* Count time spent in current system call */
    661 	if (!returning) {
    662 		SYSCALL_TIME_SLEEP(l);
    663 
    664 		/*
    665 		 * XXXSMP If we are using h/w performance counters,
    666 		 * save context.
    667 		 */
    668 #if PERFCTRS
    669 		if (PMC_ENABLED(l->l_proc)) {
    670 			pmc_save_context(l->l_proc);
    671 		}
    672 #endif
    673 		updatertime(l, &bt);
    674 	}
    675 
    676 	/*
     677 	 * If we are on the CPU and have gotten this far, then we must yield.
    678 	 */
    679 	KASSERT(l->l_stat != LSRUN);
    680 	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
    681 		KASSERT(lwp_locked(l, spc->spc_lwplock));
    682 
    683 		if (l->l_target_cpu == l->l_cpu) {
    684 			l->l_target_cpu = NULL;
    685 		} else {
    686 			tci = l->l_target_cpu;
    687 		}
    688 
    689 		if (__predict_false(tci != NULL)) {
    690 			/* Double-lock the runqueues */
    691 			spc_dlock(ci, tci);
    692 		} else {
    693 			/* Lock the runqueue */
    694 			spc_lock(ci);
    695 		}
    696 
    697 		if ((l->l_flag & LW_IDLE) == 0) {
    698 			l->l_stat = LSRUN;
    699 			if (__predict_false(tci != NULL)) {
    700 				/*
     701 				 * Set the new CPU and lock, and clear
     702 				 * l_target_cpu - the thread will be enqueued
     703 				 * on the runqueue of the target CPU.
    704 				 */
    705 				l->l_cpu = tci;
    706 				lwp_setlock(l, tci->ci_schedstate.spc_mutex);
    707 				l->l_target_cpu = NULL;
    708 			} else {
    709 				lwp_setlock(l, spc->spc_mutex);
    710 			}
    711 			sched_enqueue(l, true);
    712 		} else {
    713 			KASSERT(tci == NULL);
    714 			l->l_stat = LSIDL;
    715 		}
    716 	} else {
    717 		/* Lock the runqueue */
    718 		spc_lock(ci);
    719 	}
    720 
    721 	/*
     722 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
     723 	 * If no LWP is runnable, select the idle LWP.
     724 	 *
     725 	 * Note that spc_lwplock might not necessarily be held, and
     726 	 * the new thread will be unlocked after setting the LWP lock.
    727 	 */
    728 	if (newl == NULL) {
    729 		newl = sched_nextlwp();
    730 		if (newl != NULL) {
    731 			sched_dequeue(newl);
    732 			KASSERT(lwp_locked(newl, spc->spc_mutex));
    733 			newl->l_stat = LSONPROC;
    734 			newl->l_cpu = ci;
    735 			newl->l_flag |= LW_RUNNING;
    736 			lwp_setlock(newl, spc->spc_lwplock);
    737 		} else {
    738 			newl = ci->ci_data.cpu_idlelwp;
    739 			newl->l_stat = LSONPROC;
    740 			newl->l_flag |= LW_RUNNING;
    741 		}
    742 		/*
    743 		 * Only clear want_resched if there are no
    744 		 * pending (slow) software interrupts.
    745 		 */
    746 		ci->ci_want_resched = ci->ci_data.cpu_softints;
    747 		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    748 		spc->spc_curpriority = lwp_eprio(newl);
    749 	}
    750 
    751 	/* Items that must be updated with the CPU locked. */
    752 	if (!returning) {
    753 		/* Update the new LWP's start time. */
    754 		newl->l_stime = bt;
    755 
    756 		/*
    757 		 * ci_curlwp changes when a fast soft interrupt occurs.
    758 		 * We use cpu_onproc to keep track of which kernel or
    759 		 * user thread is running 'underneath' the software
    760 		 * interrupt.  This is important for time accounting,
    761 		 * itimers and forcing user threads to preempt (aston).
    762 		 */
    763 		ci->ci_data.cpu_onproc = newl;
    764 	}
    765 
    766 	/* Kernel preemption related tasks. */
    767 	l->l_dopreempt = 0;
    768 	if (__predict_false(l->l_pfailaddr != 0)) {
    769 		LOCKSTAT_FLAG(lsflag);
    770 		LOCKSTAT_ENTER(lsflag);
    771 		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
    772 		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
    773 		    1, l->l_pfailtime, l->l_pfailaddr);
    774 		LOCKSTAT_EXIT(lsflag);
    775 		l->l_pfailtime = 0;
    776 		l->l_pfaillock = 0;
    777 		l->l_pfailaddr = 0;
    778 	}
    779 
    780 	if (l != newl) {
    781 		struct lwp *prevlwp;
    782 
    783 		/* Release all locks, but leave the current LWP locked */
    784 		if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
    785 			/*
    786 			 * In case of migration, drop the local runqueue
     787 			 * lock - the thread is now on another runqueue.
    788 			 */
    789 			if (__predict_false(tci != NULL))
    790 				spc_unlock(ci);
    791 			/*
    792 			 * Drop spc_lwplock, if the current LWP has been moved
    793 			 * to the run queue (it is now locked by spc_mutex).
    794 			 */
    795 			mutex_spin_exit(spc->spc_lwplock);
    796 		} else {
    797 			/*
    798 			 * Otherwise, drop the spc_mutex, we are done with the
    799 			 * run queues.
    800 			 */
    801 			mutex_spin_exit(spc->spc_mutex);
    802 			KASSERT(tci == NULL);
    803 		}
    804 
    805 		/*
     806 		 * Mark that a context switch is going to be performed
    807 		 * for this LWP, to protect it from being switched
    808 		 * to on another CPU.
    809 		 */
    810 		KASSERT(l->l_ctxswtch == 0);
    811 		l->l_ctxswtch = 1;
    812 		l->l_ncsw++;
    813 		l->l_flag &= ~LW_RUNNING;
    814 
    815 		/*
    816 		 * Increase the count of spin-mutexes before the release
    817 		 * of the last lock - we must remain at IPL_SCHED during
    818 		 * the context switch.
    819 		 */
    820 		oldspl = MUTEX_SPIN_OLDSPL(ci);
    821 		ci->ci_mtx_count--;
    822 		lwp_unlock(l);
    823 
    824 		/* Count the context switch on this CPU. */
    825 		ci->ci_data.cpu_nswtch++;
    826 
    827 		/* Update status for lwpctl, if present. */
    828 		if (l->l_lwpctl != NULL)
    829 			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
    830 
    831 		/*
    832 		 * Save old VM context, unless a soft interrupt
    833 		 * handler is blocking.
    834 		 */
    835 		if (!returning)
    836 			pmap_deactivate(l);
    837 
    838 		/*
     839 		 * We may need to spin-wait if 'newl' is still
    840 		 * context switching on another CPU.
    841 		 */
    842 		if (newl->l_ctxswtch != 0) {
    843 			u_int count;
    844 			count = SPINLOCK_BACKOFF_MIN;
    845 			while (newl->l_ctxswtch)
    846 				SPINLOCK_BACKOFF(count);
    847 		}
    848 
     849 		/* Switch to the new LWP. */
    850 		prevlwp = cpu_switchto(l, newl, returning);
    851 		ci = curcpu();
    852 
    853 		/*
     854 		 * Switched away - we have a new curlwp.
    855 		 * Restore VM context and IPL.
    856 		 */
    857 		pmap_activate(l);
    858 		if (prevlwp != NULL) {
    859 			/* Normalize the count of the spin-mutexes */
    860 			ci->ci_mtx_count++;
    861 			/* Unmark the state of context switch */
    862 			membar_exit();
    863 			prevlwp->l_ctxswtch = 0;
    864 		}
    865 
    866 		/* Update status for lwpctl, if present. */
    867 		if (l->l_lwpctl != NULL) {
    868 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
    869 			l->l_lwpctl->lc_pctr++;
    870 		}
    871 
    872 		KASSERT(l->l_cpu == ci);
    873 		splx(oldspl);
    874 		retval = 1;
    875 	} else {
    876 		/* Nothing to do - just unlock and return. */
    877 		KASSERT(tci == NULL);
    878 		spc_unlock(ci);
    879 		lwp_unlock(l);
    880 		retval = 0;
    881 	}
    882 
    883 	KASSERT(l == curlwp);
    884 	KASSERT(l->l_stat == LSONPROC);
    885 
    886 	/*
    887 	 * XXXSMP If we are using h/w performance counters, restore context.
    888 	 * XXXSMP preemption problem.
    889 	 */
    890 #if PERFCTRS
    891 	if (PMC_ENABLED(l->l_proc)) {
    892 		pmc_restore_context(l->l_proc);
    893 	}
    894 #endif
    895 	SYSCALL_TIME_WAKEUP(l);
    896 	LOCKDEBUG_BARRIER(NULL, 1);
    897 
    898 	return retval;
    899 }
    900 
    901 /*
    902  * Change process state to be runnable, placing it on the run queue if it is
    903  * in memory, and awakening the swapper if it isn't in memory.
    904  *
    905  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    906  */
    907 void
    908 setrunnable(struct lwp *l)
    909 {
    910 	struct proc *p = l->l_proc;
    911 	struct cpu_info *ci;
    912 	sigset_t *ss;
    913 
    914 	KASSERT((l->l_flag & LW_IDLE) == 0);
    915 	KASSERT(mutex_owned(p->p_lock));
    916 	KASSERT(lwp_locked(l, NULL));
    917 	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
    918 
    919 	switch (l->l_stat) {
    920 	case LSSTOP:
    921 		/*
    922 		 * If we're being traced (possibly because someone attached us
    923 		 * while we were stopped), check for a signal from the debugger.
    924 		 */
    925 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    926 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    927 				ss = &l->l_sigpend.sp_set;
    928 			else
    929 				ss = &p->p_sigpend.sp_set;
    930 			sigaddset(ss, p->p_xstat);
    931 			signotify(l);
    932 		}
    933 		p->p_nrlwps++;
    934 		break;
    935 	case LSSUSPENDED:
    936 		l->l_flag &= ~LW_WSUSPEND;
    937 		p->p_nrlwps++;
    938 		cv_broadcast(&p->p_lwpcv);
    939 		break;
    940 	case LSSLEEP:
    941 		KASSERT(l->l_wchan != NULL);
    942 		break;
    943 	default:
    944 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    945 	}
    946 
    947 	/*
     948 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    949 	 * again.  If not, mark it as still sleeping.
    950 	 */
    951 	if (l->l_wchan != NULL) {
    952 		l->l_stat = LSSLEEP;
    953 		/* lwp_unsleep() will release the lock. */
    954 		lwp_unsleep(l, true);
    955 		return;
    956 	}
    957 
    958 	/*
    959 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    960 	 * about to call mi_switch(), in which case it will yield.
    961 	 */
    962 	if ((l->l_flag & LW_RUNNING) != 0) {
    963 		l->l_stat = LSONPROC;
    964 		l->l_slptime = 0;
    965 		lwp_unlock(l);
    966 		return;
    967 	}
    968 
    969 	/*
    970 	 * Look for a CPU to run.
    971 	 * Set the LWP runnable.
    972 	 */
    973 	ci = sched_takecpu(l);
    974 	l->l_cpu = ci;
    975 	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
    976 		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
    977 		lwp_lock(l);
    978 	}
    979 	sched_setrunnable(l);
    980 	l->l_stat = LSRUN;
    981 	l->l_slptime = 0;
    982 
    983 	/*
     984 	 * If the thread is in memory, enter it onto a run queue;
     985 	 * otherwise, wake the swapper to bring it back in.
    986 	 */
    987 	if (l->l_flag & LW_INMEM) {
    988 		sched_enqueue(l, false);
    989 		resched_cpu(l);
    990 		lwp_unlock(l);
    991 	} else {
    992 		lwp_unlock(l);
    993 		uvm_kick_scheduler();
    994 	}
    995 }
    996 
    997 /*
    998  * suspendsched:
    999  *
    1000  *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
   1001  */
   1002 void
   1003 suspendsched(void)
   1004 {
   1005 	CPU_INFO_ITERATOR cii;
   1006 	struct cpu_info *ci;
   1007 	struct lwp *l;
   1008 	struct proc *p;
   1009 
   1010 	/*
   1011 	 * We do this by process in order not to violate the locking rules.
   1012 	 */
   1013 	mutex_enter(proc_lock);
   1014 	PROCLIST_FOREACH(p, &allproc) {
   1015 		mutex_enter(p->p_lock);
   1016 
   1017 		if ((p->p_flag & PK_SYSTEM) != 0) {
   1018 			mutex_exit(p->p_lock);
   1019 			continue;
   1020 		}
   1021 
   1022 		p->p_stat = SSTOP;
   1023 
   1024 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1025 			if (l == curlwp)
   1026 				continue;
   1027 
   1028 			lwp_lock(l);
   1029 
   1030 			/*
    1031 			 * Set LW_WREBOOT so that the LWP will suspend itself
    1032 			 * when it tries to return to user mode.  We want to
    1033 			 * get as many LWPs as possible to the user / kernel
    1034 			 * boundary, so that they will release any locks
    1035 			 * that they hold.
   1036 			 */
   1037 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
   1038 
   1039 			if (l->l_stat == LSSLEEP &&
   1040 			    (l->l_flag & LW_SINTR) != 0) {
   1041 				/* setrunnable() will release the lock. */
   1042 				setrunnable(l);
   1043 				continue;
   1044 			}
   1045 
   1046 			lwp_unlock(l);
   1047 		}
   1048 
   1049 		mutex_exit(p->p_lock);
   1050 	}
   1051 	mutex_exit(proc_lock);
   1052 
   1053 	/*
   1054 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
   1055 	 * They'll trap into the kernel and suspend themselves in userret().
   1056 	 */
   1057 	for (CPU_INFO_FOREACH(cii, ci)) {
   1058 		spc_lock(ci);
   1059 		cpu_need_resched(ci, RESCHED_IMMED);
   1060 		spc_unlock(ci);
   1061 	}
   1062 }
   1063 
   1064 /*
   1065  * sched_unsleep:
   1066  *
    1067  *	This is called when the LWP has not been awoken normally but instead
   1068  *	interrupted: for example, if the sleep timed out.  Because of this,
   1069  *	it's not a valid action for running or idle LWPs.
   1070  */
   1071 static u_int
   1072 sched_unsleep(struct lwp *l, bool cleanup)
   1073 {
   1074 
   1075 	lwp_unlock(l);
   1076 	panic("sched_unsleep");
   1077 }
   1078 
   1079 void
   1080 resched_cpu(struct lwp *l)
   1081 {
   1082 	struct cpu_info *ci;
   1083 
   1084 	/*
   1085 	 * XXXSMP
   1086 	 * Since l->l_cpu persists across a context switch,
   1087 	 * this gives us *very weak* processor affinity, in
   1088 	 * that we notify the CPU on which the process last
   1089 	 * ran that it should try to switch.
   1090 	 *
   1091 	 * This does not guarantee that the process will run on
   1092 	 * that processor next, because another processor might
   1093 	 * grab it the next time it performs a context switch.
   1094 	 *
   1095 	 * This also does not handle the case where its last
   1096 	 * CPU is running a higher-priority process, but every
   1097 	 * other CPU is running a lower-priority process.  There
   1098 	 * are ways to handle this situation, but they're not
   1099 	 * currently very pretty, and we also need to weigh the
   1100 	 * cost of moving a process from one CPU to another.
   1101 	 */
   1102 	ci = l->l_cpu;
   1103 	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
   1104 		cpu_need_resched(ci, 0);
   1105 }
   1106 
   1107 static void
   1108 sched_changepri(struct lwp *l, pri_t pri)
   1109 {
   1110 
   1111 	KASSERT(lwp_locked(l, NULL));
   1112 
   1113 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
   1114 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1115 		sched_dequeue(l);
   1116 		l->l_priority = pri;
   1117 		sched_enqueue(l, false);
   1118 	} else {
   1119 		l->l_priority = pri;
   1120 	}
   1121 	resched_cpu(l);
   1122 }
   1123 
   1124 static void
   1125 sched_lendpri(struct lwp *l, pri_t pri)
   1126 {
   1127 
   1128 	KASSERT(lwp_locked(l, NULL));
   1129 
   1130 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
   1131 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1132 		sched_dequeue(l);
   1133 		l->l_inheritedprio = pri;
   1134 		sched_enqueue(l, false);
   1135 	} else {
   1136 		l->l_inheritedprio = pri;
   1137 	}
   1138 	resched_cpu(l);
   1139 }
   1140 
   1141 struct lwp *
   1142 syncobj_noowner(wchan_t wchan)
   1143 {
   1144 
   1145 	return NULL;
   1146 }
   1147 
   1148 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
   1149 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
   1150 
   1151 /*
   1152  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
   1153  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
   1154  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
   1155  *
   1156  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
   1157  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
   1158  *
    1159  * If you don't want to bother with the faster/more-accurate formula, you
    1160  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    1161  * (more general) method of calculating the percentage of CPU used by a process.
   1162  */
   1163 #define	CCPU_SHIFT	(FSHIFT + 1)
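         /*
          * Worked check of the "decay 95% in 60 seconds" claim above
          * (editorial note): sched_pstats() runs once per second and
          * multiplies l_pctcpu by ccpu = exp(-1/20) each time, so after 60
          * seconds the old contribution has been scaled by exp(-60/20) =
          * exp(-3) ~= 0.0498, i.e. roughly 5% remains and about 95% has
          * decayed away.
          */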
   1164 
   1165 /*
   1166  * sched_pstats:
   1167  *
   1168  * Update process statistics and check CPU resource allocation.
   1169  * Call scheduler-specific hook to eventually adjust process/LWP
   1170  * priorities.
   1171  */
   1172 /* ARGSUSED */
   1173 void
   1174 sched_pstats(void *arg)
   1175 {
   1176 	struct rlimit *rlim;
   1177 	struct lwp *l;
   1178 	struct proc *p;
   1179 	int sig, clkhz;
   1180 	long runtm;
   1181 
   1182 	sched_pstats_ticks++;
   1183 
   1184 	mutex_enter(proc_lock);
   1185 	PROCLIST_FOREACH(p, &allproc) {
   1186 		/*
   1187 		 * Increment time in/out of memory and sleep time (if
   1188 		 * sleeping).  We ignore overflow; with 16-bit int's
   1189 		 * (remember them?) overflow takes 45 days.
   1190 		 */
   1191 		mutex_enter(p->p_lock);
   1192 		mutex_spin_enter(&p->p_stmutex);
   1193 		runtm = p->p_rtime.sec;
   1194 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1195 			if ((l->l_flag & LW_IDLE) != 0)
   1196 				continue;
   1197 			lwp_lock(l);
   1198 			runtm += l->l_rtime.sec;
   1199 			l->l_swtime++;
   1200 			sched_pstats_hook(l);
   1201 			lwp_unlock(l);
   1202 
   1203 			/*
   1204 			 * p_pctcpu is only for ps.
   1205 			 */
   1206 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
   1207 			if (l->l_slptime < 1) {
   1208 				clkhz = stathz != 0 ? stathz : hz;
   1209 #if	(FSHIFT >= CCPU_SHIFT)
   1210 				l->l_pctcpu += (clkhz == 100) ?
   1211 				    ((fixpt_t)l->l_cpticks) <<
   1212 				        (FSHIFT - CCPU_SHIFT) :
   1213 				    100 * (((fixpt_t) p->p_cpticks)
   1214 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
   1215 #else
   1216 				l->l_pctcpu += ((FSCALE - ccpu) *
   1217 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
   1218 #endif
   1219 				l->l_cpticks = 0;
   1220 			}
   1221 		}
   1222 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
   1223 		mutex_spin_exit(&p->p_stmutex);
   1224 
   1225 		/*
   1226 		 * Check if the process exceeds its CPU resource allocation.
   1227 		 * If over max, kill it.
   1228 		 */
   1229 		rlim = &p->p_rlimit[RLIMIT_CPU];
   1230 		sig = 0;
   1231 		if (runtm >= rlim->rlim_cur) {
   1232 			if (runtm >= rlim->rlim_max)
   1233 				sig = SIGKILL;
   1234 			else {
   1235 				sig = SIGXCPU;
   1236 				if (rlim->rlim_cur < rlim->rlim_max)
   1237 					rlim->rlim_cur += 5;
   1238 			}
   1239 		}
   1240 		mutex_exit(p->p_lock);
   1241 		if (sig)
   1242 			psignal(p, sig);
   1243 	}
   1244 	mutex_exit(proc_lock);
   1245 	uvm_meter();
   1246 	cv_wakeup(&lbolt);
   1247 	callout_schedule(&sched_pstats_ch, hz);
   1248 }
   1249 
   1250 void
   1251 sched_init(void)
   1252 {
   1253 
   1254 	cv_init(&lbolt, "lbolt");
   1255 	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
   1256 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
   1257 
   1258 	/* Balancing */
   1259 	worker_ci = curcpu();
   1260 	cacheht_time = mstohz(5);		/* ~5 ms  */
   1261 	balance_period = mstohz(300);		/* ~300ms */
   1262 
    1263 	/* Minimal count of LWPs for catching: log2(count of CPUs), capped at 4 */
   1264 	min_catch = min(ilog2(ncpu), 4);
   1265 
   1266 #ifdef PREEMPTION
   1267 	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_INTR, NULL,
   1268 	   "kpreempt", "defer: critical section");
   1269 	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_INTR, NULL,
   1270 	   "kpreempt", "defer: kernel_lock");
   1271 	evcnt_attach_dynamic(&kpreempt_ev_ipl, EVCNT_TYPE_INTR, NULL,
   1272 	   "kpreempt", "defer: IPL");
   1273 	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_INTR, NULL,
   1274 	   "kpreempt", "immediate");
   1275 #endif
   1276 
   1277 	/* Initialize balancing callout and run it */
   1278 #ifdef MULTIPROCESSOR
   1279 	callout_init(&balance_ch, CALLOUT_MPSAFE);
   1280 	callout_setfunc(&balance_ch, sched_balance, NULL);
   1281 	callout_schedule(&balance_ch, balance_period);
   1282 #endif
   1283 	sched_pstats(NULL);
   1284 }
   1285 
   1286 SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
   1287 {
   1288 	const struct sysctlnode *node = NULL;
   1289 
   1290 	sysctl_createv(clog, 0, NULL, NULL,
   1291 		CTLFLAG_PERMANENT,
   1292 		CTLTYPE_NODE, "kern", NULL,
   1293 		NULL, 0, NULL, 0,
   1294 		CTL_KERN, CTL_EOL);
   1295 	sysctl_createv(clog, 0, NULL, &node,
   1296 		CTLFLAG_PERMANENT,
   1297 		CTLTYPE_NODE, "sched",
   1298 		SYSCTL_DESCR("Scheduler options"),
   1299 		NULL, 0, NULL, 0,
   1300 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1301 
   1302 	if (node == NULL)
   1303 		return;
   1304 
   1305 	sysctl_createv(clog, 0, &node, NULL,
   1306 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1307 		CTLTYPE_INT, "cacheht_time",
   1308 		SYSCTL_DESCR("Cache hotness time (in ticks)"),
   1309 		NULL, 0, &cacheht_time, 0,
   1310 		CTL_CREATE, CTL_EOL);
   1311 	sysctl_createv(clog, 0, &node, NULL,
   1312 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1313 		CTLTYPE_INT, "balance_period",
   1314 		SYSCTL_DESCR("Balance period (in ticks)"),
   1315 		NULL, 0, &balance_period, 0,
   1316 		CTL_CREATE, CTL_EOL);
   1317 	sysctl_createv(clog, 0, &node, NULL,
   1318 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1319 		CTLTYPE_INT, "min_catch",
   1320 		SYSCTL_DESCR("Minimal count of threads for catching"),
   1321 		NULL, 0, &min_catch, 0,
   1322 		CTL_CREATE, CTL_EOL);
   1323 	sysctl_createv(clog, 0, &node, NULL,
   1324 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1325 		CTLTYPE_INT, "timesoftints",
   1326 		SYSCTL_DESCR("Track CPU time for soft interrupts"),
   1327 		NULL, 0, &softint_timing, 0,
   1328 		CTL_CREATE, CTL_EOL);
   1329 	sysctl_createv(clog, 0, &node, NULL,
   1330 #ifdef PREEMPTION
   1331 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1332 #else
   1333 		CTLFLAG_PERMANENT,
   1334 #endif
   1335 		CTLTYPE_INT, "kpreempt_pri",
   1336 		SYSCTL_DESCR("Minimum priority to trigger kernel preemption"),
   1337 		NULL, 0, &sched_kpreempt_pri, 0,
   1338 		CTL_CREATE, CTL_EOL);
   1339 	sysctl_createv(clog, 0, &node, NULL,
   1340 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1341 		CTLTYPE_INT, "upreempt_pri",
   1342 		SYSCTL_DESCR("Minimum priority to trigger user preemption"),
   1343 		NULL, 0, &sched_upreempt_pri, 0,
   1344 		CTL_CREATE, CTL_EOL);
   1345 }
   1346 
   1347 void
   1348 sched_cpuattach(struct cpu_info *ci)
   1349 {
   1350 	runqueue_t *ci_rq;
   1351 	void *rq_ptr;
   1352 	u_int i, size;
   1353 
   1354 	if (ci->ci_schedstate.spc_lwplock == NULL) {
   1355 		ci->ci_schedstate.spc_lwplock =
   1356 		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1357 	}
   1358 	if (ci == lwp0.l_cpu) {
   1359 		/* Initialize the scheduler structure of the primary LWP */
   1360 		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
   1361 	}
   1362 	if (ci->ci_schedstate.spc_mutex != NULL) {
   1363 		/* Already initialized. */
   1364 		return;
   1365 	}
   1366 
   1367 	/* Allocate the run queue */
   1368 	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
   1369 	rq_ptr = kmem_zalloc(size, KM_SLEEP);
   1370 	if (rq_ptr == NULL) {
   1371 		panic("sched_cpuattach: could not allocate the runqueue");
   1372 	}
   1373 	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));
   1374 
   1375 	/* Initialize run queues */
   1376 	ci->ci_schedstate.spc_mutex =
   1377 	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1378 	for (i = 0; i < PRI_RT_COUNT; i++)
   1379 		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
   1380 	for (i = 0; i < PRI_TS_COUNT; i++)
   1381 		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
   1382 
   1383 	ci->ci_schedstate.spc_sched_info = ci_rq;
   1384 }
   1385 
   1386 /*
   1387  * Control of the runqueue.
   1388  */
   1389 
   1390 static void *
   1391 sched_getrq(runqueue_t *ci_rq, const pri_t prio)
   1392 {
   1393 
   1394 	KASSERT(prio < PRI_COUNT);
   1395 	return (prio <= PRI_HIGHEST_TS) ?
   1396 	    &ci_rq->r_ts_queue[prio].q_head :
   1397 	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
   1398 }
   1399 
   1400 void
   1401 sched_enqueue(struct lwp *l, bool swtch)
   1402 {
   1403 	runqueue_t *ci_rq;
   1404 	struct schedstate_percpu *spc;
   1405 	TAILQ_HEAD(, lwp) *q_head;
   1406 	const pri_t eprio = lwp_eprio(l);
   1407 	struct cpu_info *ci;
   1408 	int type;
   1409 
   1410 	ci = l->l_cpu;
   1411 	spc = &ci->ci_schedstate;
   1412 	ci_rq = spc->spc_sched_info;
   1413 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1414 
   1415 	/* Update the last run time on switch */
   1416 	if (__predict_true(swtch == true)) {
    1417 		l->l_rticksum += (hardclock_ticks - l->l_rticks);
    1418 		l->l_rticks = hardclock_ticks;
   1419 	} else if (l->l_rticks == 0)
   1420 		l->l_rticks = hardclock_ticks;
   1421 
   1422 	/* Enqueue the thread */
   1423 	q_head = sched_getrq(ci_rq, eprio);
   1424 	if (TAILQ_EMPTY(q_head)) {
   1425 		u_int i;
   1426 		uint32_t q;
   1427 
   1428 		/* Mark bit */
   1429 		i = eprio >> BITMAP_SHIFT;
   1430 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1431 		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
   1432 		ci_rq->r_bitmap[i] |= q;
   1433 	}
   1434 	TAILQ_INSERT_TAIL(q_head, l, l_runq);
   1435 	ci_rq->r_count++;
   1436 	if ((l->l_pflag & LP_BOUND) == 0)
   1437 		ci_rq->r_mcount++;
   1438 
   1439 	/*
   1440 	 * Update the value of highest priority in the runqueue,
    1441 	 * if the priority of this thread is higher.
   1442 	 */
   1443 	if (eprio > spc->spc_maxpriority)
   1444 		spc->spc_maxpriority = eprio;
   1445 
   1446 	sched_newts(l);
   1447 
   1448 	/*
   1449 	 * Wake the chosen CPU or cause a preemption if the newly
   1450 	 * enqueued thread has higher priority.  Don't cause a
   1451 	 * preemption if the thread is yielding (swtch).
   1452 	 */
   1453 	if (!swtch && eprio > spc->spc_curpriority) {
   1454 		if (eprio >= sched_kpreempt_pri)
   1455 			type = RESCHED_KPREEMPT;
   1456 		else if (eprio >= sched_upreempt_pri)
   1457 			type = RESCHED_IMMED;
   1458 		else
   1459 			type = 0;
   1460 		cpu_need_resched(ci, type);
   1461 	}
   1462 }
   1463 
   1464 void
   1465 sched_dequeue(struct lwp *l)
   1466 {
   1467 	runqueue_t *ci_rq;
   1468 	TAILQ_HEAD(, lwp) *q_head;
   1469 	struct schedstate_percpu *spc;
   1470 	const pri_t eprio = lwp_eprio(l);
   1471 
   1472 	spc = & l->l_cpu->ci_schedstate;
   1473 	ci_rq = spc->spc_sched_info;
   1474 	KASSERT(lwp_locked(l, spc->spc_mutex));
   1475 
   1476 	KASSERT(eprio <= spc->spc_maxpriority);
   1477 	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
   1478 	KASSERT(ci_rq->r_count > 0);
   1479 
   1480 	ci_rq->r_count--;
   1481 	if ((l->l_pflag & LP_BOUND) == 0)
   1482 		ci_rq->r_mcount--;
   1483 
   1484 	q_head = sched_getrq(ci_rq, eprio);
   1485 	TAILQ_REMOVE(q_head, l, l_runq);
   1486 	if (TAILQ_EMPTY(q_head)) {
   1487 		u_int i;
   1488 		uint32_t q;
   1489 
   1490 		/* Unmark bit */
   1491 		i = eprio >> BITMAP_SHIFT;
   1492 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1493 		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
   1494 		ci_rq->r_bitmap[i] &= ~q;
   1495 
   1496 		/*
    1497 		 * Update the value of the highest priority in the runqueue,
    1498 		 * in case it was the last thread in the queue of that priority.
   1499 		 */
   1500 		if (eprio != spc->spc_maxpriority)
   1501 			return;
   1502 
   1503 		do {
   1504 			if (ci_rq->r_bitmap[i] != 0) {
   1505 				q = ffs(ci_rq->r_bitmap[i]);
   1506 				spc->spc_maxpriority =
   1507 				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
   1508 				return;
   1509 			}
   1510 		} while (i--);
   1511 
   1512 		/* If not found - set the lowest value */
   1513 		spc->spc_maxpriority = 0;
   1514 	}
   1515 }
   1516 
   1517 /*
   1518  * Migration and balancing.
   1519  */
   1520 
   1521 #ifdef MULTIPROCESSOR
   1522 
   1523 /* Estimate if LWP is cache-hot */
   1524 static inline bool
   1525 lwp_cache_hot(const struct lwp *l)
   1526 {
   1527 
   1528 	if (l->l_slptime || l->l_rticks == 0)
   1529 		return false;
   1530 
   1531 	return (hardclock_ticks - l->l_rticks <= cacheht_time);
   1532 }
   1533 
   1534 /* Check if LWP can migrate to the chosen CPU */
   1535 static inline bool
   1536 sched_migratable(const struct lwp *l, struct cpu_info *ci)
   1537 {
   1538 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
   1539 
   1540 	/* CPU is offline */
   1541 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
   1542 		return false;
   1543 
   1544 	/* Affinity bind */
   1545 	if (__predict_false(l->l_flag & LW_AFFINITY))
   1546 		return CPU_ISSET(cpu_index(ci), &l->l_affinity);
   1547 
   1548 	/* Processor-set */
   1549 	return (spc->spc_psid == l->l_psid);
   1550 }
   1551 
   1552 /*
    1553  * Estimate whether the LWP should migrate to another CPU.
    1554  * Return the CPU to run on, which may differ from the current one.
   1555  */
   1556 struct cpu_info *
   1557 sched_takecpu(struct lwp *l)
   1558 {
   1559 	struct cpu_info *ci, *tci, *first, *next;
   1560 	struct schedstate_percpu *spc;
   1561 	runqueue_t *ci_rq, *ici_rq;
   1562 	pri_t eprio, lpri, pri;
   1563 
   1564 	KASSERT(lwp_locked(l, NULL));
   1565 
   1566 	ci = l->l_cpu;
   1567 	spc = &ci->ci_schedstate;
   1568 	ci_rq = spc->spc_sched_info;
   1569 
   1570 	/* If thread is strictly bound, do not estimate other CPUs */
   1571 	if (l->l_pflag & LP_BOUND)
   1572 		return ci;
   1573 
   1574 	/* CPU of this thread is idling - run there */
   1575 	if (ci_rq->r_count == 0)
   1576 		return ci;
   1577 
   1578 	eprio = lwp_eprio(l);
   1579 
   1580 	/* Stay if thread is cache-hot */
   1581 	if (__predict_true(l->l_stat != LSIDL) &&
   1582 	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
   1583 		return ci;
   1584 
   1585 	/* Run on current CPU if priority of thread is higher */
   1586 	ci = curcpu();
   1587 	spc = &ci->ci_schedstate;
   1588 	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
   1589 		return ci;
   1590 
   1591 	/*
   1592 	 * Look for the CPU with the lowest priority thread.  In case of
    1593 	 * equal priority, choose the CPU with the fewest threads.
   1594 	 */
   1595 	first = l->l_cpu;
   1596 	ci = first;
   1597 	tci = first;
   1598 	lpri = PRI_COUNT;
   1599 	do {
   1600 		next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
   1601 		spc = &ci->ci_schedstate;
   1602 		ici_rq = spc->spc_sched_info;
   1603 		pri = max(spc->spc_curpriority, spc->spc_maxpriority);
   1604 		if (pri > lpri)
   1605 			continue;
   1606 
   1607 		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
   1608 			continue;
   1609 
   1610 		if (!sched_migratable(l, ci))
   1611 			continue;
   1612 
   1613 		lpri = pri;
   1614 		tci = ci;
   1615 		ci_rq = ici_rq;
   1616 	} while (ci = next, ci != first);
   1617 
   1618 	return tci;
   1619 }
   1620 
   1621 /*
    1622  * Try to catch an LWP from the runqueue of another CPU.
   1623  */
   1624 static struct lwp *
   1625 sched_catchlwp(void)
   1626 {
   1627 	struct cpu_info *curci = curcpu(), *ci = worker_ci;
   1628 	struct schedstate_percpu *spc;
   1629 	TAILQ_HEAD(, lwp) *q_head;
   1630 	runqueue_t *ci_rq;
   1631 	struct lwp *l;
   1632 
   1633 	if (curci == ci)
   1634 		return NULL;
   1635 
   1636 	/* Lockless check */
   1637 	spc = &ci->ci_schedstate;
   1638 	ci_rq = spc->spc_sched_info;
   1639 	if (ci_rq->r_mcount < min_catch)
   1640 		return NULL;
   1641 
   1642 	/*
   1643 	 * Double-lock the runqueues.
   1644 	 */
   1645 	if (curci < ci) {
   1646 		spc_lock(ci);
   1647 	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
   1648 		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;
   1649 
   1650 		spc_unlock(curci);
   1651 		spc_lock(ci);
   1652 		spc_lock(curci);
   1653 
   1654 		if (cur_rq->r_count) {
   1655 			spc_unlock(ci);
   1656 			return NULL;
   1657 		}
   1658 	}
   1659 
   1660 	if (ci_rq->r_mcount < min_catch) {
   1661 		spc_unlock(ci);
   1662 		return NULL;
   1663 	}
   1664 
   1665 	/* Take the highest priority thread */
   1666 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1667 	l = TAILQ_FIRST(q_head);
   1668 
   1669 	for (;;) {
   1670 		/* Check the first and next result from the queue */
   1671 		if (l == NULL)
   1672 			break;
   1673 		KASSERT(l->l_stat == LSRUN);
   1674 		KASSERT(l->l_flag & LW_INMEM);
   1675 
    1676 		/* Look for threads that are allowed to migrate */
   1677 		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
   1678 		    !sched_migratable(l, curci)) {
   1679 			l = TAILQ_NEXT(l, l_runq);
   1680 			continue;
   1681 		}
   1682 
   1683 		/* Grab the thread, and move to the local run queue */
   1684 		sched_dequeue(l);
   1685 		l->l_cpu = curci;
   1686 		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
   1687 		sched_enqueue(l, false);
   1688 		return l;
   1689 	}
   1690 	spc_unlock(ci);
   1691 
   1692 	return l;
   1693 }
   1694 
   1695 /*
    1696  * Periodic calculations for balancing.
   1697  */
   1698 static void
   1699 sched_balance(void *nocallout)
   1700 {
   1701 	struct cpu_info *ci, *hci;
   1702 	runqueue_t *ci_rq;
   1703 	CPU_INFO_ITERATOR cii;
   1704 	u_int highest;
   1705 
   1706 	hci = curcpu();
   1707 	highest = 0;
   1708 
    1709 	/* Do the counting locklessly */
   1710 	for (CPU_INFO_FOREACH(cii, ci)) {
   1711 		ci_rq = ci->ci_schedstate.spc_sched_info;
   1712 
   1713 		/* Average count of the threads */
   1714 		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;
   1715 
   1716 		/* Look for CPU with the highest average */
   1717 		if (ci_rq->r_avgcount > highest) {
   1718 			hci = ci;
   1719 			highest = ci_rq->r_avgcount;
   1720 		}
   1721 	}
   1722 
   1723 	/* Update the worker */
   1724 	worker_ci = hci;
   1725 
   1726 	if (nocallout == NULL)
   1727 		callout_schedule(&balance_ch, balance_period);
   1728 }
   1729 
   1730 #else
   1731 
   1732 struct cpu_info *
   1733 sched_takecpu(struct lwp *l)
   1734 {
   1735 
   1736 	return l->l_cpu;
   1737 }
   1738 
   1739 #endif	/* MULTIPROCESSOR */
   1740 
   1741 /*
   1742  * Scheduler mill.
   1743  */
   1744 struct lwp *
   1745 sched_nextlwp(void)
   1746 {
   1747 	struct cpu_info *ci = curcpu();
   1748 	struct schedstate_percpu *spc;
   1749 	TAILQ_HEAD(, lwp) *q_head;
   1750 	runqueue_t *ci_rq;
   1751 	struct lwp *l;
   1752 
   1753 	spc = &ci->ci_schedstate;
   1754 	ci_rq = spc->spc_sched_info;
   1755 
   1756 #ifdef MULTIPROCESSOR
    1757 	/* If the runqueue is empty, try to catch a thread from another CPU */
   1758 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
   1759 		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
   1760 			return NULL;
   1761 	} else if (ci_rq->r_count == 0) {
   1762 		/* Reset the counter, and call the balancer */
   1763 		ci_rq->r_avgcount = 0;
   1764 		sched_balance(ci);
   1765 
   1766 		/* The re-locking will be done inside */
   1767 		return sched_catchlwp();
   1768 	}
   1769 #else
   1770 	if (ci_rq->r_count == 0)
   1771 		return NULL;
   1772 #endif
   1773 
   1774 	/* Take the highest priority thread */
   1775 	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
   1776 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1777 	l = TAILQ_FIRST(q_head);
   1778 	KASSERT(l != NULL);
   1779 
   1780 	sched_oncpu(l);
   1781 	l->l_rticks = hardclock_ticks;
   1782 
   1783 	return l;
   1784 }
   1785 
   1786 bool
   1787 sched_curcpu_runnable_p(void)
   1788 {
   1789 	const struct cpu_info *ci;
   1790 	const runqueue_t *ci_rq;
   1791 	bool rv;
   1792 
   1793 	kpreempt_disable();
   1794 	ci = curcpu();
   1795 	ci_rq = ci->ci_schedstate.spc_sched_info;
   1796 
   1797 #ifndef __HAVE_FAST_SOFTINTS
   1798 	if (ci->ci_data.cpu_softints) {
   1799 		kpreempt_enable();
   1800 		return true;
   1801 	}
   1802 #endif
   1803 
   1804 	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
   1805 		rv = (ci_rq->r_count - ci_rq->r_mcount);
   1806 	else
   1807 		rv = ci_rq->r_count != 0;
   1808 	kpreempt_enable();
   1809 
   1810 	return rv;
   1811 }
   1812 
   1813 /*
   1814  * Debugging.
   1815  */
   1816 
   1817 #ifdef DDB
   1818 
   1819 void
   1820 sched_print_runqueue(void (*pr)(const char *, ...)
   1821     __attribute__((__format__(__printf__,1,2))))
   1822 {
   1823 	runqueue_t *ci_rq;
   1824 	struct schedstate_percpu *spc;
   1825 	struct lwp *l;
   1826 	struct proc *p;
   1827 	int i;
   1828 	struct cpu_info *ci;
   1829 	CPU_INFO_ITERATOR cii;
   1830 
   1831 	for (CPU_INFO_FOREACH(cii, ci)) {
   1832 		spc = &ci->ci_schedstate;
   1833 		ci_rq = spc->spc_sched_info;
   1834 
   1835 		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
   1836 		(*pr)(" pid.lid = %d.%d, threads count = %u, "
   1837 		    "avgcount = %u, highest pri = %d\n",
   1838 #ifdef MULTIPROCESSOR
   1839 		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
   1840 #else
   1841 		    curlwp->l_proc->p_pid, curlwp->l_lid,
   1842 #endif
   1843 		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority);
   1844 		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
   1845 		do {
   1846 			uint32_t q;
   1847 			q = ci_rq->r_bitmap[i];
   1848 			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
   1849 		} while (i--);
   1850 	}
   1851 
   1852 	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %s\n",
   1853 	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "LRTIME");
   1854 
   1855 	PROCLIST_FOREACH(p, &allproc) {
   1856 		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
   1857 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1858 			ci = l->l_cpu;
   1859 			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %u\n",
   1860 			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
   1861 			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
   1862 			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
   1863 			    l, ci->ci_index,
   1864 			    (u_int)(hardclock_ticks - l->l_rticks));
   1865 		}
   1866 	}
   1867 }
   1868 
   1869 #endif
   1870