      1 /*	$NetBSD: kern_synch.c,v 1.230 2008/04/27 11:37:48 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  * Daniel Sieger.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the NetBSD
     23  *	Foundation, Inc. and its contributors.
     24  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25  *    contributors may be used to endorse or promote products derived
     26  *    from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38  * POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 
     41 /*
     42  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
     43  * All rights reserved.
     44  *
     45  * Redistribution and use in source and binary forms, with or without
     46  * modification, are permitted provided that the following conditions
     47  * are met:
     48  * 1. Redistributions of source code must retain the above copyright
     49  *    notice, this list of conditions and the following disclaimer.
     50  * 2. Redistributions in binary form must reproduce the above copyright
     51  *    notice, this list of conditions and the following disclaimer in the
     52  *    documentation and/or other materials provided with the distribution.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  */
     66 
     67 /*-
     68  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     69  *	The Regents of the University of California.  All rights reserved.
     70  * (c) UNIX System Laboratories, Inc.
     71  * All or some portions of this file are derived from material licensed
     72  * to the University of California by American Telephone and Telegraph
     73  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     74  * the permission of UNIX System Laboratories, Inc.
     75  *
     76  * Redistribution and use in source and binary forms, with or without
     77  * modification, are permitted provided that the following conditions
     78  * are met:
     79  * 1. Redistributions of source code must retain the above copyright
     80  *    notice, this list of conditions and the following disclaimer.
     81  * 2. Redistributions in binary form must reproduce the above copyright
     82  *    notice, this list of conditions and the following disclaimer in the
     83  *    documentation and/or other materials provided with the distribution.
     84  * 3. Neither the name of the University nor the names of its contributors
     85  *    may be used to endorse or promote products derived from this software
     86  *    without specific prior written permission.
     87  *
     88  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     89  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     90  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     91  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     92  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     93  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     94  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     95  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     96  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     97  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     98  * SUCH DAMAGE.
     99  *
    100  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
    101  */
    102 
    103 #include <sys/cdefs.h>
    104 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.230 2008/04/27 11:37:48 ad Exp $");
    105 
    106 #include "opt_kstack.h"
    107 #include "opt_lockdebug.h"
    108 #include "opt_multiprocessor.h"
    109 #include "opt_perfctrs.h"
    110 
    111 #define	__MUTEX_PRIVATE
    112 
    113 #include <sys/param.h>
    114 #include <sys/systm.h>
    115 #include <sys/proc.h>
    116 #include <sys/kernel.h>
    117 #if defined(PERFCTRS)
    118 #include <sys/pmc.h>
    119 #endif
    120 #include <sys/cpu.h>
    121 #include <sys/resourcevar.h>
    122 #include <sys/sched.h>
    123 #include <sys/syscall_stats.h>
    124 #include <sys/sleepq.h>
    125 #include <sys/lockdebug.h>
    126 #include <sys/evcnt.h>
    127 #include <sys/intr.h>
    128 #include <sys/lwpctl.h>
    129 #include <sys/atomic.h>
    130 #include <sys/simplelock.h>
    131 #include <sys/bitops.h>
    132 #include <sys/kmem.h>
    133 #include <sys/sysctl.h>
    134 #include <sys/idle.h>
    135 
    136 #include <uvm/uvm_extern.h>
    137 
    138 /*
     139  * Priority related definitions.
    140  */
    141 #define	PRI_TS_COUNT	(NPRI_USER)
    142 #define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
    143 #define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)
    144 
    145 #define	PRI_HIGHEST_TS	(MAXPRI_USER)
    146 
    147 /*
    148  * Bits per map.
    149  */
    150 #define	BITMAP_BITS	(32)
    151 #define	BITMAP_SHIFT	(5)
    152 #define	BITMAP_MSB	(0x80000000U)
    153 #define	BITMAP_MASK	(BITMAP_BITS - 1)
    154 
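         /*
          * Illustrative note (not from the original source): with the values
          * above, an effective priority "eprio" lives in bitmap word
          * (eprio >> BITMAP_SHIFT) and is represented by the bit
          * (BITMAP_MSB >> (eprio & BITMAP_MASK)).  For example, eprio == 37
          * maps to word 1, bit 0x04000000.  Higher priorities within a word
          * map to lower-order bits, which is why sched_dequeue() below can
          * recover the new maximum priority with ffs().
          */
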
    155 /*
    156  * Structures, runqueue.
    157  */
    158 
    159 typedef struct {
    160 	TAILQ_HEAD(, lwp) q_head;
    161 } queue_t;
    162 
    163 typedef struct {
     164 	/* Bitmap of non-empty queues */
    165 	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
    166 	/* Counters */
    167 	u_int		r_count;	/* Count of the threads */
    168 	u_int		r_avgcount;	/* Average count of threads */
    169 	u_int		r_mcount;	/* Count of migratable threads */
    170 	/* Runqueues */
    171 	queue_t		r_rt_queue[PRI_RT_COUNT];
    172 	queue_t		r_ts_queue[PRI_TS_COUNT];
    173 } runqueue_t;
    174 
    175 static u_int	sched_unsleep(struct lwp *, bool);
    176 static void	sched_changepri(struct lwp *, pri_t);
    177 static void	sched_lendpri(struct lwp *, pri_t);
    178 static void	*sched_getrq(runqueue_t *, const pri_t);
    179 #ifdef MULTIPROCESSOR
    180 static lwp_t	*sched_catchlwp(void);
    181 static void	sched_balance(void *);
    182 #endif
    183 
    184 syncobj_t sleep_syncobj = {
    185 	SOBJ_SLEEPQ_SORTED,
    186 	sleepq_unsleep,
    187 	sleepq_changepri,
    188 	sleepq_lendpri,
    189 	syncobj_noowner,
    190 };
    191 
    192 syncobj_t sched_syncobj = {
    193 	SOBJ_SLEEPQ_SORTED,
    194 	sched_unsleep,
    195 	sched_changepri,
    196 	sched_lendpri,
    197 	syncobj_noowner,
    198 };
    199 
    200 const int 	schedppq = 1;
    201 callout_t 	sched_pstats_ch;
    202 unsigned	sched_pstats_ticks;
    203 kcondvar_t	lbolt;			/* once a second sleep address */
    204 
    205 /*
    206  * Migration and balancing.
    207  */
    208 static u_int	cacheht_time;		/* Cache hotness time */
    209 static u_int	min_catch;		/* Minimal LWP count for catching */
    210 static u_int	balance_period;		/* Balance period */
    211 static struct cpu_info *worker_ci;	/* Victim CPU */
    212 #ifdef MULTIPROCESSOR
    213 static struct callout balance_ch;	/* Callout of balancer */
    214 #endif
    215 
    216 /*
    217  * During autoconfiguration or after a panic, a sleep will simply lower the
    218  * priority briefly to allow interrupts, then return.  The priority to be
    219  * used (safepri) is machine-dependent, thus this value is initialized and
    220  * maintained in the machine-dependent layers.  This priority will typically
    221  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    222  * it can be made higher to block network software interrupts after panics.
    223  */
    224 int	safepri;
    225 
    226 /*
    227  * OBSOLETE INTERFACE
    228  *
    229  * General sleep call.  Suspends the current process until a wakeup is
    230  * performed on the specified identifier.  The process will then be made
    231  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
    232  * means no timeout).  If pri includes PCATCH flag, signals are checked
    233  * before and after sleeping, else signals are not checked.  Returns 0 if
    234  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
    235  * signal needs to be delivered, ERESTART is returned if the current system
    236  * call should be restarted if possible, and EINTR is returned if the system
     237  * call should be interrupted by the signal.
    238  *
    239  * The interlock is held until we are on a sleep queue. The interlock will
     240  * be locked before returning to the caller unless the PNORELOCK flag
    241  * is specified, in which case the interlock will always be unlocked upon
    242  * return.
    243  */
    244 int
    245 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    246 	volatile struct simplelock *interlock)
    247 {
    248 	struct lwp *l = curlwp;
    249 	sleepq_t *sq;
    250 	int error;
    251 
    252 	KASSERT((l->l_pflag & LP_INTR) == 0);
    253 
    254 	if (sleepq_dontsleep(l)) {
    255 		(void)sleepq_abort(NULL, 0);
    256 		if ((priority & PNORELOCK) != 0)
    257 			simple_unlock(interlock);
    258 		return 0;
    259 	}
    260 
    261 	l->l_kpriority = true;
    262 	sq = sleeptab_lookup(&sleeptab, ident);
    263 	sleepq_enter(sq, l);
    264 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    265 
    266 	if (interlock != NULL) {
    267 		KASSERT(simple_lock_held(interlock));
    268 		simple_unlock(interlock);
    269 	}
    270 
    271 	error = sleepq_block(timo, priority & PCATCH);
    272 
    273 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    274 		simple_lock(interlock);
    275 
    276 	return error;
    277 }
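
         /*
          * Usage sketch for the obsolete interface above (hypothetical
          * caller; "sc", "sc_slock" and "sc_ready" are illustrative names,
          * not from this file):
          *
          *	simple_lock(&sc->sc_slock);
          *	while (!sc->sc_ready) {
          *		error = ltsleep(&sc->sc_ready, PWAIT | PCATCH,
          *		    "scwait", 0, &sc->sc_slock);
          *		if (error)
          *			break;
          *	}
          *	simple_unlock(&sc->sc_slock);
          *
          * The interlock is dropped once the LWP is on the sleep queue and,
          * unless PNORELOCK is passed, is re-taken before ltsleep() returns.
          */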
    278 
    279 int
    280 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    281 	kmutex_t *mtx)
    282 {
    283 	struct lwp *l = curlwp;
    284 	sleepq_t *sq;
    285 	int error;
    286 
    287 	KASSERT((l->l_pflag & LP_INTR) == 0);
    288 
    289 	if (sleepq_dontsleep(l)) {
    290 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    291 		return 0;
    292 	}
    293 
    294 	l->l_kpriority = true;
    295 	sq = sleeptab_lookup(&sleeptab, ident);
    296 	sleepq_enter(sq, l);
    297 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    298 	mutex_exit(mtx);
    299 	error = sleepq_block(timo, priority & PCATCH);
    300 
    301 	if ((priority & PNORELOCK) == 0)
    302 		mutex_enter(mtx);
    303 
    304 	return error;
    305 }
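
         /*
          * Usage sketch for mtsleep() above (hypothetical caller; "sc",
          * "sc_lock" and "sc_ready" are illustrative names only):
          *
          *	mutex_enter(&sc->sc_lock);
          *	while (!sc->sc_ready) {
          *		error = mtsleep(&sc->sc_ready, PWAIT, "scwait", 0,
          *		    &sc->sc_lock);
          *		if (error)
          *			break;
          *	}
          *	mutex_exit(&sc->sc_lock);
          *
          * New code would normally use a condition variable (cv_wait() and
          * friends) rather than this wakeup(9)-style interface.
          */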
    306 
    307 /*
    308  * General sleep call for situations where a wake-up is not expected.
    309  */
    310 int
    311 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    312 {
    313 	struct lwp *l = curlwp;
    314 	sleepq_t *sq;
    315 	int error;
    316 
    317 	if (sleepq_dontsleep(l))
    318 		return sleepq_abort(NULL, 0);
    319 
    320 	if (mtx != NULL)
    321 		mutex_exit(mtx);
    322 	l->l_kpriority = true;
    323 	sq = sleeptab_lookup(&sleeptab, l);
    324 	sleepq_enter(sq, l);
    325 	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
    326 	error = sleepq_block(timo, intr);
    327 	if (mtx != NULL)
    328 		mutex_enter(mtx);
    329 
    330 	return error;
    331 }
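
         /*
          * Example (illustrative only): sleep for roughly 100ms without
          * expecting a wakeup, not interruptible by signals and with no
          * mutex to drop:
          *
          *	(void)kpause("pause", false, mstohz(100), NULL);
          */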
    332 
    333 /*
    334  * OBSOLETE INTERFACE
    335  *
    336  * Make all processes sleeping on the specified identifier runnable.
    337  */
    338 void
    339 wakeup(wchan_t ident)
    340 {
    341 	sleepq_t *sq;
    342 
    343 	if (cold)
    344 		return;
    345 
    346 	sq = sleeptab_lookup(&sleeptab, ident);
    347 	sleepq_wake(sq, ident, (u_int)-1);
    348 }
    349 
    350 /*
    351  * OBSOLETE INTERFACE
    352  *
    353  * Make the highest priority process first in line on the specified
    354  * identifier runnable.
    355  */
    356 void
    357 wakeup_one(wchan_t ident)
    358 {
    359 	sleepq_t *sq;
    360 
    361 	if (cold)
    362 		return;
    363 
    364 	sq = sleeptab_lookup(&sleeptab, ident);
    365 	sleepq_wake(sq, ident, 1);
    366 }
    367 
    368 
    369 /*
    370  * General yield call.  Puts the current process back on its run queue and
    371  * performs a voluntary context switch.  Should only be called when the
     372  * current process explicitly requests it (e.g. sched_yield(2)).
    373  */
    374 void
    375 yield(void)
    376 {
    377 	struct lwp *l = curlwp;
    378 
    379 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    380 	lwp_lock(l);
    381 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    382 	KASSERT(l->l_stat == LSONPROC);
    383 	l->l_kpriority = false;
    384 	(void)mi_switch(l);
    385 	KERNEL_LOCK(l->l_biglocks, l);
    386 }
    387 
    388 /*
    389  * General preemption call.  Puts the current process back on its run queue
    390  * and performs an involuntary context switch.
    391  */
    392 void
    393 preempt(void)
    394 {
    395 	struct lwp *l = curlwp;
    396 
    397 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    398 	lwp_lock(l);
    399 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    400 	KASSERT(l->l_stat == LSONPROC);
    401 	l->l_kpriority = false;
    402 	l->l_nivcsw++;
    403 	(void)mi_switch(l);
    404 	KERNEL_LOCK(l->l_biglocks, l);
    405 }
    406 
    407 /*
    408  * Disable kernel preemption.
    409  */
    410 void
    411 kpreempt_disable(void)
    412 {
    413 
    414 	KPREEMPT_DISABLE();
    415 }
    416 
    417 /*
    418  * Reenable kernel preemption.
    419  */
    420 void
    421 kpreempt_enable(void)
    422 {
    423 
    424 	KPREEMPT_ENABLE();
    425 }
    426 
    427 /*
    428  * Return true if preemption is explicitly disabled.
    429  */
    430 bool
    431 kpreempt_disabled(void)
    432 {
    433 
    434 	/* Just a diagnostic check, so for now always true. */
    435 	return true;
    436 }
    437 
    438 /*
    439  * Compute the amount of time during which the current lwp was running.
    440  *
    441  * - update l_rtime unless it's an idle lwp.
    442  */
    443 
    444 void
    445 updatertime(lwp_t *l, const struct bintime *now)
    446 {
    447 
    448 	if ((l->l_flag & LW_IDLE) != 0)
    449 		return;
    450 
    451 	/* rtime += now - stime */
    452 	bintime_add(&l->l_rtime, now);
    453 	bintime_sub(&l->l_rtime, &l->l_stime);
    454 }
    455 
    456 /*
    457  * The machine independent parts of context switch.
    458  *
    459  * Returns 1 if another LWP was actually run.
    460  */
    461 int
    462 mi_switch(lwp_t *l)
    463 {
    464 	struct cpu_info *ci, *tci = NULL;
    465 	struct schedstate_percpu *spc;
    466 	struct lwp *newl;
    467 	int retval, oldspl;
    468 	struct bintime bt;
    469 	bool returning;
    470 
    471 	KASSERT(lwp_locked(l, NULL));
    472 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    473 
    474 #ifdef KSTACK_CHECK_MAGIC
    475 	kstack_check_magic(l);
    476 #endif
    477 
    478 	binuptime(&bt);
    479 
    480 	KDASSERT(l->l_cpu == curcpu());
    481 	ci = l->l_cpu;
    482 	spc = &ci->ci_schedstate;
    483 	returning = false;
    484 	newl = NULL;
    485 
    486 	/*
    487 	 * If we have been asked to switch to a specific LWP, then there
    488 	 * is no need to inspect the run queues.  If a soft interrupt is
    489 	 * blocking, then return to the interrupted thread without adjusting
     490 	 * VM context or its start time: neither has been changed in order
    491 	 * to take the interrupt.
    492 	 */
    493 	if (l->l_switchto != NULL) {
    494 		if ((l->l_pflag & LP_INTR) != 0) {
    495 			returning = true;
    496 			softint_block(l);
    497 			if ((l->l_flag & LW_TIMEINTR) != 0)
    498 				updatertime(l, &bt);
    499 		}
    500 		newl = l->l_switchto;
    501 		l->l_switchto = NULL;
    502 	}
    503 #ifndef __HAVE_FAST_SOFTINTS
    504 	else if (ci->ci_data.cpu_softints != 0) {
    505 		/* There are pending soft interrupts, so pick one. */
    506 		newl = softint_picklwp();
    507 		newl->l_stat = LSONPROC;
    508 		newl->l_flag |= LW_RUNNING;
    509 	}
    510 #endif	/* !__HAVE_FAST_SOFTINTS */
    511 
    512 	/* Count time spent in current system call */
    513 	if (!returning) {
    514 		SYSCALL_TIME_SLEEP(l);
    515 
    516 		/*
    517 		 * XXXSMP If we are using h/w performance counters,
    518 		 * save context.
    519 		 */
    520 #if PERFCTRS
    521 		if (PMC_ENABLED(l->l_proc)) {
    522 			pmc_save_context(l->l_proc);
    523 		}
    524 #endif
    525 		updatertime(l, &bt);
    526 	}
    527 
    528 	/*
    529 	 * If on the CPU and we have gotten this far, then we must yield.
    530 	 */
    531 	KASSERT(l->l_stat != LSRUN);
    532 	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
    533 		KASSERT(lwp_locked(l, spc->spc_lwplock));
    534 
    535 		if (l->l_target_cpu == l->l_cpu) {
    536 			l->l_target_cpu = NULL;
    537 		} else {
    538 			tci = l->l_target_cpu;
    539 		}
    540 
    541 		if (__predict_false(tci != NULL)) {
    542 			/* Double-lock the runqueues */
    543 			spc_dlock(ci, tci);
    544 		} else {
    545 			/* Lock the runqueue */
    546 			spc_lock(ci);
    547 		}
    548 
    549 		if ((l->l_flag & LW_IDLE) == 0) {
    550 			l->l_stat = LSRUN;
    551 			if (__predict_false(tci != NULL)) {
    552 				/*
     553 				 * Set the new CPU and lock, and clear
     554 				 * l_target_cpu - the thread will be enqueued
     555 				 * on the runqueue of the target CPU.
    556 				 */
    557 				l->l_cpu = tci;
    558 				lwp_setlock(l, tci->ci_schedstate.spc_mutex);
    559 				l->l_target_cpu = NULL;
    560 			} else {
    561 				lwp_setlock(l, spc->spc_mutex);
    562 			}
    563 			sched_enqueue(l, true);
    564 		} else {
    565 			KASSERT(tci == NULL);
    566 			l->l_stat = LSIDL;
    567 		}
    568 	} else {
    569 		/* Lock the runqueue */
    570 		spc_lock(ci);
    571 	}
    572 
    573 	/*
     574 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
     575 	 * If no LWP is runnable, select the idle LWP.
     576 	 *
     577 	 * Note that spc_lwplock might not necessarily be held, and that
     578 	 * the new thread will be unlocked after setting the LWP-lock.
    579 	 */
    580 	if (newl == NULL) {
    581 		newl = sched_nextlwp();
    582 		if (newl != NULL) {
    583 			sched_dequeue(newl);
    584 			KASSERT(lwp_locked(newl, spc->spc_mutex));
    585 			newl->l_stat = LSONPROC;
    586 			newl->l_cpu = ci;
    587 			newl->l_flag |= LW_RUNNING;
    588 			lwp_setlock(newl, spc->spc_lwplock);
    589 		} else {
    590 			newl = ci->ci_data.cpu_idlelwp;
    591 			newl->l_stat = LSONPROC;
    592 			newl->l_flag |= LW_RUNNING;
    593 		}
    594 		/*
    595 		 * Only clear want_resched if there are no
    596 		 * pending (slow) software interrupts.
    597 		 */
    598 		ci->ci_want_resched = ci->ci_data.cpu_softints;
    599 		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    600 		spc->spc_curpriority = lwp_eprio(newl);
    601 	}
    602 
    603 	/* Items that must be updated with the CPU locked. */
    604 	if (!returning) {
    605 		/* Update the new LWP's start time. */
    606 		newl->l_stime = bt;
    607 
    608 		/*
    609 		 * ci_curlwp changes when a fast soft interrupt occurs.
    610 		 * We use cpu_onproc to keep track of which kernel or
    611 		 * user thread is running 'underneath' the software
    612 		 * interrupt.  This is important for time accounting,
    613 		 * itimers and forcing user threads to preempt (aston).
    614 		 */
    615 		ci->ci_data.cpu_onproc = newl;
    616 	}
    617 
    618 	if (l != newl) {
    619 		struct lwp *prevlwp;
    620 
    621 		/* Release all locks, but leave the current LWP locked */
    622 		if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
    623 			/*
    624 			 * In case of migration, drop the local runqueue
     625 			 * In case of migration, drop the local runqueue
     626 			 * lock; the thread is now on another CPU's runqueue.
    627 			if (__predict_false(tci != NULL))
    628 				spc_unlock(ci);
    629 			/*
    630 			 * Drop spc_lwplock, if the current LWP has been moved
    631 			 * to the run queue (it is now locked by spc_mutex).
    632 			 */
    633 			mutex_spin_exit(spc->spc_lwplock);
    634 		} else {
    635 			/*
    636 			 * Otherwise, drop the spc_mutex, we are done with the
    637 			 * run queues.
    638 			 */
    639 			mutex_spin_exit(spc->spc_mutex);
    640 			KASSERT(tci == NULL);
    641 		}
    642 
    643 		/*
     644 		 * Mark that context switch is going to be performed
    645 		 * for this LWP, to protect it from being switched
    646 		 * to on another CPU.
    647 		 */
    648 		KASSERT(l->l_ctxswtch == 0);
    649 		l->l_ctxswtch = 1;
    650 		l->l_ncsw++;
    651 		l->l_flag &= ~LW_RUNNING;
    652 
    653 		/*
    654 		 * Increase the count of spin-mutexes before the release
    655 		 * of the last lock - we must remain at IPL_SCHED during
    656 		 * the context switch.
    657 		 */
    658 		oldspl = MUTEX_SPIN_OLDSPL(ci);
    659 		ci->ci_mtx_count--;
    660 		lwp_unlock(l);
    661 
    662 		/* Count the context switch on this CPU. */
    663 		ci->ci_data.cpu_nswtch++;
    664 
    665 		/* Update status for lwpctl, if present. */
    666 		if (l->l_lwpctl != NULL)
    667 			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
    668 
    669 		/*
    670 		 * Save old VM context, unless a soft interrupt
    671 		 * handler is blocking.
    672 		 */
    673 		if (!returning)
    674 			pmap_deactivate(l);
    675 
    676 		/*
     677 		 * We may need to spin-wait if 'newl' is still
    678 		 * context switching on another CPU.
    679 		 */
    680 		if (newl->l_ctxswtch != 0) {
    681 			u_int count;
    682 			count = SPINLOCK_BACKOFF_MIN;
    683 			while (newl->l_ctxswtch)
    684 				SPINLOCK_BACKOFF(count);
    685 		}
    686 
     687 		/* Switch to the new LWP. */
    688 		prevlwp = cpu_switchto(l, newl, returning);
    689 		ci = curcpu();
    690 
    691 		/*
    692 		 * Switched away - we have new curlwp.
    693 		 * Restore VM context and IPL.
    694 		 */
    695 		pmap_activate(l);
    696 		if (prevlwp != NULL) {
    697 			/* Normalize the count of the spin-mutexes */
    698 			ci->ci_mtx_count++;
    699 			/* Unmark the state of context switch */
    700 			membar_exit();
    701 			prevlwp->l_ctxswtch = 0;
    702 		}
    703 		splx(oldspl);
    704 
    705 		/* Update status for lwpctl, if present. */
    706 		if (l->l_lwpctl != NULL) {
    707 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
    708 			l->l_lwpctl->lc_pctr++;
    709 		}
    710 
    711 		retval = 1;
    712 	} else {
    713 		/* Nothing to do - just unlock and return. */
    714 		KASSERT(tci == NULL);
    715 		spc_unlock(ci);
    716 		lwp_unlock(l);
    717 		retval = 0;
    718 	}
    719 
    720 	KASSERT(l == curlwp);
    721 	KASSERT(l->l_stat == LSONPROC);
    722 	KASSERT(l->l_cpu == ci);
    723 
    724 	/*
    725 	 * XXXSMP If we are using h/w performance counters, restore context.
    726 	 */
    727 #if PERFCTRS
    728 	if (PMC_ENABLED(l->l_proc)) {
    729 		pmc_restore_context(l->l_proc);
    730 	}
    731 #endif
    732 	SYSCALL_TIME_WAKEUP(l);
    733 	LOCKDEBUG_BARRIER(NULL, 1);
    734 
    735 	return retval;
    736 }
    737 
    738 /*
    739  * Change process state to be runnable, placing it on the run queue if it is
    740  * in memory, and awakening the swapper if it isn't in memory.
    741  *
    742  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    743  */
    744 void
    745 setrunnable(struct lwp *l)
    746 {
    747 	struct proc *p = l->l_proc;
    748 	struct cpu_info *ci;
    749 	sigset_t *ss;
    750 
    751 	KASSERT((l->l_flag & LW_IDLE) == 0);
    752 	KASSERT(mutex_owned(p->p_lock));
    753 	KASSERT(lwp_locked(l, NULL));
    754 	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
    755 
    756 	switch (l->l_stat) {
    757 	case LSSTOP:
    758 		/*
    759 		 * If we're being traced (possibly because someone attached us
    760 		 * while we were stopped), check for a signal from the debugger.
    761 		 */
    762 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    763 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    764 				ss = &l->l_sigpend.sp_set;
    765 			else
    766 				ss = &p->p_sigpend.sp_set;
    767 			sigaddset(ss, p->p_xstat);
    768 			signotify(l);
    769 		}
    770 		p->p_nrlwps++;
    771 		break;
    772 	case LSSUSPENDED:
    773 		l->l_flag &= ~LW_WSUSPEND;
    774 		p->p_nrlwps++;
    775 		cv_broadcast(&p->p_lwpcv);
    776 		break;
    777 	case LSSLEEP:
    778 		KASSERT(l->l_wchan != NULL);
    779 		break;
    780 	default:
    781 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    782 	}
    783 
    784 	/*
     785 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    786 	 * again.  If not, mark it as still sleeping.
    787 	 */
    788 	if (l->l_wchan != NULL) {
    789 		l->l_stat = LSSLEEP;
    790 		/* lwp_unsleep() will release the lock. */
    791 		lwp_unsleep(l, true);
    792 		return;
    793 	}
    794 
    795 	/*
    796 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    797 	 * about to call mi_switch(), in which case it will yield.
    798 	 */
    799 	if ((l->l_flag & LW_RUNNING) != 0) {
    800 		l->l_stat = LSONPROC;
    801 		l->l_slptime = 0;
    802 		lwp_unlock(l);
    803 		return;
    804 	}
    805 
    806 	/*
    807 	 * Look for a CPU to run.
    808 	 * Set the LWP runnable.
    809 	 */
    810 	ci = sched_takecpu(l);
    811 	l->l_cpu = ci;
    812 	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
    813 		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
    814 		lwp_lock(l);
    815 	}
    816 	sched_setrunnable(l);
    817 	l->l_stat = LSRUN;
    818 	l->l_slptime = 0;
    819 
    820 	/*
    821 	 * If thread is swapped out - wake the swapper to bring it back in.
    822 	 * Otherwise, enter it into a run queue.
    823 	 */
    824 	if (l->l_flag & LW_INMEM) {
    825 		sched_enqueue(l, false);
    826 		resched_cpu(l);
    827 		lwp_unlock(l);
    828 	} else {
    829 		lwp_unlock(l);
    830 		uvm_kick_scheduler();
    831 	}
    832 }
    833 
    834 /*
    835  * suspendsched:
    836  *
    837  *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
    838  */
    839 void
    840 suspendsched(void)
    841 {
    842 	CPU_INFO_ITERATOR cii;
    843 	struct cpu_info *ci;
    844 	struct lwp *l;
    845 	struct proc *p;
    846 
    847 	/*
    848 	 * We do this by process in order not to violate the locking rules.
    849 	 */
    850 	mutex_enter(proc_lock);
    851 	PROCLIST_FOREACH(p, &allproc) {
    852 		mutex_enter(p->p_lock);
    853 
    854 		if ((p->p_flag & PK_SYSTEM) != 0) {
    855 			mutex_exit(p->p_lock);
    856 			continue;
    857 		}
    858 
    859 		p->p_stat = SSTOP;
    860 
    861 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    862 			if (l == curlwp)
    863 				continue;
    864 
    865 			lwp_lock(l);
    866 
    867 			/*
     868 			 * Set LW_WREBOOT so that the LWP will suspend itself
     869 			 * when it tries to return to user mode.  We want to
     870 			 * try to get as many LWPs as possible to
    871 			 * the user / kernel boundary, so that they will
    872 			 * release any locks that they hold.
    873 			 */
    874 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
    875 
    876 			if (l->l_stat == LSSLEEP &&
    877 			    (l->l_flag & LW_SINTR) != 0) {
    878 				/* setrunnable() will release the lock. */
    879 				setrunnable(l);
    880 				continue;
    881 			}
    882 
    883 			lwp_unlock(l);
    884 		}
    885 
    886 		mutex_exit(p->p_lock);
    887 	}
    888 	mutex_exit(proc_lock);
    889 
    890 	/*
    891 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
    892 	 * They'll trap into the kernel and suspend themselves in userret().
    893 	 */
    894 	for (CPU_INFO_FOREACH(cii, ci)) {
    895 		spc_lock(ci);
    896 		cpu_need_resched(ci, RESCHED_IMMED);
    897 		spc_unlock(ci);
    898 	}
    899 }
    900 
    901 /*
    902  * sched_unsleep:
    903  *
     904  *	This is called when the LWP has not been awoken normally but instead
    905  *	interrupted: for example, if the sleep timed out.  Because of this,
    906  *	it's not a valid action for running or idle LWPs.
    907  */
    908 static u_int
    909 sched_unsleep(struct lwp *l, bool cleanup)
    910 {
    911 
    912 	lwp_unlock(l);
    913 	panic("sched_unsleep");
    914 }
    915 
    916 void
    917 resched_cpu(struct lwp *l)
    918 {
    919 	struct cpu_info *ci;
    920 
    921 	/*
    922 	 * XXXSMP
    923 	 * Since l->l_cpu persists across a context switch,
    924 	 * this gives us *very weak* processor affinity, in
    925 	 * that we notify the CPU on which the process last
    926 	 * ran that it should try to switch.
    927 	 *
    928 	 * This does not guarantee that the process will run on
    929 	 * that processor next, because another processor might
    930 	 * grab it the next time it performs a context switch.
    931 	 *
    932 	 * This also does not handle the case where its last
    933 	 * CPU is running a higher-priority process, but every
    934 	 * other CPU is running a lower-priority process.  There
    935 	 * are ways to handle this situation, but they're not
    936 	 * currently very pretty, and we also need to weigh the
    937 	 * cost of moving a process from one CPU to another.
    938 	 */
    939 	ci = l->l_cpu;
    940 	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
    941 		cpu_need_resched(ci, 0);
    942 }
    943 
    944 static void
    945 sched_changepri(struct lwp *l, pri_t pri)
    946 {
    947 
    948 	KASSERT(lwp_locked(l, NULL));
    949 
    950 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
    951 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    952 		sched_dequeue(l);
    953 		l->l_priority = pri;
    954 		sched_enqueue(l, false);
    955 	} else {
    956 		l->l_priority = pri;
    957 	}
    958 	resched_cpu(l);
    959 }
    960 
    961 static void
    962 sched_lendpri(struct lwp *l, pri_t pri)
    963 {
    964 
    965 	KASSERT(lwp_locked(l, NULL));
    966 
    967 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
    968 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
    969 		sched_dequeue(l);
    970 		l->l_inheritedprio = pri;
    971 		sched_enqueue(l, false);
    972 	} else {
    973 		l->l_inheritedprio = pri;
    974 	}
    975 	resched_cpu(l);
    976 }
    977 
    978 struct lwp *
    979 syncobj_noowner(wchan_t wchan)
    980 {
    981 
    982 	return NULL;
    983 }
    984 
    985 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
    986 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
    987 
    988 /*
    989  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
    990  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
    991  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
    992  *
    993  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
    994  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
    995  *
     996  * If you don't want to bother with the faster/more-accurate formula, you
    997  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
    998  * (more general) method of calculating the %age of CPU used by a process.
    999  */
   1000 #define	CCPU_SHIFT	(FSHIFT + 1)
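
         /*
          * Worked example (illustrative, not from the original source):
          * sched_pstats() below runs once per second and scales each
          * l_pctcpu by ccpu/FSCALE = exp(-1/20) on every pass.  After 60
          * passes the old contribution has been multiplied by
          * exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of it has
          * decayed away, which is where the "decay 95% in 60 seconds"
          * figure above comes from.
          */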
   1001 
   1002 /*
   1003  * sched_pstats:
   1004  *
   1005  * Update process statistics and check CPU resource allocation.
   1006  * Call scheduler-specific hook to eventually adjust process/LWP
   1007  * priorities.
   1008  */
   1009 /* ARGSUSED */
   1010 void
   1011 sched_pstats(void *arg)
   1012 {
   1013 	struct rlimit *rlim;
   1014 	struct lwp *l;
   1015 	struct proc *p;
   1016 	int sig, clkhz;
   1017 	long runtm;
   1018 
   1019 	sched_pstats_ticks++;
   1020 
   1021 	mutex_enter(proc_lock);
   1022 	PROCLIST_FOREACH(p, &allproc) {
   1023 		/*
   1024 		 * Increment time in/out of memory and sleep time (if
   1025 		 * sleeping).  We ignore overflow; with 16-bit int's
   1026 		 * (remember them?) overflow takes 45 days.
   1027 		 */
   1028 		mutex_enter(p->p_lock);
   1029 		mutex_spin_enter(&p->p_stmutex);
   1030 		runtm = p->p_rtime.sec;
   1031 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1032 			if ((l->l_flag & LW_IDLE) != 0)
   1033 				continue;
   1034 			lwp_lock(l);
   1035 			runtm += l->l_rtime.sec;
   1036 			l->l_swtime++;
   1037 			sched_pstats_hook(l);
   1038 			lwp_unlock(l);
   1039 
   1040 			/*
   1041 			 * p_pctcpu is only for ps.
   1042 			 */
   1043 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
   1044 			if (l->l_slptime < 1) {
   1045 				clkhz = stathz != 0 ? stathz : hz;
   1046 #if	(FSHIFT >= CCPU_SHIFT)
   1047 				l->l_pctcpu += (clkhz == 100) ?
   1048 				    ((fixpt_t)l->l_cpticks) <<
   1049 				        (FSHIFT - CCPU_SHIFT) :
    1050 				    100 * (((fixpt_t) l->l_cpticks)
   1051 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
   1052 #else
   1053 				l->l_pctcpu += ((FSCALE - ccpu) *
   1054 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
   1055 #endif
   1056 				l->l_cpticks = 0;
   1057 			}
   1058 		}
   1059 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
   1060 		mutex_spin_exit(&p->p_stmutex);
   1061 
   1062 		/*
   1063 		 * Check if the process exceeds its CPU resource allocation.
   1064 		 * If over max, kill it.
   1065 		 */
   1066 		rlim = &p->p_rlimit[RLIMIT_CPU];
   1067 		sig = 0;
   1068 		if (runtm >= rlim->rlim_cur) {
   1069 			if (runtm >= rlim->rlim_max)
   1070 				sig = SIGKILL;
   1071 			else {
   1072 				sig = SIGXCPU;
   1073 				if (rlim->rlim_cur < rlim->rlim_max)
   1074 					rlim->rlim_cur += 5;
   1075 			}
   1076 		}
   1077 		mutex_exit(p->p_lock);
   1078 		if (sig)
   1079 			psignal(p, sig);
   1080 	}
   1081 	mutex_exit(proc_lock);
   1082 	uvm_meter();
   1083 	cv_wakeup(&lbolt);
   1084 	callout_schedule(&sched_pstats_ch, hz);
   1085 }
   1086 
   1087 void
   1088 sched_init(void)
   1089 {
   1090 
   1091 	cv_init(&lbolt, "lbolt");
   1092 	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
   1093 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
   1094 
   1095 	/* Balancing */
   1096 	worker_ci = curcpu();
   1097 	cacheht_time = mstohz(5);		/* ~5 ms  */
   1098 	balance_period = mstohz(300);		/* ~300ms */
   1099 
   1100 	/* Minimal count of LWPs for catching: log2(count of CPUs) */
   1101 	min_catch = min(ilog2(ncpu), 4);
   1102 
   1103 	/* Initialize balancing callout and run it */
   1104 #ifdef MULTIPROCESSOR
   1105 	callout_init(&balance_ch, CALLOUT_MPSAFE);
   1106 	callout_setfunc(&balance_ch, sched_balance, NULL);
   1107 	callout_schedule(&balance_ch, balance_period);
   1108 #endif
   1109 	sched_pstats(NULL);
   1110 }
   1111 
   1112 SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
   1113 {
   1114 	const struct sysctlnode *node = NULL;
   1115 
   1116 	sysctl_createv(clog, 0, NULL, NULL,
   1117 		CTLFLAG_PERMANENT,
   1118 		CTLTYPE_NODE, "kern", NULL,
   1119 		NULL, 0, NULL, 0,
   1120 		CTL_KERN, CTL_EOL);
   1121 	sysctl_createv(clog, 0, NULL, &node,
   1122 		CTLFLAG_PERMANENT,
   1123 		CTLTYPE_NODE, "sched",
   1124 		SYSCTL_DESCR("Scheduler options"),
   1125 		NULL, 0, NULL, 0,
   1126 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1127 
   1128 	if (node == NULL)
   1129 		return;
   1130 
   1131 	sysctl_createv(clog, 0, &node, NULL,
   1132 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1133 		CTLTYPE_INT, "cacheht_time",
   1134 		SYSCTL_DESCR("Cache hotness time (in ticks)"),
   1135 		NULL, 0, &cacheht_time, 0,
   1136 		CTL_CREATE, CTL_EOL);
   1137 	sysctl_createv(clog, 0, &node, NULL,
   1138 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1139 		CTLTYPE_INT, "balance_period",
   1140 		SYSCTL_DESCR("Balance period (in ticks)"),
   1141 		NULL, 0, &balance_period, 0,
   1142 		CTL_CREATE, CTL_EOL);
   1143 	sysctl_createv(clog, 0, &node, NULL,
   1144 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1145 		CTLTYPE_INT, "min_catch",
   1146 		SYSCTL_DESCR("Minimal count of threads for catching"),
   1147 		NULL, 0, &min_catch, 0,
   1148 		CTL_CREATE, CTL_EOL);
   1149 	sysctl_createv(clog, 0, &node, NULL,
   1150 		CTLFLAG_READWRITE,
   1151 		CTLTYPE_INT, "timesoftints",
   1152 		SYSCTL_DESCR("Track CPU time for soft interrupts"),
   1153 		NULL, 0, &softint_timing, 0,
   1154 		CTL_CREATE, CTL_EOL);
   1155 }
   1156 
   1157 void
   1158 sched_cpuattach(struct cpu_info *ci)
   1159 {
   1160 	runqueue_t *ci_rq;
   1161 	void *rq_ptr;
   1162 	u_int i, size;
   1163 
   1164 	if (ci->ci_schedstate.spc_lwplock == NULL) {
   1165 		ci->ci_schedstate.spc_lwplock =
   1166 		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1167 	}
   1168 	if (ci == lwp0.l_cpu) {
   1169 		/* Initialize the scheduler structure of the primary LWP */
   1170 		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
   1171 	}
   1172 	if (ci->ci_schedstate.spc_mutex != NULL) {
   1173 		/* Already initialized. */
   1174 		return;
   1175 	}
   1176 
   1177 	/* Allocate the run queue */
   1178 	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
   1179 	rq_ptr = kmem_zalloc(size, KM_SLEEP);
   1180 	if (rq_ptr == NULL) {
   1181 		panic("sched_cpuattach: could not allocate the runqueue");
   1182 	}
   1183 	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));
   1184 
   1185 	/* Initialize run queues */
   1186 	ci->ci_schedstate.spc_mutex =
   1187 	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1188 	for (i = 0; i < PRI_RT_COUNT; i++)
   1189 		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
   1190 	for (i = 0; i < PRI_TS_COUNT; i++)
   1191 		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
   1192 
   1193 	ci->ci_schedstate.spc_sched_info = ci_rq;
   1194 }
   1195 
   1196 /*
   1197  * Control of the runqueue.
   1198  */
   1199 
   1200 static void *
   1201 sched_getrq(runqueue_t *ci_rq, const pri_t prio)
   1202 {
   1203 
   1204 	KASSERT(prio < PRI_COUNT);
   1205 	return (prio <= PRI_HIGHEST_TS) ?
   1206 	    &ci_rq->r_ts_queue[prio].q_head :
   1207 	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
   1208 }
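
         /*
          * Illustrative mapping for sched_getrq() above (example values
          * only): a time-shared priority such as prio == 20 indexes
          * r_ts_queue[20], while a real-time priority such as
          * prio == PRI_HIGHEST_TS + 5 indexes r_rt_queue[4].
          */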
   1209 
   1210 void
   1211 sched_enqueue(struct lwp *l, bool swtch)
   1212 {
   1213 	runqueue_t *ci_rq;
   1214 	struct schedstate_percpu *spc;
   1215 	TAILQ_HEAD(, lwp) *q_head;
   1216 	const pri_t eprio = lwp_eprio(l);
   1217 	struct cpu_info *ci;
   1218 
   1219 	ci = l->l_cpu;
   1220 	spc = &ci->ci_schedstate;
   1221 	ci_rq = spc->spc_sched_info;
   1222 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1223 
   1224 	/* Update the last run time on switch */
   1225 	if (__predict_true(swtch == true)) {
    1226 		l->l_rticksum += (hardclock_ticks - l->l_rticks);
    1227 		l->l_rticks = hardclock_ticks;
   1228 	} else if (l->l_rticks == 0)
   1229 		l->l_rticks = hardclock_ticks;
   1230 
   1231 	/* Enqueue the thread */
   1232 	q_head = sched_getrq(ci_rq, eprio);
   1233 	if (TAILQ_EMPTY(q_head)) {
   1234 		u_int i;
   1235 		uint32_t q;
   1236 
   1237 		/* Mark bit */
   1238 		i = eprio >> BITMAP_SHIFT;
   1239 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1240 		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
   1241 		ci_rq->r_bitmap[i] |= q;
   1242 	}
   1243 	TAILQ_INSERT_TAIL(q_head, l, l_runq);
   1244 	ci_rq->r_count++;
   1245 	if ((l->l_pflag & LP_BOUND) == 0)
   1246 		ci_rq->r_mcount++;
   1247 
   1248 	/*
   1249 	 * Update the value of highest priority in the runqueue,
    1250 	 * if the priority of this thread is higher.
   1251 	 */
   1252 	if (eprio > spc->spc_maxpriority)
   1253 		spc->spc_maxpriority = eprio;
   1254 
   1255 	sched_newts(l);
   1256 
   1257 	/*
   1258 	 * Wake the chosen CPU or cause a preemption if the newly
   1259 	 * enqueued thread has higher priority.  Don't cause a
   1260 	 * preemption if the thread is yielding (swtch).
   1261 	 */
   1262 	if (!swtch && eprio > spc->spc_curpriority) {
   1263 		cpu_need_resched(ci,
   1264 		    (eprio >= PRI_KERNEL ? RESCHED_IMMED : 0));
   1265 	}
   1266 }
   1267 
   1268 void
   1269 sched_dequeue(struct lwp *l)
   1270 {
   1271 	runqueue_t *ci_rq;
   1272 	TAILQ_HEAD(, lwp) *q_head;
   1273 	struct schedstate_percpu *spc;
   1274 	const pri_t eprio = lwp_eprio(l);
   1275 
   1276 	spc = & l->l_cpu->ci_schedstate;
   1277 	ci_rq = spc->spc_sched_info;
   1278 	KASSERT(lwp_locked(l, spc->spc_mutex));
   1279 
   1280 	KASSERT(eprio <= spc->spc_maxpriority);
   1281 	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
   1282 	KASSERT(ci_rq->r_count > 0);
   1283 
   1284 	ci_rq->r_count--;
   1285 	if ((l->l_pflag & LP_BOUND) == 0)
   1286 		ci_rq->r_mcount--;
   1287 
   1288 	q_head = sched_getrq(ci_rq, eprio);
   1289 	TAILQ_REMOVE(q_head, l, l_runq);
   1290 	if (TAILQ_EMPTY(q_head)) {
   1291 		u_int i;
   1292 		uint32_t q;
   1293 
   1294 		/* Unmark bit */
   1295 		i = eprio >> BITMAP_SHIFT;
   1296 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1297 		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
   1298 		ci_rq->r_bitmap[i] &= ~q;
   1299 
   1300 		/*
    1301 		 * Update the highest priority value in the runqueue, in case
    1302 		 * it was the last thread in the queue of that priority.
   1303 		 */
   1304 		if (eprio != spc->spc_maxpriority)
   1305 			return;
   1306 
   1307 		do {
   1308 			if (ci_rq->r_bitmap[i] != 0) {
   1309 				q = ffs(ci_rq->r_bitmap[i]);
   1310 				spc->spc_maxpriority =
   1311 				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
   1312 				return;
   1313 			}
   1314 		} while (i--);
   1315 
   1316 		/* If not found - set the lowest value */
   1317 		spc->spc_maxpriority = 0;
   1318 	}
   1319 }
   1320 
   1321 /*
   1322  * Migration and balancing.
   1323  */
   1324 
   1325 #ifdef MULTIPROCESSOR
   1326 
   1327 /* Estimate if LWP is cache-hot */
   1328 static inline bool
   1329 lwp_cache_hot(const struct lwp *l)
   1330 {
   1331 
   1332 	if (l->l_slptime || l->l_rticks == 0)
   1333 		return false;
   1334 
   1335 	return (hardclock_ticks - l->l_rticks <= cacheht_time);
   1336 }
   1337 
   1338 /* Check if LWP can migrate to the chosen CPU */
   1339 static inline bool
   1340 sched_migratable(const struct lwp *l, struct cpu_info *ci)
   1341 {
   1342 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
   1343 
   1344 	/* CPU is offline */
   1345 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
   1346 		return false;
   1347 
   1348 	/* Affinity bind */
   1349 	if (__predict_false(l->l_flag & LW_AFFINITY))
   1350 		return CPU_ISSET(cpu_index(ci), &l->l_affinity);
   1351 
   1352 	/* Processor-set */
   1353 	return (spc->spc_psid == l->l_psid);
   1354 }
   1355 
   1356 /*
    1357  * Estimate whether the LWP should migrate to another CPU.
    1358  * Pick and return the CPU to run on.
   1359  */
   1360 struct cpu_info *
   1361 sched_takecpu(struct lwp *l)
   1362 {
   1363 	struct cpu_info *ci, *tci, *first, *next;
   1364 	struct schedstate_percpu *spc;
   1365 	runqueue_t *ci_rq, *ici_rq;
   1366 	pri_t eprio, lpri, pri;
   1367 
   1368 	KASSERT(lwp_locked(l, NULL));
   1369 
   1370 	ci = l->l_cpu;
   1371 	spc = &ci->ci_schedstate;
   1372 	ci_rq = spc->spc_sched_info;
   1373 
   1374 	/* If thread is strictly bound, do not estimate other CPUs */
   1375 	if (l->l_pflag & LP_BOUND)
   1376 		return ci;
   1377 
   1378 	/* CPU of this thread is idling - run there */
   1379 	if (ci_rq->r_count == 0)
   1380 		return ci;
   1381 
   1382 	eprio = lwp_eprio(l);
   1383 
   1384 	/* Stay if thread is cache-hot */
   1385 	if (__predict_true(l->l_stat != LSIDL) &&
   1386 	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
   1387 		return ci;
   1388 
   1389 	/* Run on current CPU if priority of thread is higher */
   1390 	ci = curcpu();
   1391 	spc = &ci->ci_schedstate;
   1392 	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
   1393 		return ci;
   1394 
   1395 	/*
   1396 	 * Look for the CPU with the lowest priority thread.  In case of
    1397 	 * equal priority, choose the CPU with the fewest threads.
   1398 	 */
   1399 	first = l->l_cpu;
   1400 	ci = first;
   1401 	tci = first;
   1402 	lpri = PRI_COUNT;
   1403 	do {
   1404 		next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
   1405 		spc = &ci->ci_schedstate;
   1406 		ici_rq = spc->spc_sched_info;
   1407 		pri = max(spc->spc_curpriority, spc->spc_maxpriority);
   1408 		if (pri > lpri)
   1409 			continue;
   1410 
   1411 		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
   1412 			continue;
   1413 
   1414 		if (!sched_migratable(l, ci))
   1415 			continue;
   1416 
   1417 		lpri = pri;
   1418 		tci = ci;
   1419 		ci_rq = ici_rq;
   1420 	} while (ci = next, ci != first);
   1421 
   1422 	return tci;
   1423 }
   1424 
   1425 /*
    1426  * Tries to catch an LWP from the runqueue of another CPU.
   1427  */
   1428 static struct lwp *
   1429 sched_catchlwp(void)
   1430 {
   1431 	struct cpu_info *curci = curcpu(), *ci = worker_ci;
   1432 	struct schedstate_percpu *spc;
   1433 	TAILQ_HEAD(, lwp) *q_head;
   1434 	runqueue_t *ci_rq;
   1435 	struct lwp *l;
   1436 
   1437 	if (curci == ci)
   1438 		return NULL;
   1439 
   1440 	/* Lockless check */
   1441 	spc = &ci->ci_schedstate;
   1442 	ci_rq = spc->spc_sched_info;
   1443 	if (ci_rq->r_mcount < min_catch)
   1444 		return NULL;
   1445 
   1446 	/*
   1447 	 * Double-lock the runqueues.
   1448 	 */
   1449 	if (curci < ci) {
   1450 		spc_lock(ci);
   1451 	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
   1452 		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;
   1453 
   1454 		spc_unlock(curci);
   1455 		spc_lock(ci);
   1456 		spc_lock(curci);
   1457 
   1458 		if (cur_rq->r_count) {
   1459 			spc_unlock(ci);
   1460 			return NULL;
   1461 		}
   1462 	}
   1463 
   1464 	if (ci_rq->r_mcount < min_catch) {
   1465 		spc_unlock(ci);
   1466 		return NULL;
   1467 	}
   1468 
   1469 	/* Take the highest priority thread */
   1470 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1471 	l = TAILQ_FIRST(q_head);
   1472 
   1473 	for (;;) {
   1474 		/* Check the first and next result from the queue */
   1475 		if (l == NULL)
   1476 			break;
   1477 		KASSERT(l->l_stat == LSRUN);
   1478 		KASSERT(l->l_flag & LW_INMEM);
   1479 
    1480 		/* Look for threads that are allowed to migrate */
   1481 		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
   1482 		    !sched_migratable(l, curci)) {
   1483 			l = TAILQ_NEXT(l, l_runq);
   1484 			continue;
   1485 		}
   1486 
   1487 		/* Grab the thread, and move to the local run queue */
   1488 		sched_dequeue(l);
   1489 		l->l_cpu = curci;
   1490 		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
   1491 		sched_enqueue(l, false);
   1492 		return l;
   1493 	}
   1494 	spc_unlock(ci);
   1495 
   1496 	return l;
   1497 }
   1498 
   1499 /*
    1500  * Periodic calculations for balancing.
   1501  */
   1502 static void
   1503 sched_balance(void *nocallout)
   1504 {
   1505 	struct cpu_info *ci, *hci;
   1506 	runqueue_t *ci_rq;
   1507 	CPU_INFO_ITERATOR cii;
   1508 	u_int highest;
   1509 
   1510 	hci = curcpu();
   1511 	highest = 0;
   1512 
    1513 	/* Do the counting locklessly */
   1514 	for (CPU_INFO_FOREACH(cii, ci)) {
   1515 		ci_rq = ci->ci_schedstate.spc_sched_info;
   1516 
   1517 		/* Average count of the threads */
   1518 		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;
   1519 
   1520 		/* Look for CPU with the highest average */
   1521 		if (ci_rq->r_avgcount > highest) {
   1522 			hci = ci;
   1523 			highest = ci_rq->r_avgcount;
   1524 		}
   1525 	}
   1526 
   1527 	/* Update the worker */
   1528 	worker_ci = hci;
   1529 
   1530 	if (nocallout == NULL)
   1531 		callout_schedule(&balance_ch, balance_period);
   1532 }
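
         /*
          * Worked example for the averaging above (illustrative numbers
          * only): r_avgcount is an exponential moving average with weight
          * 1/2, so with r_avgcount == 8 and r_mcount == 2 the new average
          * is (8 + 2) >> 1 == 5, and a CPU whose migratable load stays at 2
          * converges towards 2 over a few balance periods.  sched_balance()
          * then records the CPU with the highest average as worker_ci, the
          * victim from which sched_catchlwp() will try to steal.
          */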
   1533 
   1534 #else
   1535 
   1536 struct cpu_info *
   1537 sched_takecpu(struct lwp *l)
   1538 {
   1539 
   1540 	return l->l_cpu;
   1541 }
   1542 
   1543 #endif	/* MULTIPROCESSOR */
   1544 
   1545 /*
   1546  * Scheduler mill.
   1547  */
   1548 struct lwp *
   1549 sched_nextlwp(void)
   1550 {
   1551 	struct cpu_info *ci = curcpu();
   1552 	struct schedstate_percpu *spc;
   1553 	TAILQ_HEAD(, lwp) *q_head;
   1554 	runqueue_t *ci_rq;
   1555 	struct lwp *l;
   1556 
   1557 	spc = &ci->ci_schedstate;
   1558 	ci_rq = spc->spc_sched_info;
   1559 
   1560 #ifdef MULTIPROCESSOR
    1561 	/* If runqueue is empty, try to catch a thread from another CPU */
   1562 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
   1563 		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
   1564 			return NULL;
   1565 	} else if (ci_rq->r_count == 0) {
   1566 		/* Reset the counter, and call the balancer */
   1567 		ci_rq->r_avgcount = 0;
   1568 		sched_balance(ci);
   1569 
   1570 		/* The re-locking will be done inside */
   1571 		return sched_catchlwp();
   1572 	}
   1573 #else
   1574 	if (ci_rq->r_count == 0)
   1575 		return NULL;
   1576 #endif
   1577 
   1578 	/* Take the highest priority thread */
   1579 	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
   1580 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1581 	l = TAILQ_FIRST(q_head);
   1582 	KASSERT(l != NULL);
   1583 
   1584 	sched_oncpu(l);
   1585 	l->l_rticks = hardclock_ticks;
   1586 
   1587 	return l;
   1588 }
   1589 
   1590 bool
   1591 sched_curcpu_runnable_p(void)
   1592 {
   1593 	const struct cpu_info *ci = curcpu();
   1594 	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
   1595 
   1596 #ifndef __HAVE_FAST_SOFTINTS
   1597 	if (ci->ci_data.cpu_softints)
   1598 		return true;
   1599 #endif
   1600 
   1601 	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
   1602 		return (ci_rq->r_count - ci_rq->r_mcount);
   1603 
   1604 	return ci_rq->r_count;
   1605 }
   1606 
   1607 /*
   1608  * Debugging.
   1609  */
   1610 
   1611 #ifdef DDB
   1612 
   1613 void
   1614 sched_print_runqueue(void (*pr)(const char *, ...)
   1615     __attribute__((__format__(__printf__,1,2))))
   1616 {
   1617 	runqueue_t *ci_rq;
   1618 	struct schedstate_percpu *spc;
   1619 	struct lwp *l;
   1620 	struct proc *p;
   1621 	int i;
   1622 	struct cpu_info *ci;
   1623 	CPU_INFO_ITERATOR cii;
   1624 
   1625 	for (CPU_INFO_FOREACH(cii, ci)) {
   1626 		spc = &ci->ci_schedstate;
   1627 		ci_rq = spc->spc_sched_info;
   1628 
   1629 		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
   1630 		(*pr)(" pid.lid = %d.%d, threads count = %u, "
   1631 		    "avgcount = %u, highest pri = %d\n",
   1632 #ifdef MULTIPROCESSOR
   1633 		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
   1634 #else
   1635 		    curlwp->l_proc->p_pid, curlwp->l_lid,
   1636 #endif
   1637 		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority);
   1638 		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
   1639 		do {
   1640 			uint32_t q;
   1641 			q = ci_rq->r_bitmap[i];
   1642 			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
   1643 		} while (i--);
   1644 	}
   1645 
   1646 	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %s\n",
   1647 	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "LRTIME");
   1648 
   1649 	PROCLIST_FOREACH(p, &allproc) {
   1650 		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
   1651 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1652 			ci = l->l_cpu;
   1653 			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %u\n",
   1654 			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
   1655 			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
   1656 			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
   1657 			    l, ci->ci_index,
   1658 			    (u_int)(hardclock_ticks - l->l_rticks));
   1659 		}
   1660 	}
   1661 }
   1662 
   1663 #endif
   1664