      1  1.231        ad /*	$NetBSD: kern_synch.c,v 1.231 2008/04/28 15:36:01 ad Exp $	*/
      2   1.63   thorpej 
      3   1.63   thorpej /*-
      4  1.218        ad  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5   1.63   thorpej  * All rights reserved.
      6   1.63   thorpej  *
      7   1.63   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8   1.63   thorpej  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  1.188      yamt  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
     10  1.188      yamt  * Daniel Sieger.
     11   1.63   thorpej  *
     12   1.63   thorpej  * Redistribution and use in source and binary forms, with or without
     13   1.63   thorpej  * modification, are permitted provided that the following conditions
     14   1.63   thorpej  * are met:
     15   1.63   thorpej  * 1. Redistributions of source code must retain the above copyright
     16   1.63   thorpej  *    notice, this list of conditions and the following disclaimer.
     17   1.63   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     18   1.63   thorpej  *    notice, this list of conditions and the following disclaimer in the
     19   1.63   thorpej  *    documentation and/or other materials provided with the distribution.
     20   1.63   thorpej  * 3. All advertising materials mentioning features or use of this software
     21   1.63   thorpej  *    must display the following acknowledgement:
     22   1.63   thorpej  *	This product includes software developed by the NetBSD
     23   1.63   thorpej  *	Foundation, Inc. and its contributors.
     24   1.63   thorpej  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25   1.63   thorpej  *    contributors may be used to endorse or promote products derived
     26   1.63   thorpej  *    from this software without specific prior written permission.
     27   1.63   thorpej  *
     28   1.63   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29   1.63   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30   1.63   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31   1.63   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32   1.63   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33   1.63   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34   1.63   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35   1.63   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36   1.63   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37   1.63   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38   1.63   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     39   1.63   thorpej  */
     40   1.26       cgd 
     41  1.223        ad /*
     42  1.223        ad  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
     43  1.223        ad  * All rights reserved.
     44  1.223        ad  *
     45  1.223        ad  * Redistribution and use in source and binary forms, with or without
     46  1.223        ad  * modification, are permitted provided that the following conditions
     47  1.223        ad  * are met:
     48  1.223        ad  * 1. Redistributions of source code must retain the above copyright
     49  1.223        ad  *    notice, this list of conditions and the following disclaimer.
     50  1.223        ad  * 2. Redistributions in binary form must reproduce the above copyright
     51  1.223        ad  *    notice, this list of conditions and the following disclaimer in the
     52  1.223        ad  *    documentation and/or other materials provided with the distribution.
     53  1.223        ad  *
     54  1.223        ad  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     55  1.223        ad  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  1.223        ad  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  1.223        ad  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     58  1.223        ad  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  1.223        ad  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  1.223        ad  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  1.223        ad  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  1.223        ad  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  1.223        ad  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  1.223        ad  * SUCH DAMAGE.
     65  1.223        ad  */
     66  1.223        ad 
     67   1.26       cgd /*-
     68   1.26       cgd  * Copyright (c) 1982, 1986, 1990, 1991, 1993
     69   1.26       cgd  *	The Regents of the University of California.  All rights reserved.
     70   1.26       cgd  * (c) UNIX System Laboratories, Inc.
     71   1.26       cgd  * All or some portions of this file are derived from material licensed
     72   1.26       cgd  * to the University of California by American Telephone and Telegraph
     73   1.26       cgd  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     74   1.26       cgd  * the permission of UNIX System Laboratories, Inc.
     75   1.26       cgd  *
     76   1.26       cgd  * Redistribution and use in source and binary forms, with or without
     77   1.26       cgd  * modification, are permitted provided that the following conditions
     78   1.26       cgd  * are met:
     79   1.26       cgd  * 1. Redistributions of source code must retain the above copyright
     80   1.26       cgd  *    notice, this list of conditions and the following disclaimer.
     81   1.26       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     82   1.26       cgd  *    notice, this list of conditions and the following disclaimer in the
     83   1.26       cgd  *    documentation and/or other materials provided with the distribution.
     84  1.136       agc  * 3. Neither the name of the University nor the names of its contributors
     85   1.26       cgd  *    may be used to endorse or promote products derived from this software
     86   1.26       cgd  *    without specific prior written permission.
     87   1.26       cgd  *
     88   1.26       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     89   1.26       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     90   1.26       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     91   1.26       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     92   1.26       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     93   1.26       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     94   1.26       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     95   1.26       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     96   1.26       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     97   1.26       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     98   1.26       cgd  * SUCH DAMAGE.
     99   1.26       cgd  *
    100   1.50      fvdl  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
    101   1.26       cgd  */
    102  1.106     lukem 
    103  1.106     lukem #include <sys/cdefs.h>
    104  1.231        ad __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.231 2008/04/28 15:36:01 ad Exp $");
    105   1.48       mrg 
    106  1.109      yamt #include "opt_kstack.h"
    107   1.82   thorpej #include "opt_lockdebug.h"
    108   1.83   thorpej #include "opt_multiprocessor.h"
    109  1.110    briggs #include "opt_perfctrs.h"
    110  1.231        ad #include "opt_preemption.h"
    111   1.26       cgd 
    112  1.174        ad #define	__MUTEX_PRIVATE
    113  1.174        ad 
    114   1.26       cgd #include <sys/param.h>
    115   1.26       cgd #include <sys/systm.h>
    116   1.26       cgd #include <sys/proc.h>
    117   1.26       cgd #include <sys/kernel.h>
    118  1.111    briggs #if defined(PERFCTRS)
    119  1.110    briggs #include <sys/pmc.h>
    120  1.111    briggs #endif
    121  1.188      yamt #include <sys/cpu.h>
    122   1.26       cgd #include <sys/resourcevar.h>
    123   1.55      ross #include <sys/sched.h>
    124  1.179       dsl #include <sys/syscall_stats.h>
    125  1.174        ad #include <sys/sleepq.h>
    126  1.174        ad #include <sys/lockdebug.h>
    127  1.190        ad #include <sys/evcnt.h>
    128  1.199        ad #include <sys/intr.h>
    129  1.207        ad #include <sys/lwpctl.h>
    130  1.209        ad #include <sys/atomic.h>
    131  1.215        ad #include <sys/simplelock.h>
    132  1.223        ad #include <sys/bitops.h>
    133  1.223        ad #include <sys/kmem.h>
    134  1.223        ad #include <sys/sysctl.h>
    135  1.223        ad #include <sys/idle.h>
    136   1.47       mrg 
    137   1.47       mrg #include <uvm/uvm_extern.h>
    138   1.47       mrg 
    139  1.231        ad #include <dev/lockstat.h>
    140  1.231        ad 
    141  1.223        ad /*
     142  1.223        ad  * Priority related definitions.
    143  1.223        ad  */
    144  1.223        ad #define	PRI_TS_COUNT	(NPRI_USER)
    145  1.223        ad #define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
    146  1.223        ad #define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)
    147  1.223        ad 
    148  1.223        ad #define	PRI_HIGHEST_TS	(MAXPRI_USER)
    149  1.223        ad 
    150  1.223        ad /*
    151  1.223        ad  * Bits per map.
    152  1.223        ad  */
    153  1.223        ad #define	BITMAP_BITS	(32)
    154  1.223        ad #define	BITMAP_SHIFT	(5)
    155  1.223        ad #define	BITMAP_MSB	(0x80000000U)
    156  1.223        ad #define	BITMAP_MASK	(BITMAP_BITS - 1)
    157  1.223        ad 
    158  1.223        ad /*
    159  1.223        ad  * Structures, runqueue.
    160  1.223        ad  */
    161   1.34  christos 
    162  1.223        ad typedef struct {
    163  1.223        ad 	TAILQ_HEAD(, lwp) q_head;
    164  1.223        ad } queue_t;
    165  1.223        ad 
    166  1.223        ad typedef struct {
    167  1.223        ad 	/* Lock and bitmap */
    168  1.223        ad 	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
    169  1.223        ad 	/* Counters */
    170  1.223        ad 	u_int		r_count;	/* Count of the threads */
    171  1.223        ad 	u_int		r_avgcount;	/* Average count of threads */
    172  1.223        ad 	u_int		r_mcount;	/* Count of migratable threads */
    173  1.223        ad 	/* Runqueues */
    174  1.223        ad 	queue_t		r_rt_queue[PRI_RT_COUNT];
    175  1.223        ad 	queue_t		r_ts_queue[PRI_TS_COUNT];
    176  1.223        ad } runqueue_t;
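
/*
 * Illustrative sketch only (not used by the scheduler itself): how a
 * priority value is expected to map onto the r_bitmap[] words above.
 * The high bits of the priority select the word and the low bits select
 * a bit within it, MSB first.  The helper name is invented for this
 * example; the enqueue code is expected to follow the same scheme.
 */
#if 0
static inline void
example_bitmap_set(runqueue_t *ci_rq, const pri_t eprio)
{

	ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] |=
	    BITMAP_MSB >> (eprio & BITMAP_MASK);
}
#endif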
    177   1.26       cgd 
    178  1.221        ad static u_int	sched_unsleep(struct lwp *, bool);
    179  1.188      yamt static void	sched_changepri(struct lwp *, pri_t);
    180  1.188      yamt static void	sched_lendpri(struct lwp *, pri_t);
    181  1.223        ad static void	*sched_getrq(runqueue_t *, const pri_t);
    182  1.223        ad #ifdef MULTIPROCESSOR
    183  1.223        ad static lwp_t	*sched_catchlwp(void);
    184  1.223        ad static void	sched_balance(void *);
    185  1.223        ad #endif
    186  1.122   thorpej 
    187  1.174        ad syncobj_t sleep_syncobj = {
    188  1.174        ad 	SOBJ_SLEEPQ_SORTED,
    189  1.174        ad 	sleepq_unsleep,
    190  1.184      yamt 	sleepq_changepri,
    191  1.184      yamt 	sleepq_lendpri,
    192  1.184      yamt 	syncobj_noowner,
    193  1.174        ad };
    194  1.174        ad 
    195  1.174        ad syncobj_t sched_syncobj = {
    196  1.174        ad 	SOBJ_SLEEPQ_SORTED,
    197  1.174        ad 	sched_unsleep,
    198  1.184      yamt 	sched_changepri,
    199  1.184      yamt 	sched_lendpri,
    200  1.184      yamt 	syncobj_noowner,
    201  1.174        ad };
    202  1.122   thorpej 
    203  1.223        ad const int 	schedppq = 1;
    204  1.223        ad callout_t 	sched_pstats_ch;
    205  1.223        ad unsigned	sched_pstats_ticks;
    206  1.223        ad kcondvar_t	lbolt;			/* once a second sleep address */
    207  1.223        ad 
    208  1.223        ad /*
    209  1.231        ad  * Kernel preemption.
    210  1.231        ad  */
    211  1.231        ad #ifdef PREEMPTION
    212  1.231        ad int		sched_kpreempt_pri = PRI_USER_RT;
    213  1.231        ad 
    214  1.231        ad static struct evcnt kpreempt_ev_crit;
    215  1.231        ad static struct evcnt kpreempt_ev_klock;
    216  1.231        ad static struct evcnt kpreempt_ev_ipl;
    217  1.231        ad static struct evcnt kpreempt_ev_immed;
    218  1.231        ad #else
    219  1.231        ad int		sched_kpreempt_pri = INT_MAX;
    220  1.231        ad #endif
    221  1.231        ad int		sched_upreempt_pri = PRI_KERNEL;
    222  1.231        ad 
    223  1.231        ad /*
    224  1.223        ad  * Migration and balancing.
    225  1.223        ad  */
    226  1.223        ad static u_int	cacheht_time;		/* Cache hotness time */
    227  1.223        ad static u_int	min_catch;		/* Minimal LWP count for catching */
    228  1.223        ad static u_int	balance_period;		/* Balance period */
    229  1.223        ad static struct cpu_info *worker_ci;	/* Victim CPU */
    230  1.223        ad #ifdef MULTIPROCESSOR
    231  1.223        ad static struct callout balance_ch;	/* Callout of balancer */
    232  1.223        ad #endif
    233  1.223        ad 
    234   1.26       cgd /*
    235  1.174        ad  * During autoconfiguration or after a panic, a sleep will simply lower the
    236  1.174        ad  * priority briefly to allow interrupts, then return.  The priority to be
    237  1.174        ad  * used (safepri) is machine-dependent, thus this value is initialized and
    238  1.174        ad  * maintained in the machine-dependent layers.  This priority will typically
    239  1.174        ad  * be 0, or the lowest priority that is safe for use on the interrupt stack;
    240  1.174        ad  * it can be made higher to block network software interrupts after panics.
    241   1.26       cgd  */
    242  1.174        ad int	safepri;
    243   1.26       cgd 
    244   1.26       cgd /*
    245  1.174        ad  * OBSOLETE INTERFACE
    246  1.174        ad  *
    247   1.26       cgd  * General sleep call.  Suspends the current process until a wakeup is
    248   1.26       cgd  * performed on the specified identifier.  The process will then be made
    249  1.174        ad  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
    250  1.174        ad  * means no timeout).  If pri includes PCATCH flag, signals are checked
    251   1.26       cgd  * before and after sleeping, else signals are not checked.  Returns 0 if
    252   1.26       cgd  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
    253   1.26       cgd  * signal needs to be delivered, ERESTART is returned if the current system
    254   1.26       cgd  * call should be restarted if possible, and EINTR is returned if the system
    255   1.26       cgd  * call should be interrupted by the signal (return EINTR).
    256   1.77   thorpej  *
    257  1.174        ad  * The interlock is held until we are on a sleep queue. The interlock will
    258  1.174        ad  * be locked before returning back to the caller unless the PNORELOCK flag
    259  1.174        ad  * is specified, in which case the interlock will always be unlocked upon
    260  1.174        ad  * return.
    261   1.26       cgd  */
    262   1.26       cgd int
    263  1.185      yamt ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    264  1.174        ad 	volatile struct simplelock *interlock)
    265   1.26       cgd {
    266  1.122   thorpej 	struct lwp *l = curlwp;
    267  1.174        ad 	sleepq_t *sq;
    268  1.188      yamt 	int error;
    269   1.26       cgd 
    270  1.204        ad 	KASSERT((l->l_pflag & LP_INTR) == 0);
    271  1.204        ad 
    272  1.174        ad 	if (sleepq_dontsleep(l)) {
    273  1.174        ad 		(void)sleepq_abort(NULL, 0);
    274  1.174        ad 		if ((priority & PNORELOCK) != 0)
    275   1.77   thorpej 			simple_unlock(interlock);
    276  1.174        ad 		return 0;
    277   1.26       cgd 	}
    278   1.78  sommerfe 
    279  1.204        ad 	l->l_kpriority = true;
    280  1.174        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    281  1.174        ad 	sleepq_enter(sq, l);
    282  1.204        ad 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    283   1.42       cgd 
    284  1.174        ad 	if (interlock != NULL) {
    285  1.204        ad 		KASSERT(simple_lock_held(interlock));
    286  1.174        ad 		simple_unlock(interlock);
    287  1.150       chs 	}
    288  1.150       chs 
    289  1.188      yamt 	error = sleepq_block(timo, priority & PCATCH);
    290  1.126        pk 
    291  1.174        ad 	if (interlock != NULL && (priority & PNORELOCK) == 0)
    292  1.126        pk 		simple_lock(interlock);
    293  1.174        ad 
    294  1.174        ad 	return error;
    295   1.26       cgd }
    296   1.26       cgd 
    297  1.187        ad int
    298  1.187        ad mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    299  1.187        ad 	kmutex_t *mtx)
    300  1.187        ad {
    301  1.187        ad 	struct lwp *l = curlwp;
    302  1.187        ad 	sleepq_t *sq;
    303  1.188      yamt 	int error;
    304  1.187        ad 
    305  1.204        ad 	KASSERT((l->l_pflag & LP_INTR) == 0);
    306  1.204        ad 
    307  1.187        ad 	if (sleepq_dontsleep(l)) {
    308  1.187        ad 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
    309  1.187        ad 		return 0;
    310  1.187        ad 	}
    311  1.187        ad 
    312  1.204        ad 	l->l_kpriority = true;
    313  1.187        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    314  1.187        ad 	sleepq_enter(sq, l);
    315  1.204        ad 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
    316  1.187        ad 	mutex_exit(mtx);
    317  1.188      yamt 	error = sleepq_block(timo, priority & PCATCH);
    318  1.187        ad 
    319  1.187        ad 	if ((priority & PNORELOCK) == 0)
    320  1.187        ad 		mutex_enter(mtx);
    321  1.187        ad 
    322  1.187        ad 	return error;
    323  1.187        ad }
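
/*
 * Hedged usage sketch (hypothetical caller, not part of this file): a
 * typical mtsleep()/wakeup() pairing around a shared flag.  The mutex,
 * flag and wait channel names are invented for illustration and the
 * mutex is assumed to have been set up with mutex_init() elsewhere;
 * new code would normally use condvars (cv_wait/cv_broadcast) rather
 * than this obsolete interface.
 */
#if 0
static kmutex_t example_lock;
static int example_ready;

static int
example_wait(void)
{
	int error = 0;

	mutex_enter(&example_lock);
	while (!example_ready && error == 0) {
		/* Sleep at most hz ticks; PCATCH lets signals interrupt. */
		error = mtsleep(&example_ready, PCATCH, "exwait", hz,
		    &example_lock);
	}
	mutex_exit(&example_lock);
	return error;
}

static void
example_post(void)
{

	mutex_enter(&example_lock);
	example_ready = 1;
	mutex_exit(&example_lock);
	wakeup(&example_ready);
}
#endif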
    324  1.187        ad 
    325   1.26       cgd /*
    326  1.174        ad  * General sleep call for situations where a wake-up is not expected.
    327   1.26       cgd  */
    328  1.174        ad int
    329  1.182   thorpej kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
    330   1.26       cgd {
    331  1.174        ad 	struct lwp *l = curlwp;
    332  1.174        ad 	sleepq_t *sq;
    333  1.174        ad 	int error;
    334   1.26       cgd 
    335  1.174        ad 	if (sleepq_dontsleep(l))
    336  1.174        ad 		return sleepq_abort(NULL, 0);
    337   1.26       cgd 
    338  1.174        ad 	if (mtx != NULL)
    339  1.174        ad 		mutex_exit(mtx);
    340  1.204        ad 	l->l_kpriority = true;
    341  1.174        ad 	sq = sleeptab_lookup(&sleeptab, l);
    342  1.174        ad 	sleepq_enter(sq, l);
    343  1.204        ad 	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
    344  1.188      yamt 	error = sleepq_block(timo, intr);
    345  1.174        ad 	if (mtx != NULL)
    346  1.174        ad 		mutex_enter(mtx);
    347   1.83   thorpej 
    348  1.174        ad 	return error;
    349  1.139        cl }
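
/*
 * Hedged usage sketch (hypothetical caller): kpause() is the natural
 * choice for a fixed delay where no wakeup() is expected, e.g. when
 * polling hardware.  mstohz() is assumed to be available from
 * <sys/param.h>.
 */
#if 0
static void
example_poll_delay(void)
{

	/* Sleep roughly 10ms, not interruptible by signals, no mutex. */
	(void)kpause("expoll", false, mstohz(10), NULL);
}
#endif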
    350  1.139        cl 
    351   1.26       cgd /*
    352  1.174        ad  * OBSOLETE INTERFACE
    353  1.174        ad  *
    354   1.26       cgd  * Make all processes sleeping on the specified identifier runnable.
    355   1.26       cgd  */
    356   1.26       cgd void
    357  1.174        ad wakeup(wchan_t ident)
    358   1.26       cgd {
    359  1.174        ad 	sleepq_t *sq;
    360   1.83   thorpej 
    361  1.174        ad 	if (cold)
    362  1.174        ad 		return;
    363   1.83   thorpej 
    364  1.174        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    365  1.174        ad 	sleepq_wake(sq, ident, (u_int)-1);
    366   1.63   thorpej }
    367   1.63   thorpej 
    368   1.63   thorpej /*
    369  1.174        ad  * OBSOLETE INTERFACE
    370  1.174        ad  *
    371   1.63   thorpej  * Make the highest priority process first in line on the specified
    372   1.63   thorpej  * identifier runnable.
    373   1.63   thorpej  */
    374  1.174        ad void
    375  1.174        ad wakeup_one(wchan_t ident)
    376   1.63   thorpej {
    377  1.174        ad 	sleepq_t *sq;
    378   1.63   thorpej 
    379  1.174        ad 	if (cold)
    380  1.174        ad 		return;
    381  1.188      yamt 
    382  1.174        ad 	sq = sleeptab_lookup(&sleeptab, ident);
    383  1.174        ad 	sleepq_wake(sq, ident, 1);
    384  1.174        ad }
    385   1.63   thorpej 
    386  1.117  gmcgarry 
    387  1.117  gmcgarry /*
    388  1.117  gmcgarry  * General yield call.  Puts the current process back on its run queue and
    389  1.117  gmcgarry  * performs a voluntary context switch.  Should only be called when the
    390  1.198        ad  * current process explicitly requests it (eg sched_yield(2)).
    391  1.117  gmcgarry  */
    392  1.117  gmcgarry void
    393  1.117  gmcgarry yield(void)
    394  1.117  gmcgarry {
    395  1.122   thorpej 	struct lwp *l = curlwp;
    396  1.117  gmcgarry 
    397  1.174        ad 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    398  1.174        ad 	lwp_lock(l);
    399  1.217        ad 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    400  1.188      yamt 	KASSERT(l->l_stat == LSONPROC);
    401  1.204        ad 	l->l_kpriority = false;
    402  1.188      yamt 	(void)mi_switch(l);
    403  1.174        ad 	KERNEL_LOCK(l->l_biglocks, l);
    404   1.69   thorpej }
    405   1.69   thorpej 
    406   1.69   thorpej /*
    407   1.69   thorpej  * General preemption call.  Puts the current process back on its run queue
    408  1.156    rpaulo  * and performs an involuntary context switch.
    409   1.69   thorpej  */
    410   1.69   thorpej void
    411  1.174        ad preempt(void)
    412   1.69   thorpej {
    413  1.122   thorpej 	struct lwp *l = curlwp;
    414   1.69   thorpej 
    415  1.174        ad 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
    416  1.174        ad 	lwp_lock(l);
    417  1.217        ad 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    418  1.188      yamt 	KASSERT(l->l_stat == LSONPROC);
    419  1.204        ad 	l->l_kpriority = false;
    420  1.174        ad 	l->l_nivcsw++;
    421  1.188      yamt 	(void)mi_switch(l);
    422  1.174        ad 	KERNEL_LOCK(l->l_biglocks, l);
    423   1.69   thorpej }
    424   1.69   thorpej 
    425  1.231        ad #ifdef PREEMPTION
    426  1.231        ad /* XXX Yuck, for lockstat. */
    427  1.231        ad static char	in_critical_section;
    428  1.231        ad static char	kernel_lock_held;
    429  1.231        ad static char	spl_raised;
    430  1.231        ad static char	is_softint;
    431  1.231        ad 
    432  1.231        ad /*
    433  1.231        ad  * Handle a request made by another agent to preempt the current LWP
    434  1.231        ad  * in-kernel.  Usually called when l_dopreempt may be non-zero.
    435  1.231        ad  */
    436  1.231        ad bool
    437  1.231        ad kpreempt(uintptr_t where)
    438  1.231        ad {
    439  1.231        ad 	uintptr_t failed;
    440  1.231        ad 	lwp_t *l;
    441  1.231        ad 	int s, dop;
    442  1.231        ad 
    443  1.231        ad 	l = curlwp;
    444  1.231        ad 	failed = 0;
    445  1.231        ad 	while ((dop = l->l_dopreempt) != 0) {
    446  1.231        ad 		if (l->l_stat != LSONPROC) {
    447  1.231        ad 			/*
    448  1.231        ad 			 * About to block (or die), let it happen.
    449  1.231        ad 			 * Doesn't really count as "preemption has
    450  1.231        ad 			 * been blocked", since we're going to
    451  1.231        ad 			 * context switch.
    452  1.231        ad 			 */
    453  1.231        ad 			l->l_dopreempt = 0;
    454  1.231        ad 			return true;
    455  1.231        ad 		}
    456  1.231        ad 		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
    457  1.231        ad 			/* Can't preempt idle loop, don't count as failure. */
    458  1.231        ad 		    	l->l_dopreempt = 0;
    459  1.231        ad 		    	return true;
    460  1.231        ad 		}
    461  1.231        ad 		if (__predict_false(l->l_nopreempt != 0)) {
    462  1.231        ad 			/* LWP holds preemption disabled, explicitly. */
    463  1.231        ad 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    464  1.231        ad 				atomic_inc_64(&kpreempt_ev_crit.ev_count);
    465  1.231        ad 			}
    466  1.231        ad 			failed = (uintptr_t)&in_critical_section;
    467  1.231        ad 			break;
    468  1.231        ad 		}
    469  1.231        ad 		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
    470  1.231        ad 		    	/* Can't preempt soft interrupts yet. */
    471  1.231        ad 		    	l->l_dopreempt = 0;
    472  1.231        ad 		    	failed = (uintptr_t)&is_softint;
    473  1.231        ad 		    	break;
    474  1.231        ad 		}
    475  1.231        ad 		s = splsched();
    476  1.231        ad 		if (__predict_false(l->l_blcnt != 0 ||
    477  1.231        ad 		    curcpu()->ci_biglock_wanted != NULL)) {
    478  1.231        ad 			/* Hold or want kernel_lock, code is not MT safe. */
    479  1.231        ad 			splx(s);
    480  1.231        ad 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    481  1.231        ad 				atomic_inc_64(&kpreempt_ev_klock.ev_count);
    482  1.231        ad 			}
    483  1.231        ad 			failed = (uintptr_t)&kernel_lock_held;
    484  1.231        ad 			break;
    485  1.231        ad 		}
    486  1.231        ad 		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
    487  1.231        ad 			/*
    488  1.231        ad 			 * It may be that the IPL is too high.
     489  1.231        ad 			 * cpu_kpreempt_enter() can schedule an
    490  1.231        ad 			 * interrupt to retry later.
    491  1.231        ad 			 */
    492  1.231        ad 			splx(s);
    493  1.231        ad 			if ((dop & DOPREEMPT_COUNTED) == 0) {
    494  1.231        ad 				atomic_inc_64(&kpreempt_ev_ipl.ev_count);
    495  1.231        ad 			}
    496  1.231        ad 			failed = (uintptr_t)&spl_raised;
    497  1.231        ad 			break;
    498  1.231        ad 		}
    499  1.231        ad 		/* Do it! */
    500  1.231        ad 		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
    501  1.231        ad 			atomic_inc_64(&kpreempt_ev_immed.ev_count);
    502  1.231        ad 		}
    503  1.231        ad 		lwp_lock(l);
    504  1.231        ad 		mi_switch(l);
    505  1.231        ad 		l->l_nopreempt++;
    506  1.231        ad 		splx(s);
    507  1.231        ad 
    508  1.231        ad 		/* Take care of any MD cleanup. */
    509  1.231        ad 		cpu_kpreempt_exit(where);
    510  1.231        ad 		l->l_nopreempt--;
    511  1.231        ad 	}
    512  1.231        ad 
    513  1.231        ad 	/* Record preemption failure for reporting via lockstat. */
    514  1.231        ad 	if (__predict_false(failed)) {
    515  1.231        ad 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
    516  1.231        ad 		int lsflag = 0;
    517  1.231        ad 		LOCKSTAT_ENTER(lsflag);
    518  1.231        ad 		/* Might recurse, make it atomic. */
    519  1.231        ad 		if (__predict_false(lsflag)) {
    520  1.231        ad 			if (where == 0) {
    521  1.231        ad 				where = (uintptr_t)__builtin_return_address(0);
    522  1.231        ad 			}
    523  1.231        ad 			if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr,
    524  1.231        ad 			    NULL, (void *)where) == NULL) {
    525  1.231        ad 				LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
    526  1.231        ad 				l->l_pfaillock = failed;
    527  1.231        ad 			}
    528  1.231        ad 		}
    529  1.231        ad 		LOCKSTAT_EXIT(lsflag);
    530  1.231        ad 	}
    531  1.231        ad 
    532  1.231        ad 	return failed;
    533  1.231        ad }
    534  1.231        ad 
    535   1.69   thorpej /*
    536  1.231        ad  * Return true if preemption is explicitly disabled.
    537  1.230        ad  */
    538  1.231        ad bool
    539  1.231        ad kpreempt_disabled(void)
    540  1.231        ad {
    541  1.231        ad 	lwp_t *l;
    542  1.231        ad 
    543  1.231        ad 	l = curlwp;
    544  1.231        ad 
    545  1.231        ad 	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
    546  1.231        ad 	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
    547  1.231        ad }
    548  1.231        ad #else
    549  1.231        ad bool
    550  1.231        ad kpreempt(uintptr_t where)
    551  1.231        ad {
    552  1.231        ad 
    553  1.231        ad 	panic("kpreempt");
    554  1.231        ad 	return true;
    555  1.231        ad }
    556  1.231        ad 
    557  1.231        ad bool
    558  1.231        ad kpreempt_disabled(void)
    559  1.230        ad {
    560  1.230        ad 
    561  1.231        ad 	return true;
    562  1.230        ad }
    563  1.231        ad #endif
    564  1.230        ad 
    565  1.230        ad /*
    566  1.231        ad  * Disable kernel preemption.
    567  1.230        ad  */
    568  1.230        ad void
    569  1.231        ad kpreempt_disable(void)
    570  1.230        ad {
    571  1.230        ad 
    572  1.231        ad 	KPREEMPT_DISABLE(curlwp);
    573  1.230        ad }
    574  1.230        ad 
    575  1.230        ad /*
    576  1.231        ad  * Reenable kernel preemption.
    577  1.230        ad  */
    578  1.231        ad void
    579  1.231        ad kpreempt_enable(void)
    580  1.230        ad {
    581  1.230        ad 
    582  1.231        ad 	KPREEMPT_ENABLE(curlwp);
    583  1.230        ad }
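
/*
 * Hedged usage sketch (hypothetical caller): kpreempt_disable() and
 * kpreempt_enable() bracket short sections that must not be preempted
 * and migrated to another CPU, e.g. while touching curcpu()-local
 * data.  The counter used here is only an example.
 */
#if 0
static void
example_percpu_touch(void)
{

	kpreempt_disable();
	/* Cannot be preempted or moved to another CPU in here. */
	curcpu()->ci_data.cpu_nswtch++;
	kpreempt_enable();
}
#endif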
    584  1.230        ad 
    585  1.230        ad /*
    586  1.188      yamt  * Compute the amount of time during which the current lwp was running.
    587  1.130   nathanw  *
    588  1.188      yamt  * - update l_rtime unless it's an idle lwp.
    589  1.188      yamt  */
    590  1.188      yamt 
    591  1.199        ad void
    592  1.212      yamt updatertime(lwp_t *l, const struct bintime *now)
    593  1.188      yamt {
    594  1.188      yamt 
    595  1.199        ad 	if ((l->l_flag & LW_IDLE) != 0)
    596  1.188      yamt 		return;
    597  1.188      yamt 
    598  1.212      yamt 	/* rtime += now - stime */
    599  1.212      yamt 	bintime_add(&l->l_rtime, now);
    600  1.212      yamt 	bintime_sub(&l->l_rtime, &l->l_stime);
    601  1.188      yamt }
    602  1.188      yamt 
    603  1.188      yamt /*
    604  1.188      yamt  * The machine independent parts of context switch.
    605  1.188      yamt  *
    606  1.188      yamt  * Returns 1 if another LWP was actually run.
    607   1.26       cgd  */
    608  1.122   thorpej int
    609  1.199        ad mi_switch(lwp_t *l)
    610   1.26       cgd {
    611  1.216     rmind 	struct cpu_info *ci, *tci = NULL;
    612   1.76   thorpej 	struct schedstate_percpu *spc;
    613  1.188      yamt 	struct lwp *newl;
    614  1.174        ad 	int retval, oldspl;
    615  1.212      yamt 	struct bintime bt;
    616  1.199        ad 	bool returning;
    617   1.26       cgd 
    618  1.188      yamt 	KASSERT(lwp_locked(l, NULL));
    619  1.231        ad 	KASSERT(kpreempt_disabled());
    620  1.188      yamt 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
    621  1.174        ad 
    622  1.174        ad #ifdef KSTACK_CHECK_MAGIC
    623  1.174        ad 	kstack_check_magic(l);
    624  1.174        ad #endif
    625   1.83   thorpej 
    626  1.212      yamt 	binuptime(&bt);
    627  1.199        ad 
    628  1.231        ad 	KASSERT(l->l_cpu == curcpu());
    629  1.196        ad 	ci = l->l_cpu;
    630  1.196        ad 	spc = &ci->ci_schedstate;
    631  1.199        ad 	returning = false;
    632  1.190        ad 	newl = NULL;
    633  1.190        ad 
    634  1.199        ad 	/*
    635  1.199        ad 	 * If we have been asked to switch to a specific LWP, then there
    636  1.199        ad 	 * is no need to inspect the run queues.  If a soft interrupt is
    637  1.199        ad 	 * blocking, then return to the interrupted thread without adjusting
    638  1.199        ad 	 * VM context or its start time: neither have been changed in order
    639  1.199        ad 	 * to take the interrupt.
    640  1.199        ad 	 */
    641  1.190        ad 	if (l->l_switchto != NULL) {
    642  1.204        ad 		if ((l->l_pflag & LP_INTR) != 0) {
    643  1.199        ad 			returning = true;
    644  1.199        ad 			softint_block(l);
    645  1.199        ad 			if ((l->l_flag & LW_TIMEINTR) != 0)
    646  1.212      yamt 				updatertime(l, &bt);
    647  1.199        ad 		}
    648  1.190        ad 		newl = l->l_switchto;
    649  1.190        ad 		l->l_switchto = NULL;
    650  1.190        ad 	}
    651  1.204        ad #ifndef __HAVE_FAST_SOFTINTS
    652  1.204        ad 	else if (ci->ci_data.cpu_softints != 0) {
    653  1.204        ad 		/* There are pending soft interrupts, so pick one. */
    654  1.204        ad 		newl = softint_picklwp();
    655  1.204        ad 		newl->l_stat = LSONPROC;
    656  1.204        ad 		newl->l_flag |= LW_RUNNING;
    657  1.204        ad 	}
    658  1.204        ad #endif	/* !__HAVE_FAST_SOFTINTS */
    659  1.190        ad 
    660  1.180       dsl 	/* Count time spent in current system call */
    661  1.199        ad 	if (!returning) {
    662  1.199        ad 		SYSCALL_TIME_SLEEP(l);
    663  1.180       dsl 
    664  1.199        ad 		/*
    665  1.199        ad 		 * XXXSMP If we are using h/w performance counters,
    666  1.199        ad 		 * save context.
    667  1.199        ad 		 */
    668  1.174        ad #if PERFCTRS
    669  1.199        ad 		if (PMC_ENABLED(l->l_proc)) {
    670  1.199        ad 			pmc_save_context(l->l_proc);
    671  1.199        ad 		}
    672  1.199        ad #endif
    673  1.212      yamt 		updatertime(l, &bt);
    674  1.174        ad 	}
    675  1.113  gmcgarry 
    676  1.113  gmcgarry 	/*
    677  1.174        ad 	 * If on the CPU and we have gotten this far, then we must yield.
    678  1.113  gmcgarry 	 */
    679  1.174        ad 	KASSERT(l->l_stat != LSRUN);
    680  1.216     rmind 	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
    681  1.217        ad 		KASSERT(lwp_locked(l, spc->spc_lwplock));
    682  1.216     rmind 
    683  1.220     rmind 		if (l->l_target_cpu == l->l_cpu) {
    684  1.220     rmind 			l->l_target_cpu = NULL;
    685  1.220     rmind 		} else {
    686  1.220     rmind 			tci = l->l_target_cpu;
    687  1.220     rmind 		}
    688  1.220     rmind 
    689  1.216     rmind 		if (__predict_false(tci != NULL)) {
    690  1.216     rmind 			/* Double-lock the runqueues */
    691  1.216     rmind 			spc_dlock(ci, tci);
    692  1.216     rmind 		} else {
    693  1.216     rmind 			/* Lock the runqueue */
    694  1.216     rmind 			spc_lock(ci);
    695  1.216     rmind 		}
    696  1.216     rmind 
    697  1.188      yamt 		if ((l->l_flag & LW_IDLE) == 0) {
    698  1.188      yamt 			l->l_stat = LSRUN;
    699  1.216     rmind 			if (__predict_false(tci != NULL)) {
    700  1.216     rmind 				/*
    701  1.216     rmind 				 * Set the new CPU, lock and unset the
    702  1.216     rmind 				 * l_target_cpu - thread will be enqueued
    703  1.216     rmind 				 * to the runqueue of target CPU.
    704  1.216     rmind 				 */
    705  1.216     rmind 				l->l_cpu = tci;
    706  1.216     rmind 				lwp_setlock(l, tci->ci_schedstate.spc_mutex);
    707  1.216     rmind 				l->l_target_cpu = NULL;
    708  1.216     rmind 			} else {
    709  1.216     rmind 				lwp_setlock(l, spc->spc_mutex);
    710  1.216     rmind 			}
    711  1.188      yamt 			sched_enqueue(l, true);
    712  1.216     rmind 		} else {
    713  1.216     rmind 			KASSERT(tci == NULL);
    714  1.188      yamt 			l->l_stat = LSIDL;
    715  1.216     rmind 		}
    716  1.216     rmind 	} else {
    717  1.216     rmind 		/* Lock the runqueue */
    718  1.216     rmind 		spc_lock(ci);
    719  1.174        ad 	}
    720  1.174        ad 
    721  1.174        ad 	/*
     722  1.201     rmind 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
    723  1.209        ad 	 * If no LWP is runnable, select the idle LWP.
    724  1.209        ad 	 *
     725  1.209        ad 	 * Note that spc_lwplock might not necessarily be held, and
     726  1.209        ad 	 * the new thread will be unlocked after setting the LWP lock.
    727  1.174        ad 	 */
    728  1.190        ad 	if (newl == NULL) {
    729  1.190        ad 		newl = sched_nextlwp();
    730  1.190        ad 		if (newl != NULL) {
    731  1.190        ad 			sched_dequeue(newl);
    732  1.190        ad 			KASSERT(lwp_locked(newl, spc->spc_mutex));
    733  1.190        ad 			newl->l_stat = LSONPROC;
    734  1.196        ad 			newl->l_cpu = ci;
    735  1.190        ad 			newl->l_flag |= LW_RUNNING;
    736  1.217        ad 			lwp_setlock(newl, spc->spc_lwplock);
    737  1.190        ad 		} else {
    738  1.196        ad 			newl = ci->ci_data.cpu_idlelwp;
    739  1.190        ad 			newl->l_stat = LSONPROC;
    740  1.190        ad 			newl->l_flag |= LW_RUNNING;
    741  1.190        ad 		}
    742  1.204        ad 		/*
    743  1.204        ad 		 * Only clear want_resched if there are no
    744  1.204        ad 		 * pending (slow) software interrupts.
    745  1.204        ad 		 */
    746  1.204        ad 		ci->ci_want_resched = ci->ci_data.cpu_softints;
    747  1.199        ad 		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
    748  1.204        ad 		spc->spc_curpriority = lwp_eprio(newl);
    749  1.199        ad 	}
    750  1.199        ad 
    751  1.204        ad 	/* Items that must be updated with the CPU locked. */
    752  1.199        ad 	if (!returning) {
    753  1.204        ad 		/* Update the new LWP's start time. */
    754  1.212      yamt 		newl->l_stime = bt;
    755  1.204        ad 
    756  1.199        ad 		/*
    757  1.204        ad 		 * ci_curlwp changes when a fast soft interrupt occurs.
    758  1.204        ad 		 * We use cpu_onproc to keep track of which kernel or
    759  1.204        ad 		 * user thread is running 'underneath' the software
    760  1.204        ad 		 * interrupt.  This is important for time accounting,
    761  1.204        ad 		 * itimers and forcing user threads to preempt (aston).
    762  1.199        ad 		 */
    763  1.204        ad 		ci->ci_data.cpu_onproc = newl;
    764  1.188      yamt 	}
    765  1.188      yamt 
    766  1.231        ad 	/* Kernel preemption related tasks. */
    767  1.231        ad 	l->l_dopreempt = 0;
    768  1.231        ad 	if (__predict_false(l->l_pfailaddr != 0)) {
    769  1.231        ad 		LOCKSTAT_FLAG(lsflag);
    770  1.231        ad 		LOCKSTAT_ENTER(lsflag);
    771  1.231        ad 		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
    772  1.231        ad 		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
    773  1.231        ad 		    1, l->l_pfailtime, l->l_pfailaddr);
    774  1.231        ad 		LOCKSTAT_EXIT(lsflag);
    775  1.231        ad 		l->l_pfailtime = 0;
    776  1.231        ad 		l->l_pfaillock = 0;
    777  1.231        ad 		l->l_pfailaddr = 0;
    778  1.231        ad 	}
    779  1.231        ad 
    780  1.188      yamt 	if (l != newl) {
    781  1.188      yamt 		struct lwp *prevlwp;
    782  1.174        ad 
    783  1.209        ad 		/* Release all locks, but leave the current LWP locked */
    784  1.216     rmind 		if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
    785  1.216     rmind 			/*
    786  1.216     rmind 			 * In case of migration, drop the local runqueue
     787  1.216     rmind 			 * lock; the thread is on the other runqueue now.
    788  1.216     rmind 			 */
    789  1.216     rmind 			if (__predict_false(tci != NULL))
    790  1.216     rmind 				spc_unlock(ci);
    791  1.209        ad 			/*
    792  1.209        ad 			 * Drop spc_lwplock, if the current LWP has been moved
    793  1.209        ad 			 * to the run queue (it is now locked by spc_mutex).
    794  1.209        ad 			 */
    795  1.217        ad 			mutex_spin_exit(spc->spc_lwplock);
    796  1.188      yamt 		} else {
    797  1.209        ad 			/*
    798  1.209        ad 			 * Otherwise, drop the spc_mutex, we are done with the
    799  1.209        ad 			 * run queues.
    800  1.209        ad 			 */
    801  1.188      yamt 			mutex_spin_exit(spc->spc_mutex);
    802  1.216     rmind 			KASSERT(tci == NULL);
    803  1.188      yamt 		}
    804  1.188      yamt 
    805  1.209        ad 		/*
     806  1.209        ad 		 * Mark that a context switch is going to be performed
    807  1.209        ad 		 * for this LWP, to protect it from being switched
    808  1.209        ad 		 * to on another CPU.
    809  1.209        ad 		 */
    810  1.209        ad 		KASSERT(l->l_ctxswtch == 0);
    811  1.209        ad 		l->l_ctxswtch = 1;
    812  1.209        ad 		l->l_ncsw++;
    813  1.209        ad 		l->l_flag &= ~LW_RUNNING;
    814  1.209        ad 
    815  1.209        ad 		/*
    816  1.209        ad 		 * Increase the count of spin-mutexes before the release
    817  1.209        ad 		 * of the last lock - we must remain at IPL_SCHED during
    818  1.209        ad 		 * the context switch.
    819  1.209        ad 		 */
    820  1.209        ad 		oldspl = MUTEX_SPIN_OLDSPL(ci);
    821  1.209        ad 		ci->ci_mtx_count--;
    822  1.209        ad 		lwp_unlock(l);
    823  1.209        ad 
    824  1.218        ad 		/* Count the context switch on this CPU. */
    825  1.218        ad 		ci->ci_data.cpu_nswtch++;
    826  1.188      yamt 
    827  1.209        ad 		/* Update status for lwpctl, if present. */
    828  1.209        ad 		if (l->l_lwpctl != NULL)
    829  1.209        ad 			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
    830  1.209        ad 
    831  1.199        ad 		/*
    832  1.199        ad 		 * Save old VM context, unless a soft interrupt
    833  1.199        ad 		 * handler is blocking.
    834  1.199        ad 		 */
    835  1.199        ad 		if (!returning)
    836  1.199        ad 			pmap_deactivate(l);
    837  1.188      yamt 
    838  1.209        ad 		/*
     839  1.209        ad 		 * We may need to spin-wait if 'newl' is still
    840  1.209        ad 		 * context switching on another CPU.
    841  1.209        ad 		 */
    842  1.209        ad 		if (newl->l_ctxswtch != 0) {
    843  1.209        ad 			u_int count;
    844  1.209        ad 			count = SPINLOCK_BACKOFF_MIN;
    845  1.209        ad 			while (newl->l_ctxswtch)
    846  1.209        ad 				SPINLOCK_BACKOFF(count);
    847  1.209        ad 		}
    848  1.207        ad 
    849  1.188      yamt 		/* Switch to the new LWP.. */
    850  1.204        ad 		prevlwp = cpu_switchto(l, newl, returning);
    851  1.207        ad 		ci = curcpu();
    852  1.207        ad 
    853  1.188      yamt 		/*
    854  1.209        ad 		 * Switched away - we have new curlwp.
    855  1.209        ad 		 * Restore VM context and IPL.
    856  1.188      yamt 		 */
    857  1.209        ad 		pmap_activate(l);
    858  1.188      yamt 		if (prevlwp != NULL) {
    859  1.209        ad 			/* Normalize the count of the spin-mutexes */
    860  1.209        ad 			ci->ci_mtx_count++;
    861  1.209        ad 			/* Unmark the state of context switch */
    862  1.209        ad 			membar_exit();
    863  1.209        ad 			prevlwp->l_ctxswtch = 0;
    864  1.188      yamt 		}
    865  1.209        ad 
    866  1.209        ad 		/* Update status for lwpctl, if present. */
    867  1.219        ad 		if (l->l_lwpctl != NULL) {
    868  1.209        ad 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
    869  1.219        ad 			l->l_lwpctl->lc_pctr++;
    870  1.219        ad 		}
    871  1.174        ad 
    872  1.231        ad 		KASSERT(l->l_cpu == ci);
    873  1.231        ad 		splx(oldspl);
    874  1.188      yamt 		retval = 1;
    875  1.188      yamt 	} else {
    876  1.188      yamt 		/* Nothing to do - just unlock and return. */
    877  1.216     rmind 		KASSERT(tci == NULL);
    878  1.216     rmind 		spc_unlock(ci);
    879  1.188      yamt 		lwp_unlock(l);
    880  1.122   thorpej 		retval = 0;
    881  1.122   thorpej 	}
    882  1.110    briggs 
    883  1.188      yamt 	KASSERT(l == curlwp);
    884  1.188      yamt 	KASSERT(l->l_stat == LSONPROC);
    885  1.188      yamt 
    886  1.110    briggs 	/*
    887  1.174        ad 	 * XXXSMP If we are using h/w performance counters, restore context.
    888  1.231        ad 	 * XXXSMP preemption problem.
    889   1.26       cgd 	 */
    890  1.114  gmcgarry #if PERFCTRS
    891  1.175  christos 	if (PMC_ENABLED(l->l_proc)) {
    892  1.175  christos 		pmc_restore_context(l->l_proc);
    893  1.166  christos 	}
    894  1.114  gmcgarry #endif
    895  1.180       dsl 	SYSCALL_TIME_WAKEUP(l);
    896  1.188      yamt 	LOCKDEBUG_BARRIER(NULL, 1);
    897  1.169      yamt 
    898  1.122   thorpej 	return retval;
    899   1.26       cgd }
    900   1.26       cgd 
    901   1.26       cgd /*
    902  1.174        ad  * Change process state to be runnable, placing it on the run queue if it is
    903  1.174        ad  * in memory, and awakening the swapper if it isn't in memory.
    904  1.174        ad  *
    905  1.174        ad  * Call with the process and LWP locked.  Will return with the LWP unlocked.
    906   1.26       cgd  */
    907   1.26       cgd void
    908  1.122   thorpej setrunnable(struct lwp *l)
    909   1.26       cgd {
    910  1.122   thorpej 	struct proc *p = l->l_proc;
    911  1.205        ad 	struct cpu_info *ci;
    912  1.174        ad 	sigset_t *ss;
    913   1.26       cgd 
    914  1.188      yamt 	KASSERT((l->l_flag & LW_IDLE) == 0);
    915  1.229        ad 	KASSERT(mutex_owned(p->p_lock));
    916  1.183        ad 	KASSERT(lwp_locked(l, NULL));
    917  1.205        ad 	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
    918   1.83   thorpej 
    919  1.122   thorpej 	switch (l->l_stat) {
    920  1.122   thorpej 	case LSSTOP:
    921   1.33   mycroft 		/*
    922   1.33   mycroft 		 * If we're being traced (possibly because someone attached us
    923   1.33   mycroft 		 * while we were stopped), check for a signal from the debugger.
    924   1.33   mycroft 		 */
    925  1.174        ad 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
    926  1.174        ad 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
    927  1.174        ad 				ss = &l->l_sigpend.sp_set;
    928  1.174        ad 			else
    929  1.174        ad 				ss = &p->p_sigpend.sp_set;
    930  1.174        ad 			sigaddset(ss, p->p_xstat);
    931  1.174        ad 			signotify(l);
    932   1.53   mycroft 		}
    933  1.174        ad 		p->p_nrlwps++;
    934   1.26       cgd 		break;
    935  1.174        ad 	case LSSUSPENDED:
    936  1.178     pavel 		l->l_flag &= ~LW_WSUSPEND;
    937  1.174        ad 		p->p_nrlwps++;
    938  1.192     rmind 		cv_broadcast(&p->p_lwpcv);
    939  1.122   thorpej 		break;
    940  1.174        ad 	case LSSLEEP:
    941  1.174        ad 		KASSERT(l->l_wchan != NULL);
    942   1.26       cgd 		break;
    943  1.174        ad 	default:
    944  1.174        ad 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
    945   1.26       cgd 	}
    946  1.139        cl 
    947  1.174        ad 	/*
     948  1.174        ad 	 * If the LWP was sleeping interruptibly, then it's OK to start it
    949  1.174        ad 	 * again.  If not, mark it as still sleeping.
    950  1.174        ad 	 */
    951  1.174        ad 	if (l->l_wchan != NULL) {
    952  1.174        ad 		l->l_stat = LSSLEEP;
    953  1.183        ad 		/* lwp_unsleep() will release the lock. */
    954  1.221        ad 		lwp_unsleep(l, true);
    955  1.174        ad 		return;
    956  1.174        ad 	}
    957  1.139        cl 
    958  1.174        ad 	/*
    959  1.174        ad 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
    960  1.174        ad 	 * about to call mi_switch(), in which case it will yield.
    961  1.174        ad 	 */
    962  1.188      yamt 	if ((l->l_flag & LW_RUNNING) != 0) {
    963  1.174        ad 		l->l_stat = LSONPROC;
    964  1.174        ad 		l->l_slptime = 0;
    965  1.174        ad 		lwp_unlock(l);
    966  1.174        ad 		return;
    967  1.174        ad 	}
    968  1.122   thorpej 
    969  1.174        ad 	/*
     970  1.205        ad 	 * Look for a CPU to run on.
    971  1.205        ad 	 * Set the LWP runnable.
    972  1.174        ad 	 */
    973  1.205        ad 	ci = sched_takecpu(l);
    974  1.205        ad 	l->l_cpu = ci;
    975  1.206        ad 	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
    976  1.206        ad 		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
    977  1.206        ad 		lwp_lock(l);
    978  1.206        ad 	}
    979  1.188      yamt 	sched_setrunnable(l);
    980  1.174        ad 	l->l_stat = LSRUN;
    981  1.122   thorpej 	l->l_slptime = 0;
    982  1.174        ad 
    983  1.205        ad 	/*
    984  1.205        ad 	 * If thread is swapped out - wake the swapper to bring it back in.
    985  1.205        ad 	 * Otherwise, enter it into a run queue.
    986  1.205        ad 	 */
    987  1.178     pavel 	if (l->l_flag & LW_INMEM) {
    988  1.188      yamt 		sched_enqueue(l, false);
    989  1.188      yamt 		resched_cpu(l);
    990  1.174        ad 		lwp_unlock(l);
    991  1.174        ad 	} else {
    992  1.174        ad 		lwp_unlock(l);
    993  1.177        ad 		uvm_kick_scheduler();
    994  1.174        ad 	}
    995   1.26       cgd }
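
/*
 * Hedged sketch of the expected calling protocol for setrunnable()
 * (hypothetical caller, fragment only): the proc and the LWP are locked
 * on entry and the LWP lock is dropped inside.  suspendsched() below
 * follows the same pattern.
 */
#if 0
	mutex_enter(p->p_lock);
	lwp_lock(l);
	setrunnable(l);			/* releases the LWP lock */
	mutex_exit(p->p_lock);
#endif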
    996   1.26       cgd 
    997   1.26       cgd /*
    998  1.174        ad  * suspendsched:
    999  1.174        ad  *
    1000  1.174        ad  *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
   1001  1.174        ad  */
   1002   1.94    bouyer void
   1003  1.174        ad suspendsched(void)
   1004   1.94    bouyer {
   1005  1.174        ad 	CPU_INFO_ITERATOR cii;
   1006  1.174        ad 	struct cpu_info *ci;
   1007  1.122   thorpej 	struct lwp *l;
   1008  1.174        ad 	struct proc *p;
   1009   1.94    bouyer 
   1010   1.94    bouyer 	/*
   1011  1.174        ad 	 * We do this by process in order not to violate the locking rules.
   1012   1.94    bouyer 	 */
   1013  1.228        ad 	mutex_enter(proc_lock);
   1014  1.174        ad 	PROCLIST_FOREACH(p, &allproc) {
   1015  1.229        ad 		mutex_enter(p->p_lock);
   1016  1.174        ad 
   1017  1.178     pavel 		if ((p->p_flag & PK_SYSTEM) != 0) {
   1018  1.229        ad 			mutex_exit(p->p_lock);
   1019   1.94    bouyer 			continue;
   1020  1.174        ad 		}
   1021  1.174        ad 
   1022  1.174        ad 		p->p_stat = SSTOP;
   1023  1.174        ad 
   1024  1.174        ad 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1025  1.174        ad 			if (l == curlwp)
   1026  1.174        ad 				continue;
   1027  1.174        ad 
   1028  1.174        ad 			lwp_lock(l);
   1029  1.122   thorpej 
   1030   1.97     enami 			/*
    1031  1.174        ad 			 * Set LW_WREBOOT so that the LWP will suspend itself
   1032  1.174        ad 			 * when it tries to return to user mode.  We want to
    1033  1.174        ad 			 * try to get as many LWPs as possible to
   1034  1.174        ad 			 * the user / kernel boundary, so that they will
   1035  1.174        ad 			 * release any locks that they hold.
   1036   1.97     enami 			 */
   1037  1.178     pavel 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
   1038  1.174        ad 
   1039  1.174        ad 			if (l->l_stat == LSSLEEP &&
   1040  1.178     pavel 			    (l->l_flag & LW_SINTR) != 0) {
   1041  1.174        ad 				/* setrunnable() will release the lock. */
   1042  1.174        ad 				setrunnable(l);
   1043  1.174        ad 				continue;
   1044  1.174        ad 			}
   1045  1.174        ad 
   1046  1.174        ad 			lwp_unlock(l);
   1047   1.94    bouyer 		}
   1048  1.174        ad 
   1049  1.229        ad 		mutex_exit(p->p_lock);
   1050   1.94    bouyer 	}
   1051  1.228        ad 	mutex_exit(proc_lock);
   1052  1.174        ad 
   1053  1.174        ad 	/*
   1054  1.174        ad 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
   1055  1.174        ad 	 * They'll trap into the kernel and suspend themselves in userret().
   1056  1.174        ad 	 */
   1057  1.204        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
   1058  1.204        ad 		spc_lock(ci);
   1059  1.204        ad 		cpu_need_resched(ci, RESCHED_IMMED);
   1060  1.204        ad 		spc_unlock(ci);
   1061  1.204        ad 	}
   1062  1.174        ad }
   1063  1.174        ad 
   1064  1.174        ad /*
   1065  1.174        ad  * sched_unsleep:
   1066  1.174        ad  *
    1067  1.174        ad  *	This is called when the LWP has not been awoken normally but instead
   1068  1.174        ad  *	interrupted: for example, if the sleep timed out.  Because of this,
   1069  1.174        ad  *	it's not a valid action for running or idle LWPs.
   1070  1.174        ad  */
   1071  1.221        ad static u_int
   1072  1.221        ad sched_unsleep(struct lwp *l, bool cleanup)
   1073  1.174        ad {
   1074  1.174        ad 
   1075  1.174        ad 	lwp_unlock(l);
   1076  1.174        ad 	panic("sched_unsleep");
   1077  1.174        ad }
   1078  1.174        ad 
   1079  1.204        ad void
   1080  1.188      yamt resched_cpu(struct lwp *l)
   1081  1.188      yamt {
   1082  1.188      yamt 	struct cpu_info *ci;
   1083  1.188      yamt 
   1084  1.188      yamt 	/*
   1085  1.188      yamt 	 * XXXSMP
   1086  1.188      yamt 	 * Since l->l_cpu persists across a context switch,
   1087  1.188      yamt 	 * this gives us *very weak* processor affinity, in
   1088  1.188      yamt 	 * that we notify the CPU on which the process last
   1089  1.188      yamt 	 * ran that it should try to switch.
   1090  1.188      yamt 	 *
   1091  1.188      yamt 	 * This does not guarantee that the process will run on
   1092  1.188      yamt 	 * that processor next, because another processor might
   1093  1.188      yamt 	 * grab it the next time it performs a context switch.
   1094  1.188      yamt 	 *
   1095  1.188      yamt 	 * This also does not handle the case where its last
   1096  1.188      yamt 	 * CPU is running a higher-priority process, but every
   1097  1.188      yamt 	 * other CPU is running a lower-priority process.  There
   1098  1.188      yamt 	 * are ways to handle this situation, but they're not
   1099  1.188      yamt 	 * currently very pretty, and we also need to weigh the
   1100  1.188      yamt 	 * cost of moving a process from one CPU to another.
   1101  1.188      yamt 	 */
   1102  1.204        ad 	ci = l->l_cpu;
   1103  1.204        ad 	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
   1104  1.188      yamt 		cpu_need_resched(ci, 0);
   1105  1.188      yamt }
   1106  1.188      yamt 
   1107  1.188      yamt static void
   1108  1.185      yamt sched_changepri(struct lwp *l, pri_t pri)
   1109  1.174        ad {
   1110  1.174        ad 
   1111  1.188      yamt 	KASSERT(lwp_locked(l, NULL));
   1112  1.174        ad 
   1113  1.204        ad 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
   1114  1.204        ad 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1115  1.204        ad 		sched_dequeue(l);
   1116  1.204        ad 		l->l_priority = pri;
   1117  1.204        ad 		sched_enqueue(l, false);
   1118  1.204        ad 	} else {
   1119  1.174        ad 		l->l_priority = pri;
   1120  1.157      yamt 	}
   1121  1.188      yamt 	resched_cpu(l);
   1122  1.184      yamt }
   1123  1.184      yamt 
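                          /*
                           * sched_lendpri:
                           *
                           *	Lend a priority to an LWP, as done for priority inheritance:
                           *	like sched_changepri(), but updates l_inheritedprio instead
                           *	of l_priority.
                           */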
   1124  1.188      yamt static void
   1125  1.185      yamt sched_lendpri(struct lwp *l, pri_t pri)
   1126  1.184      yamt {
   1127  1.184      yamt 
   1128  1.188      yamt 	KASSERT(lwp_locked(l, NULL));
   1129  1.184      yamt 
   1130  1.204        ad 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
   1131  1.204        ad 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1132  1.204        ad 		sched_dequeue(l);
   1133  1.204        ad 		l->l_inheritedprio = pri;
   1134  1.204        ad 		sched_enqueue(l, false);
   1135  1.204        ad 	} else {
   1136  1.184      yamt 		l->l_inheritedprio = pri;
   1137  1.184      yamt 	}
   1138  1.188      yamt 	resched_cpu(l);
   1139  1.184      yamt }
   1140  1.184      yamt 
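                          /*
                           * syncobj_noowner:
                           *
                           *	Owner callback for sync objects that have no owning LWP;
                           *	always returns NULL.
                           */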
   1141  1.184      yamt struct lwp *
   1142  1.184      yamt syncobj_noowner(wchan_t wchan)
   1143  1.184      yamt {
   1144  1.184      yamt 
   1145  1.184      yamt 	return NULL;
   1146  1.151      yamt }
   1147  1.151      yamt 
   1148  1.188      yamt /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
   1149  1.188      yamt fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
   1150  1.115  nisimura 
   1151  1.130   nathanw /*
   1152  1.188      yamt  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
   1153  1.188      yamt  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
   1154  1.188      yamt  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
   1155  1.188      yamt  *
   1156  1.188      yamt  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
   1157  1.188      yamt  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
   1158  1.188      yamt  *
    1159  1.188      yamt  * If you don't want to bother with the faster/more-accurate formula, you
   1160  1.188      yamt  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
   1161  1.188      yamt  * (more general) method of calculating the %age of CPU used by a process.
   1162  1.134      matt  */
   1163  1.188      yamt #define	CCPU_SHIFT	(FSHIFT + 1)
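
                          /*
                           * A worked example of the decay: sched_pstats() below multiplies
                           * p_pctcpu by ccpu roughly once per second, so after 60 seconds
                           * with no new CPU ticks the value has been scaled by
                           *
                           *	exp(-1/20)^60 == exp(-3) ~= 0.05
                           *
                           * i.e. about 95% of the old value has decayed away, matching the
                           * "decay 95% of `p_pctcpu' in 60 seconds" figure above.
                           */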
   1164  1.134      matt 
   1165  1.134      matt /*
   1166  1.188      yamt  * sched_pstats:
   1167  1.188      yamt  *
   1168  1.188      yamt  * Update process statistics and check CPU resource allocation.
    1169  1.188      yamt  * Call the scheduler-specific hook so that it may adjust process/LWP
    1170  1.188      yamt  * priorities where needed.
   1171  1.130   nathanw  */
   1172  1.188      yamt /* ARGSUSED */
   1173  1.113  gmcgarry void
   1174  1.188      yamt sched_pstats(void *arg)
   1175  1.113  gmcgarry {
   1176  1.188      yamt 	struct rlimit *rlim;
   1177  1.188      yamt 	struct lwp *l;
   1178  1.188      yamt 	struct proc *p;
   1179  1.204        ad 	int sig, clkhz;
   1180  1.188      yamt 	long runtm;
   1181  1.113  gmcgarry 
   1182  1.188      yamt 	sched_pstats_ticks++;
   1183  1.174        ad 
   1184  1.228        ad 	mutex_enter(proc_lock);
   1185  1.188      yamt 	PROCLIST_FOREACH(p, &allproc) {
   1186  1.188      yamt 		/*
   1187  1.188      yamt 		 * Increment time in/out of memory and sleep time (if
   1188  1.188      yamt 		 * sleeping).  We ignore overflow; with 16-bit int's
   1189  1.188      yamt 		 * (remember them?) overflow takes 45 days.
   1190  1.188      yamt 		 */
   1191  1.229        ad 		mutex_enter(p->p_lock);
   1192  1.188      yamt 		mutex_spin_enter(&p->p_stmutex);
   1193  1.212      yamt 		runtm = p->p_rtime.sec;
   1194  1.188      yamt 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1195  1.188      yamt 			if ((l->l_flag & LW_IDLE) != 0)
   1196  1.188      yamt 				continue;
   1197  1.188      yamt 			lwp_lock(l);
   1198  1.212      yamt 			runtm += l->l_rtime.sec;
   1199  1.188      yamt 			l->l_swtime++;
   1200  1.200     rmind 			sched_pstats_hook(l);
   1201  1.188      yamt 			lwp_unlock(l);
   1202  1.113  gmcgarry 
   1203  1.188      yamt 			/*
   1204  1.188      yamt 			 * p_pctcpu is only for ps.
   1205  1.188      yamt 			 */
   1206  1.188      yamt 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
   1207  1.188      yamt 			if (l->l_slptime < 1) {
   1208  1.188      yamt 				clkhz = stathz != 0 ? stathz : hz;
   1209  1.188      yamt #if	(FSHIFT >= CCPU_SHIFT)
   1210  1.188      yamt 				l->l_pctcpu += (clkhz == 100) ?
   1211  1.188      yamt 				    ((fixpt_t)l->l_cpticks) <<
   1212  1.188      yamt 				        (FSHIFT - CCPU_SHIFT) :
    1213  1.188      yamt 				    100 * (((fixpt_t) l->l_cpticks)
   1214  1.188      yamt 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
   1215  1.188      yamt #else
   1216  1.188      yamt 				l->l_pctcpu += ((FSCALE - ccpu) *
   1217  1.188      yamt 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
   1218  1.146      matt #endif
   1219  1.188      yamt 				l->l_cpticks = 0;
   1220  1.188      yamt 			}
   1221  1.188      yamt 		}
   1222  1.188      yamt 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
   1223  1.188      yamt 		mutex_spin_exit(&p->p_stmutex);
   1224  1.174        ad 
   1225  1.188      yamt 		/*
   1226  1.188      yamt 		 * Check if the process exceeds its CPU resource allocation.
   1227  1.188      yamt 		 * If over max, kill it.
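                          		 *
                          		 * runtm and both limits are in seconds.  When over the soft
                          		 * limit, SIGXCPU is sent and the soft limit is raised by five
                          		 * seconds, so the signal repeats until the hard limit is hit.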
   1228  1.188      yamt 		 */
   1229  1.188      yamt 		rlim = &p->p_rlimit[RLIMIT_CPU];
   1230  1.188      yamt 		sig = 0;
   1231  1.188      yamt 		if (runtm >= rlim->rlim_cur) {
   1232  1.188      yamt 			if (runtm >= rlim->rlim_max)
   1233  1.188      yamt 				sig = SIGKILL;
   1234  1.188      yamt 			else {
   1235  1.188      yamt 				sig = SIGXCPU;
   1236  1.188      yamt 				if (rlim->rlim_cur < rlim->rlim_max)
   1237  1.188      yamt 					rlim->rlim_cur += 5;
   1238  1.188      yamt 			}
   1239  1.188      yamt 		}
   1240  1.229        ad 		mutex_exit(p->p_lock);
   1241  1.228        ad 		if (sig)
   1242  1.188      yamt 			psignal(p, sig);
   1243  1.174        ad 	}
   1244  1.228        ad 	mutex_exit(proc_lock);
   1245  1.188      yamt 	uvm_meter();
   1246  1.191        ad 	cv_wakeup(&lbolt);
   1247  1.188      yamt 	callout_schedule(&sched_pstats_ch, hz);
   1248  1.113  gmcgarry }
   1249  1.190        ad 
   1250  1.190        ad void
   1251  1.190        ad sched_init(void)
   1252  1.190        ad {
   1253  1.190        ad 
   1254  1.208        ad 	cv_init(&lbolt, "lbolt");
   1255  1.214        ad 	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
   1256  1.190        ad 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
   1257  1.223        ad 
   1258  1.223        ad 	/* Balancing */
   1259  1.223        ad 	worker_ci = curcpu();
   1260  1.223        ad 	cacheht_time = mstohz(5);		/* ~5 ms  */
   1261  1.223        ad 	balance_period = mstohz(300);		/* ~300ms */
   1262  1.223        ad 
    1263  1.223        ad 	/* Minimum count of LWPs for catching: log2(count of CPUs) */
   1264  1.223        ad 	min_catch = min(ilog2(ncpu), 4);
   1265  1.223        ad 
   1266  1.231        ad #ifdef PREEMPTION
   1267  1.231        ad 	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_INTR, NULL,
   1268  1.231        ad 	   "kpreempt", "defer: critical section");
   1269  1.231        ad 	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_INTR, NULL,
   1270  1.231        ad 	   "kpreempt", "defer: kernel_lock");
   1271  1.231        ad 	evcnt_attach_dynamic(&kpreempt_ev_ipl, EVCNT_TYPE_INTR, NULL,
   1272  1.231        ad 	   "kpreempt", "defer: IPL");
   1273  1.231        ad 	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_INTR, NULL,
   1274  1.231        ad 	   "kpreempt", "immediate");
   1275  1.231        ad #endif
   1276  1.231        ad 
   1277  1.223        ad 	/* Initialize balancing callout and run it */
   1278  1.223        ad #ifdef MULTIPROCESSOR
   1279  1.223        ad 	callout_init(&balance_ch, CALLOUT_MPSAFE);
   1280  1.223        ad 	callout_setfunc(&balance_ch, sched_balance, NULL);
   1281  1.223        ad 	callout_schedule(&balance_ch, balance_period);
   1282  1.223        ad #endif
   1283  1.190        ad 	sched_pstats(NULL);
   1284  1.190        ad }
   1285  1.223        ad 
   1286  1.223        ad SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
   1287  1.223        ad {
   1288  1.223        ad 	const struct sysctlnode *node = NULL;
   1289  1.223        ad 
   1290  1.223        ad 	sysctl_createv(clog, 0, NULL, NULL,
   1291  1.223        ad 		CTLFLAG_PERMANENT,
   1292  1.223        ad 		CTLTYPE_NODE, "kern", NULL,
   1293  1.223        ad 		NULL, 0, NULL, 0,
   1294  1.223        ad 		CTL_KERN, CTL_EOL);
   1295  1.223        ad 	sysctl_createv(clog, 0, NULL, &node,
   1296  1.223        ad 		CTLFLAG_PERMANENT,
   1297  1.223        ad 		CTLTYPE_NODE, "sched",
   1298  1.223        ad 		SYSCTL_DESCR("Scheduler options"),
   1299  1.223        ad 		NULL, 0, NULL, 0,
   1300  1.223        ad 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1301  1.223        ad 
   1302  1.223        ad 	if (node == NULL)
   1303  1.223        ad 		return;
   1304  1.223        ad 
   1305  1.223        ad 	sysctl_createv(clog, 0, &node, NULL,
   1306  1.223        ad 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1307  1.223        ad 		CTLTYPE_INT, "cacheht_time",
   1308  1.223        ad 		SYSCTL_DESCR("Cache hotness time (in ticks)"),
   1309  1.223        ad 		NULL, 0, &cacheht_time, 0,
   1310  1.223        ad 		CTL_CREATE, CTL_EOL);
   1311  1.223        ad 	sysctl_createv(clog, 0, &node, NULL,
   1312  1.223        ad 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1313  1.223        ad 		CTLTYPE_INT, "balance_period",
   1314  1.223        ad 		SYSCTL_DESCR("Balance period (in ticks)"),
   1315  1.223        ad 		NULL, 0, &balance_period, 0,
   1316  1.223        ad 		CTL_CREATE, CTL_EOL);
   1317  1.223        ad 	sysctl_createv(clog, 0, &node, NULL,
   1318  1.223        ad 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1319  1.223        ad 		CTLTYPE_INT, "min_catch",
   1320  1.223        ad 		SYSCTL_DESCR("Minimal count of threads for catching"),
   1321  1.223        ad 		NULL, 0, &min_catch, 0,
   1322  1.223        ad 		CTL_CREATE, CTL_EOL);
   1323  1.223        ad 	sysctl_createv(clog, 0, &node, NULL,
   1324  1.231        ad 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1325  1.223        ad 		CTLTYPE_INT, "timesoftints",
   1326  1.223        ad 		SYSCTL_DESCR("Track CPU time for soft interrupts"),
   1327  1.223        ad 		NULL, 0, &softint_timing, 0,
   1328  1.223        ad 		CTL_CREATE, CTL_EOL);
   1329  1.231        ad 	sysctl_createv(clog, 0, &node, NULL,
   1330  1.231        ad #ifdef PREEMPTION
   1331  1.231        ad 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1332  1.231        ad #else
   1333  1.231        ad 		CTLFLAG_PERMANENT,
   1334  1.231        ad #endif
   1335  1.231        ad 		CTLTYPE_INT, "kpreempt_pri",
   1336  1.231        ad 		SYSCTL_DESCR("Minimum priority to trigger kernel preemption"),
   1337  1.231        ad 		NULL, 0, &sched_kpreempt_pri, 0,
   1338  1.231        ad 		CTL_CREATE, CTL_EOL);
   1339  1.231        ad 	sysctl_createv(clog, 0, &node, NULL,
   1340  1.231        ad 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1341  1.231        ad 		CTLTYPE_INT, "upreempt_pri",
   1342  1.231        ad 		SYSCTL_DESCR("Minimum priority to trigger user preemption"),
   1343  1.231        ad 		NULL, 0, &sched_upreempt_pri, 0,
   1344  1.231        ad 		CTL_CREATE, CTL_EOL);
   1345  1.223        ad }
   1346  1.223        ad 
   1347  1.223        ad void
   1348  1.223        ad sched_cpuattach(struct cpu_info *ci)
   1349  1.223        ad {
   1350  1.223        ad 	runqueue_t *ci_rq;
   1351  1.223        ad 	void *rq_ptr;
   1352  1.223        ad 	u_int i, size;
   1353  1.223        ad 
   1354  1.223        ad 	if (ci->ci_schedstate.spc_lwplock == NULL) {
   1355  1.223        ad 		ci->ci_schedstate.spc_lwplock =
   1356  1.223        ad 		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1357  1.223        ad 	}
   1358  1.223        ad 	if (ci == lwp0.l_cpu) {
   1359  1.223        ad 		/* Initialize the scheduler structure of the primary LWP */
   1360  1.223        ad 		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
   1361  1.223        ad 	}
   1362  1.223        ad 	if (ci->ci_schedstate.spc_mutex != NULL) {
   1363  1.223        ad 		/* Already initialized. */
   1364  1.223        ad 		return;
   1365  1.223        ad 	}
   1366  1.223        ad 
   1367  1.223        ad 	/* Allocate the run queue */
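                          	/*
                          	 * The allocation is padded by one coherency unit so that the
                          	 * structure can be aligned to a cache line boundary below,
                          	 * keeping each CPU's run queue off shared cache lines.
                          	 */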
   1368  1.223        ad 	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
   1369  1.223        ad 	rq_ptr = kmem_zalloc(size, KM_SLEEP);
   1370  1.223        ad 	if (rq_ptr == NULL) {
   1371  1.223        ad 		panic("sched_cpuattach: could not allocate the runqueue");
   1372  1.223        ad 	}
   1373  1.223        ad 	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));
   1374  1.223        ad 
   1375  1.223        ad 	/* Initialize run queues */
   1376  1.223        ad 	ci->ci_schedstate.spc_mutex =
   1377  1.223        ad 	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
   1378  1.223        ad 	for (i = 0; i < PRI_RT_COUNT; i++)
   1379  1.223        ad 		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
   1380  1.223        ad 	for (i = 0; i < PRI_TS_COUNT; i++)
   1381  1.223        ad 		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
   1382  1.223        ad 
   1383  1.223        ad 	ci->ci_schedstate.spc_sched_info = ci_rq;
   1384  1.223        ad }
   1385  1.223        ad 
   1386  1.223        ad /*
   1387  1.223        ad  * Control of the runqueue.
   1388  1.223        ad  */
   1389  1.223        ad 
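                          /*
                           * sched_getrq:
                           *
                           *	Return the queue head for the given priority: time-sharing
                           *	priorities (<= PRI_HIGHEST_TS) index r_ts_queue, real-time
                           *	priorities index r_rt_queue.
                           */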
   1390  1.223        ad static void *
   1391  1.223        ad sched_getrq(runqueue_t *ci_rq, const pri_t prio)
   1392  1.223        ad {
   1393  1.223        ad 
   1394  1.223        ad 	KASSERT(prio < PRI_COUNT);
   1395  1.223        ad 	return (prio <= PRI_HIGHEST_TS) ?
   1396  1.223        ad 	    &ci_rq->r_ts_queue[prio].q_head :
   1397  1.223        ad 	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
   1398  1.223        ad }
   1399  1.223        ad 
   1400  1.223        ad void
   1401  1.223        ad sched_enqueue(struct lwp *l, bool swtch)
   1402  1.223        ad {
   1403  1.223        ad 	runqueue_t *ci_rq;
   1404  1.223        ad 	struct schedstate_percpu *spc;
   1405  1.223        ad 	TAILQ_HEAD(, lwp) *q_head;
   1406  1.223        ad 	const pri_t eprio = lwp_eprio(l);
   1407  1.223        ad 	struct cpu_info *ci;
   1408  1.231        ad 	int type;
   1409  1.223        ad 
   1410  1.223        ad 	ci = l->l_cpu;
   1411  1.223        ad 	spc = &ci->ci_schedstate;
   1412  1.223        ad 	ci_rq = spc->spc_sched_info;
   1413  1.223        ad 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
   1414  1.223        ad 
   1415  1.223        ad 	/* Update the last run time on switch */
   1416  1.223        ad 	if (__predict_true(swtch == true)) {
    1417  1.223        ad 		l->l_rticksum += (hardclock_ticks - l->l_rticks);
    1418  1.223        ad 		l->l_rticks = hardclock_ticks;
   1419  1.223        ad 	} else if (l->l_rticks == 0)
   1420  1.223        ad 		l->l_rticks = hardclock_ticks;
   1421  1.223        ad 
   1422  1.223        ad 	/* Enqueue the thread */
   1423  1.223        ad 	q_head = sched_getrq(ci_rq, eprio);
   1424  1.223        ad 	if (TAILQ_EMPTY(q_head)) {
   1425  1.223        ad 		u_int i;
   1426  1.223        ad 		uint32_t q;
   1427  1.223        ad 
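                          		/*
                          		 * Priorities are laid out MSB-first within each bitmap word:
                          		 * eprio selects word (eprio >> BITMAP_SHIFT) and bit
                          		 * (BITMAP_MSB >> (eprio & BITMAP_MASK)), so the highest
                          		 * priority present in a word is its least significant set
                          		 * bit.  sched_dequeue() relies on this to recompute the
                          		 * maximum priority with ffs().
                          		 */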
   1428  1.223        ad 		/* Mark bit */
   1429  1.223        ad 		i = eprio >> BITMAP_SHIFT;
   1430  1.223        ad 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1431  1.223        ad 		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
   1432  1.223        ad 		ci_rq->r_bitmap[i] |= q;
   1433  1.223        ad 	}
   1434  1.223        ad 	TAILQ_INSERT_TAIL(q_head, l, l_runq);
   1435  1.223        ad 	ci_rq->r_count++;
   1436  1.224        ad 	if ((l->l_pflag & LP_BOUND) == 0)
   1437  1.223        ad 		ci_rq->r_mcount++;
   1438  1.223        ad 
   1439  1.223        ad 	/*
   1440  1.223        ad 	 * Update the value of highest priority in the runqueue,
   1441  1.223        ad 	 * if priority of this thread is higher.
   1442  1.223        ad 	 */
   1443  1.223        ad 	if (eprio > spc->spc_maxpriority)
   1444  1.223        ad 		spc->spc_maxpriority = eprio;
   1445  1.223        ad 
   1446  1.223        ad 	sched_newts(l);
   1447  1.223        ad 
   1448  1.223        ad 	/*
   1449  1.223        ad 	 * Wake the chosen CPU or cause a preemption if the newly
   1450  1.223        ad 	 * enqueued thread has higher priority.  Don't cause a
   1451  1.223        ad 	 * preemption if the thread is yielding (swtch).
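                          	 *
                          	 * A priority at or above sched_kpreempt_pri requests a kernel
                          	 * preemption, one at or above sched_upreempt_pri requests an
                          	 * immediate reschedule, and anything lower just marks the CPU
                          	 * as needing a reschedule.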
   1452  1.223        ad 	 */
   1453  1.223        ad 	if (!swtch && eprio > spc->spc_curpriority) {
   1454  1.231        ad 		if (eprio >= sched_kpreempt_pri)
   1455  1.231        ad 			type = RESCHED_KPREEMPT;
   1456  1.231        ad 		else if (eprio >= sched_upreempt_pri)
   1457  1.231        ad 			type = RESCHED_IMMED;
   1458  1.231        ad 		else
   1459  1.231        ad 			type = 0;
   1460  1.231        ad 		cpu_need_resched(ci, type);
   1461  1.223        ad 	}
   1462  1.223        ad }
   1463  1.223        ad 
   1464  1.223        ad void
   1465  1.223        ad sched_dequeue(struct lwp *l)
   1466  1.223        ad {
   1467  1.223        ad 	runqueue_t *ci_rq;
   1468  1.223        ad 	TAILQ_HEAD(, lwp) *q_head;
   1469  1.223        ad 	struct schedstate_percpu *spc;
   1470  1.223        ad 	const pri_t eprio = lwp_eprio(l);
   1471  1.223        ad 
    1472  1.223        ad 	spc = &l->l_cpu->ci_schedstate;
   1473  1.223        ad 	ci_rq = spc->spc_sched_info;
   1474  1.223        ad 	KASSERT(lwp_locked(l, spc->spc_mutex));
   1475  1.223        ad 
   1476  1.223        ad 	KASSERT(eprio <= spc->spc_maxpriority);
   1477  1.223        ad 	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
   1478  1.223        ad 	KASSERT(ci_rq->r_count > 0);
   1479  1.223        ad 
   1480  1.223        ad 	ci_rq->r_count--;
   1481  1.224        ad 	if ((l->l_pflag & LP_BOUND) == 0)
   1482  1.223        ad 		ci_rq->r_mcount--;
   1483  1.223        ad 
   1484  1.223        ad 	q_head = sched_getrq(ci_rq, eprio);
   1485  1.223        ad 	TAILQ_REMOVE(q_head, l, l_runq);
   1486  1.223        ad 	if (TAILQ_EMPTY(q_head)) {
   1487  1.223        ad 		u_int i;
   1488  1.223        ad 		uint32_t q;
   1489  1.223        ad 
   1490  1.223        ad 		/* Unmark bit */
   1491  1.223        ad 		i = eprio >> BITMAP_SHIFT;
   1492  1.223        ad 		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
   1493  1.223        ad 		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
   1494  1.223        ad 		ci_rq->r_bitmap[i] &= ~q;
   1495  1.223        ad 
   1496  1.223        ad 		/*
    1497  1.223        ad 		 * Update the value of the highest priority in the runqueue, in
    1498  1.223        ad 		 * case this was the last thread in the highest priority queue.
   1499  1.223        ad 		 */
   1500  1.223        ad 		if (eprio != spc->spc_maxpriority)
   1501  1.223        ad 			return;
   1502  1.223        ad 
   1503  1.223        ad 		do {
   1504  1.223        ad 			if (ci_rq->r_bitmap[i] != 0) {
   1505  1.223        ad 				q = ffs(ci_rq->r_bitmap[i]);
   1506  1.223        ad 				spc->spc_maxpriority =
   1507  1.223        ad 				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
   1508  1.223        ad 				return;
   1509  1.223        ad 			}
   1510  1.223        ad 		} while (i--);
   1511  1.223        ad 
   1512  1.223        ad 		/* If not found - set the lowest value */
   1513  1.223        ad 		spc->spc_maxpriority = 0;
   1514  1.223        ad 	}
   1515  1.223        ad }
   1516  1.223        ad 
   1517  1.223        ad /*
   1518  1.223        ad  * Migration and balancing.
   1519  1.223        ad  */
   1520  1.223        ad 
   1521  1.223        ad #ifdef MULTIPROCESSOR
   1522  1.223        ad 
   1523  1.223        ad /* Estimate if LWP is cache-hot */
   1524  1.223        ad static inline bool
   1525  1.223        ad lwp_cache_hot(const struct lwp *l)
   1526  1.223        ad {
   1527  1.223        ad 
   1528  1.223        ad 	if (l->l_slptime || l->l_rticks == 0)
   1529  1.223        ad 		return false;
   1530  1.223        ad 
   1531  1.223        ad 	return (hardclock_ticks - l->l_rticks <= cacheht_time);
   1532  1.223        ad }
   1533  1.223        ad 
   1534  1.223        ad /* Check if LWP can migrate to the chosen CPU */
   1535  1.223        ad static inline bool
   1536  1.223        ad sched_migratable(const struct lwp *l, struct cpu_info *ci)
   1537  1.223        ad {
   1538  1.223        ad 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
   1539  1.223        ad 
   1540  1.223        ad 	/* CPU is offline */
   1541  1.223        ad 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
   1542  1.223        ad 		return false;
   1543  1.223        ad 
   1544  1.223        ad 	/* Affinity bind */
   1545  1.223        ad 	if (__predict_false(l->l_flag & LW_AFFINITY))
   1546  1.223        ad 		return CPU_ISSET(cpu_index(ci), &l->l_affinity);
   1547  1.223        ad 
   1548  1.223        ad 	/* Processor-set */
   1549  1.223        ad 	return (spc->spc_psid == l->l_psid);
   1550  1.223        ad }
   1551  1.223        ad 
   1552  1.223        ad /*
    1553  1.223        ad  * Estimate whether the LWP should migrate to another CPU.
    1554  1.223        ad  * Return the CPU that the LWP should run on.
   1555  1.223        ad  */
   1556  1.223        ad struct cpu_info *
   1557  1.223        ad sched_takecpu(struct lwp *l)
   1558  1.223        ad {
   1559  1.223        ad 	struct cpu_info *ci, *tci, *first, *next;
   1560  1.223        ad 	struct schedstate_percpu *spc;
   1561  1.223        ad 	runqueue_t *ci_rq, *ici_rq;
   1562  1.223        ad 	pri_t eprio, lpri, pri;
   1563  1.223        ad 
   1564  1.223        ad 	KASSERT(lwp_locked(l, NULL));
   1565  1.223        ad 
   1566  1.223        ad 	ci = l->l_cpu;
   1567  1.223        ad 	spc = &ci->ci_schedstate;
   1568  1.223        ad 	ci_rq = spc->spc_sched_info;
   1569  1.223        ad 
   1570  1.223        ad 	/* If thread is strictly bound, do not estimate other CPUs */
   1571  1.224        ad 	if (l->l_pflag & LP_BOUND)
   1572  1.223        ad 		return ci;
   1573  1.223        ad 
   1574  1.223        ad 	/* CPU of this thread is idling - run there */
   1575  1.223        ad 	if (ci_rq->r_count == 0)
   1576  1.223        ad 		return ci;
   1577  1.223        ad 
   1578  1.223        ad 	eprio = lwp_eprio(l);
   1579  1.223        ad 
   1580  1.223        ad 	/* Stay if thread is cache-hot */
   1581  1.223        ad 	if (__predict_true(l->l_stat != LSIDL) &&
   1582  1.223        ad 	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
   1583  1.223        ad 		return ci;
   1584  1.223        ad 
   1585  1.223        ad 	/* Run on current CPU if priority of thread is higher */
   1586  1.223        ad 	ci = curcpu();
   1587  1.223        ad 	spc = &ci->ci_schedstate;
   1588  1.223        ad 	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
   1589  1.223        ad 		return ci;
   1590  1.223        ad 
   1591  1.223        ad 	/*
   1592  1.223        ad 	 * Look for the CPU with the lowest priority thread.  In case of
    1593  1.223        ad 	 * equal priority, choose the CPU with the fewest threads.
   1594  1.223        ad 	 */
   1595  1.223        ad 	first = l->l_cpu;
   1596  1.223        ad 	ci = first;
   1597  1.223        ad 	tci = first;
   1598  1.223        ad 	lpri = PRI_COUNT;
   1599  1.223        ad 	do {
   1600  1.223        ad 		next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
   1601  1.223        ad 		spc = &ci->ci_schedstate;
   1602  1.223        ad 		ici_rq = spc->spc_sched_info;
   1603  1.223        ad 		pri = max(spc->spc_curpriority, spc->spc_maxpriority);
   1604  1.223        ad 		if (pri > lpri)
   1605  1.223        ad 			continue;
   1606  1.223        ad 
   1607  1.223        ad 		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
   1608  1.223        ad 			continue;
   1609  1.223        ad 
   1610  1.223        ad 		if (!sched_migratable(l, ci))
   1611  1.223        ad 			continue;
   1612  1.223        ad 
   1613  1.223        ad 		lpri = pri;
   1614  1.223        ad 		tci = ci;
   1615  1.223        ad 		ci_rq = ici_rq;
   1616  1.223        ad 	} while (ci = next, ci != first);
   1617  1.223        ad 
   1618  1.223        ad 	return tci;
   1619  1.223        ad }
   1620  1.223        ad 
   1621  1.223        ad /*
    1622  1.223        ad  * Try to catch an LWP from the run queue of another CPU.
   1623  1.223        ad  */
   1624  1.223        ad static struct lwp *
   1625  1.223        ad sched_catchlwp(void)
   1626  1.223        ad {
   1627  1.223        ad 	struct cpu_info *curci = curcpu(), *ci = worker_ci;
   1628  1.223        ad 	struct schedstate_percpu *spc;
   1629  1.223        ad 	TAILQ_HEAD(, lwp) *q_head;
   1630  1.223        ad 	runqueue_t *ci_rq;
   1631  1.223        ad 	struct lwp *l;
   1632  1.223        ad 
   1633  1.223        ad 	if (curci == ci)
   1634  1.223        ad 		return NULL;
   1635  1.223        ad 
   1636  1.223        ad 	/* Lockless check */
   1637  1.223        ad 	spc = &ci->ci_schedstate;
   1638  1.223        ad 	ci_rq = spc->spc_sched_info;
   1639  1.223        ad 	if (ci_rq->r_mcount < min_catch)
   1640  1.223        ad 		return NULL;
   1641  1.223        ad 
   1642  1.223        ad 	/*
   1643  1.223        ad 	 * Double-lock the runqueues.
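                          	 *
                          	 * Locks are taken in ascending CPU address order.  If the
                          	 * remote CPU sorts higher than ours, it is safe to block on
                          	 * its lock; otherwise try-lock it, and on failure drop our own
                          	 * run queue lock, take both in order, and re-check that our
                          	 * queue is still empty before stealing anything.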
   1644  1.223        ad 	 */
   1645  1.223        ad 	if (curci < ci) {
   1646  1.223        ad 		spc_lock(ci);
   1647  1.223        ad 	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
   1648  1.223        ad 		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;
   1649  1.223        ad 
   1650  1.223        ad 		spc_unlock(curci);
   1651  1.223        ad 		spc_lock(ci);
   1652  1.223        ad 		spc_lock(curci);
   1653  1.223        ad 
   1654  1.223        ad 		if (cur_rq->r_count) {
   1655  1.223        ad 			spc_unlock(ci);
   1656  1.223        ad 			return NULL;
   1657  1.223        ad 		}
   1658  1.223        ad 	}
   1659  1.223        ad 
   1660  1.223        ad 	if (ci_rq->r_mcount < min_catch) {
   1661  1.223        ad 		spc_unlock(ci);
   1662  1.223        ad 		return NULL;
   1663  1.223        ad 	}
   1664  1.223        ad 
   1665  1.223        ad 	/* Take the highest priority thread */
   1666  1.223        ad 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1667  1.223        ad 	l = TAILQ_FIRST(q_head);
   1668  1.223        ad 
   1669  1.223        ad 	for (;;) {
    1670  1.223        ad 		/* Check the first and subsequent entries in the queue */
   1671  1.223        ad 		if (l == NULL)
   1672  1.223        ad 			break;
   1673  1.223        ad 		KASSERT(l->l_stat == LSRUN);
   1674  1.223        ad 		KASSERT(l->l_flag & LW_INMEM);
   1675  1.223        ad 
    1676  1.223        ad 		/* Look for threads that are allowed to migrate */
   1677  1.224        ad 		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
   1678  1.223        ad 		    !sched_migratable(l, curci)) {
   1679  1.223        ad 			l = TAILQ_NEXT(l, l_runq);
   1680  1.223        ad 			continue;
   1681  1.223        ad 		}
   1682  1.223        ad 
   1683  1.223        ad 		/* Grab the thread, and move to the local run queue */
   1684  1.223        ad 		sched_dequeue(l);
   1685  1.223        ad 		l->l_cpu = curci;
   1686  1.223        ad 		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
   1687  1.223        ad 		sched_enqueue(l, false);
   1688  1.223        ad 		return l;
   1689  1.223        ad 	}
   1690  1.223        ad 	spc_unlock(ci);
   1691  1.223        ad 
   1692  1.223        ad 	return l;
   1693  1.223        ad }
   1694  1.223        ad 
   1695  1.223        ad /*
   1696  1.223        ad  * Periodical calculations for balancing.
   1697  1.223        ad  */
   1698  1.223        ad static void
   1699  1.223        ad sched_balance(void *nocallout)
   1700  1.223        ad {
   1701  1.223        ad 	struct cpu_info *ci, *hci;
   1702  1.223        ad 	runqueue_t *ci_rq;
   1703  1.223        ad 	CPU_INFO_ITERATOR cii;
   1704  1.223        ad 	u_int highest;
   1705  1.223        ad 
   1706  1.223        ad 	hci = curcpu();
   1707  1.223        ad 	highest = 0;
   1708  1.223        ad 
    1709  1.223        ad 	/* Update the counts without taking locks */
   1710  1.223        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
   1711  1.223        ad 		ci_rq = ci->ci_schedstate.spc_sched_info;
   1712  1.223        ad 
   1713  1.223        ad 		/* Average count of the threads */
   1714  1.223        ad 		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;
   1715  1.223        ad 
   1716  1.223        ad 		/* Look for CPU with the highest average */
   1717  1.223        ad 		if (ci_rq->r_avgcount > highest) {
   1718  1.223        ad 			hci = ci;
   1719  1.223        ad 			highest = ci_rq->r_avgcount;
   1720  1.223        ad 		}
   1721  1.223        ad 	}
   1722  1.223        ad 
   1723  1.223        ad 	/* Update the worker */
   1724  1.223        ad 	worker_ci = hci;
   1725  1.223        ad 
   1726  1.223        ad 	if (nocallout == NULL)
   1727  1.223        ad 		callout_schedule(&balance_ch, balance_period);
   1728  1.223        ad }
   1729  1.223        ad 
   1730  1.223        ad #else
   1731  1.223        ad 
   1732  1.223        ad struct cpu_info *
   1733  1.223        ad sched_takecpu(struct lwp *l)
   1734  1.223        ad {
   1735  1.223        ad 
   1736  1.223        ad 	return l->l_cpu;
   1737  1.223        ad }
   1738  1.223        ad 
   1739  1.223        ad #endif	/* MULTIPROCESSOR */
   1740  1.223        ad 
   1741  1.223        ad /*
   1742  1.223        ad  * Scheduler mill.
   1743  1.223        ad  */
   1744  1.223        ad struct lwp *
   1745  1.223        ad sched_nextlwp(void)
   1746  1.223        ad {
   1747  1.223        ad 	struct cpu_info *ci = curcpu();
   1748  1.223        ad 	struct schedstate_percpu *spc;
   1749  1.223        ad 	TAILQ_HEAD(, lwp) *q_head;
   1750  1.223        ad 	runqueue_t *ci_rq;
   1751  1.223        ad 	struct lwp *l;
   1752  1.223        ad 
   1753  1.223        ad 	spc = &ci->ci_schedstate;
   1754  1.223        ad 	ci_rq = spc->spc_sched_info;
   1755  1.223        ad 
   1756  1.223        ad #ifdef MULTIPROCESSOR
    1757  1.223        ad 	/* If the run queue is empty, try to catch a thread from another CPU */
   1758  1.223        ad 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
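                          		/*
                          		 * An offline CPU may only run LWPs bound to it;
                          		 * r_count - r_mcount is the number of bound LWPs queued.
                          		 */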
   1759  1.223        ad 		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
   1760  1.223        ad 			return NULL;
   1761  1.223        ad 	} else if (ci_rq->r_count == 0) {
   1762  1.223        ad 		/* Reset the counter, and call the balancer */
   1763  1.223        ad 		ci_rq->r_avgcount = 0;
   1764  1.223        ad 		sched_balance(ci);
   1765  1.223        ad 
   1766  1.223        ad 		/* The re-locking will be done inside */
   1767  1.223        ad 		return sched_catchlwp();
   1768  1.223        ad 	}
   1769  1.223        ad #else
   1770  1.223        ad 	if (ci_rq->r_count == 0)
   1771  1.223        ad 		return NULL;
   1772  1.223        ad #endif
   1773  1.223        ad 
   1774  1.223        ad 	/* Take the highest priority thread */
   1775  1.223        ad 	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
   1776  1.223        ad 	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
   1777  1.223        ad 	l = TAILQ_FIRST(q_head);
   1778  1.223        ad 	KASSERT(l != NULL);
   1779  1.223        ad 
   1780  1.223        ad 	sched_oncpu(l);
   1781  1.223        ad 	l->l_rticks = hardclock_ticks;
   1782  1.223        ad 
   1783  1.223        ad 	return l;
   1784  1.223        ad }
   1785  1.223        ad 
   1786  1.223        ad bool
   1787  1.223        ad sched_curcpu_runnable_p(void)
   1788  1.223        ad {
   1789  1.231        ad 	const struct cpu_info *ci;
   1790  1.231        ad 	const runqueue_t *ci_rq;
   1791  1.231        ad 	bool rv;
   1792  1.231        ad 
   1793  1.231        ad 	kpreempt_disable();
   1794  1.231        ad 	ci = curcpu();
   1795  1.231        ad 	ci_rq = ci->ci_schedstate.spc_sched_info;
   1796  1.223        ad 
   1797  1.223        ad #ifndef __HAVE_FAST_SOFTINTS
   1798  1.231        ad 	if (ci->ci_data.cpu_softints) {
   1799  1.231        ad 		kpreempt_enable();
   1800  1.223        ad 		return true;
   1801  1.231        ad 	}
   1802  1.223        ad #endif
   1803  1.223        ad 
   1804  1.223        ad 	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
   1805  1.231        ad 		rv = (ci_rq->r_count - ci_rq->r_mcount);
   1806  1.231        ad 	else
   1807  1.231        ad 		rv = ci_rq->r_count != 0;
   1808  1.231        ad 	kpreempt_enable();
   1809  1.223        ad 
   1810  1.231        ad 	return rv;
   1811  1.223        ad }
   1812  1.223        ad 
   1813  1.223        ad /*
   1814  1.223        ad  * Debugging.
   1815  1.223        ad  */
   1816  1.223        ad 
   1817  1.223        ad #ifdef DDB
   1818  1.223        ad 
   1819  1.223        ad void
   1820  1.227      yamt sched_print_runqueue(void (*pr)(const char *, ...)
   1821  1.227      yamt     __attribute__((__format__(__printf__,1,2))))
   1822  1.223        ad {
   1823  1.223        ad 	runqueue_t *ci_rq;
   1824  1.223        ad 	struct schedstate_percpu *spc;
   1825  1.223        ad 	struct lwp *l;
   1826  1.223        ad 	struct proc *p;
   1827  1.223        ad 	int i;
   1828  1.223        ad 	struct cpu_info *ci;
   1829  1.223        ad 	CPU_INFO_ITERATOR cii;
   1830  1.223        ad 
   1831  1.223        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
   1832  1.223        ad 		spc = &ci->ci_schedstate;
   1833  1.223        ad 		ci_rq = spc->spc_sched_info;
   1834  1.223        ad 
   1835  1.223        ad 		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
   1836  1.223        ad 		(*pr)(" pid.lid = %d.%d, threads count = %u, "
   1837  1.223        ad 		    "avgcount = %u, highest pri = %d\n",
   1838  1.225    dogcow #ifdef MULTIPROCESSOR
   1839  1.223        ad 		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
   1840  1.225    dogcow #else
   1841  1.225    dogcow 		    curlwp->l_proc->p_pid, curlwp->l_lid,
   1842  1.225    dogcow #endif
   1843  1.223        ad 		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority);
   1844  1.223        ad 		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
   1845  1.223        ad 		do {
   1846  1.223        ad 			uint32_t q;
   1847  1.223        ad 			q = ci_rq->r_bitmap[i];
   1848  1.223        ad 			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
   1849  1.223        ad 		} while (i--);
   1850  1.223        ad 	}
   1851  1.223        ad 
   1852  1.226      yamt 	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %s\n",
   1853  1.223        ad 	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "LRTIME");
   1854  1.223        ad 
   1855  1.223        ad 	PROCLIST_FOREACH(p, &allproc) {
   1856  1.223        ad 		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
   1857  1.223        ad 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1858  1.223        ad 			ci = l->l_cpu;
   1859  1.226      yamt 			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %u\n",
   1860  1.223        ad 			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
   1861  1.223        ad 			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
   1862  1.223        ad 			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
   1863  1.223        ad 			    l, ci->ci_index,
   1864  1.223        ad 			    (u_int)(hardclock_ticks - l->l_rticks));
   1865  1.223        ad 		}
   1866  1.223        ad 	}
   1867  1.223        ad }
   1868  1.223        ad 
   1869  1.223        ad #endif
   1870