/*	$NetBSD: sched_m2.c,v 1.3.2.6 2007/11/01 21:58:22 ad Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.3.2.6 2007/11/01 21:58:22 ad Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(PRI_KERNEL - 1)
#define	PRI_DEFAULT	(NPRI_USER >> 1)

const int schedppq = 1;

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
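
/*
 * The bitmap is MSB-first: priority p maps to word (p >> BITMAP_SHIFT)
 * and to bit (BITMAP_MSB >> (p & BITMAP_MASK)) within it, so a higher
 * priority within a word occupies a lower bit position.  Hence ffs()
 * on a word finds the highest marked priority in it.  For example,
 * p = 37 lands in word 1 as bit (BITMAP_MSB >> 5); ffs() returns 27,
 * and (1 << BITMAP_SHIFT) + (BITMAP_BITS - 27) recovers 37.
 */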

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int		balance_period;	/* Balance period */
static struct callout	balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

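/*
 * A thread is cache-hot if it last ran on its CPU within cacheht_time
 * ticks.  An sl_lrtime of zero means the thread has not run yet, or
 * was enqueued without a context switch, and is never cache-hot.
 */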
#define CACHE_HOT(sil)		(sil->sl_lrtime && \
    (hardclock_ticks - sil->sl_lrtime < cacheht_time))

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	kmutex_t	r_rq_mutex;
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01

/* Pool of the scheduler-specific structures for threads */
static struct pool	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms  */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms  */
	balance_period = mstohz(300);		/* ~300ms */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	pool_init(&sil_pool, sizeof(sched_info_lwp_t), 0, 0, 0,
	    "lwpsd", &pool_allocator_nointr, IPL_NONE);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	/* Initialize the scheduler structure of the primary LWP */
	lwp0.l_mutex = &ci->ci_schedstate.spc_lwplock;
	sched_lwp_fork(&lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs), capped at 4 */
	min_catch = min(ffs(ncpu) - 1, 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	/*
	 * Allocate the run queue.
	 * XXX: Estimate the cache behaviour more carefully..
	 */
	size = roundup(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_NOSLEEP);
	if (rq_ptr == NULL) {
		panic("scheduler: could not allocate the runqueue");
	}
	/* XXX: Save the original (unaligned) pointer for a future free.. */
	ci_rq = (void *)(roundup((intptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	mutex_init(&ci_rq->r_rq_mutex, MUTEX_SPIN, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = 0;

	ci->ci_schedstate.spc_sched_info = ci_rq;
	ci->ci_schedstate.spc_mutex = &ci_rq->r_rq_mutex;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
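
/*
 * The time-sharing map interpolates linearly between the bounds: with
 * the default ranges (min_ts ~50ms, max_ts ~150ms), priority 0 gets
 * ~150ms and the top of the time-sharing range gets ~50ms, i.e. the
 * higher the priority, the shorter the slice.  Similarly, high_pri[]
 * compresses the whole time-sharing range into its top tenth
 * (PRI_HTS_RANGE), which is used when boosting threads after sleeps.
 */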

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l)
{

	KASSERT(l->l_sched_info == NULL);
	l->l_sched_info = pool_get(&sil_pool, PR_WAITOK);
	memset(l->l_sched_info, 0, sizeof(sched_info_lwp_t));
	if (l->l_priority <= PRI_HIGHEST_TS) /* XXX: For now only.. */
		l->l_priority = PRI_DEFAULT;
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_put(&sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{
	int nprio;
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	p->p_nice = prio;
	nprio = max(min(PRI_DEFAULT + p->p_nice, PRI_HIGHEST_TS), 0);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		lwp_changepri(l, nprio);
		lwp_unlock(l);
	}
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

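	/*
	 * Priorities up to PRI_HIGHEST_TS index the time-sharing queues
	 * directly; anything above maps into the real-time queues, with
	 * priority (PRI_HIGHEST_TS + 1) landing in r_rt_queue[0].
	 */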
	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (swtch == true) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else
		sil->sl_lrtime = 0;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of the highest priority in the runqueue,
	 * if the priority of this thread is higher.
	 */
	if (eprio > ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
	KASSERT(eprio <= ci_rq->r_highest_pri);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of the highest priority in the runqueue,
		 * in case this was the last thread in the queue of the
		 * highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

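		/*
		 * Scan the bitmap words from the current one downwards;
		 * ffs() on the MSB-first encoding yields the highest
		 * marked priority within a word (see the bitmap comment
		 * near the top of this file).
		 */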
		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If not found - set the lowest value */
		ci_rq->r_highest_pri = 0;
	}
}

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase the priority; it will then run with a
	 * lower time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS && (sil->sl_flags & SL_BATCH) == 0) {
		KASSERT(l->l_policy == SCHED_OTHER);
		l->l_priority++;
	}
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/*
	 * Update the sleep time sum: charge the exact tick delta if the
	 * thread slept for less than a second (l_slptime not yet
	 * incremented), otherwise cap the charge at one second.
	 */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread slept for a second or more - set a high priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_priority = high_pri[l->l_priority];

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	bool batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound ("batch") if its run-time sum
	 * exceeds its sleep-time sum.  The priority is decayed below
	 * only if the thread was already marked in the previous period.
	 */
	batch = (sil->sl_rtsum > sil->sl_slpsum);
	if (batch) {
		if ((sil->sl_flags & SL_BATCH) == 0)
			batch = false;
		sil->sl_flags |= SL_BATCH;
	} else
		sil->sl_flags &= ~SL_BATCH;

	/* Reset the time sums */
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/* Estimate threads on the time-sharing queue only */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;

	/* If it has been CPU-bound for more than one period - decrease the priority */
	if (batch && l->l_priority != 0)
		l->l_priority--;

	/* If the thread has not run for a second or more - set a high priority */
	if (l->l_stat == LSRUN && sil->sl_lrtime &&
	    (hardclock_ticks - sil->sl_lrtime >= hz))
		lwp_changepri(l, high_pri[l->l_priority]);
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, const struct cpu_info *ci)
{

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return false;

	if ((l->l_flag & LW_BOUND) == 0)
		return true;
#if 0
	return cpu_in_pset(ci, l->l_psid);
#else
	return false;
#endif
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Return the chosen CPU (the current one if no migration is needed).
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci = NULL;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* If the CPU of this thread is idling - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);
	sil = l->l_sched_info;

	/* Stay if the thread is cache-hot */
	if (l->l_stat == LSSLEEP && l->l_slptime <= 1 &&
	    CACHE_HOT(sil) && eprio >= spc->spc_curpriority)
		return ci;

	/* Run on the current CPU if the priority of the thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU with the lowest priority thread.  In case of
	 * equal priority, prefer the CPU with the lower count of threads.
	 */
	lpri = PRI_COUNT;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri > lpri)
			continue;

		if (pri == lpri && tci && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (sched_migratable(l, ci) == false)
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}

	KASSERT(tci != NULL);
	return tci;
}

/*
 * Tries to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.  To avoid a deadlock, take the
	 * locks in a fixed (address) order: if the remote CPU comes
	 * first, drop our lock and re-take both in order.
	 */
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		sched_info_lwp_t *sil;

		/* Check the first and next results from the queue */
		if (l == NULL)
			break;

		/* Look only for threads that are allowed to migrate */
		sil = l->l_sched_info;
		if ((l->l_flag & LW_SYSTEM) || CACHE_HOT(sil) ||
		    sched_migratable(l, curci) == false) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if the chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Make the lockless counting */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Average count of the threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}
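
/*
 * The r_avgcount update above is an exponentially weighted moving
 * average with a weight of 1/2 per balance period: for example, a
 * queue whose migratable count goes 4, 4, 0, 0 averages 2, 3, 1, 0.
 * This damps short spikes when picking the busiest CPU.
 */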

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (spc->spc_flags & SPCF_OFFLINE) {
		if (ci_rq->r_mcount == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return ci_rq->r_mcount;

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus locking is not needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_policy) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum, and continue running,
		 * if the thread runs on the FIFO real-time policy.
		 */
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease
		 * the priority, and run with a higher time-quantum.
		 */
		if (l->l_priority > PRI_HIGHEST_TS)
			break;
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}
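
	/*
	 * Note that any other policy (e.g. SCHED_RR) matches no case
	 * above and falls through to the yield check below once its
	 * time-quantum has expired.
	 */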

	/*
	 * If there are higher priority threads, or threads in the same
	 * queue, mark that the thread should yield; otherwise, continue
	 * running.
	 */
	if (lwp_eprio(l) <= ci_rq->r_highest_pri) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* The value is in milliseconds - compare with max_ts converted */
	if (newsize < 1 || newsize > hz || newsize >= hztoms(max_ts))
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = mstohz(newsize);
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* The value is in milliseconds - compare with min_ts converted */
	if (newsize < 10 || newsize > hz || newsize <= hztoms(min_ts))
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = mstohz(newsize);
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}
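
/*
 * Example (from userland, assuming this scheduler is compiled in):
 *
 *	$ sysctl kern.sched.name
 *	kern.sched.name = M2
 *	$ sysctl -w kern.sched.maxts=100
 *
 * The handlers above lock every CPU and re-run sched_precalcts(), so
 * a change takes effect for all priorities at once.
 */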

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif /* defined(DDB) */