sched_m2.c revision 1.20.6.1
/*	$NetBSD: sched_m2.c,v 1.20.6.1 2008/04/03 12:43:03 mjf Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.20.6.1 2008/04/03 12:43:03 mjf Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pset.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

const int schedppq = 1;

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
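
/*
 * Illustrative example (added, not part of the original code): with the
 * MSB-first encoding used throughout this file, a priority p maps to
 * word p >> BITMAP_SHIFT and to bit BITMAP_MSB >> (p & BITMAP_MASK).
 * E.g. for p = 37: word 37 >> 5 = 1, bit 0x80000000U >> 5 = 0x04000000,
 * i.e. the sixth most significant bit of r_bitmap[1].
 */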

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR

static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int		balance_period;	/* Balance period */
static struct callout	balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Bitmap of non-empty queues */
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01

/* Pool of the scheduler-specific structures for threads */
static pool_cache_t	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms  */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms  */
	balance_period = mstohz(300);		/* ~300ms */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	sil_pool = pool_cache_init(sizeof(sched_info_lwp_t), coherency_unit,
	    0, 0, "lwpsd", NULL, IPL_NONE, NULL, NULL, NULL);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}
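
/*
 * Note (added for illustration): mstohz() converts milliseconds to clock
 * ticks, so assuming the common hz = 100 the defaults above become
 * min_ts = 5, max_ts = 15 and rt_ts = 10 ticks.  The hz < 100 check above
 * exists because a coarser clock cannot express a ~50 ms time-slice.
 */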

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs), capped at 4 */
	min_catch = min(ilog2(ncpu), 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	if (ci == lwp0.l_cpu) {
		/* Initialize the scheduler structure of the primary LWP */
		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
	}

	if (ci->ci_schedstate.spc_mutex != NULL) {
		/* Already initialized. */
		return;
	}

	/* Allocate the run queue */
	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
	rq_ptr = kmem_zalloc(size, KM_SLEEP);
	if (rq_ptr == NULL) {
		panic("sched_cpuattach: could not allocate the runqueue");
	}
	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));

	/* Initialize run queues */
	KASSERT(sizeof(kmutex_t) <= CACHE_LINE_SIZE);
	ci->ci_schedstate.spc_mutex = kmem_alloc(CACHE_LINE_SIZE, KM_SLEEP);
	mutex_init(ci->ci_schedstate.spc_mutex, MUTEX_DEFAULT, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = 0;

	ci->ci_schedstate.spc_sched_info = ci_rq;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
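
/*
 * Worked example (added): the time-slice shrinks linearly as the
 * priority grows.  Assuming hz = 100 (min_ts = 5, max_ts = 15 ticks)
 * and NPRI_USER = 64 (so PRI_HIGHEST_TS = 63, PRI_HTS_RANGE = 6):
 *
 *	ts_map[0]  = 15 ticks (~150 ms, lowest user priority)
 *	ts_map[63] = 5 ticks  (~50 ms, PRI_HIGHEST_TS)
 *
 * high_pri[] compresses every time-sharing priority into the top
 * tenth of the range, e.g. high_pri[0] = (63 - 6) + 0 = 57.
 */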

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	KASSERT(l2->l_sched_info == NULL);
	l2->l_sched_info = pool_cache_get(sil_pool, PR_WAITOK);
	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_cache_put(sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{

	/* TODO: implement as SCHED_IA */
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}
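
/*
 * Illustrative example (added): assuming NPRI_USER = 64, so that
 * PRI_HIGHEST_TS = 63, priority 10 selects r_ts_queue[10], while the
 * real-time priority 70 selects r_rt_queue[70 - 63 - 1], i.e.
 * r_rt_queue[6].
 */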

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true)) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else if (sil->sl_lrtime == 0)
		sil->sl_lrtime = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of the highest priority in the runqueue,
	 * if the priority of this thread is higher.
	 */
	if (eprio > ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	KASSERT(eprio <= ci_rq->r_highest_pri);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of the highest priority in the runqueue,
		 * in case this was the last thread in the queue of the
		 * highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If not found - set the lowest value */
		ci_rq->r_highest_pri = 0;
	}
}
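
/*
 * Note (added for illustration): since priorities are stored MSB-first,
 * ffs() on a bitmap word finds its least significant set bit, which
 * corresponds to the highest priority still present in that word.
 * E.g. if r_bitmap[1] == 0x00000005, ffs() returns 1 and the scan above
 * yields (1 << BITMAP_SHIFT) + (32 - 1) = 63 as the new r_highest_pri.
 */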

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase the priority and run with a lower
	 * time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS &&
	    (sil->sl_flags & SL_BATCH) == 0) {
		KASSERT(l->l_class == SCHED_OTHER);
		l->l_priority++;
	}
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	const u_int slptime = hardclock_ticks - sil->sl_slept;

	/* Update sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ? slptime : hz;

	/* If the thread slept for a second or more - set a high priority */
	if (l->l_slptime > 1 || slptime >= hz)
		l->l_priority = high_pri[l->l_priority];

	/* Also, consider looking for a better CPU to wake up on */
	l->l_cpu = sched_takecpu(l);
}

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	pri_t prio;
	bool batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as more CPU-bound if its sum of run time exceeds
	 * its sum of sleep time.  Also check whether the thread has just
	 * become CPU-bound for the first time.
	 */
	batch = (sil->sl_rtsum > sil->sl_slpsum);
	if (batch) {
		if ((sil->sl_flags & SL_BATCH) == 0)
			batch = false;
		sil->sl_flags |= SL_BATCH;
	} else
		sil->sl_flags &= ~SL_BATCH;

	/*
	 * If the thread is CPU-bound and never sleeps, it would occupy the
	 * CPU.  In such a case reset the value of the last sleep, and check
	 * it later - if it is still zero, perform the migration and unmark
	 * the batch flag.
	 */
	if (batch && (l->l_slptime + sil->sl_slpsum) == 0) {
		if (l->l_stat != LSONPROC && sil->sl_slept == 0) {
			struct cpu_info *ci = sched_takecpu(l);

			if (l->l_cpu != ci)
				l->l_target_cpu = ci;
			sil->sl_flags &= ~SL_BATCH;
		} else {
			sil->sl_slept = 0;
		}
	}

	/* Reset the time sums */
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/*
	 * Estimate only the threads on the time-sharing queue; exclude
	 * the highest priority for performance purposes.
	 */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;
	KASSERT(l->l_class == SCHED_OTHER);

	/* If it is CPU-bound not for the first time - decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread has not run for a second or more - set a high priority */
	if (l->l_stat == LSRUN) {
		if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if the priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}
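
/*
 * Illustrative summary (added): the hook above implements a simple
 * interactivity heuristic.  For example, a thread that ran for 40 ticks
 * and slept for 10 between two sched_pstats() calls gets SL_BATCH set;
 * if it was already batch, its priority drops by one step, while a
 * thread that sat in the run queue for hz ticks or more is boosted via
 * high_pri[].
 */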

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Estimate if LWP is cache-hot */
static inline bool
lwp_cache_hot(const struct lwp *l)
{
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (l->l_slptime || sil->sl_lrtime == 0)
		return false;

	return (hardclock_ticks - sil->sl_lrtime <= cacheht_time);
}
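
/*
 * Example (added): assuming hz = 1000, cacheht_time defaults to
 * mstohz(5) = 5 ticks, so an LWP that last ran within the previous 5 ms
 * (and has not slept since) is considered cache-hot and is kept on its
 * current CPU by sched_takecpu() and skipped by sched_catchlwp().
 */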

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, struct cpu_info *ci)
{
	const struct schedstate_percpu *spc = &ci->ci_schedstate;

	/* CPU is offline */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		return false;

	/* Affinity bind */
	if (__predict_false(l->l_flag & LW_AFFINITY))
		return CPU_ISSET(cpu_index(ci), &l->l_affinity);

	/* Processor-set */
	return (spc->spc_psid == l->l_psid);
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Return the chosen CPU.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* If the thread is strictly bound, do not estimate other CPUs */
	if (l->l_flag & LW_BOUND)
		return ci;

	/* The CPU of this thread is idling - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);

	/* Stay if the thread is cache-hot */
	if (__predict_true(l->l_stat != LSIDL) &&
	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
		return ci;

	/* Run on the current CPU if the priority of the thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU with the lowest priority thread.  In the case
	 * of equal priority, prefer the one with the lower count of threads.
	 */
	tci = l->l_cpu;
	lpri = PRI_COUNT;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri > lpri)
			continue;

		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (!sched_migratable(l, ci))
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}
	return tci;
}

/*
 * Tries to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_mcount < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.
	 */
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_mcount < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		/* Check the first and next result from the queue */
		if (l == NULL)
			break;
		KASSERT(l->l_stat == LSRUN);
		KASSERT(l->l_flag & LW_INMEM);

		/* Look for threads that are allowed to migrate */
		if ((l->l_flag & LW_BOUND) || lwp_cache_hot(l) ||
		    !sched_migratable(l, curci)) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}

		/* Grab the thread, and move it to the local run queue */
		sched_dequeue(l);
		l->l_cpu = curci;
		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
		sched_enqueue(l, false);
		return l;
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodical calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the lockless counting */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Average count of the threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}
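
/*
 * Note (added): the r_avgcount update above is an exponential moving
 * average with a weight of 1/2 per balance period.  For example, a CPU
 * holding a constant 8 migratable threads converges as 0, 4, 6, 7, 7,
 * ..., so short spikes decay quickly while sustained load dominates the
 * choice of worker_ci.
 */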

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch some thread from another CPU */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints)
		return true;
#endif

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return (ci_rq->r_count - ci_rq->r_mcount);

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, so no locking is needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum, and continue running,
		 * if the thread runs with the FIFO real-time policy.
		 */
		KASSERT(l->l_priority > PRI_HIGHEST_TS);
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease its
		 * priority, and run with a higher time-quantum.
		 */
		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher priority threads, or threads in the same
	 * queue, mark that this thread should yield; otherwise, continue
	 * running.
	 */
	if (lwp_eprio(l) <= ci_rq->r_highest_pri || l->l_target_cpu) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rt_ts);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3u "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_index,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif	/* DDB */