/*	$NetBSD: sched_m2.c,v 1.9.2.4 2008/02/18 21:06:46 mjf Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.9.2.4 2008/02/18 21:06:46 mjf Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pset.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

const int schedppq = 1;

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
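
/*
 * The run queue bitmap is kept MSB-first: a priority 'p' maps to word
 * p >> BITMAP_SHIFT and to bit BITMAP_MSB >> (p & BITMAP_MASK) within
 * that word.  For example, p = 40 lands in word 1 (40 >> 5) with bit
 * 0x80000000U >> (40 & 31) = 0x00800000 set.
 */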

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR

static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int		balance_period;	/* Balance period */
static struct callout	balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01

/* Pool of the scheduler-specific structures for threads */
static pool_cache_t	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms  */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms  */
	balance_period = mstohz(300);		/* ~300ms */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	sil_pool = pool_cache_init(sizeof(sched_info_lwp_t), CACHE_LINE_SIZE,
	    0, 0, "lwpsd", NULL, IPL_NONE, NULL, NULL, NULL);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs) */
	min_catch = min(ilog2(ncpu), 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	if (ci == lwp0.l_cpu) {
		/* Initialize the scheduler structure of the primary LWP */
		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
	}

	if (ci->ci_schedstate.spc_mutex != NULL) {
		/* Already initialized. */
		return;
	}

	/* Allocate the run queue */
	size = roundup2(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_SLEEP);
	if (rq_ptr == NULL) {
		panic("sched_cpuattach: could not allocate the runqueue");
	}
	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	KASSERT(sizeof(kmutex_t) <= CACHE_LINE_SIZE);
	ci->ci_schedstate.spc_mutex = kmem_alloc(CACHE_LINE_SIZE, KM_SLEEP);
	mutex_init(ci->ci_schedstate.spc_mutex, MUTEX_DEFAULT, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = 0;

	ci->ci_schedstate.spc_sched_info = ci_rq;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
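
/*
 * A worked example, assuming the usual NPRI_USER = 64 (so that
 * PRI_HIGHEST_TS = 63 and PRI_HTS_RANGE = 6): ts_map[] scales linearly
 * from ts_map[0] = max_ts down to ts_map[63] = min_ts, i.e. higher
 * time-sharing priorities run with shorter time-slices.  The high_pri[]
 * boost targets span the top band 57..63, so a long-sleeping thread
 * wakes up near the top of the time-sharing range, proportionally to
 * its old priority.
 */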

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	KASSERT(l2->l_sched_info == NULL);
	l2->l_sched_info = pool_cache_get(sil_pool, PR_WAITOK);
	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_cache_put(sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{

	/* TODO: implement as SCHED_IA */
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}
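
/*
 * For illustration, with PRI_HIGHEST_TS = 63 (the usual MAXPRI_USER):
 * priority 10 selects r_ts_queue[10], while priority 70 selects
 * r_rt_queue[70 - 63 - 1] = r_rt_queue[6].
 */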

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true)) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else if (sil->sl_lrtime == 0)
		sil->sl_lrtime = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of highest priority in the runqueue,
	 * if priority of this thread is higher.
	 */
	if (eprio > ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	KASSERT(eprio <= ci_rq->r_highest_pri);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of the highest priority in the runqueue,
		 * in case this was the last thread in the queue of the
		 * highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If not found - set the lowest value */
		ci_rq->r_highest_pri = 0;
	}
}
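
/*
 * A note on the ffs() scan above: because the bitmap is MSB-first, the
 * lowest set bit of a word corresponds to the highest priority stored
 * in that word, so scanning the words downward and computing
 * (i << BITMAP_SHIFT) + (BITMAP_BITS - ffs(word)) recovers the new
 * highest priority.  E.g. if word 1 has only bit 0x00800000 set
 * (ffs = 24), the result is 32 + (32 - 24) = 40.
 */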

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase the priority and run with a lower
	 * time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS &&
	    (sil->sl_flags & SL_BATCH) == 0) {
		KASSERT(l->l_class == SCHED_OTHER);
		l->l_priority++;
	}
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread slept for a second or more, set a high priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_priority = high_pri[l->l_priority];

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}
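
/*
 * Continuing the earlier example (assuming NPRI_USER = 64): a thread at
 * priority 20 that slept for a second or more wakes up at
 * high_pri[20] = 57 + (20 * 6) / 63 = 58, landing in the interactive
 * band near the top of the time-sharing range.
 */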

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	pri_t prio;
	bool batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound if its run time sum exceeds its
	 * sleep time sum.  Also note whether this is the first time it
	 * was found CPU-bound.
	 */
	batch = (sil->sl_rtsum > sil->sl_slpsum);
	if (batch) {
		if ((sil->sl_flags & SL_BATCH) == 0)
			batch = false;
		sil->sl_flags |= SL_BATCH;
	} else
		sil->sl_flags &= ~SL_BATCH;

	/* Reset the time sums */
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/* Estimate threads on the time-sharing queue only */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;
	KASSERT(l->l_class == SCHED_OTHER);

	/* If it is CPU-bound not for the first time, decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread has not run for a second or more, set a high priority */
	if (l->l_stat == LSRUN) {
		if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if the priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}
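
/*
 * In effect, the SL_BATCH handling above gives one interval of grace:
 * the first interval in which run time exceeds sleep time only sets the
 * flag, while the priority penalty (prio--) is applied starting from
 * the second consecutive CPU-bound interval.
 */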

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Estimate if LWP is cache-hot */
static inline bool
lwp_cache_hot(const struct lwp *l)
{
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (l->l_slptime || sil->sl_lrtime == 0)
		return false;

	return (hardclock_ticks - sil->sl_lrtime < cacheht_time);
}

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, struct cpu_info *ci)
{
	const struct schedstate_percpu *spc = &ci->ci_schedstate;

	/* CPU is offline */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		return false;

	/* Affinity bind */
	if (__predict_false(l->l_flag & LW_AFFINITY))
		return CPU_ISSET(cpu_index(ci), &l->l_affinity);

	/* Processor-set */
	return (spc->spc_psid == l->l_psid);
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Return the CPU chosen to run it.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* If thread is strictly bound, do not estimate other CPUs */
	if (l->l_flag & LW_BOUND)
		return ci;

	/* CPU of this thread is idling - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);

	/* Stay if thread is cache-hot */
	if (__predict_true(l->l_stat != LSIDL) &&
	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
		return ci;

	/* Run on current CPU if priority of thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU with the lowest priority thread.  If the
	 * priorities are equal, prefer the CPU with the lower count
	 * of threads.
	 */
	tci = l->l_cpu;
	lpri = PRI_COUNT;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri > lpri)
			continue;

		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (!sched_migratable(l, ci))
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}
	return tci;
}
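
/*
 * To summarize the decision order above: stay put when bound, when the
 * home CPU is idle, or when still cache-hot at a competitive priority;
 * wake on the current CPU when the thread has strictly higher priority;
 * otherwise pick the migratable CPU whose "busyness" (the maximum of
 * its running and queued priorities) is lowest, breaking ties by the
 * smaller run queue.
 */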

/*
 * Try to catch an LWP from the run queue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.  Locks are taken in ascending
	 * cpu_info address order to avoid a deadlock: if we hold the
	 * higher-ordered lock and cannot try-lock the remote one, drop
	 * ours, take the remote lock, and then re-take our own.
	 */
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		/* Check the first and next result from the queue */
		if (l == NULL)
			break;

		/* Look for a thread that is allowed to migrate */
		if ((l->l_flag & LW_SYSTEM) || lwp_cache_hot(l) ||
		    !sched_migratable(l, curci)) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if the chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Compute the averages locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Average count of the threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}
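
/*
 * The r_avgcount update above is an exponential moving average with a
 * decay factor of 1/2 per balance period:
 *
 *	avg' = (avg + mcount) / 2
 *
 * For example, a queue holding a steady 4 migratable threads converges
 * as 0, 2, 3, 3, ... (integer division), so short spikes are damped
 * while sustained load is tracked within a few periods.
 */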

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints)
		return true;
#endif

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return (ci_rq->r_count - ci_rq->r_mcount);

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus no locking is needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum, and continue running,
		 * if the thread runs on the FIFO real-time policy.
		 */
		KASSERT(l->l_priority > PRI_HIGHEST_TS);
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease
		 * the priority, and run with a higher time-quantum.
		 */
		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher-priority threads, or threads at the same
	 * priority, mark that the thread should yield; otherwise,
	 * continue running.
	 */
	if (lwp_eprio(l) <= ci_rq->r_highest_pri || l->l_target_cpu) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}
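
/*
 * Together with sched_slept(), the per-quantum l_priority-- above forms
 * the interactivity feedback loop: each expired time-quantum costs one
 * priority level (and, through ts_map[], buys a longer next quantum),
 * while each voluntary sleep of a non-batch thread earns one level back.
 */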

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rt_ts);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in such order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}
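
/*
 * Note that the sysctl values are exchanged in milliseconds, while
 * min_ts/max_ts are kept in ticks, hence the hztoms()/mstohz() round
 * trip above.  For example, with hz = 100 a request of 50 ms becomes
 * mstohz(50) = 5 ticks.
 */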

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in such order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of the threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif