/*	$NetBSD: sched_m2.c,v 1.32.28.2 2020/04/08 14:08:51 martin Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.32.28.2 2020/04/08 14:08:51 martin Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pset.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)
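
/*
 * Added overview (derived from the definitions above and the code below,
 * not part of the original source): priorities 0 .. PRI_HIGHEST_TS (the
 * NPRI_USER user priorities) are handled as time-sharing here, while
 * anything above PRI_HIGHEST_TS is treated as real-time.  PRI_HTS_RANGE
 * is roughly the top 10% of the time-sharing range; high_pri[] maps
 * priorities into that band when a thread deserves a boost.
 */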

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

static void	sched_precalcts(void);

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(20);			/*  ~20 ms */
	max_ts = mstohz(150);			/* ~150 ms */
	rt_ts = mstohz(100);			/* ~100 ms */
	sched_precalcts();

#ifdef notdef
	/* Need to set the name etc. This does not belong here */
	/* Attach the primary CPU here */
	sched_cpuattach(curcpu());
#endif

	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
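
/*
 * Added worked example (assuming hz = 100 and the defaults set in
 * sched_rqinit(): min_ts = mstohz(20) = 2 ticks, max_ts = mstohz(150) =
 * 15 ticks): ts_map[] interpolates roughly linearly from max_ts at
 * priority 0 down to min_ts at PRI_HIGHEST_TS, so low-priority
 * (CPU-bound) threads receive the longest slices and high-priority
 * (interactive) threads the shortest.  high_pri[] compresses the whole
 * time-sharing range into its top PRI_HTS_RANGE priorities, which is
 * where sched_wakeup() and sched_pstats_hook() place boosted threads.
 */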

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

}

void
sched_schedclock(struct lwp *l)
{

}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{
	struct lwp *l;
	int n;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = prio;
	n = (prio - NZERO) >> 2;
	if (n == 0)
		return;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_class == SCHED_OTHER) {
			pri_t pri = l->l_priority - n;
			pri = (n < 0) ? uimin(pri, PRI_HIGHEST_TS) : imax(pri, 0);
			lwp_changepri(l, pri);
		}
		lwp_unlock(l);
	}
}
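
/*
 * Added note on the mapping above: roughly every four nice levels shift
 * the priority of the process' SCHED_OTHER LWPs by one step
 * (n = (prio - NZERO) >> 2), clamped to the [0, PRI_HIGHEST_TS] range.
 */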

/* Recalculate the time-slice */
void
sched_newts(struct lwp *l)
{

	l->l_sched.timeslice = ts_map[lwp_eprio(l)];
}

void
sched_slept(struct lwp *l)
{

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase the priority and run with a lower
	 * time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS && (l->l_flag & LW_BATCH) == 0) {
		struct proc *p = l->l_proc;

		KASSERT(l->l_class == SCHED_OTHER);
		if (__predict_false(p->p_nice < NZERO)) {
			const int n = uimax((NZERO - p->p_nice) >> 2, 1);
			l->l_priority = uimin(l->l_priority + n, PRI_HIGHEST_TS);
		} else {
			l->l_priority++;
		}
	}
}
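
/*
 * Added note: sched_slept() and sched_tick() form the feedback loop of
 * this scheduler: blocking (sleeping) raises the priority of a
 * time-sharing thread by one step (or more for negative-nice processes),
 * while each expired time-quantum in sched_tick() lowers it, so
 * interactive threads drift towards higher priorities (and thus shorter
 * time-quanta via ts_map[]).
 */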

void
sched_wakeup(struct lwp *l)
{

	/* If the thread was sleeping for a second or more, set a high priority */
	if (l->l_slptime >= 1)
		l->l_priority = high_pri[l->l_priority];
}

void
sched_pstats_hook(struct lwp *l, int batch)
{
	pri_t prio;

	/*
	 * Estimate only threads on the time-sharing queue; however,
	 * exclude the highest priority for performance purposes.
	 */
	KASSERT(lwp_locked(l, NULL));
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;
	KASSERT(l->l_class == SCHED_OTHER);

	/* If it is CPU-bound and not for the first time, decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread has not run for a second or more, set a high priority */
	if (l->l_stat == LSRUN) {
		if (l->l_rticks && (hardclock_ticks - l->l_rticks >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if the priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}
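
/*
 * Added note: sched_pstats_hook() is invoked periodically from the
 * scheduler's common statistics path and applies two corrections to
 * time-sharing threads below PRI_HIGHEST_TS: repeatedly CPU-bound
 * ("batch") threads lose one priority step, while runnable threads that
 * have not run for a second or more are boosted via high_pri[] to avoid
 * starvation.
 */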

void
sched_oncpu(lwp_t *l)
{
	struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;

	/* Update the counters */
	KASSERT(l->l_sched.timeslice >= min_ts);
	KASSERT(l->l_sched.timeslice <= max_ts);
	spc->spc_ticks = l->l_sched.timeslice;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum, with the running LWP lock held (spc_lwplock).
 */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = ci->ci_onproc;
	struct proc *p;

	if (__predict_false(CURCPU_IDLE_P()))
		return;

	lwp_lock(l);
	KASSERT(l->l_mutex != spc->spc_mutex);
	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum and continue running, if the
		 * thread runs under the FIFO real-time policy.
		 */
		KASSERT(l->l_priority > PRI_HIGHEST_TS);
		spc->spc_ticks = l->l_sched.timeslice;
		lwp_unlock(l);
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease the
		 * priority and run with a higher time-quantum.
		 */
		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
		if (l->l_priority == 0)
			break;

		p = l->l_proc;
		if (__predict_false(p->p_nice > NZERO)) {
			const int n = uimax((p->p_nice - NZERO) >> 2, 1);
			l->l_priority = imax(l->l_priority - n, 0);
		} else
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher-priority threads or threads in the same queue,
	 * mark that the thread should yield; otherwise, continue running.
	 */
	if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		spc_lock(ci);
		sched_resched_cpu(ci, MAXPRI_KTHREAD, true);
		/* spc now unlocked */
	} else
		spc->spc_ticks = l->l_sched.timeslice;
	lwp_unlock(l);
}
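
/*
 * Added note: sched_tick() runs once per expired time-quantum.  For
 * SCHED_FIFO the quantum is simply reloaded; for SCHED_OTHER the priority
 * decays by one step (more for positive-nice processes), and if a thread
 * of equal or higher priority is waiting (or a migration to l_target_cpu
 * is pending), SPCF_SHOULDYIELD is set and the CPU is kicked via
 * sched_resched_cpu(); otherwise the LWP keeps running with a fresh
 * time-quantum.
 */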

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rt_ts);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}
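
/*
 * Added note: the mints and maxts handlers follow the same pattern: the
 * value is supplied in milliseconds, converted to ticks with mstohz(),
 * range-checked so that min_ts stays below max_ts (and both within one
 * second), and then applied under the per-CPU scheduler locks before
 * re-running sched_precalcts().
 */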

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_m2_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);
}
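
/*
 * Added usage sketch (illustrative, not part of the original source): the
 * nodes created above appear under "kern.sched", e.g. from userland:
 *
 *	sysctl kern.sched.name		# reports "M2"
 *	sysctl kern.sched.rtts		# read-only real-time quantum, in ms
 *	sysctl -w kern.sched.maxts=200	# set maximal time-quantum to ~200 ms
 *	sysctl -w kern.sched.mints=50	# set minimal time-quantum to ~50 ms
 */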
    460