/*	$NetBSD: sched_m2.c,v 1.29 2009/11/22 19:09:16 mbalmer Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.29 2009/11/22 19:09:16 mbalmer Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pset.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

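/*
 * PRI_TS_COUNT covers the user (time-sharing, SCHED_OTHER) priorities;
 * PRI_RT_COUNT covers the rest, which this scheduler treats as the
 * real-time range.  PRI_HTS_RANGE is the top tenth of the time-sharing
 * range; high_pri[] below maps priorities into it when a thread is
 * boosted after not running for a while.
 */
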
/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

static void	sched_precalcts(void);

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(20);			/*  ~20 ms */
	max_ts = mstohz(150);			/* ~150 ms */
	rt_ts = mstohz(100);			/* ~100 ms */
	sched_precalcts();

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

/* Pre-calculate the time-slices for the priorities */
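/*
 * Priority 0 receives max_ts, the highest time-sharing priority receives
 * min_ts, and the priorities in between are interpolated linearly; with
 * the default ranges a mid-range priority thus gets a slice of roughly
 * 85 ms.  All priorities above the time-sharing range share the fixed
 * rt_ts quantum.
 */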
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

}

void
sched_schedclock(struct lwp *l)
{

}

/*
 * Priorities and time-slice.
 */

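/*
 * sched_nice: propagate a nice(2) change to the process.  The nice delta
 * is scaled down by a factor of four into a priority delta, which is then
 * applied to all time-sharing (SCHED_OTHER) LWPs of the process.
 */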
void
sched_nice(struct proc *p, int prio)
{
	struct lwp *l;
	int n;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = prio;
	n = (prio - NZERO) >> 2;
	if (n == 0)
		return;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_class == SCHED_OTHER) {
			pri_t pri = l->l_priority - n;
			pri = (n < 0) ? min(pri, PRI_HIGHEST_TS) : imax(pri, 0);
			lwp_changepri(l, pri);
		}
		lwp_unlock(l);
	}
}

/* Recalculate the time-slice */
void
sched_newts(struct lwp *l)
{

	l->l_sched.timeslice = ts_map[lwp_eprio(l)];
}

void
sched_slept(struct lwp *l)
{

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase its priority; it will then run with a
	 * lower time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS && (l->l_flag & LW_BATCH) == 0) {
		struct proc *p = l->l_proc;

		KASSERT(l->l_class == SCHED_OTHER);
		if (__predict_false(p->p_nice < NZERO)) {
			const int n = max((NZERO - p->p_nice) >> 2, 1);
			l->l_priority = min(l->l_priority + n, PRI_HIGHEST_TS);
		} else {
			l->l_priority++;
		}
	}
}

void
sched_wakeup(struct lwp *l)
{

	/* If the thread slept for a second or more, give it a high priority */
	if (l->l_slptime >= 1)
		l->l_priority = high_pri[l->l_priority];
}

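/*
 * sched_pstats_hook: periodic priority adjustment, called for each LWP
 * from the sched_pstats() pass.  CPU-bound time-sharing threads drift
 * down one priority step per pass, while runnable threads that have not
 * run for a second or more are boosted via high_pri[].
 */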
    236   1.1    rmind void
    237  1.25    rmind sched_pstats_hook(struct lwp *l, int batch)
    238   1.1    rmind {
    239  1.11    rmind 	pri_t prio;
    240   1.1    rmind 
    241  1.22    rmind 	/*
    242  1.22    rmind 	 * Estimate threads on time-sharing queue only, however,
    243  1.22    rmind 	 * exclude the highest priority for performance purposes.
    244  1.22    rmind 	 */
    245  1.26    rmind 	KASSERT(lwp_locked(l, NULL));
    246  1.10       ad 	if (l->l_priority >= PRI_HIGHEST_TS)
    247   1.1    rmind 		return;
    248  1.16    rmind 	KASSERT(l->l_class == SCHED_OTHER);
    249   1.1    rmind 
    250  1.10       ad 	/* If it is CPU-bound not a first time - decrease the priority */
    251  1.11    rmind 	prio = l->l_priority;
    252  1.11    rmind 	if (batch && prio != 0)
    253  1.11    rmind 		prio--;
    254  1.10       ad 
    255   1.1    rmind 	/* If thread was not ran a second or more - set a high priority */
    256  1.11    rmind 	if (l->l_stat == LSRUN) {
    257  1.25    rmind 		if (l->l_rticks && (hardclock_ticks - l->l_rticks >= hz))
    258  1.11    rmind 			prio = high_pri[prio];
    259  1.11    rmind 		/* Re-enqueue the thread if priority has changed */
    260  1.11    rmind 		if (prio != l->l_priority)
    261  1.11    rmind 			lwp_changepri(l, prio);
    262  1.11    rmind 	} else {
    263  1.11    rmind 		/* In other states, change the priority directly */
    264  1.11    rmind 		l->l_priority = prio;
    265  1.11    rmind 	}
    266   1.1    rmind }
    267   1.1    rmind 
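/*
 * sched_oncpu: an LWP starts running on a CPU; arm the per-CPU tick
 * counter with the LWP's current time-slice.
 */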
void
sched_oncpu(lwp_t *l)
{
	struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;

	/* Update the counters */
	KASSERT(l->l_sched.timeslice >= min_ts);
	KASSERT(l->l_sched.timeslice <= max_ts);
	spc->spc_ticks = l->l_sched.timeslice;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, so no locking is needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	struct proc *p;

	if (__predict_false(CURCPU_IDLE_P()))
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * If the thread runs under the FIFO real-time policy,
		 * refresh the time-quantum and continue running.
		 */
		KASSERT(l->l_priority > PRI_HIGHEST_TS);
		spc->spc_ticks = l->l_sched.timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease its
		 * priority; it will then run with a higher time-quantum.
		 */
		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
		if (l->l_priority == 0)
			break;

		p = l->l_proc;
		if (__predict_false(p->p_nice > NZERO)) {
			const int n = max((p->p_nice - NZERO) >> 2, 1);
			l->l_priority = imax(l->l_priority - n, 0);
		} else
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher-priority threads or threads in the same queue,
	 * mark that the thread should yield; otherwise, continue running.
	 */
	if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = l->l_sched.timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

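/*
 * kern.sched.rtts: read-only view of the fixed real-time (round-robin)
 * time-slice, reported in milliseconds.
 */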
static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rt_ts);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

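/*
 * kern.sched.mints: set the minimal time-slice.  The value is passed in
 * milliseconds, converted to ticks and validated against hz and max_ts;
 * the time-slice table is then recalculated with all CPUs' run-queues
 * locked.
 */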
static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to lock the CPUs in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

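/*
 * kern.sched.maxts: set the maximal time-slice.  Same procedure as for
 * the minimal one, with the bounds checked from the other side.
 */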
static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to lock the CPUs in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_m2_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);
}