/*	$NetBSD: sched_m2.c,v 1.24.6.1 2008/06/23 04:31:51 wrstuden Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.24.6.1 2008/06/23 04:31:51 wrstuden Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pset.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

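/*
 * Illustrative note (an assumption, not part of the original file): with
 * the usual NetBSD values NPRI_USER == 64 and MAXPRI_USER == 63, this
 * gives PRI_TS_COUNT == 64 time-sharing levels (0..63 == PRI_HIGHEST_TS),
 * PRI_HTS_RANGE == 6 (the band of "high" time-sharing levels that
 * high_pri[] below boosts threads into), and everything above
 * PRI_HIGHEST_TS reserved for the real-time classes.
 */
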
/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

static void	sched_precalcts(void);

typedef struct {
	u_int		sl_timeslice;	/* Time-slice of thread */
} sched_info_lwp_t;

static pool_cache_t	sil_pool;

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms  */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

	/* Pool of the scheduler-specific structures */
	sil_pool = pool_cache_init(sizeof(sched_info_lwp_t), coherency_unit,
	    0, 0, "lwpsd", NULL, IPL_NONE, NULL, NULL, NULL);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
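
/*
 * Worked example (illustrative assumption, not part of the original file):
 * with hz == 100, the defaults above give min_ts == mstohz(50) == 5 ticks
 * and max_ts == mstohz(150) == 15 ticks.  The time-sharing loop then
 * scales the slice linearly with priority: ts_map[0] == max_ts (15 ticks,
 * ~150 ms) for the lowest priority, down to ts_map[PRI_HIGHEST_TS] ==
 * min_ts (5 ticks, ~50 ms) for the highest time-sharing priority, i.e.
 * low-priority CPU-hungry threads get longer but less urgent slices.
 */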

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	KASSERT(l2->l_sched_info == NULL);
	l2->l_sched_info = pool_cache_get(sil_pool, PR_WAITOK);
	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_cache_put(sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{

	/* TODO: implement as SCHED_IA */
}

/* Recalculate the time-slice */
void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

void
sched_slept(struct lwp *l)
{

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase the priority and run with a lower
	 * time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS && (l->l_flag & LW_BATCH) == 0) {
		KASSERT(l->l_class == SCHED_OTHER);
		l->l_priority++;
	}
}

void
sched_wakeup(struct lwp *l)
{

	/* If the thread slept for a second or more, set a high priority */
	if (l->l_slptime >= 1)
		l->l_priority = high_pri[l->l_priority];
}

void
sched_pstats_hook(struct lwp *l, int batch)
{
	pri_t prio;

	/*
	 * Adjust only threads in the time-sharing queue; for performance
	 * reasons, skip those already at the highest priority.
	 */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;
	KASSERT(l->l_class == SCHED_OTHER);

	/* CPU-bound, and not for the first time: decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread has not run for a second or more, set a high priority */
	if (l->l_stat == LSRUN) {
		if (l->l_rticks && (hardclock_ticks - l->l_rticks >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if the priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}
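
/*
 * Illustrative example (an assumption, not part of the original file):
 * a SCHED_OTHER thread that sched_pstats() keeps flagging as CPU-bound
 * (batch != 0) is demoted by one priority level on each call above, while
 * a runnable thread that has been starved of the CPU for a second or more
 * (hardclock_ticks - l_rticks >= hz) is boosted via high_pri[] and
 * re-enqueued with lwp_changepri().
 */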

void
sched_oncpu(lwp_t *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update the counters */
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	l->l_cpu->ci_schedstate.spc_ticks = sil->sl_timeslice;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus the locking is not needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum and continue running, if the
		 * thread runs with the FIFO real-time policy.
		 */
		KASSERT(l->l_priority > PRI_HIGHEST_TS);
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease the
		 * priority and run with a higher time-quantum.
		 */
		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher-priority threads or threads in the same queue,
	 * mark that the thread should yield; otherwise, continue running.
	 */
	if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}
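
/*
 * Illustrative summary (an assumption, not part of the original file):
 * when a SCHED_OTHER thread exhausts its quantum, sched_tick() lowers its
 * priority by one level and then either refreshes its slice or, if a
 * runnable thread of equal or higher priority is waiting (lwp_eprio(l) <=
 * spc_maxpriority), sets SPCF_SHOULDYIELD and requests a reschedule.
 * SCHED_FIFO threads only get their quantum refreshed, and SCHED_RR
 * threads fall through the switch straight to the yield check.
 */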

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rt_ts);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_m2_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);
}
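
/*
 * Usage example (illustrative, not part of the original file): the nodes
 * created above appear under "kern.sched" and can be inspected or tuned
 * from userland, e.g.:
 *
 *	sysctl kern.sched.name kern.sched.rtts
 *	sysctl -w kern.sched.maxts=200
 *	sysctl -w kern.sched.mints=20
 *
 * Values are in milliseconds; sysctl_sched_mints()/sysctl_sched_maxts()
 * above reject values outside the supported ranges with EINVAL.
 */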