Home | History | Annotate | Line # | Download | only in kern
sched_m2.c revision 1.20.6.3
      1  1.20.6.1    mjf /*	$NetBSD: sched_m2.c,v 1.20.6.3 2009/01/17 13:29:19 mjf Exp $	*/
      2       1.1  rmind 
      3       1.1  rmind /*
      4      1.15  rmind  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
      5      1.12  rmind  * All rights reserved.
      6       1.1  rmind  *
      7       1.1  rmind  * Redistribution and use in source and binary forms, with or without
      8       1.1  rmind  * modification, are permitted provided that the following conditions
      9       1.1  rmind  * are met:
     10       1.1  rmind  * 1. Redistributions of source code must retain the above copyright
     11       1.1  rmind  *    notice, this list of conditions and the following disclaimer.
     12       1.1  rmind  * 2. Redistributions in binary form must reproduce the above copyright
     13       1.1  rmind  *    notice, this list of conditions and the following disclaimer in the
     14       1.1  rmind  *    documentation and/or other materials provided with the distribution.
     15       1.1  rmind  *
     16      1.19  rmind  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     17      1.19  rmind  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     18      1.19  rmind  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     19      1.19  rmind  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     20      1.19  rmind  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21      1.19  rmind  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     22      1.19  rmind  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     23      1.19  rmind  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     24      1.19  rmind  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25      1.19  rmind  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26      1.19  rmind  * SUCH DAMAGE.
     27       1.1  rmind  */
     28       1.1  rmind 
     29       1.1  rmind /*
     30       1.1  rmind  * TODO:
     31       1.1  rmind  *  - Implementation of fair share queue;
     32       1.1  rmind  *  - Support for NUMA;
     33       1.1  rmind  */
     34       1.1  rmind 
     35       1.1  rmind #include <sys/cdefs.h>
     36  1.20.6.1    mjf __KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.20.6.3 2009/01/17 13:29:19 mjf Exp $");
     37       1.1  rmind 
     38       1.1  rmind #include <sys/param.h>
     39       1.1  rmind 
     40       1.8  rmind #include <sys/bitops.h>
     41       1.1  rmind #include <sys/cpu.h>
     42       1.1  rmind #include <sys/callout.h>
     43       1.1  rmind #include <sys/errno.h>
     44       1.1  rmind #include <sys/kernel.h>
     45       1.1  rmind #include <sys/kmem.h>
     46       1.1  rmind #include <sys/lwp.h>
     47       1.1  rmind #include <sys/mutex.h>
     48       1.1  rmind #include <sys/pool.h>
     49       1.1  rmind #include <sys/proc.h>
     50      1.15  rmind #include <sys/pset.h>
     51       1.1  rmind #include <sys/resource.h>
     52       1.1  rmind #include <sys/resourcevar.h>
     53       1.1  rmind #include <sys/sched.h>
     54       1.1  rmind #include <sys/syscallargs.h>
     55       1.1  rmind #include <sys/sysctl.h>
     56       1.1  rmind #include <sys/types.h>
     57       1.1  rmind 
     58       1.1  rmind /*
      59      1.10     ad  * Priority related definitions.
     60       1.1  rmind  */
     61      1.10     ad #define	PRI_TS_COUNT	(NPRI_USER)
     62      1.10     ad #define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
     63      1.10     ad #define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)
     64      1.10     ad 
     65      1.11  rmind #define	PRI_HIGHEST_TS	(MAXPRI_USER)
     66      1.10     ad 
     67       1.1  rmind /*
     68       1.1  rmind  * Time-slices and priorities.
     69       1.1  rmind  */
     70       1.1  rmind static u_int	min_ts;			/* Minimal time-slice */
     71       1.1  rmind static u_int	max_ts;			/* Maximal time-slice */
     72       1.1  rmind static u_int	rt_ts;			/* Real-time time-slice */
     73       1.1  rmind static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
     74       1.1  rmind static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */
     75       1.1  rmind 
     76  1.20.6.2    mjf static void	sched_precalcts(void);
     77       1.1  rmind 
     78       1.1  rmind /*
     79       1.1  rmind  * Initialization and setup.
     80       1.1  rmind  */
     81       1.1  rmind 
     82       1.1  rmind void
     83       1.1  rmind sched_rqinit(void)
     84       1.1  rmind {
     85       1.1  rmind 	struct cpu_info *ci = curcpu();
     86       1.1  rmind 
     87       1.1  rmind 	if (hz < 100) {
     88       1.1  rmind 		panic("sched_rqinit: value of HZ is too low\n");
     89       1.1  rmind 	}
     90       1.1  rmind 
     91       1.1  rmind 	/* Default timing ranges */
     92  1.20.6.3    mjf 	min_ts = mstohz(20);			/*  ~20 ms */
     93  1.20.6.3    mjf 	max_ts = mstohz(150);			/* ~150 ms */
     94  1.20.6.3    mjf 	rt_ts = mstohz(100);			/* ~100 ms */
     95       1.1  rmind 	sched_precalcts();
     96       1.1  rmind 
     97       1.1  rmind 	/* Attach the primary CPU here */
     98       1.1  rmind 	sched_cpuattach(ci);
     99       1.1  rmind 
    100      1.10     ad 	sched_lwp_fork(NULL, &lwp0);
    101       1.1  rmind 	sched_newts(&lwp0);
    102       1.1  rmind }
    103       1.1  rmind 
    104       1.1  rmind /* Pre-calculate the time-slices for the priorities */
    105       1.1  rmind static void
    106       1.1  rmind sched_precalcts(void)
    107       1.1  rmind {
    108       1.1  rmind 	pri_t p;
    109       1.1  rmind 
    110      1.10     ad 	/* Time-sharing range */
    111      1.10     ad 	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
    112      1.10     ad 		ts_map[p] = max_ts -
    113      1.10     ad 		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
    114      1.10     ad 		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
    115      1.10     ad 		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
    116      1.10     ad 	}
    117      1.10     ad 
    118      1.10     ad 	/* Real-time range */
    119      1.10     ad 	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
    120       1.1  rmind 		ts_map[p] = rt_ts;
    121       1.1  rmind 		high_pri[p] = p;
    122       1.1  rmind 	}
    123       1.1  rmind }
    124       1.1  rmind 
    125       1.1  rmind /*
    126       1.1  rmind  * Hooks.
    127       1.1  rmind  */
    128       1.1  rmind 
    129       1.1  rmind void
    130       1.1  rmind sched_proc_fork(struct proc *parent, struct proc *child)
    131       1.1  rmind {
    132       1.1  rmind 	struct lwp *l;
    133       1.1  rmind 
    134       1.1  rmind 	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
    135       1.1  rmind 		lwp_lock(l);
    136       1.1  rmind 		sched_newts(l);
    137       1.1  rmind 		lwp_unlock(l);
    138       1.1  rmind 	}
    139       1.1  rmind }
    140       1.1  rmind 
/*
 * Exit hook: nothing to do — M2 keeps no per-process scheduler state.
 */
void
sched_proc_exit(struct proc *child, struct proc *parent)
{

}
    146       1.1  rmind 
/*
 * LWP fork hook: nothing to do — the time-slice is set separately
 * via sched_newts().
 */
void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

}
    152       1.1  rmind 
/*
 * LWP collect hook: nothing to do — no per-LWP state to reclaim.
 */
void
sched_lwp_collect(struct lwp *l)
{

}
    158      1.10     ad 
/*
 * Set-runnable hook: nothing to do in this scheduler.
 */
void
sched_setrunnable(struct lwp *l)
{

}
    164       1.1  rmind 
/*
 * Schedclock hook: nothing to do — quantum accounting happens in
 * sched_tick().
 */
void
sched_schedclock(struct lwp *l)
{

}
    170       1.1  rmind 
    171       1.1  rmind /*
    172       1.1  rmind  * Priorities and time-slice.
    173       1.1  rmind  */
    174       1.1  rmind 
    175       1.1  rmind void
    176       1.1  rmind sched_nice(struct proc *p, int prio)
    177       1.1  rmind {
    178  1.20.6.3    mjf 	struct lwp *l;
    179  1.20.6.3    mjf 	int n;
    180       1.1  rmind 
    181  1.20.6.3    mjf 	KASSERT(mutex_owned(p->p_lock));
    182  1.20.6.3    mjf 
    183  1.20.6.3    mjf 	p->p_nice = prio;
    184  1.20.6.3    mjf 	n = (prio - NZERO) >> 2;
    185  1.20.6.3    mjf 	if (n == 0)
    186  1.20.6.3    mjf 		return;
    187  1.20.6.3    mjf 
    188  1.20.6.3    mjf 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    189  1.20.6.3    mjf 		lwp_lock(l);
    190  1.20.6.3    mjf 		if (l->l_class == SCHED_OTHER) {
    191  1.20.6.3    mjf 			pri_t pri = l->l_priority - n;
    192  1.20.6.3    mjf 			pri = (n < 0) ? min(pri, PRI_HIGHEST_TS) : imax(pri, 0);
    193  1.20.6.3    mjf 			lwp_changepri(l, pri);
    194  1.20.6.3    mjf 		}
    195  1.20.6.3    mjf 		lwp_unlock(l);
    196  1.20.6.3    mjf 	}
    197       1.1  rmind }
    198       1.1  rmind 
    199       1.1  rmind /* Recalculate the time-slice */
    200  1.20.6.2    mjf void
    201       1.1  rmind sched_newts(struct lwp *l)
    202       1.1  rmind {
    203       1.1  rmind 
    204  1.20.6.3    mjf 	l->l_sched.timeslice = ts_map[lwp_eprio(l)];
    205       1.1  rmind }
    206       1.1  rmind 
    207       1.1  rmind void
    208       1.1  rmind sched_slept(struct lwp *l)
    209       1.1  rmind {
    210       1.1  rmind 
    211       1.1  rmind 	/*
    212      1.10     ad 	 * If thread is in time-sharing queue and batch flag is not marked,
    213      1.10     ad 	 * increase the the priority, and run with the lower time-quantum.
    214       1.1  rmind 	 */
    215  1.20.6.2    mjf 	if (l->l_priority < PRI_HIGHEST_TS && (l->l_flag & LW_BATCH) == 0) {
    216  1.20.6.3    mjf 		struct proc *p = l->l_proc;
    217  1.20.6.3    mjf 
    218      1.10     ad 		KASSERT(l->l_class == SCHED_OTHER);
    219  1.20.6.3    mjf 		if (__predict_false(p->p_nice < NZERO)) {
    220  1.20.6.3    mjf 			const int n = max((NZERO - p->p_nice) >> 2, 1);
    221  1.20.6.3    mjf 			l->l_priority = min(l->l_priority + n, PRI_HIGHEST_TS);
    222  1.20.6.3    mjf 		} else {
    223  1.20.6.3    mjf 			l->l_priority++;
    224  1.20.6.3    mjf 		}
    225      1.10     ad 	}
    226       1.1  rmind }
    227       1.1  rmind 
    228       1.1  rmind void
    229       1.1  rmind sched_wakeup(struct lwp *l)
    230       1.1  rmind {
    231       1.1  rmind 
    232       1.1  rmind 	/* If thread was sleeping a second or more - set a high priority */
    233  1.20.6.2    mjf 	if (l->l_slptime >= 1)
    234      1.10     ad 		l->l_priority = high_pri[l->l_priority];
    235       1.1  rmind }
    236       1.1  rmind 
    237       1.1  rmind void
    238  1.20.6.2    mjf sched_pstats_hook(struct lwp *l, int batch)
    239       1.1  rmind {
    240      1.11  rmind 	pri_t prio;
    241       1.1  rmind 
    242  1.20.6.1    mjf 	/*
    243  1.20.6.1    mjf 	 * Estimate threads on time-sharing queue only, however,
    244  1.20.6.1    mjf 	 * exclude the highest priority for performance purposes.
    245  1.20.6.1    mjf 	 */
    246  1.20.6.3    mjf 	KASSERT(lwp_locked(l, NULL));
    247      1.10     ad 	if (l->l_priority >= PRI_HIGHEST_TS)
    248       1.1  rmind 		return;
    249      1.16  rmind 	KASSERT(l->l_class == SCHED_OTHER);
    250       1.1  rmind 
    251      1.10     ad 	/* If it is CPU-bound not a first time - decrease the priority */
    252      1.11  rmind 	prio = l->l_priority;
    253      1.11  rmind 	if (batch && prio != 0)
    254      1.11  rmind 		prio--;
    255      1.10     ad 
    256       1.1  rmind 	/* If thread was not ran a second or more - set a high priority */
    257      1.11  rmind 	if (l->l_stat == LSRUN) {
    258  1.20.6.2    mjf 		if (l->l_rticks && (hardclock_ticks - l->l_rticks >= hz))
    259      1.11  rmind 			prio = high_pri[prio];
    260      1.11  rmind 		/* Re-enqueue the thread if priority has changed */
    261      1.11  rmind 		if (prio != l->l_priority)
    262      1.11  rmind 			lwp_changepri(l, prio);
    263      1.11  rmind 	} else {
    264      1.11  rmind 		/* In other states, change the priority directly */
    265      1.11  rmind 		l->l_priority = prio;
    266      1.11  rmind 	}
    267       1.1  rmind }
    268       1.1  rmind 
    269  1.20.6.2    mjf void
    270  1.20.6.2    mjf sched_oncpu(lwp_t *l)
    271       1.1  rmind {
    272  1.20.6.3    mjf 	struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;
    273       1.1  rmind 
    274       1.1  rmind 	/* Update the counters */
    275  1.20.6.3    mjf 	KASSERT(l->l_sched.timeslice >= min_ts);
    276  1.20.6.3    mjf 	KASSERT(l->l_sched.timeslice <= max_ts);
    277  1.20.6.3    mjf 	spc->spc_ticks = l->l_sched.timeslice;
    278       1.1  rmind }
    279       1.1  rmind 
    280       1.1  rmind /*
    281       1.1  rmind  * Time-driven events.
    282       1.1  rmind  */
    283       1.1  rmind 
    284       1.1  rmind /*
    285       1.1  rmind  * Called once per time-quantum.  This routine is CPU-local and runs at
    286       1.1  rmind  * IPL_SCHED, thus the locking is not needed.
    287       1.1  rmind  */
    288       1.1  rmind void
    289       1.1  rmind sched_tick(struct cpu_info *ci)
    290       1.1  rmind {
    291       1.1  rmind 	struct schedstate_percpu *spc = &ci->ci_schedstate;
    292       1.1  rmind 	struct lwp *l = curlwp;
    293  1.20.6.3    mjf 	struct proc *p;
    294       1.1  rmind 
    295  1.20.6.3    mjf 	if (__predict_false(CURCPU_IDLE_P()))
    296       1.2  rmind 		return;
    297       1.1  rmind 
    298      1.10     ad 	switch (l->l_class) {
    299       1.2  rmind 	case SCHED_FIFO:
    300       1.2  rmind 		/*
    301       1.2  rmind 		 * Update the time-quantum, and continue running,
    302       1.2  rmind 		 * if thread runs on FIFO real-time policy.
    303       1.2  rmind 		 */
    304      1.16  rmind 		KASSERT(l->l_priority > PRI_HIGHEST_TS);
    305  1.20.6.3    mjf 		spc->spc_ticks = l->l_sched.timeslice;
    306       1.1  rmind 		return;
    307       1.2  rmind 	case SCHED_OTHER:
    308      1.10     ad 		/*
    309      1.10     ad 		 * If thread is in time-sharing queue, decrease the priority,
    310      1.10     ad 		 * and run with a higher time-quantum.
    311      1.10     ad 		 */
    312      1.16  rmind 		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
    313  1.20.6.3    mjf 		if (l->l_priority == 0)
    314  1.20.6.3    mjf 			break;
    315  1.20.6.3    mjf 
    316  1.20.6.3    mjf 		p = l->l_proc;
    317  1.20.6.3    mjf 		if (__predict_false(p->p_nice > NZERO)) {
    318  1.20.6.3    mjf 			const int n = max((p->p_nice - NZERO) >> 2, 1);
    319  1.20.6.3    mjf 			l->l_priority = imax(l->l_priority - n, 0);
    320  1.20.6.3    mjf 		} else
    321      1.10     ad 			l->l_priority--;
    322       1.2  rmind 		break;
    323       1.1  rmind 	}
    324       1.1  rmind 
    325       1.1  rmind 	/*
    326       1.2  rmind 	 * If there are higher priority threads or threads in the same queue,
    327       1.2  rmind 	 * mark that thread should yield, otherwise, continue running.
    328       1.1  rmind 	 */
    329  1.20.6.2    mjf 	if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
    330       1.1  rmind 		spc->spc_flags |= SPCF_SHOULDYIELD;
    331       1.1  rmind 		cpu_need_resched(ci, 0);
    332       1.1  rmind 	} else
    333  1.20.6.3    mjf 		spc->spc_ticks = l->l_sched.timeslice;
    334       1.1  rmind }
    335       1.1  rmind 
    336       1.1  rmind /*
    337       1.1  rmind  * Sysctl nodes and initialization.
    338       1.1  rmind  */
    339       1.1  rmind 
    340       1.1  rmind static int
    341      1.15  rmind sysctl_sched_rtts(SYSCTLFN_ARGS)
    342      1.15  rmind {
    343      1.15  rmind 	struct sysctlnode node;
    344      1.15  rmind 	int rttsms = hztoms(rt_ts);
    345      1.15  rmind 
    346      1.15  rmind 	node = *rnode;
    347      1.15  rmind 	node.sysctl_data = &rttsms;
    348      1.15  rmind 	return sysctl_lookup(SYSCTLFN_CALL(&node));
    349      1.15  rmind }
    350      1.15  rmind 
    351      1.15  rmind static int
    352       1.1  rmind sysctl_sched_mints(SYSCTLFN_ARGS)
    353       1.1  rmind {
    354       1.1  rmind 	struct sysctlnode node;
    355       1.1  rmind 	struct cpu_info *ci;
    356       1.1  rmind 	int error, newsize;
    357       1.1  rmind 	CPU_INFO_ITERATOR cii;
    358       1.1  rmind 
    359       1.1  rmind 	node = *rnode;
    360       1.1  rmind 	node.sysctl_data = &newsize;
    361       1.1  rmind 
    362       1.1  rmind 	newsize = hztoms(min_ts);
    363       1.1  rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    364       1.1  rmind 	if (error || newp == NULL)
    365       1.1  rmind 		return error;
    366       1.1  rmind 
    367       1.8  rmind 	newsize = mstohz(newsize);
    368       1.1  rmind 	if (newsize < 1 || newsize > hz || newsize >= max_ts)
    369       1.1  rmind 		return EINVAL;
    370       1.1  rmind 
    371       1.1  rmind 	/* It is safe to do this in such order */
    372       1.1  rmind 	for (CPU_INFO_FOREACH(cii, ci))
    373       1.1  rmind 		spc_lock(ci);
    374       1.1  rmind 
    375       1.8  rmind 	min_ts = newsize;
    376       1.1  rmind 	sched_precalcts();
    377       1.1  rmind 
    378       1.1  rmind 	for (CPU_INFO_FOREACH(cii, ci))
    379       1.1  rmind 		spc_unlock(ci);
    380       1.1  rmind 
    381       1.1  rmind 	return 0;
    382       1.1  rmind }
    383       1.1  rmind 
    384       1.1  rmind static int
    385       1.1  rmind sysctl_sched_maxts(SYSCTLFN_ARGS)
    386       1.1  rmind {
    387       1.1  rmind 	struct sysctlnode node;
    388       1.1  rmind 	struct cpu_info *ci;
    389       1.1  rmind 	int error, newsize;
    390       1.1  rmind 	CPU_INFO_ITERATOR cii;
    391       1.1  rmind 
    392       1.1  rmind 	node = *rnode;
    393       1.1  rmind 	node.sysctl_data = &newsize;
    394       1.1  rmind 
    395       1.1  rmind 	newsize = hztoms(max_ts);
    396       1.1  rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    397       1.1  rmind 	if (error || newp == NULL)
    398       1.1  rmind 		return error;
    399       1.1  rmind 
    400       1.8  rmind 	newsize = mstohz(newsize);
    401       1.1  rmind 	if (newsize < 10 || newsize > hz || newsize <= min_ts)
    402       1.1  rmind 		return EINVAL;
    403       1.1  rmind 
    404       1.1  rmind 	/* It is safe to do this in such order */
    405       1.1  rmind 	for (CPU_INFO_FOREACH(cii, ci))
    406       1.1  rmind 		spc_lock(ci);
    407       1.1  rmind 
    408       1.8  rmind 	max_ts = newsize;
    409       1.1  rmind 	sched_precalcts();
    410       1.1  rmind 
    411       1.1  rmind 	for (CPU_INFO_FOREACH(cii, ci))
    412       1.1  rmind 		spc_unlock(ci);
    413       1.1  rmind 
    414       1.1  rmind 	return 0;
    415       1.1  rmind }
    416       1.1  rmind 
    417  1.20.6.2    mjf SYSCTL_SETUP(sysctl_sched_m2_setup, "sysctl sched setup")
    418       1.1  rmind {
    419       1.1  rmind 	const struct sysctlnode *node = NULL;
    420       1.1  rmind 
    421       1.1  rmind 	sysctl_createv(clog, 0, NULL, NULL,
    422       1.1  rmind 		CTLFLAG_PERMANENT,
    423       1.1  rmind 		CTLTYPE_NODE, "kern", NULL,
    424       1.1  rmind 		NULL, 0, NULL, 0,
    425       1.1  rmind 		CTL_KERN, CTL_EOL);
    426       1.1  rmind 	sysctl_createv(clog, 0, NULL, &node,
    427       1.1  rmind 		CTLFLAG_PERMANENT,
    428       1.1  rmind 		CTLTYPE_NODE, "sched",
    429       1.1  rmind 		SYSCTL_DESCR("Scheduler options"),
    430       1.1  rmind 		NULL, 0, NULL, 0,
    431       1.1  rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
    432       1.1  rmind 
    433       1.1  rmind 	if (node == NULL)
    434       1.1  rmind 		return;
    435       1.1  rmind 
    436  1.20.6.2    mjf 	sysctl_createv(NULL, 0, &node, NULL,
    437       1.1  rmind 		CTLFLAG_PERMANENT,
    438       1.1  rmind 		CTLTYPE_STRING, "name", NULL,
    439       1.1  rmind 		NULL, 0, __UNCONST("M2"), 0,
    440       1.1  rmind 		CTL_CREATE, CTL_EOL);
    441  1.20.6.2    mjf 	sysctl_createv(NULL, 0, &node, NULL,
    442      1.15  rmind 		CTLFLAG_PERMANENT,
    443      1.15  rmind 		CTLTYPE_INT, "rtts",
    444      1.15  rmind 		SYSCTL_DESCR("Round-robin time quantum (in miliseconds)"),
    445      1.15  rmind 		sysctl_sched_rtts, 0, NULL, 0,
    446      1.15  rmind 		CTL_CREATE, CTL_EOL);
    447  1.20.6.2    mjf 	sysctl_createv(NULL, 0, &node, NULL,
    448       1.1  rmind 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
    449       1.1  rmind 		CTLTYPE_INT, "maxts",
    450       1.8  rmind 		SYSCTL_DESCR("Maximal time quantum (in miliseconds)"),
    451       1.1  rmind 		sysctl_sched_maxts, 0, &max_ts, 0,
    452       1.1  rmind 		CTL_CREATE, CTL_EOL);
    453  1.20.6.2    mjf 	sysctl_createv(NULL, 0, &node, NULL,
    454       1.1  rmind 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
    455       1.1  rmind 		CTLTYPE_INT, "mints",
    456       1.8  rmind 		SYSCTL_DESCR("Minimal time quantum (in miliseconds)"),
    457       1.1  rmind 		sysctl_sched_mints, 0, &min_ts, 0,
    458       1.1  rmind 		CTL_CREATE, CTL_EOL);
    459       1.1  rmind }
    460