scheduler.c revision 1.17
/*	$NetBSD: scheduler.c,v 1.17 2010/07/11 16:20:39 pooka Exp $	*/

/*
 * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.17 2010/07/11 16:20:39 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

static struct cpu_info rump_cpus[MAXCPUS];
static struct rumpcpu {
	/* needed in fastpath */
	struct cpu_info *rcpu_ci;
	void *rcpu_prevlwp;

	/* needed in slowpath */
	struct rumpuser_mtx *rcpu_mtx;
	struct rumpuser_cv *rcpu_cv;
	int rcpu_wanted;

	/* offset 20 (P=4) or 36 (P=8) here */

	/*
	 * Some stats.  Not really that necessary, but we should
	 * have room.  Note that these overflow quite fast, so need
	 * to be collected often.
	 */
	unsigned int rcpu_fastpath;
	unsigned int rcpu_slowpath;
	unsigned int rcpu_migrated;

	/* offset 32 (P=4) or 50 (P=8) */

	int rcpu_align[0] __aligned(CACHE_LINE_SIZE);
} rcpu_storage[MAXCPUS];
struct cpu_info *rump_cpu = &rump_cpus[0];
int ncpu;

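/*
 * rcpu_prevlwp doubles as the CPU ownership word: it normally holds
 * the lwp which last ran on the CPU (the fastpath hint), and holds
 * one of the special values below while the CPU is checked out
 * (BUSY) or while a thread has announced that it is waiting for the
 * CPU to be released (WANTED).
 */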
#define RCPULWP_BUSY	((void *)-1)
#define RCPULWP_WANTED	((void *)-2)

static struct rumpuser_mtx *lwp0mtx;
static struct rumpuser_cv *lwp0cv;
static unsigned nextcpu;

static bool lwp0busy = false;

/*
 * Keep some stats.
 *
 * Keeping track of them is not really critical for speed, unless
 * stats happen to be on a different cache line (CACHE_LINE_SIZE is
 * really just a coarse estimate), so default to the performant case
 * (i.e. no stats).
 */
#ifdef RUMPSCHED_STATS
#define SCHED_FASTPATH(rcpu) rcpu->rcpu_fastpath++;
#define SCHED_SLOWPATH(rcpu) rcpu->rcpu_slowpath++;
#define SCHED_MIGRATED(rcpu) rcpu->rcpu_migrated++;
#else
#define SCHED_FASTPATH(rcpu)
#define SCHED_SLOWPATH(rcpu)
#define SCHED_MIGRATED(rcpu)
#endif
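
/*
 * The counters are maintained only when this file is built with
 * RUMPSCHED_STATS defined (e.g. -DRUMPSCHED_STATS on the compiler
 * command line); in the default build the SCHED_* macros above
 * expand to nothing.
 */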

struct cpu_info *
cpu_lookup(u_int index)
{

	return &rump_cpus[index];
}

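/*
 * Pick the next CPU in simple round-robin fashion.  Used by the
 * scheduling slowpath below when a thread decides to migrate away
 * from a contended CPU.
 */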
static inline struct rumpcpu *
getnextcpu(void)
{
	unsigned newcpu;

	newcpu = atomic_inc_uint_nv(&nextcpu);
	if (__predict_false(ncpu > UINT_MAX/2))
		atomic_and_uint(&nextcpu, 0);
	newcpu = newcpu % ncpu;

	return &rcpu_storage[newcpu];
}

/* this could/should be mi_attach_cpu? */
void
rump_cpus_bootstrap(int num)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	if (num > MAXCPUS) {
		aprint_verbose("CPU limit: %d wanted, %d (MAXCPUS) available\n",
		    num, MAXCPUS);
		num = MAXCPUS;
	}

	for (i = 0; i < num; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		ci->ci_index = i;
		rump_cpu_attach(ci);
		ncpu++;
	}
}
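
/*
 * Illustrative bring-up order (a sketch; the actual call sites live in
 * the rump kernel bootstrap code, not in this file).  The virtual CPUs
 * must be attached first, since that is what sets ncpu, and only then
 * can the per-CPU scheduler state be initialized:
 *
 *	rump_cpus_bootstrap(wantedcpus);
 *	rump_scheduler_init();
 */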

void
rump_scheduler_init()
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < ncpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rcpu->rcpu_ci = ci;
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx);
	}
}

/*
 * condvar ops using scheduler lock as the rumpuser interlock.
 */
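/*
 * Both routines locate the scheduler mutex through the calling lwp's
 * current virtual CPU (l->l_cpu), so the caller must still have a CPU
 * assigned; the mutex itself is taken and released on the caller's
 * behalf by the interlock variants of the CPU schedule/unschedule
 * routines, as noted below.
 */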
void
rump_schedlock_cv_wait(struct rumpuser_cv *cv)
{
	struct lwp *l = curlwp;
	struct rumpcpu *rcpu = &rcpu_storage[l->l_cpu-&rump_cpus[0]];

	/* mutex will be taken and released in cpu schedule/unschedule */
	rumpuser_cv_wait(cv, rcpu->rcpu_mtx);
}

int
rump_schedlock_cv_timedwait(struct rumpuser_cv *cv, const struct timespec *ts)
{
	struct lwp *l = curlwp;
	struct rumpcpu *rcpu = &rcpu_storage[l->l_cpu-&rump_cpus[0]];

	/* mutex will be taken and released in cpu schedule/unschedule */
	return rumpuser_cv_timedwait(cv, rcpu->rcpu_mtx,
	    ts->tv_sec, ts->tv_nsec);
}

void
rump_schedule()
{
	struct lwp *l;

	/*
	 * If there is no dedicated lwp, allocate a temp one and
	 * set it to be free'd upon unschedule().  Use lwp0 context
	 * for reserving the necessary resources.  Don't optimize
	 * for this case -- anyone who cares about performance will
	 * start a real thread.
	 */
	l = rumpuser_get_curlwp();
	if (l == NULL) {
		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(lwp0mtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
		lwp0busy = true;
		rumpuser_mutex_exit(lwp0mtx);

		/* schedule cpu and use lwp0 */
		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		l = rump_lwp_alloc(0, rump_nextlid());

		/* release lwp0 */
		rump_lwp_switch(l);
		rumpuser_mutex_enter_nowrap(lwp0mtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(lwp0mtx);

		/* mark new lwp as dead-on-exit */
		rump_lwp_release(l);
	} else {
		rump_schedule_cpu(l);
	}
}
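
/*
 * Illustrative use (a sketch; the real callers are the rump kernel
 * entry points, e.g. the system call wrappers, not this file): every
 * excursion into kernel code is bracketed by scheduling and
 * unscheduling a virtual CPU for the calling host thread.
 *
 *	rump_schedule();
 *	... run kernel code as curlwp, with a virtual CPU held ...
 *	rump_unschedule();
 */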

void
rump_schedule_cpu(struct lwp *l)
{

	rump_schedule_cpu_interlock(l, NULL);
}

/*
 * Schedule a CPU.  This optimizes for the case where we schedule
 * the same thread often, and we have nCPU >= nFrequently-Running-Thread
 * (where CPU is virtual rump cpu, not host CPU).
 */
void
rump_schedule_cpu_interlock(struct lwp *l, void *interlock)
{
	struct rumpcpu *rcpu;
	void *old;
	bool domigrate;
	bool bound = l->l_pflag & LP_BOUND;

	/*
	 * First, try fastpath: if we were the previous user of the
	 * CPU, everything is in order cachewise and we can just
	 * proceed to use it.
	 *
	 * If we are a different thread (i.e. CAS fails), we must go
	 * through a memory barrier to ensure we get a truthful
	 * view of the world.
	 */

	KASSERT(l->l_target_cpu != NULL);
	rcpu = &rcpu_storage[l->l_target_cpu-&rump_cpus[0]];
	if (atomic_cas_ptr(&rcpu->rcpu_prevlwp, l, RCPULWP_BUSY) == l) {
		if (__predict_true(interlock == rcpu->rcpu_mtx))
			rumpuser_mutex_exit(rcpu->rcpu_mtx);
		SCHED_FASTPATH(rcpu);
		/* jones, you're the man */
		goto fastlane;
	}

	/*
	 * Else, it's the slowpath for us.  First, determine if we
	 * can migrate.
	 */
	if (ncpu == 1)
		domigrate = false;
	else
		domigrate = true;

	/* Take lock.  This acts as a load barrier too. */
	if (__predict_true(interlock != rcpu->rcpu_mtx))
		rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);

	for (;;) {
		SCHED_SLOWPATH(rcpu);
		old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, RCPULWP_WANTED);

		/* CPU is free? */
		if (old != RCPULWP_BUSY && old != RCPULWP_WANTED) {
			if (atomic_cas_ptr(&rcpu->rcpu_prevlwp,
			    RCPULWP_WANTED, RCPULWP_BUSY) == RCPULWP_WANTED) {
				break;
			}
		}

		/*
		 * Do we want to migrate once?
		 * This may need a slightly better algorithm, or we
		 * might cache pingpong eternally for non-frequent
		 * threads.
		 */
		if (domigrate && !bound) {
			domigrate = false;
			SCHED_MIGRATED(rcpu);
			rumpuser_mutex_exit(rcpu->rcpu_mtx);
			rcpu = getnextcpu();
			rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
			continue;
		}

		/* Want CPU, wait until it's released and retry */
		rcpu->rcpu_wanted++;
		rumpuser_cv_wait_nowrap(rcpu->rcpu_cv, rcpu->rcpu_mtx);
		rcpu->rcpu_wanted--;
	}
	rumpuser_mutex_exit(rcpu->rcpu_mtx);

 fastlane:
	l->l_cpu = l->l_target_cpu = rcpu->rcpu_ci;
	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
}

void
rump_unschedule()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
	rump_unschedule_cpu(l);
	l->l_mutex = NULL;

	/*
	 * If we're using a temp lwp, need to take lwp0 for rump_lwp_free().
	 * (we could maybe cache idle lwp's to avoid constant bouncing)
	 */
	if (l->l_flag & LW_WEXIT) {
		rumpuser_set_curlwp(NULL);

		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(lwp0mtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
		lwp0busy = true;
		rumpuser_mutex_exit(lwp0mtx);

		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		rump_lwp_free(l);
		rump_unschedule_cpu(&lwp0);
		rumpuser_set_curlwp(NULL);

		rumpuser_mutex_enter_nowrap(lwp0mtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(lwp0mtx);
	}
}

void
rump_unschedule_cpu(struct lwp *l)
{

	rump_unschedule_cpu_interlock(l, NULL);
}

void
rump_unschedule_cpu_interlock(struct lwp *l, void *interlock)
{

	if ((l->l_pflag & LP_INTR) == 0)
		rump_softint_run(l->l_cpu);
	rump_unschedule_cpu1(l, interlock);
}

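/*
 * Release the lwp's virtual CPU and wake up anyone waiting for it.
 * If the CPU's scheduler mutex is passed as the interlock, the
 * routine returns with that mutex held.
 */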
void
rump_unschedule_cpu1(struct lwp *l, void *interlock)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	void *old;

	ci = l->l_cpu;
	l->l_cpu = NULL;
	rcpu = &rcpu_storage[ci-&rump_cpus[0]];

	KASSERT(rcpu->rcpu_ci == ci);

	/*
	 * Make sure all stores are seen before the CPU release.  This
	 * is relevant only in the non-fastpath scheduling case, but
	 * we don't know here if that's going to happen, so need to
	 * expect the worst.
	 */
	membar_exit();

	/* Release the CPU. */
	old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);

	/* No waiters?  No problems.  We're outta here. */
	if (old == RCPULWP_BUSY) {
		/* Was the scheduler interlock requested? */
		if (__predict_false(interlock == rcpu->rcpu_mtx))
			rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
		return;
	}

	KASSERT(old == RCPULWP_WANTED);

	/*
	 * Ok, things weren't so snappy.
	 *
	 * Snailpath: take lock and signal anyone waiting for this CPU.
	 */

	rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
	if (rcpu->rcpu_wanted)
		rumpuser_cv_broadcast(rcpu->rcpu_cv);

	if (__predict_true(interlock != rcpu->rcpu_mtx))
		rumpuser_mutex_exit(rcpu->rcpu_mtx);
}

/* Give up and retake CPU (perhaps a different one) */
void
yield()
{
	struct lwp *l = curlwp;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);
	rump_unschedule_cpu(l);
	rump_schedule_cpu(l);
	KERNEL_LOCK(nlocks, l);
}

void
preempt()
{

	yield();
}

bool
kpreempt(uintptr_t where)
{

	return false;
}

/*
 * There is no kernel thread preemption in rump currently.  But call
 * the implementing macros anyway in case they grow some side-effects
 * down the road.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}

void
suspendsched(void)
{

	/*
	 * Could wait until everyone is out and block further entries,
	 * but skip that for now.
	 */
}

void
sched_nice(struct proc *p, int level)
{

	/* nothing to do for now */
}