/*	$NetBSD: scheduler.c,v 1.55 2023/10/05 19:41:07 ad Exp $	*/

/*
 * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.55 2023/10/05 19:41:07 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

static struct rumpcpu {
	/* needed in fastpath */
	struct cpu_info *rcpu_ci;
	void *rcpu_prevlwp;

	/* needed in slowpath */
	struct rumpuser_mtx *rcpu_mtx;
	struct rumpuser_cv *rcpu_cv;
	int rcpu_wanted;

	/* offset 20 (P=4) or 36 (P=8) here */

	/*
	 * Some stats.  Not really that necessary, but we should
	 * have room.  Note that these overflow quite fast, so need
	 * to be collected often.
	 */
	unsigned int rcpu_fastpath;
	unsigned int rcpu_slowpath;
	unsigned int rcpu_migrated;

	/* offset 32 (P=4) or 48 (P=8) */

	int rcpu_align[0] __aligned(CACHE_LINE_SIZE);
} rcpu_storage[MAXCPUS];

static inline struct rumpcpu *
cpuinfo_to_rumpcpu(struct cpu_info *ci)
{

	return &rcpu_storage[cpu_index(ci)];
}

struct cpu_info rump_bootcpu;

#define RCPULWP_BUSY	((void *)-1)
#define RCPULWP_WANTED	((void *)-2)

static struct rumpuser_mtx *lwp0mtx;
static struct rumpuser_cv *lwp0cv;
static unsigned nextcpu;

kmutex_t unruntime_lock; /* unruntime lwp lock.  practically unused */

static bool lwp0isbusy = false;

/*
 * Keep some stats.
 *
 * Keeping track of them is not really critical for speed, unless
 * stats happen to be on a different cache line (CACHE_LINE_SIZE is
 * really just a coarse estimate), so default for the performant case
 * (i.e. no stats).
 */
#ifdef RUMPSCHED_STATS
#define SCHED_FASTPATH(rcpu) rcpu->rcpu_fastpath++;
#define SCHED_SLOWPATH(rcpu) rcpu->rcpu_slowpath++;
#define SCHED_MIGRATED(rcpu) rcpu->rcpu_migrated++;
#else
#define SCHED_FASTPATH(rcpu)
#define SCHED_SLOWPATH(rcpu)
#define SCHED_MIGRATED(rcpu)
#endif
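
/*
 * Illustrative sketch, not compiled in: with RUMPSCHED_STATS defined,
 * the per-cpu counters above could be sampled roughly as below.  The
 * helper name is hypothetical and does not exist in rumpkern.
 */
#if 0
static void
rumpsched_dumpstats(void)
{
	struct rumpcpu *rcpu;
	int i;

	/* walk the attached rump cpus and print the counters */
	for (i = 0; i < ncpu; i++) {
		rcpu = &rcpu_storage[i];
		printf("cpu%d: fastpath %u, slowpath %u, migrated %u\n",
		    i, rcpu->rcpu_fastpath, rcpu->rcpu_slowpath,
		    rcpu->rcpu_migrated);
	}
}
#endif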

struct cpu_info *
cpu_lookup(u_int index)
{

	return rcpu_storage[index].rcpu_ci;
}

static inline struct rumpcpu *
getnextcpu(void)
{
	unsigned newcpu;

	newcpu = atomic_inc_uint_nv(&nextcpu);
	/* reset the counter before it wraps; a racy reset is harmless here */
	if (__predict_false(newcpu > UINT_MAX/2))
		atomic_and_uint(&nextcpu, 0);
	newcpu = newcpu % ncpu;

	return &rcpu_storage[newcpu];
}

/* this could/should be mi_attach_cpu? */
void
rump_cpus_bootstrap(int *nump)
{
	int num = *nump;

	if (num > MAXCPUS) {
		aprint_verbose("CPU limit: %d wanted, %d (MAXCPUS) "
		    "available (adjusted)\n", num, MAXCPUS);
		num = MAXCPUS;
	}

	cpu_setmodel("rumpcore (virtual)");

	mi_cpu_init();

	/* attach first cpu for bootstrap */
	rump_cpu_attach(&rump_bootcpu);
	ncpu = 1;
	*nump = num;
}
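
/*
 * Illustrative sketch of the expected call order (an assumption about
 * the caller, which lives elsewhere in rumpkern; "desiredcpus" is a
 * hypothetical variable): the cpu count is clamped by bootstrap first
 * and the scheduler structures are set up with the clamped value.
 */
#if 0
	int numcpu = desiredcpus;

	rump_cpus_bootstrap(&numcpu);	/* may clamp numcpu to MAXCPUS */
	/* ... other early bootstrap ... */
	rump_scheduler_init(numcpu);
#endif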

void
rump_scheduler_init(int numcpu)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < numcpu; i++) {
		if (i == 0) {
			ci = &rump_bootcpu;
		} else {
			ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
			ci->ci_index = i;
		}

		rcpu = &rcpu_storage[i];
		rcpu->rcpu_ci = ci;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);

		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
	}

	mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}

void
rump_schedlock_cv_signal(struct cpu_info *ci, struct rumpuser_cv *cv)
{
	struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(ci);

	rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
	rumpuser_cv_signal(cv);
	rumpuser_mutex_exit(rcpu->rcpu_mtx);
}

/*
 * condvar ops using scheduler lock as the rumpuser interlock.
 */
void
rump_schedlock_cv_wait(struct rumpuser_cv *cv)
{
	struct lwp *l = curlwp;
	struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(l->l_cpu);

	/* mutex will be taken and released in cpu schedule/unschedule */
	rumpuser_cv_wait(cv, rcpu->rcpu_mtx);
}

int
rump_schedlock_cv_timedwait(struct rumpuser_cv *cv, const struct timespec *ts)
{
	struct lwp *l = curlwp;
	struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(l->l_cpu);

	/* mutex will be taken and released in cpu schedule/unschedule */
	return rumpuser_cv_timedwait(cv, rcpu->rcpu_mtx,
	    ts->tv_sec, ts->tv_nsec);
}
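
/*
 * Illustrative sketch of the intended calling pattern (an assumption
 * drawn from the interlock interfaces below; "sleepcv" is a
 * hypothetical condvar): the caller releases its rump cpu with the
 * scheduler mutex held as the interlock, sleeps, and reschedules
 * under the same interlock, so a wakeup delivered between releasing
 * the cpu and blocking on the condvar is not lost.
 */
#if 0
	struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(curlwp->l_cpu);

	rump_unschedule_cpu_interlock(curlwp, rcpu->rcpu_mtx);
	rump_schedlock_cv_wait(sleepcv);
	rump_schedule_cpu_interlock(curlwp, rcpu->rcpu_mtx);
#endif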

static void
lwp0busy(void)
{

	/* busy lwp0 */
	KASSERT(curlwp == NULL || curlwp->l_stat != LSONPROC);
	rumpuser_mutex_enter_nowrap(lwp0mtx);
	while (lwp0isbusy)
		rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
	lwp0isbusy = true;
	rumpuser_mutex_exit(lwp0mtx);
}

static void
lwp0rele(void)
{

	rumpuser_mutex_enter_nowrap(lwp0mtx);
	KASSERT(lwp0isbusy == true);
	lwp0isbusy = false;
	rumpuser_cv_signal(lwp0cv);
	rumpuser_mutex_exit(lwp0mtx);
}

/*
 * rump_schedule: ensure that the calling host thread has a valid lwp context,
 * i.e. ensure that curlwp != NULL.  Also, ensure that there is a 1:1
 * mapping between the lwp and a rump kernel cpu.
 */
void
rump_schedule()
{
	struct lwp *l;

	/*
	 * If there is no dedicated lwp, allocate a temp one and
	 * set it to be free'd upon unschedule().  Use lwp0 context
	 * for reserving the necessary resources.  Don't optimize
	 * for this case -- anyone who cares about performance will
	 * start a real thread.
	 */
	if (__predict_true((l = curlwp) != NULL)) {
		struct proc *p = l->l_proc;
		rump_schedule_cpu(l);
		if (l->l_cred != p->p_cred) {
			kauth_cred_t oc = l->l_cred;
			mutex_enter(p->p_lock);
			l->l_cred = kauth_cred_hold(p->p_cred);
			mutex_exit(p->p_lock);
			kauth_cred_free(oc);
		}
	} else {
		lwp0busy();

		/* schedule cpu and use lwp0 */
		rump_schedule_cpu(&lwp0);
		rump_lwproc_curlwp_set(&lwp0);

		/* allocate thread, switch to it, and release lwp0 */
		l = rump__lwproc_alloclwp(initproc);
		rump_lwproc_switch(l);
		lwp0rele();

		/*
		 * mark new thread dead-on-unschedule.  this
		 * means that we'll be running with l_refcnt == 0.
		 * relax, it's fine.
		 */
		rump_lwproc_releaselwp();
	}
}
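
/*
 * Illustrative sketch (an assumption about external callers, not code
 * from this file): rump kernel entry points bracket their work with
 * schedule/unschedule so that curlwp and a rump cpu are valid for the
 * duration of the call.  "somekernelroutine" is hypothetical.
 */
#if 0
	rump_schedule();
	error = somekernelroutine(arg);
	rump_unschedule();
#endif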

void
rump_schedule_cpu(struct lwp *l)
{

	rump_schedule_cpu_interlock(l, NULL);
}

/*
 * Schedule a CPU.  This optimizes for the case where we schedule
 * the same thread often, and we have nCPU >= nFrequently-Running-Thread
 * (where CPU is virtual rump cpu, not host CPU).
 */
void
rump_schedule_cpu_interlock(struct lwp *l, void *interlock)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	void *old;
	bool domigrate;
	bool bound = l->l_pflag & LP_BOUND;

	l->l_stat = LSRUN;

	/*
	 * First, try fastpath: if we were the previous user of the
	 * CPU, everything is in order cachewise and we can just
	 * proceed to use it.
	 *
	 * If we are a different thread (i.e. CAS fails), we must go
	 * through a memory barrier to ensure we get a truthful
	 * view of the world.
	 */

	KASSERT(l->l_target_cpu != NULL);
	rcpu = cpuinfo_to_rumpcpu(l->l_target_cpu);
	if (atomic_cas_ptr(&rcpu->rcpu_prevlwp, l, RCPULWP_BUSY) == l) {
		if (interlock == rcpu->rcpu_mtx)
			rumpuser_mutex_exit(rcpu->rcpu_mtx);
		SCHED_FASTPATH(rcpu);
		/* jones, you're the man */
		goto fastlane;
	}

	/*
	 * Else, it's the slowpath for us.  First, determine if we
	 * can migrate.
	 */
	if (ncpu == 1)
		domigrate = false;
	else
		domigrate = true;

	/* Take lock.  This acts as a load barrier too. */
	if (interlock != rcpu->rcpu_mtx)
		rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);

	for (;;) {
		SCHED_SLOWPATH(rcpu);
		old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, RCPULWP_WANTED);

		/* CPU is free? */
		if (old != RCPULWP_BUSY && old != RCPULWP_WANTED) {
			if (atomic_cas_ptr(&rcpu->rcpu_prevlwp,
			    RCPULWP_WANTED, RCPULWP_BUSY) == RCPULWP_WANTED) {
				break;
			}
		}

		/*
		 * Do we want to migrate once?
		 * This may need a slightly better algorithm, or we
		 * might cache pingpong eternally for non-frequent
		 * threads.
		 */
		if (domigrate && !bound) {
			domigrate = false;
			SCHED_MIGRATED(rcpu);
			rumpuser_mutex_exit(rcpu->rcpu_mtx);
			rcpu = getnextcpu();
			rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
			continue;
		}

		/* Want CPU, wait until it's released and retry */
		rcpu->rcpu_wanted++;
		rumpuser_cv_wait_nowrap(rcpu->rcpu_cv, rcpu->rcpu_mtx);
		rcpu->rcpu_wanted--;
	}
	rumpuser_mutex_exit(rcpu->rcpu_mtx);

 fastlane:
	ci = rcpu->rcpu_ci;
	l->l_cpu = l->l_target_cpu = ci;
	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
	l->l_ru.ru_nvcsw++;
	l->l_stat = LSONPROC;

	/*
	 * No interrupts, so ci_curlwp == ci_onproc.
	 * Okay, we could make an attempt to not set ci_onproc
	 * in the case that an interrupt is scheduled immediately
	 * after a user proc, but leave that for later.
	 */
	ci->ci_curlwp = ci->ci_onproc = l;
}
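
/*
 * Sketch of the rcpu_prevlwp handoff protocol used by
 * rump_schedule_cpu_interlock() above and rump_unschedule_cpu1()
 * below (a condensed restatement, not additional functionality):
 * the slot holds the last lwp to run on the cpu when it is free,
 * RCPULWP_BUSY while the cpu is taken, and RCPULWP_WANTED when a
 * waiter has queued up for it.
 */
#if 0
	/* acquire, fastpath only: succeeds if we ran here last */
	if (atomic_cas_ptr(&rcpu->rcpu_prevlwp, l, RCPULWP_BUSY) == l)
		/* cpu is ours, caches are warm */;

	/* release: hand the cpu back, waking waiters if any */
	old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);
	if (old == RCPULWP_WANTED)
		rumpuser_cv_broadcast(rcpu->rcpu_cv);
#endif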

void
rump_unschedule()
{
	struct lwp *l = curlwp;
#ifdef DIAGNOSTIC
	int nlock;

	KERNEL_UNLOCK_ALL(l, &nlock);
	KASSERT(nlock == 0);
#endif

	KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
	rump_unschedule_cpu(l);
	l->l_mutex = &unruntime_lock;
	l->l_stat = LSSTOP;

	/*
	 * Check special conditions:
	 *  1) do we need to free the lwp which just unscheduled?
	 *     (locking order: lwp0, cpu)
	 *  2) do we want to clear curlwp for the current host thread
	 */
	if (__predict_false(l->l_flag & LW_WEXIT)) {
		lwp0busy();

		/* Now that we have lwp0, we can schedule a CPU again */
		rump_schedule_cpu(l);

		/* switch to lwp0.  this frees the old thread */
		KASSERT(l->l_flag & LW_WEXIT);
		rump_lwproc_switch(&lwp0);

		/* release lwp0 */
		rump_unschedule_cpu(&lwp0);
		lwp0.l_mutex = &unruntime_lock;
		lwp0.l_pflag &= ~LP_RUNNING;
		lwp0rele();
		rump_lwproc_curlwp_clear(&lwp0);

	} else if (__predict_false(l->l_flag & LW_RUMP_CLEAR)) {
		rump_lwproc_curlwp_clear(l);
		l->l_flag &= ~LW_RUMP_CLEAR;
	}
}

void
rump_unschedule_cpu(struct lwp *l)
{

	rump_unschedule_cpu_interlock(l, NULL);
}

void
rump_unschedule_cpu_interlock(struct lwp *l, void *interlock)
{

	if ((l->l_pflag & LP_INTR) == 0)
		rump_softint_run(l->l_cpu);
	rump_unschedule_cpu1(l, interlock);
}

void
rump_unschedule_cpu1(struct lwp *l, void *interlock)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	void *old;

	ci = l->l_cpu;
	ci->ci_curlwp = ci->ci_onproc = NULL;
	rcpu = cpuinfo_to_rumpcpu(ci);

	KASSERT(rcpu->rcpu_ci == ci);

	/*
	 * Make sure all stores are seen before the CPU release.  This
	 * is relevant only in the non-fastpath scheduling case, but
	 * we don't know here if that's going to happen, so need to
	 * expect the worst.
	 *
	 * If the scheduler interlock was requested by the caller, we
	 * need to obtain it before we release the CPU.  Otherwise, we
	 * risk a race condition where another thread is scheduled
	 * onto the rump kernel CPU before our current thread can
	 * grab the interlock.
	 */
	if (interlock == rcpu->rcpu_mtx)
		rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
	else
		membar_release(); /* XXX what does this pair with? */

	/* Release the CPU. */
	old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);

	/* No waiters?  No problems.  We're outta here. */
	if (old == RCPULWP_BUSY) {
		return;
	}

	KASSERT(old == RCPULWP_WANTED);

	/*
	 * Ok, things weren't so snappy.
	 *
	 * Snailpath: take lock and signal anyone waiting for this CPU.
	 */

	if (interlock != rcpu->rcpu_mtx)
		rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
	if (rcpu->rcpu_wanted)
		rumpuser_cv_broadcast(rcpu->rcpu_cv);
	if (interlock != rcpu->rcpu_mtx)
		rumpuser_mutex_exit(rcpu->rcpu_mtx);
}

/* Give up and retake CPU (perhaps a different one) */
void
yield()
{
	struct lwp *l = curlwp;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);
	rump_unschedule_cpu(l);
	rump_schedule_cpu(l);
	KERNEL_LOCK(nlocks, l);
}

void
preempt()
{

	yield();
}
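
/*
 * Illustrative sketch (an assumption; "workpending" is hypothetical):
 * a long-running rump kernel thread can call yield() in its loop to
 * let other threads onto this virtual cpu.
 */
#if 0
	while (!workpending())
		yield();
#endif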

bool
kpreempt(uintptr_t where)
{

	return false;
}

/*
 * There is no kernel thread preemption in rump currently.  But call
 * the implementing macros anyway in case they grow some side-effects
 * down the road.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}
554 1.10 pooka
555 1.38 rmind bool
556 1.38 rmind kpreempt_disabled(void)
557 1.38 rmind {
558 1.39 rmind #if 0
559 1.38 rmind const lwp_t *l = curlwp;
560 1.38 rmind
561 1.38 rmind return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
562 1.39 rmind (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
563 1.39 rmind #endif
564 1.39 rmind /* XXX: emulate cpu_kpreempt_disabled() */
565 1.39 rmind return true;
566 1.38 rmind }

void
suspendsched(void)
{

	/*
	 * Could wait until everyone is out and block further entries,
	 * but skip that for now.
	 */
}

void
sched_nice(struct proc *p, int level)
{

	/* nothing to do for now */
}

void
setrunnable(struct lwp *l)
{

	sched_enqueue(l);
}

void
sched_enqueue(struct lwp *l)
{

	rump_thread_allow(l);
}

void
sched_resched_cpu(struct cpu_info *ci, pri_t pri, bool unlock)
{

}

void
sched_resched_lwp(struct lwp *l, bool unlock)
{

}

void
sched_dequeue(struct lwp *l)
{

	panic("sched_dequeue not implemented");
}

void
preempt_point(void)
{

}

bool
preempt_needed(void)
{

	return false;
}