/*	$NetBSD: scheduler.c,v 1.12 2010/04/27 23:30:30 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.12 2010/04/27 23:30:30 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

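/*
 * CPU scheduling in the rump kernel: every lwp must hold a virtual
 * CPU while it runs inside the rump kernel.  Unbound lwps take any
 * CPU off a freelist; lwps bound with LP_BOUND wait for their
 * designated CPU to become free.  Host threads without a dedicated
 * lwp temporarily borrow lwp0 to allocate themselves one.
 *
 * Callers bracket rump kernel entry roughly as follows (a sketch,
 * not a normative example):
 *
 *	rump_schedule();
 *	... access rump kernel state; curlwp/curcpu() are valid ...
 *	rump_unschedule();
 */
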
/* the virtual CPUs, statically sized to MAXCPUS */
static struct cpu_info rump_cpus[MAXCPUS];
static struct rumpcpu {
	struct cpu_info *rcpu_ci;
	int rcpu_flags;
	struct rumpuser_cv *rcpu_cv;
	LIST_ENTRY(rumpcpu) rcpu_entries;
} rcpu_storage[MAXCPUS];
struct cpu_info *rump_cpu = &rump_cpus[0];
int ncpu;

#define RCPU_WANTED	0x01	/* someone wants this specific CPU */
#define RCPU_BUSY	0x02	/* CPU is busy */
#define RCPU_FREELIST	0x04	/* CPU is on freelist */

static LIST_HEAD(,rumpcpu) cpu_freelist = LIST_HEAD_INITIALIZER(cpu_freelist);
static struct rumpuser_mtx *schedmtx;
static struct rumpuser_cv *schedcv, *lwp0cv;

static bool lwp0busy = false;

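/*
 * MI interface: map a CPU index to its cpu_info.
 */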
struct cpu_info *
cpu_lookup(u_int index)
{

	return &rump_cpus[index];
}

/* this could/should be mi_attach_cpu? */
void
rump_cpus_bootstrap(int num)
{
	struct cpu_info *ci;
	int i;

	for (i = 0; i < num; i++) {
		ci = &rump_cpus[i];
		ci->ci_index = i;
		rump_cpu_attach(ci);
		ncpu++;
	}
}

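/*
 * Initialize the scheduler: create the synchronization primitives
 * and put every attached CPU on the freelist.
 */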
void
rump_scheduler_init(void)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&schedmtx);
	rumpuser_cv_init(&schedcv);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < ncpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rcpu->rcpu_ci = ci;
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags = RCPU_FREELIST;
		rumpuser_cv_init(&rcpu->rcpu_cv);
	}
}

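/*
 * Schedule a CPU for the calling host thread.  If the thread does
 * not yet have a dedicated lwp, one is allocated for it here.
 */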
void
rump_schedule(void)
{
	struct lwp *l;

	/*
	 * If there is no dedicated lwp, allocate a temporary one and
	 * arrange for it to be freed upon unschedule().  Use the lwp0
	 * context for reserving the necessary resources.
	 */
	l = rumpuser_get_curlwp();
	if (l == NULL) {
		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		/* schedule cpu and use lwp0 */
		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		l = rump_lwp_alloc(0, rump_nextlid());

		/* release lwp0 */
		rump_lwp_switch(l);
		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);

		/* mark new lwp as dead-on-exit */
		rump_lwp_release(l);
	} else {
		rump_schedule_cpu(l);
	}
}

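/*
 * Attach lwp l to a CPU: a bound lwp waits until its designated CPU
 * is free, while an unbound one takes the first CPU off the freelist
 * (sleeping on schedcv if the list is empty).
 */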
void
rump_schedule_cpu(struct lwp *l)
{
	struct rumpcpu *rcpu;

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (l->l_pflag & LP_BOUND) {
		KASSERT(l->l_cpu != NULL);
		rcpu = &rcpu_storage[l->l_cpu - &rump_cpus[0]];
		if (rcpu->rcpu_flags & RCPU_BUSY) {
			KASSERT((rcpu->rcpu_flags & RCPU_FREELIST) == 0);
			while (rcpu->rcpu_flags & RCPU_BUSY) {
				rcpu->rcpu_flags |= RCPU_WANTED;
				rumpuser_cv_wait_nowrap(rcpu->rcpu_cv,
				    schedmtx);
			}
			rcpu->rcpu_flags &= ~RCPU_WANTED;
		} else {
			KASSERT(rcpu->rcpu_flags & (RCPU_FREELIST|RCPU_WANTED));
		}
		if (rcpu->rcpu_flags & RCPU_FREELIST) {
			LIST_REMOVE(rcpu, rcpu_entries);
			rcpu->rcpu_flags &= ~RCPU_FREELIST;
		}
	} else {
		while ((rcpu = LIST_FIRST(&cpu_freelist)) == NULL) {
			rumpuser_cv_wait_nowrap(schedcv, schedmtx);
		}
		KASSERT(rcpu->rcpu_flags & RCPU_FREELIST);
		LIST_REMOVE(rcpu, rcpu_entries);
		rcpu->rcpu_flags &= ~RCPU_FREELIST;
		KASSERT(l->l_cpu == NULL);
		l->l_cpu = rcpu->rcpu_ci;
	}
	rcpu->rcpu_flags |= RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
}

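/*
 * Release the current CPU.  If the calling thread was running on a
 * temporary lwp (marked LW_WEXIT), also free the lwp, again borrowing
 * lwp0 to do so.
 */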
void
rump_unschedule(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
	rump_unschedule_cpu(l);
	l->l_mutex = NULL;

	/*
	 * If we're using a temporary lwp, we need to take lwp0 for
	 * rump_lwp_free().  (We could cache idle lwps to avoid this
	 * constant bouncing.)
	 */
	if (l->l_flag & LW_WEXIT) {
		rumpuser_set_curlwp(NULL);

		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		rump_lwp_free(l);
		rump_unschedule_cpu(&lwp0);
		rumpuser_set_curlwp(NULL);

		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);
	}
}

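/*
 * Detach lwp l from its CPU, first running any pending soft
 * interrupts unless we are a soft interrupt thread ourselves.
 */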
void
rump_unschedule_cpu(struct lwp *l)
{

	if ((l->l_pflag & LP_INTR) == 0)
		rump_softint_run(l->l_cpu);
	rump_unschedule_cpu1(l);
}

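/*
 * Release the CPU itself: hand it to a waiting bound lwp if there is
 * one, otherwise put it back on the freelist.
 */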
void
rump_unschedule_cpu1(struct lwp *l)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;

	ci = l->l_cpu;
	if ((l->l_pflag & LP_BOUND) == 0) {
		l->l_cpu = NULL;
	}
	rcpu = &rcpu_storage[ci - &rump_cpus[0]];
	KASSERT(rcpu->rcpu_ci == ci);
	KASSERT(rcpu->rcpu_flags & RCPU_BUSY);

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (rcpu->rcpu_flags & RCPU_WANTED) {
		/*
		 * We assume there is usually at most one thread waiting
		 * on rcpu_cv, so a broadcast is fine.  (The current
		 * structure in fact requires it, since only a flag bit
		 * records that the CPU is wanted; it cannot count waiters.)
		 */
		rumpuser_cv_broadcast(rcpu->rcpu_cv);
	} else {
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags |= RCPU_FREELIST;
		rumpuser_cv_signal(schedcv);
	}
	rcpu->rcpu_flags &= ~RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
}

/* Give up the CPU and immediately retake one (perhaps a different one) */
void
yield(void)
{
	struct lwp *l = curlwp;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);
	rump_unschedule_cpu(l);
	rump_schedule_cpu(l);
	KERNEL_LOCK(nlocks, l);
}

void
preempt(void)
{

	yield();
}

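/* there is no kernel thread preemption in rump, so this is a no-op */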
bool
kpreempt(uintptr_t where)
{

	return false;
}

/*
 * There is no kernel thread preemption in rump currently.  But call
 * the implementing macros anyway in case they grow some side effects
 * down the road.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}

void
suspendsched(void)
{

	/*
	 * Could wait until everyone is out and block further entries,
	 * but skip that for now.
	 */
}

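/* scheduling priorities are not used by the rump scheduler, so this is a stub */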
void
sched_nice(struct proc *p, int level)
{

	/* nothing to do for now */
}