/*	$NetBSD: scheduler.c,v 1.10 2010/04/17 13:13:45 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.10 2010/04/17 13:13:45 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/* should go for MAXCPUS at some point */
static struct cpu_info rump_cpus[MAXCPUS];
static struct rumpcpu {
	struct cpu_info *rcpu_ci;
	int rcpu_flags;
	struct rumpuser_cv *rcpu_cv;
	LIST_ENTRY(rumpcpu) rcpu_entries;
} rcpu_storage[MAXCPUS];
struct cpu_info *rump_cpu = &rump_cpus[0];
int ncpu = 1;

#define RCPU_WANTED	0x01	/* someone wants this specific CPU */
#define RCPU_BUSY	0x02	/* CPU is busy */
#define RCPU_FREELIST	0x04	/* CPU is on freelist */

static LIST_HEAD(,rumpcpu) cpu_freelist = LIST_HEAD_INITIALIZER(cpu_freelist);
static struct rumpuser_mtx *schedmtx;
static struct rumpuser_cv *schedcv, *lwp0cv;

static bool lwp0busy = false;
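
/*
 * Life cycle of a virtual CPU, as implemented below: an idle CPU
 * sits on cpu_freelist with RCPU_FREELIST set.  rump_schedule_cpu()
 * takes it off the freelist and marks it RCPU_BUSY.  A bound lwp
 * which finds its CPU busy sets RCPU_WANTED and sleeps on rcpu_cv,
 * in which case rump_unschedule_cpu1() wakes the waiter instead of
 * returning the CPU to the freelist.  Unbound lwps sleep on schedcv
 * waiting for any CPU to be freelisted.
 */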

struct cpu_info *
cpu_lookup(u_int index)
{

	return &rump_cpus[index];
}

void
rump_scheduler_init()
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&schedmtx);
	rumpuser_cv_init(&schedcv);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < ncpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rump_cpu_bootstrap(ci);
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
		rcpu->rcpu_ci = ci;
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags = RCPU_FREELIST;
		rumpuser_cv_init(&rcpu->rcpu_cv);
	}
}
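
/*
 * Note that while the CPU arrays above are sized for MAXCPUS,
 * rump_scheduler_init() bootstraps and freelists only ncpu
 * (currently 1) of them.
 */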

void
rump_schedule()
{
	struct lwp *l;

	/*
	 * If there is no dedicated lwp, allocate a temp one and
	 * set it to be free'd upon unschedule().  Use lwp0 context
	 * for reserving the necessary resources.
	 */
	l = rumpuser_get_curlwp();
	if (l == NULL) {
		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		/* schedule cpu and use lwp0 */
		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		l = rump_lwp_alloc(0, rump_nextlid());

		/* release lwp0 */
		rump_lwp_switch(l);
		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);

		/* mark new lwp as dead-on-exit */
		rump_lwp_release(l);
	} else {
		rump_schedule_cpu(l);
	}
}

void
rump_schedule_cpu(struct lwp *l)
{
	struct rumpcpu *rcpu;

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (l->l_pflag & LP_BOUND) {
		KASSERT(l->l_cpu != NULL);
		rcpu = &rcpu_storage[l->l_cpu-&rump_cpus[0]];
		if (rcpu->rcpu_flags & RCPU_BUSY) {
			KASSERT((rcpu->rcpu_flags & RCPU_FREELIST) == 0);
			while (rcpu->rcpu_flags & RCPU_BUSY) {
				rcpu->rcpu_flags |= RCPU_WANTED;
				rumpuser_cv_wait_nowrap(rcpu->rcpu_cv,
				    schedmtx);
			}
			rcpu->rcpu_flags &= ~RCPU_WANTED;
		} else {
			KASSERT(rcpu->rcpu_flags & (RCPU_FREELIST|RCPU_WANTED));
		}
		if (rcpu->rcpu_flags & RCPU_FREELIST) {
			LIST_REMOVE(rcpu, rcpu_entries);
			rcpu->rcpu_flags &= ~RCPU_FREELIST;
		}
	} else {
		while ((rcpu = LIST_FIRST(&cpu_freelist)) == NULL) {
			rumpuser_cv_wait_nowrap(schedcv, schedmtx);
		}
		KASSERT(rcpu->rcpu_flags & RCPU_FREELIST);
		LIST_REMOVE(rcpu, rcpu_entries);
		rcpu->rcpu_flags &= ~RCPU_FREELIST;
		KASSERT(l->l_cpu == NULL);
		l->l_cpu = rcpu->rcpu_ci;
	}
	rcpu->rcpu_flags |= RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
}
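
/*
 * To summarize the two paths above: a bound lwp (LP_BOUND) must
 * reacquire exactly the CPU recorded in l->l_cpu, while an unbound
 * lwp takes whichever CPU is first on the freelist and records it
 * in l->l_cpu for the duration of the schedule.
 */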

void
rump_unschedule()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
	rump_unschedule_cpu(l);
	l->l_mutex = NULL;

	/*
	 * If we're using a temp lwp, we need to take lwp0 for
	 * rump_lwp_free().  (we could maybe cache idle lwps to
	 * avoid constant bouncing)
	 */
	if (l->l_flag & LW_WEXIT) {
		rumpuser_set_curlwp(NULL);

		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		rump_lwp_free(l);
		rump_unschedule_cpu(&lwp0);
		rumpuser_set_curlwp(NULL);

		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);
	}
}
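
/*
 * Illustrative usage, as a sketch (the routine name below is
 * hypothetical): every entry into the rump kernel brackets the
 * kernel work between scheduling and unscheduling a virtual CPU:
 *
 *	rump_schedule();
 *	error = some_kernel_operation();
 *	rump_unschedule();
 */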

void
rump_unschedule_cpu(struct lwp *l)
{

	if ((l->l_pflag & LP_INTR) == 0)
		rump_softint_run(l->l_cpu);
	rump_unschedule_cpu1(l);
}

void
rump_unschedule_cpu1(struct lwp *l)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;

	ci = l->l_cpu;
	if ((l->l_pflag & LP_BOUND) == 0) {
		l->l_cpu = NULL;
	}
	rcpu = &rcpu_storage[ci-&rump_cpus[0]];
	KASSERT(rcpu->rcpu_ci == ci);
	KASSERT(rcpu->rcpu_flags & RCPU_BUSY);

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (rcpu->rcpu_flags & RCPU_WANTED) {
		/*
		 * The assumption is that there will usually be at most
		 * one thread waiting on rcpu_cv, so broadcast is fine.
		 * (and the current structure requires a broadcast anyway,
		 * since RCPU_WANTED is a single flag bit which does not
		 * count waiters)
		 */
		rumpuser_cv_broadcast(rcpu->rcpu_cv);
	} else {
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags |= RCPU_FREELIST;
		rumpuser_cv_signal(schedcv);
	}
	rcpu->rcpu_flags &= ~RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
}

/* Give up and retake CPU (perhaps a different one) */
void
yield()
{
	struct lwp *l = curlwp;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);
	rump_unschedule_cpu(l);
	rump_schedule_cpu(l);
	KERNEL_LOCK(nlocks, l);
}
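
/*
 * A sketch of intended use (do_work() is hypothetical): a
 * long-running kernel thread can yield() periodically so that
 * other threads get a chance at a virtual CPU:
 *
 *	for (;;) {
 *		do_work();
 *		yield();
 *	}
 */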

void
preempt()
{

	yield();
}

bool
kpreempt(uintptr_t where)
{

	return false;
}

/*
 * There is no kernel thread preemption in rump currently.  But call
 * the implementing macros anyway in case they grow some side-effects
 * down the road.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}

void
suspendsched(void)
{

	/*
	 * Could wait until everyone is out and block further entries,
	 * but skip that for now.
	 */
}