/*	$NetBSD: intr.c,v 1.2.18.3 2010/03/11 15:04:38 yamt Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.2.18.3 2010/03/11 15:04:38 yamt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/intr.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */

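/* uptime in seconds; doclock() increments this once every hz ticks */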
time_t time_uptime = 0;

#define SI_MPSAFE 0x01
#define SI_ONLIST 0x02
#define SI_KILLME 0x04

struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	LIST_ENTRY(softint) si_entries;
};

static struct rumpuser_mtx *si_mtx;
struct softint_lev {
	struct rumpuser_cv *si_cv;
	LIST_HEAD(, softint) si_pending;
};

/* rumpuser structures since we call rumpuser interfaces directly */
static struct rumpuser_cv *clockcv;
static struct rumpuser_mtx *clockmtx;
static struct timespec clockbase, clockup;
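/*
 * clkgen is a seqlock-style generation counter for clockup: it is
 * bumped to an odd value before clockup is updated and back to an
 * even value afterwards, so readers can detect a torn read.
 */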
static unsigned clkgen;

kcondvar_t lbolt; /* Oh Kath Ra */

void
rump_getuptime(struct timespec *ts)
{
	int startgen, i = 0;

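	/*
	 * Lockless read: retry until we observe the same even
	 * generation number on both sides of copying clockup,
	 * yielding now and then so the clock thread can finish
	 * an update in progress.
	 */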
	do {
		startgen = clkgen;
		if (__predict_false(i++ > 10)) {
			yield();
			i = 0;
		}
		*ts = clockup;
	} while (startgen != clkgen || clkgen % 2 != 0);
}

void
rump_gettime(struct timespec *ts)
{
	struct timespec ts_up;

	rump_getuptime(&ts_up);
	timespecadd(&clockbase, &ts_up, ts);
}

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec tick, curtime;
	uint64_t sec, nsec;
	int ticks = 0, error;
	extern int hz;

	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	tick.tv_sec = 0;
	tick.tv_nsec = 1000000000/hz;

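	/* signal softint_init(), which waits for a valid clockbase */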
	rumpuser_mutex_enter(clockmtx);
	rumpuser_cv_signal(clockcv);

	for (;;) {
		callout_hardclock();

		/* wait until the next tick. XXX: what if the clock changes? */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    curtime.tv_sec, curtime.tv_nsec) == 0)
			continue;

		/* if !maincpu: continue */

		if (++ticks == hz) {
			time_uptime++;
			ticks = 0;
			cv_broadcast(&lbolt);
		}

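		/* advance clockup under an odd generation number */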
		clkgen++;
		timespecadd(&clockup, &tick, &clockup);
		clkgen++;
		timespecadd(&clockup, &clockbase, &curtime);
	}
}

/*
 * Soft interrupt execution thread.  Note that we run without a CPU
 * context until we start processing the interrupt.  This is to avoid
 * lock recursion.
 */
static void
sithread(void *arg)
{
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	rump_unschedule();

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	/*
	 * XXX: si_mtx is unnecessary; we should add an interface
	 * which allows us to use schedmtx for the cv wait.
	 */
	rumpuser_mutex_enter_nowrap(si_mtx);
	for (;;) {
		if (!LIST_EMPTY(&si_lvl->si_pending)) {
			si = LIST_FIRST(&si_lvl->si_pending);
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			si->si_flags &= ~SI_ONLIST;
			LIST_REMOVE(si, si_entries);
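			/*
			 * If the handler was disestablished while it
			 * was still pending, free it now from a proper
			 * CPU context instead of running it.
			 */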
			if (si->si_flags & SI_KILLME) {
				rumpuser_mutex_exit(si_mtx);
				rump_schedule();
				softint_disestablish(si);
				rump_unschedule();
				rumpuser_mutex_enter_nowrap(si_mtx);
				continue;
			}
		} else {
			rumpuser_cv_wait_nowrap(si_lvl->si_cv, si_mtx);
			continue;
		}
		rumpuser_mutex_exit(si_mtx);

		rump_schedule();
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
		rump_unschedule();

		rumpuser_mutex_enter_nowrap(si_mtx);
	}

	panic("sithread unreachable");
}

void
rump_intr_init(void)
{

	rumpuser_mutex_init(&si_mtx);
	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);
	cv_init(&lbolt, "oh kath ra");
}

void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		LIST_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR, NULL,
		    sithread, (void *)(uintptr_t)i,
		    NULL, "rumpsi%d", i);
		if (rv)
			panic("softint thread creation failed: %d", rv);
	}

	rumpuser_mutex_enter(clockmtx);
	for (i = 0; i < ncpu; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR,
		    cpu_lookup(i), doclock, NULL, NULL,
		    "rumpclk%d", i);
		if (rv)
			panic("clock thread creation failed: %d", rv);
	}

	/*
	 * Make sure we have a clocktime before returning.
	 * XXX: mp
	 */
	rumpuser_cv_wait(clockcv, clockmtx);
	rumpuser_mutex_exit(clockmtx);
}

/*
 * Soft interrupts present two choices.  If we are running with thread
 * support enabled, defer execution; otherwise execute in place.
 * See softint_schedule().
 *
 * As there is currently no clear concept of when a thread finishes
 * work (although rump_clear_curlwp() is close), simply execute all
 * softints from the dedicated per-level threads (see sithread()).
 * This is probably not the most efficient method, but good enough
 * for now.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;

	si = kmem_alloc(sizeof(*si), KM_SLEEP);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = flags & SOFTINT_LVLMASK;
	KASSERT(si->si_level < SOFTINT_COUNT);

	return si;
}
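
/*
 * Example usage (hypothetical handler and cookie names):
 *
 *	cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    myhandler, myarg);
 *	softint_schedule(cookie);
 */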

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct cpu_data *cd = &curcpu()->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

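	/*
	 * Without threads, run the handler synchronously.  Otherwise
	 * queue it for its level; the softint thread runs it once
	 * rump_softint_run() signals the level's condition variable.
	 */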
	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!(si->si_flags & SI_ONLIST)) {
			LIST_INSERT_HEAD(&si_lvl[si->si_level].si_pending,
			    si, si_entries);
			si->si_flags |= SI_ONLIST;
		}
	}
}

/* flimsy disestablish: should wait for softints to finish */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;

	rumpuser_mutex_enter(si_mtx);
	if (si->si_flags & SI_ONLIST) {
		/* still pending: sithread() frees it when dequeued */
		si->si_flags |= SI_KILLME;
		rumpuser_mutex_exit(si_mtx);
		return;
	}
	rumpuser_mutex_exit(si_mtx);
	kmem_free(si, sizeof(*si));
}

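/*
 * Kick the softint threads for every level on this CPU which has
 * work pending.
 */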
void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!LIST_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}