/*	$NetBSD: intr.c,v 1.22 2009/12/01 09:50:51 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.22 2009/12/01 09:50:51 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/intr.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator. It executes hardclock() and softintrs.
 */

time_t time_uptime = 0;

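/*
 * Per-softint flag bits: SI_MPSAFE means the handler runs without the
 * big kernel lock, SI_ONLIST means the softint is currently on a
 * pending list, and SI_KILLME marks a softint whose disestablish has
 * been deferred until the handler thread takes it off the list.
 */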
#define SI_MPSAFE 0x01
#define SI_ONLIST 0x02
#define SI_KILLME 0x04

struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	LIST_ENTRY(softint) si_entries;
};

static struct rumpuser_mtx *si_mtx;
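/*
 * Per-CPU, per-softint-level state: the list of softints pending for
 * this level and the condition variable its handler thread sleeps on.
 */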
struct softint_lev {
	struct rumpuser_cv *si_cv;
	LIST_HEAD(, softint) si_pending;
};

/* rumpuser structures since we call rumpuser interfaces directly */
static struct rumpuser_cv *clockcv;
static struct rumpuser_mtx *clockmtx;
static struct timespec clockbase, clockup;
static unsigned clkgen;

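/*
 * Lockless uptime read: the clock thread bumps clkgen to an odd value
 * before updating clockup and to an even value afterwards, so readers
 * retry until they observe a stable, even generation.
 */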
void
rump_getuptime(struct timespec *ts)
{
	int startgen, i = 0;

	do {
		startgen = clkgen;
		if (__predict_false(i++ > 10)) {
			yield();
			i = 0;
		}
		*ts = clockup;
	} while (startgen != clkgen || clkgen % 2 != 0);
}

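/*
 * Wall clock time: the host time sampled when the clock thread
 * started (clockbase) plus the accumulated uptime.
 */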
void
rump_gettime(struct timespec *ts)
{
	struct timespec ts_up;

	rump_getuptime(&ts_up);
	timespecadd(&clockbase, &ts_up, ts);
}

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec tick, curtime;
	uint64_t sec, nsec;
	int ticks = 0, error;
	extern int hz;

	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	tick.tv_sec = 0;
	tick.tv_nsec = 1000000000/hz;

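	/*
	 * Tell softint_init() that the time base is initialized and
	 * the clock is about to start ticking.
	 */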
	rumpuser_mutex_enter(clockmtx);
	rumpuser_cv_signal(clockcv);

	for (;;) {
		callout_hardclock();

		/* wait until the next tick. XXX: what if the clock changes? */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    curtime.tv_sec, curtime.tv_nsec) == 0)
			continue;

		/* if !maincpu: continue */

		if (++ticks == hz) {
			time_uptime++;
			ticks = 0;
		}

		clkgen++;
		timespecadd(&clockup, &tick, &clockup);
		clkgen++;
		timespecadd(&clockup, &clockbase, &curtime);
	}
}

/*
 * Soft interrupt execution thread. Note that we run without a CPU
 * context until we start processing the interrupt. This is to avoid
 * lock recursion.
 */
static void
sithread(void *arg)
{
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	rump_unschedule();

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	/*
	 * XXX: si_mtx is unnecessary; an interface should be added
	 * which allows using schedmtx for the cv wait.
	 */
	rumpuser_mutex_enter_nowrap(si_mtx);
	for (;;) {
		if (!LIST_EMPTY(&si_lvl->si_pending)) {
			si = LIST_FIRST(&si_lvl->si_pending);
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			si->si_flags &= ~SI_ONLIST;
			LIST_REMOVE(si, si_entries);
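			/*
			 * Deferred disestablish: the softint is now off
			 * the pending list, so let softint_disestablish()
			 * free it.  That requires a rump CPU context,
			 * so drop si_mtx and schedule ourselves first.
			 */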
			if (si->si_flags & SI_KILLME) {
				rumpuser_mutex_exit(si_mtx);
				rump_schedule();
				softint_disestablish(si);
				rump_unschedule();
				rumpuser_mutex_enter_nowrap(si_mtx);
				continue;
			}
		} else {
			rumpuser_cv_wait_nowrap(si_lvl->si_cv, si_mtx);
			continue;
		}
		rumpuser_mutex_exit(si_mtx);

		rump_schedule();
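		/* non-MPSAFE handlers run under the big kernel lock */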
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
		rump_unschedule();

		rumpuser_mutex_enter_nowrap(si_mtx);
	}

	panic("sithread unreachable");
}

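/*
 * Initialize the locks and condition variables shared by the clock
 * and softint threads.  Must run before softint_init().
 */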
void
rump_intr_init()
{

	rumpuser_mutex_init(&si_mtx);
	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);
}

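/*
 * Set up softint handling: allocate per-level pending queues for this
 * CPU, create a handler thread per softint level and a clock thread
 * per CPU, then wait for the clock to deliver its first time reading.
 * Does nothing if rump is running without threads.
 */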
void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		LIST_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR, NULL,
		    sithread, (void *)(uintptr_t)i,
		    NULL, "rumpsi%d", i);
		if (rv)
			panic("softint thread creation failed: %d", rv);
	}

	rumpuser_mutex_enter(clockmtx);
	for (i = 0; i < ncpu; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR,
		    cpu_lookup(i), doclock, NULL, NULL,
		    "rumpclk%d", i);
		if (rv)
			panic("clock thread creation failed: %d", rv);
	}

	/*
	 * Make sure we have a clock time before returning.
	 * XXX: mp
	 */
	rumpuser_cv_wait(clockcv, clockmtx);
	rumpuser_mutex_exit(clockmtx);
}

/*
 * Soft interrupts bring two choices.  If we are running with thread
 * support enabled, defer execution to the per-level softint threads
 * created in softint_init(), otherwise execute the handler in place.
 * See softint_schedule().
 */
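/*
 * Illustrative use only (the names below are made up, not part of rump):
 * a component would typically do
 *
 *	void *cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    mydrv_softrx, sc);
 *	...
 *	softint_schedule(cookie);		(from "interrupt" time)
 *	...
 *	softint_disestablish(cookie);
 *
 * i.e. establish once, schedule any number of times, disestablish
 * when done.
 */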
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;

	si = kmem_alloc(sizeof(*si), KM_SLEEP);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = flags & SOFTINT_LVLMASK;
	KASSERT(si->si_level < SOFTINT_COUNT);

	return si;
}

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct cpu_data *cd = &curcpu()->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!(si->si_flags & SI_ONLIST)) {
			LIST_INSERT_HEAD(&si_lvl[si->si_level].si_pending,
			    si, si_entries);
			si->si_flags |= SI_ONLIST;
		}
	}
}

/* flimsy disestablish: should wait for softints to finish */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;

	rumpuser_mutex_enter(si_mtx);
	if (si->si_flags & SI_ONLIST) {
		si->si_flags |= SI_KILLME;
		rumpuser_mutex_exit(si_mtx);
		return;
	}
	rumpuser_mutex_exit(si_mtx);
	kmem_free(si, sizeof(*si));
}

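/*
 * Wake the handler thread of every softint level that has work
 * pending on this CPU.
 */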
void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!LIST_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

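/*
 * Nothing in rump executes in a true hard interrupt context, so
 * cpu_intr_p() is always false.  Soft interrupt context is identified
 * by the LP_INTR flag on the running lwp.
 */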
bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}