/*	$NetBSD: intr.c,v 1.2.18.5 2010/10/09 03:32:43 yamt Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.2.18.5 2010/10/09 03:32:43 yamt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */
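
/*
 * Overview of the scheme below: each virtual CPU gets a clock thread
 * which calls callout_hardclock() once per 1/hz interval, and one
 * soft interrupt thread per softint level.  The softint threads sleep
 * on a rumpuser condition variable until rump_softint_run() signals
 * that work is pending on their level's queue.
 */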

#define SI_MPSAFE 0x01
#define SI_KILLME 0x02

struct softint_percpu;
struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

struct softint_percpu {
	struct softint *sip_parent;
	bool sip_onlist;

	LIST_ENTRY(softint_percpu) sip_entries;
};

struct softint_lev {
	struct rumpuser_cv *si_cv;
	LIST_HEAD(, softint_percpu) si_pending;
};

kcondvar_t lbolt; /* Oh Kath Ra */

static u_int ticks;
static int ncpu_final;

static u_int
rumptc_get(struct timecounter *tc)
{

	KASSERT(rump_threads);
	return ticks;
}

static struct timecounter rumptc = {
	.tc_get_timecount	= rumptc_get,
	.tc_poll_pps		= NULL,
	.tc_counter_mask	= ~0,
	.tc_frequency		= 0,
	.tc_name		= "rumpclk",
	.tc_quality		= 0,
};
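
/*
 * The timecounter above simply counts hardclock ticks: its frequency
 * is set to hz in softint_init(), so each increment of "ticks"
 * advances kernel time by 1/hz.
 */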

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec clockbase, clockup;
	struct timespec thetick, curtime;
	struct rumpuser_cv *clockcv;
	struct rumpuser_mtx *clockmtx;
	uint64_t sec, nsec;
	int error;
	extern int hz;

	memset(&clockup, 0, sizeof(clockup));
	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	/* XXX: dummies */
	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);

	rumpuser_mutex_enter(clockmtx);
	for (;;) {
		callout_hardclock();

		/*
		 * Wait until the next tick: timedwait() returns zero on
		 * wakeups and interruptions, so spin until it reports
		 * that the absolute deadline in curtime has passed.
		 * XXX: what if the clock changes?
		 */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    curtime.tv_sec, curtime.tv_nsec) == 0)
			continue;

		/* XXX: sync with a) virtual clock b) host clock */
		timespecadd(&clockup, &clockbase, &curtime);
		timespecadd(&clockup, &thetick, &clockup);

#if 0
		/* CPU_IS_PRIMARY is MD and hence unreliably correct here */
		if (!CPU_IS_PRIMARY(curcpu()))
			continue;
#else
		if (curcpu()->ci_index != 0)
			continue;
#endif

		if ((++ticks % hz) == 0) {
			cv_broadcast(&lbolt);
		}
		tc_ticktock();
	}
}
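
/*
 * Every virtual CPU runs its own copy of the loop above and calls
 * callout_hardclock() on each tick, but the once-per-tick bookkeeping
 * -- incrementing "ticks", broadcasting lbolt and feeding the
 * timecounter via tc_ticktock() -- is done only by virtual CPU 0.
 */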

/*
 * Soft interrupt execution thread.  This thread is pinned to the
 * same CPU that scheduled the interrupt, so we do not need to
 * lock against si_lvl.
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!LIST_EMPTY(&si_lvl->si_pending)) {
			sip = LIST_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			LIST_REMOVE(sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				softint_disestablish(si);
				continue;
			}
		} else {
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}

void
rump_intr_init(int numcpu)
{

	cv_init(&lbolt, "oh kath ra");
	ncpu_final = numcpu;
}

void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	/* XXX */
	if (ci->ci_index == 0) {
		rumptc.tc_frequency = hz;
		tc_init(&rumptc);
	}

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		LIST_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* softint might run on different physical CPU */
	membar_sync();

	for (i = 0; i < SOFTINT_COUNT; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR, ci,
		    sithread, (void *)(uintptr_t)i,
		    NULL, "rsi%d/%d", ci->ci_index, i);
		if (rv)
			panic("softint thread creation failed: %d", rv);
	}

	rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index);
	if (rv)
		panic("clock thread creation failed: %d", rv);
}

void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;
	struct softint_percpu *sip;
	int i;

	si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = flags & SOFTINT_LVLMASK;
	KASSERT(si->si_level < SOFTINT_COUNT);
	si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
	    M_TEMP, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpu_final; i++) {
		sip = &si->si_entry[i];
		sip->sip_parent = si;
	}

	return si;
}
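
/*
 * Note the allocation scheme above: one softint_percpu for each
 * possible virtual CPU, all pointing back at the shared softint.
 * This lets the same cookie be scheduled independently on every CPU,
 * since softint_schedule() picks the entry for the current CPU.
 */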

/*
 * Schedule a soft interrupt.  There are two cases: if we are running
 * with thread support enabled, defer execution; otherwise execute
 * in place.
 */

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct softint_percpu *sip = &si->si_entry[curcpu()->ci_index];
	struct cpu_data *cd = &curcpu()->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!sip->sip_onlist) {
			LIST_INSERT_HEAD(&si_lvl[si->si_level].si_pending,
			    sip, sip_entries);
			sip->sip_onlist = true;
		}
	}
}

/*
 * XXX: flimsy disestablish: we should wait for softints to finish,
 * but instead just defer the free to the softint thread if an
 * instance is still pending.
 */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;
	int i;

	for (i = 0; i < ncpu_final; i++) {
		struct softint_percpu *sip;

		sip = &si->si_entry[i];
		if (sip->sip_onlist) {
			si->si_flags |= SI_KILLME;
			return;
		}
	}
	free(si->si_entry, M_TEMP);
	free(si, M_TEMP);
}
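
/*
 * A minimal sketch of the lifecycle implemented above.  This is
 * illustration only, not code from this file; "frob_intr" and
 * "frob_sc" are made-up names:
 *
 *	void *cookie;
 *
 *	cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    frob_intr, &frob_sc);
 *	softint_schedule(cookie);	<- frob_intr(&frob_sc) runs later
 *	softint_disestablish(cookie);
 */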

void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!LIST_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}