/*	$NetBSD: intr.c,v 1.41 2013/11/11 23:06:40 pooka Exp $	*/

/*
 * Copyright (c) 2008-2010 Antti Kantee. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.41 2013/11/11 23:06:40 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator. It executes hardclock() and softintrs.
 */
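
/*
 * Overview of the moving parts below (editor's sketch, using only the
 * names defined in this file):
 *
 *	cookie = softint_establish(level | flags, func, arg);
 *		allocates a struct softint plus one softint_percpu per
 *		CPU and makes sure handler threads exist for "level"
 *	softint_schedule(cookie);
 *		puts the current CPU's softint_percpu on that CPU's
 *		si_pending list for the level (or runs func(arg) in
 *		place if rump_threads is off)
 *	rump_softint_run(ci);
 *		signals the per-level condvars so that sithread()
 *		dequeues the entry and calls func(arg)
 */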

#define SI_MPSAFE 0x01
#define SI_KILLME 0x02

struct softint_percpu;
struct softint {
        void (*si_func)(void *);
        void *si_arg;
        int si_flags;
        int si_level;

        struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

struct softint_percpu {
        struct softint *sip_parent;
        bool sip_onlist;

        LIST_ENTRY(softint_percpu) sip_entries;
};

struct softint_lev {
        struct rumpuser_cv *si_cv;
        LIST_HEAD(, softint_percpu) si_pending;
};
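
/*
 * Relationship of the structs above (editor's note): each established
 * softint owns an si_entry array with one softint_percpu per CPU, so
 * the same handler can be pending on several CPUs simultaneously.
 * Each CPU's cpu_softcpu in turn points to SOFTINT_COUNT softint_lev
 * slots, one per level: a list of pending softint_percpu entries plus
 * a condvar for waking that level's handler thread.
 */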

kcondvar_t lbolt; /* Oh Kath Ra */

static u_int ticks;
static int ncpu_final;

static u_int
rumptc_get(struct timecounter *tc)
{

        KASSERT(rump_threads);
        return ticks;
}

static struct timecounter rumptc = {
        .tc_get_timecount = rumptc_get,
        .tc_poll_pps = NULL,
        .tc_counter_mask = ~0,
        .tc_frequency = 0,
        .tc_name = "rumpclk",
        .tc_quality = 0,
};

/*
 * clock "interrupt"
 */
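/*
 * Editor's note on the arithmetic below: thetick is one clock tick,
 * i.e. 1000000000/hz nanoseconds (10ms for e.g. hz=100). The thread
 * sleeps to absolute monotonic deadlines that advance by thetick each
 * round, so scheduling latency does not accumulate as clock drift.
 * lbolt is broadcast every hz ticks, i.e. roughly once per second.
 */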
static void
doclock(void *noarg)
{
        struct timespec thetick, curclock;
        int64_t sec;
        long nsec;
        int error;
        extern int hz;

        error = rumpuser_clock_gettime(RUMPUSER_CLOCK_ABSMONO, &sec, &nsec);
        if (error)
                panic("clock: cannot get monotonic time");

        curclock.tv_sec = sec;
        curclock.tv_nsec = nsec;
        thetick.tv_sec = 0;
        thetick.tv_nsec = 1000000000/hz;

        for (;;) {
                callout_hardclock();

                error = rumpuser_clock_sleep(RUMPUSER_CLOCK_ABSMONO,
                    curclock.tv_sec, curclock.tv_nsec);
                KASSERT(!error);
                timespecadd(&curclock, &thetick, &curclock);

#if 0
                /* CPU_IS_PRIMARY is MD and hence unreliably correct here */
                if (!CPU_IS_PRIMARY(curcpu()))
                        continue;
#else
                if (curcpu()->ci_index != 0)
                        continue;
#endif

                if ((++ticks % hz) == 0) {
                        cv_broadcast(&lbolt);
                }
                tc_ticktock();
        }
}

/*
 * Soft interrupt execution thread. This thread is pinned to the
 * same CPU that scheduled the interrupt, so we don't need any
 * locking for si_lvl.
 */
static void
sithread(void *arg)
{
        struct softint_percpu *sip;
        struct softint *si;
        void (*func)(void *) = NULL;
        void *funarg;
        bool mpsafe;
        int mylevel = (uintptr_t)arg;
        struct softint_lev *si_lvlp, *si_lvl;
        struct cpu_data *cd = &curcpu()->ci_data;

        si_lvlp = cd->cpu_softcpu;
        si_lvl = &si_lvlp[mylevel];

        for (;;) {
                if (!LIST_EMPTY(&si_lvl->si_pending)) {
                        /* dequeue the first pending softint for this level */
                        sip = LIST_FIRST(&si_lvl->si_pending);
                        si = sip->sip_parent;

                        func = si->si_func;
                        funarg = si->si_arg;
                        mpsafe = si->si_flags & SI_MPSAFE;

                        sip->sip_onlist = false;
                        LIST_REMOVE(sip, sip_entries);
                        if (si->si_flags & SI_KILLME) {
                                /* deferred disestablish: free instead of run */
                                softint_disestablish(si);
                                continue;
                        }
                } else {
                        /* nothing pending: sleep until we are signalled */
                        rump_schedlock_cv_wait(si_lvl->si_cv);
                        continue;
                }

                if (!mpsafe)
                        KERNEL_LOCK(1, curlwp);
                func(funarg);
                if (!mpsafe)
                        KERNEL_UNLOCK_ONE(curlwp);
        }

        panic("sithread unreachable");
}

static kmutex_t sithr_emtx;
static unsigned int sithr_est;
static int sithr_canest;

/*
 * Create softint handler threads when the softint for each respective
 * level is established for the first time. Most rump kernels don't
 * need even half of the softint levels, so creating threads on demand
 * saves bootstrap time and memory. Note, though, that this routine
 * may be called before it's possible to call kthread_create().
 * Creation of those softints (SOFTINT_CLOCK, as of writing this) is
 * deferred until softint_init() is called for the main CPU.
 */
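/*
 * For example (editor's note): softint_establish(SOFTINT_CLOCK, ...)
 * during early bootstrap only records the level bit in sithr_est;
 * once kthread_create() works, softint_init() for the main CPU
 * replays the recorded bits through sithread_establish(), and the
 * "rsi%d/%d" threads are created then.
 */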
static void
sithread_establish(int level)
{
        int docreate, rv;
        int lvlbit = 1<<level;
        int i;

        KASSERT((level & ~SOFTINT_LVLMASK) == 0);
        if (__predict_true(sithr_est & lvlbit))
                return;

        mutex_enter(&sithr_emtx);
        docreate = (sithr_est & lvlbit) == 0 && sithr_canest;
        sithr_est |= lvlbit;
        mutex_exit(&sithr_emtx);

        if (docreate) {
                for (i = 0; i < ncpu_final; i++) {
                        if ((rv = kthread_create(PRI_NONE,
                            KTHREAD_MPSAFE | KTHREAD_INTR,
                            cpu_lookup(i), sithread, (void *)(uintptr_t)level,
                            NULL, "rsi%d/%d", i, level)) != 0)
                                panic("softint thread create failed: %d", rv);
                }
        }
}

void
rump_intr_init(int numcpu)
{

        cv_init(&lbolt, "oh kath ra");
        mutex_init(&sithr_emtx, MUTEX_DEFAULT, IPL_NONE);
        ncpu_final = numcpu;
}

void
softint_init(struct cpu_info *ci)
{
        struct cpu_data *cd = &ci->ci_data;
        struct softint_lev *slev;
        int rv, i;

        if (!rump_threads)
                return;

        /* overloaded global init ... */
        if (ci->ci_index == 0) {
                int sithr_swap;

                rumptc.tc_frequency = hz;
                tc_init(&rumptc);

                /* create deferred softint threads */
                mutex_enter(&sithr_emtx);
                sithr_swap = sithr_est;
                sithr_est = 0;
                sithr_canest = 1;
                mutex_exit(&sithr_emtx);
                for (i = 0; i < SOFTINT_COUNT; i++) {
                        if (sithr_swap & (1<<i))
                                sithread_establish(i);
                }
        }

        slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
        for (i = 0; i < SOFTINT_COUNT; i++) {
                rumpuser_cv_init(&slev[i].si_cv);
                LIST_INIT(&slev[i].si_pending);
        }
        cd->cpu_softcpu = slev;

        /* well, not really a "soft" interrupt ... */
        if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
            ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index)) != 0)
                panic("clock thread creation failed: %d", rv);
}

void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
        struct softint *si;
        struct softint_percpu *sip;
        int level = flags & SOFTINT_LVLMASK;
        int i;

        si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
        si->si_func = func;
        si->si_arg = arg;
        si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
        si->si_level = level;
        KASSERT(si->si_level < SOFTINT_COUNT);
        si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
            M_TEMP, M_WAITOK | M_ZERO);
        for (i = 0; i < ncpu_final; i++) {
                sip = &si->si_entry[i];
                sip->sip_parent = si;
        }
        sithread_establish(level);

        return si;
}
/*
 * Scheduling a soft interrupt involves two cases: if we are running
 * with thread support enabled, defer execution to the handler thread;
 * otherwise execute the handler in place.
 */
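/*
 * Example usage (editor's illustration; "xfer_done" and "sc" are
 * hypothetical caller names):
 *
 *	void *cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    xfer_done, sc);
 *	...
 *	softint_schedule(cookie);
 *
 * With rump_threads enabled, xfer_done(sc) later runs from this CPU's
 * handler thread; without it, softint_schedule() calls it in place.
 */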

void
softint_schedule(void *arg)
{
        struct softint *si = arg;
        struct softint_percpu *sip = &si->si_entry[curcpu()->ci_index];
        struct cpu_data *cd = &curcpu()->ci_data;
        struct softint_lev *si_lvl = cd->cpu_softcpu;

        if (!rump_threads) {
                si->si_func(si->si_arg);
        } else {
                if (!sip->sip_onlist) {
                        LIST_INSERT_HEAD(&si_lvl[si->si_level].si_pending,
                            sip, sip_entries);
                        sip->sip_onlist = true;
                }
        }
}
/*
 * Flimsy disestablish: we should wait for softints to finish, but
 * instead, if an instance is still pending on some CPU, we just mark
 * the softint SI_KILLME and let sithread() free it once it dequeues
 * the pending instance.
 */
void
softint_disestablish(void *cook)
{
        struct softint *si = cook;
        int i;

        for (i = 0; i < ncpu_final; i++) {
                struct softint_percpu *sip;

                sip = &si->si_entry[i];
                if (sip->sip_onlist) {
                        si->si_flags |= SI_KILLME;
                        return;
                }
        }
        free(si->si_entry, M_TEMP);
        free(si, M_TEMP);
}

void
rump_softint_run(struct cpu_info *ci)
{
        struct cpu_data *cd = &ci->ci_data;
        struct softint_lev *si_lvl = cd->cpu_softcpu;
        int i;

        if (!rump_threads)
                return;

        for (i = 0; i < SOFTINT_COUNT; i++) {
                if (!LIST_EMPTY(&si_lvl[i].si_pending))
                        rumpuser_cv_signal(si_lvl[i].si_cv);
        }
}

bool
cpu_intr_p(void)
{

        return false;
}

bool
cpu_softintr_p(void)
{

        return curlwp->l_pflag & LP_INTR;
}