/*	$NetBSD: intr.c,v 1.46.4.1 2015/04/06 15:18:30 skrll Exp $	*/

/*
 * Copyright (c) 2008-2010, 2015 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.46.4.1 2015/04/06 15:18:30 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */
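
/*
 * Overview (editorial note, inferred from the code below): the
 * simulator consists of one clock thread per virtual CPU (doclock),
 * one handler thread per (CPU, softint level) pair (sithread), and a
 * single "bouncer" thread (sithread_cpu_bouncer) which migrates
 * between CPUs to schedule softints on a CPU other than the current
 * one.
 */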

#define SI_MPSAFE 0x01
#define SI_KILLME 0x02

struct softint_percpu;
struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

struct softint_percpu {
	struct softint *sip_parent;
	bool sip_onlist;
	bool sip_onlist_cpu;

	TAILQ_ENTRY(softint_percpu) sip_entries;	/* scheduled */
	TAILQ_ENTRY(softint_percpu) sip_entries_cpu;	/* to be scheduled */
};

struct softint_lev {
	struct rumpuser_cv *si_cv;
	TAILQ_HEAD(, softint_percpu) si_pending;
};

static TAILQ_HEAD(, softint_percpu) sicpupending \
    = TAILQ_HEAD_INITIALIZER(sicpupending);
static struct rumpuser_mtx *sicpumtx;
static struct rumpuser_cv *sicpucv;

kcondvar_t lbolt; /* Oh Kath Ra */

static u_int ticks;
static int ncpu_final;

static u_int
rumptc_get(struct timecounter *tc)
{

	KASSERT(rump_threads);
	return ticks;
}

static struct timecounter rumptc = {
	.tc_get_timecount	= rumptc_get,
	.tc_poll_pps		= NULL,
	.tc_counter_mask	= ~0,
	.tc_frequency		= 0,
	.tc_name		= "rumpclk",
	.tc_quality		= 0,
};
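
/*
 * Editorial note: the timecounter value is simply the tick count, and
 * tc_frequency is set to hz in softint_init(), so one timecounter
 * unit corresponds to one virtual clock interrupt.
 */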

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec thetick, curclock;
	int64_t sec;
	long nsec;
	int error;
	int cpuindx = curcpu()->ci_index;
	extern int hz;

	error = rumpuser_clock_gettime(RUMPUSER_CLOCK_ABSMONO, &sec, &nsec);
	if (error)
		panic("clock: cannot get monotonic time");

	curclock.tv_sec = sec;
	curclock.tv_nsec = nsec;
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	for (;;) {
		callout_hardclock();

		error = rumpuser_clock_sleep(RUMPUSER_CLOCK_ABSMONO,
		    curclock.tv_sec, curclock.tv_nsec);
		KASSERT(!error);
		timespecadd(&curclock, &thetick, &curclock);

		if (cpuindx != 0)
			continue;

		if ((++ticks % hz) == 0) {
			cv_broadcast(&lbolt);
		}
		tc_ticktock();
	}
}
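
/*
 * Worked example (editorial, assuming the common NetBSD default of
 * hz = 100): thetick.tv_nsec = 1000000000/100 = 10000000ns, i.e. the
 * virtual clock interrupt fires every 10ms.  Only the thread on
 * virtual CPU 0 advances ticks and the timecounter, and it broadcasts
 * lbolt once per second (every hz ticks).
 */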

/*
 * Soft interrupt execution thread.  This thread is pinned to the
 * same CPU that scheduled the interrupt, so no locking of si_lvl
 * is needed.
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!TAILQ_EMPTY(&si_lvl->si_pending)) {
			sip = TAILQ_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			TAILQ_REMOVE(&si_lvl->si_pending, sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				softint_disestablish(si);
				continue;
			}
		} else {
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}

/*
 * Helper for softint_schedule_cpu()
 */
static void
sithread_cpu_bouncer(void *arg)
{
	struct lwp *me;

	me = curlwp;
	me->l_pflag |= LP_BOUND;

	rump_unschedule();
	for (;;) {
		struct softint_percpu *sip;
		struct softint *si;
		struct cpu_info *ci;
		unsigned int cidx;

		rumpuser_mutex_enter_nowrap(sicpumtx);
		while (TAILQ_EMPTY(&sicpupending)) {
			rumpuser_cv_wait_nowrap(sicpucv, sicpumtx);
		}
		sip = TAILQ_FIRST(&sicpupending);
		TAILQ_REMOVE(&sicpupending, sip, sip_entries_cpu);
		sip->sip_onlist_cpu = false;
		rumpuser_mutex_exit(sicpumtx);

		/*
		 * ok, now figure out which cpu we need the softint to
		 * be handled on
		 */
		si = sip->sip_parent;
		cidx = sip - si->si_entry;
		ci = cpu_lookup(cidx);
		me->l_target_cpu = ci;

		/* schedule ourselves there, and then schedule the softint */
		rump_schedule();
		KASSERT(curcpu() == ci);
		softint_schedule(si);
		rump_unschedule();
	}
	panic("sithread_cpu_bouncer unreasonable");
}
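
/*
 * Editorial note: the target CPU index is recovered by pointer
 * arithmetic (sip - si->si_entry), since si_entry is a per-CPU array
 * indexed by ci_index.  The bouncer marks itself LP_BOUND and then
 * migrates itself by setting l_target_cpu before rump_schedule(),
 * which places it on the target CPU, where scheduling the softint
 * reduces to a plain softint_schedule() call.
 */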

static kmutex_t sithr_emtx;
static unsigned int sithr_est;
static int sithr_canest;

/*
 * Create softint handler threads when the softint for each respective
 * level is established for the first time.  Most rump kernels do not
 * use even half of the softint levels, so creating the threads on
 * demand saves bootstrap time and memory.  Note, though, that this
 * routine may be called before it is possible to call
 * kthread_create().  Creation of the threads for such softints
 * (SOFTINT_CLOCK, as of this writing) is deferred until softint_init()
 * is called for the main CPU.
 */
static void
sithread_establish(int level)
{
	int docreate, rv;
	int lvlbit = 1<<level;
	int i;

	KASSERT((level & ~SOFTINT_LVLMASK) == 0);
	if (__predict_true(sithr_est & lvlbit))
		return;

	mutex_enter(&sithr_emtx);
	docreate = (sithr_est & lvlbit) == 0 && sithr_canest;
	sithr_est |= lvlbit;
	mutex_exit(&sithr_emtx);

	if (docreate) {
		for (i = 0; i < ncpu_final; i++) {
			if ((rv = kthread_create(PRI_NONE,
			    KTHREAD_MPSAFE | KTHREAD_INTR,
			    cpu_lookup(i), sithread, (void *)(uintptr_t)level,
			    NULL, "rsi%d/%d", i, level)) != 0)
				panic("softint thread create failed: %d", rv);
		}
	}
}

void
rump_intr_init(int numcpu)
{

	cv_init(&lbolt, "oh kath ra");
	mutex_init(&sithr_emtx, MUTEX_DEFAULT, IPL_NONE);
	ncpu_final = numcpu;
}

void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		TAILQ_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* overloaded global init ... */
	/* XXX: should be done the last time we are called */
	if (ci->ci_index == 0) {
		int sithr_swap;

		rumptc.tc_frequency = hz;
		tc_init(&rumptc);

		/* create deferred softint threads */
		mutex_enter(&sithr_emtx);
		sithr_swap = sithr_est;
		sithr_est = 0;
		sithr_canest = 1;
		mutex_exit(&sithr_emtx);
		for (i = 0; i < SOFTINT_COUNT; i++) {
			if (sithr_swap & (1<<i))
				sithread_establish(i);
		}
	}

	/* well, not really a "soft" interrupt ... */
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index)) != 0)
		panic("clock thread creation failed: %d", rv);

	/* not one either, but at least a softint helper */
	rumpuser_mutex_init(&sicpumtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&sicpucv);
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    NULL, sithread_cpu_bouncer, NULL, NULL, "sipbnc")) != 0)
		panic("softint cpu bouncer creation failed: %d", rv);
}

void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;
	struct softint_percpu *sip;
	int level = flags & SOFTINT_LVLMASK;
	int i;

	si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = level;
	KASSERT(si->si_level < SOFTINT_COUNT);
	si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
	    M_TEMP, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpu_final; i++) {
		sip = &si->si_entry[i];
		sip->sip_parent = si;
	}
	sithread_establish(level);

	return si;
}
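
/*
 * Example usage (illustrative sketch; mydriver_intr and sc are
 * made-up driver names, the flags are the standard <sys/intr.h> ones):
 *
 *	void *cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    mydriver_intr, sc);
 *
 *	softint_schedule(cookie);
 *		-> mydriver_intr(sc) later runs in a softint thread,
 *		   without the kernel lock since SOFTINT_MPSAFE was given
 *
 *	softint_disestablish(cookie);
 */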

static struct softint_percpu *
sitosip(struct softint *si, struct cpu_info *ci)
{

	return &si->si_entry[ci->ci_index];
}

/*
 * Soft interrupt scheduling offers two choices.  If we are running
 * with thread support enabled, defer execution; otherwise execute
 * the handler in place.
 */

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct cpu_info *ci = curcpu();
	struct softint_percpu *sip = sitosip(si, ci);
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!sip->sip_onlist) {
			TAILQ_INSERT_TAIL(&si_lvl[si->si_level].si_pending,
			    sip, sip_entries);
			sip->sip_onlist = true;
		}
	}
}

/*
 * Like softint_schedule(), except schedule the softint to be handled
 * on the core designated by ci_tgt instead of the core the call is
 * made on.
 *
 * Unlike with softint_schedule(), performance is not critical here
 * (unless ci_tgt == curcpu): high-performance rump kernel I/O stacks
 * should arrange for the data to already be on the right core at the
 * driver layer.
 */
void
softint_schedule_cpu(void *arg, struct cpu_info *ci_tgt)
{
	struct softint *si = arg;
	struct cpu_info *ci_cur = curcpu();
	struct softint_percpu *sip;

	KASSERT(rump_threads);

	/* preferred case (which can be optimized some day) */
	if (ci_cur == ci_tgt) {
		softint_schedule(si);
		return;
	}

	/*
	 * no?  then it's softint turtles all the way down
	 */

	sip = sitosip(si, ci_tgt);
	rumpuser_mutex_enter_nowrap(sicpumtx);
	if (sip->sip_onlist_cpu) {
		rumpuser_mutex_exit(sicpumtx);
		return;
	}
	TAILQ_INSERT_TAIL(&sicpupending, sip, sip_entries_cpu);
	sip->sip_onlist_cpu = true;
	rumpuser_cv_signal(sicpucv);
	rumpuser_mutex_exit(sicpumtx);
}
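
/*
 * Example (illustrative sketch; sc_si and sc_qci are hypothetical
 * driver fields): a driver wanting its handler to run on the CPU
 * owning a particular queue could do
 *
 *	softint_schedule_cpu(sc->sc_si, sc->sc_qci);
 *
 * If sc_qci is the current CPU this falls through to
 * softint_schedule(); otherwise the request is queued on sicpupending
 * for the bouncer thread, which migrates to sc_qci and schedules the
 * softint there.
 */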

/*
 * flimsy disestablish: should wait for softints to finish.
 */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;
	int i;

	for (i = 0; i < ncpu_final; i++) {
		struct softint_percpu *sip;

		sip = &si->si_entry[i];
		if (sip->sip_onlist) {
			si->si_flags |= SI_KILLME;
			return;
		}
	}
	free(si->si_entry, M_TEMP);
	free(si, M_TEMP);
}
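
/*
 * Editorial note on the SI_KILLME handshake: if the softint is still
 * pending on some CPU, the free is deferred by setting SI_KILLME and
 * returning.  The softint thread that later dequeues the entry sees
 * the flag in sithread() and calls softint_disestablish() again, at
 * which point nothing is on a list and the memory is released.
 */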

void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!TAILQ_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}