/*	$NetBSD: intr.c,v 1.50 2015/04/21 16:18:50 pooka Exp $	*/

/*
 * Copyright (c) 2008-2010, 2015 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.50 2015/04/21 16:18:50 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */
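/*
 * A rough usage sketch of the interface implemented below, as seen
 * from a hypothetical rump kernel component ("mydrv_intr" and "sc"
 * are illustrative names, not part of this file):
 *
 *	void *cookie;
 *
 *	cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    mydrv_intr, sc);
 *	softint_schedule(cookie);		handled on the current CPU
 *	softint_schedule_cpu(cookie, ci);	handled on CPU "ci"
 *	softint_disestablish(cookie);
 */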

#define	SI_MPSAFE	0x01
#define	SI_KILLME	0x02

struct softint_percpu;
struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	struct softint_percpu *si_entry;	/* [0,ncpu-1] */
};

struct softint_percpu {
	struct softint *sip_parent;
	bool sip_onlist;
	bool sip_onlist_cpu;

	TAILQ_ENTRY(softint_percpu) sip_entries;	/* scheduled */
	TAILQ_ENTRY(softint_percpu) sip_entries_cpu;	/* to be scheduled */
};

struct softint_lev {
	struct rumpuser_cv *si_cv;
	TAILQ_HEAD(, softint_percpu) si_pending;
};

static TAILQ_HEAD(, softint_percpu) sicpupending \
    = TAILQ_HEAD_INITIALIZER(sicpupending);
static struct rumpuser_mtx *sicpumtx;
static struct rumpuser_cv *sicpucv;

kcondvar_t lbolt; /* Oh Kath Ra */

static int ncpu_final;

static u_int
rumptc_get(struct timecounter *tc)
{

	KASSERT(rump_threads);
	return (u_int)hardclock_ticks;
}

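/*
 * The rump kernel timecounter ticks at hz and simply reads
 * hardclock_ticks via rumptc_get() above, i.e. time only advances
 * when the clock thread (doclock() below) increments the tick count.
 * tc_frequency is left as 0 here; softint_init() fills it in with hz
 * before registering the timecounter with tc_init().
 */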
static struct timecounter rumptc = {
	.tc_get_timecount	= rumptc_get,
	.tc_poll_pps		= NULL,
	.tc_counter_mask	= ~0,
	.tc_frequency		= 0,
	.tc_name		= "rumpclk",
	.tc_quality		= 0,
};

/*
 * clock "interrupt"
 */
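/*
 * A note on the tick arithmetic below, with hz = 100 as an example:
 * thetick is 1000000000/hz = 10000000ns (10ms).  Each loop iteration
 * sleeps until the absolute time curclock and then advances curclock
 * by exactly one tick, so handler runtime and sleep overshoot do not
 * accumulate as drift the way repeated relative 10ms sleeps would.
 */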
static void
doclock(void *noarg)
{
	struct timespec thetick, curclock;
	int64_t sec;
	long nsec;
	int error;
	int cpuindx = curcpu()->ci_index;

	error = rumpuser_clock_gettime(RUMPUSER_CLOCK_ABSMONO, &sec, &nsec);
	if (error)
		panic("clock: cannot get monotonic time");

	curclock.tv_sec = sec;
	curclock.tv_nsec = nsec;
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	for (;;) {
		callout_hardclock();

		error = rumpuser_clock_sleep(RUMPUSER_CLOCK_ABSMONO,
		    curclock.tv_sec, curclock.tv_nsec);
		KASSERT(!error);
		timespecadd(&curclock, &thetick, &curclock);

		if (cpuindx != 0)
			continue;

		if ((++hardclock_ticks % hz) == 0) {
			cv_broadcast(&lbolt);
		}
		tc_ticktock();
	}
}

/*
 * Soft interrupt execution thread.  This thread is pinned to the
 * same CPU that scheduled the interrupt, so we do not need to
 * lock against si_lvl.
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!TAILQ_EMPTY(&si_lvl->si_pending)) {
			sip = TAILQ_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			TAILQ_REMOVE(&si_lvl->si_pending, sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				softint_disestablish(si);
				continue;
			}
		} else {
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}

/*
 * Helper for softint_schedule_cpu()
 */
static void
sithread_cpu_bouncer(void *arg)
{
	struct lwp *me;

	me = curlwp;
	me->l_pflag |= LP_BOUND;

	rump_unschedule();
	for (;;) {
		struct softint_percpu *sip;
		struct softint *si;
		struct cpu_info *ci;
		unsigned int cidx;

		rumpuser_mutex_enter_nowrap(sicpumtx);
		while (TAILQ_EMPTY(&sicpupending)) {
			rumpuser_cv_wait_nowrap(sicpucv, sicpumtx);
		}
		sip = TAILQ_FIRST(&sicpupending);
		TAILQ_REMOVE(&sicpupending, sip, sip_entries_cpu);
		sip->sip_onlist_cpu = false;
		rumpuser_mutex_exit(sicpumtx);

		/*
		 * ok, now figure out which cpu we need the softint to
		 * be handled on
		 */
		si = sip->sip_parent;
		cidx = sip - si->si_entry;
		ci = cpu_lookup(cidx);
		me->l_target_cpu = ci;

		/* schedule ourselves there, and then schedule the softint */
		rump_schedule();
		KASSERT(curcpu() == ci);
		softint_schedule(si);
		rump_unschedule();
	}
	panic("sithread_cpu_bouncer unreasonable");
}

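/*
 * Bookkeeping for on-demand softint thread creation: sithr_est is a
 * bitmask with bit "level" set once that level has been established
 * (or queued for deferred creation), and sithr_canest indicates
 * whether it is already possible to call kthread_create().
 */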
static kmutex_t sithr_emtx;
static unsigned int sithr_est;
static int sithr_canest;

/*
 * Create softint handler threads when the softint for each respective
 * level is established for the first time.  Most rump kernels don't
 * use half of the softint levels, so creating the threads on demand
 * saves bootstrap time and memory resources.  Note, though, that this
 * routine may be called before it's possible to call kthread_create().
 * Creation of those softints (SOFTINT_CLOCK, as of writing this) will
 * be deferred until softint_init() is called for the main CPU.
 */
static void
sithread_establish(int level)
{
	int docreate, rv;
	int lvlbit = 1<<level;
	int i;

	KASSERT((level & ~SOFTINT_LVLMASK) == 0);
	if (__predict_true(sithr_est & lvlbit))
		return;

	mutex_enter(&sithr_emtx);
	docreate = (sithr_est & lvlbit) == 0 && sithr_canest;
	sithr_est |= lvlbit;
	mutex_exit(&sithr_emtx);

	if (docreate) {
		for (i = 0; i < ncpu_final; i++) {
			if ((rv = kthread_create(PRI_NONE,
			    KTHREAD_MPSAFE | KTHREAD_INTR,
			    cpu_lookup(i), sithread, (void *)(uintptr_t)level,
			    NULL, "rsi%d/%d", i, level)) != 0)
				panic("softint thread create failed: %d", rv);
		}
	}
}

void
rump_intr_init(int numcpu)
{

	cv_init(&lbolt, "oh kath ra");
	mutex_init(&sithr_emtx, MUTEX_DEFAULT, IPL_NONE);
	ncpu_final = numcpu;
}

void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		TAILQ_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* overloaded global init ... */
	/* XXX: should be done the last time we are called */
	if (ci->ci_index == 0) {
		int sithr_swap;

		rumptc.tc_frequency = hz;
		tc_init(&rumptc);

		/* create deferred softint threads */
		mutex_enter(&sithr_emtx);
		sithr_swap = sithr_est;
		sithr_est = 0;
		sithr_canest = 1;
		mutex_exit(&sithr_emtx);
		for (i = 0; i < SOFTINT_COUNT; i++) {
			if (sithr_swap & (1<<i))
				sithread_establish(i);
		}
	}

	/* well, not really a "soft" interrupt ... */
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index)) != 0)
		panic("clock thread creation failed: %d", rv);

	/* not one either, but at least a softint helper */
	rumpuser_mutex_init(&sicpumtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&sicpucv);
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    NULL, sithread_cpu_bouncer, NULL, NULL, "sipbnc")) != 0)
		panic("softint cpu bouncer creation failed: %d", rv);
}

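/*
 * Establish a softint.  The returned cookie carries one
 * softint_percpu per (final) CPU, indexed by ci_index, so the same
 * cookie can be pending on several CPUs simultaneously.  E.g. in a
 * hypothetical two-CPU rump kernel ("biodone_sih" is an illustrative
 * handler name):
 *
 *	void *cookie = softint_establish(SOFTINT_BIO, biodone_sih, NULL);
 *
 * allocates si_entry[0] and si_entry[1], one for each CPU.
 */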
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;
	struct softint_percpu *sip;
	int level = flags & SOFTINT_LVLMASK;
	int i;

	si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = level;
	KASSERT(si->si_level < SOFTINT_COUNT);
	si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
	    M_TEMP, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpu_final; i++) {
		sip = &si->si_entry[i];
		sip->sip_parent = si;
	}
	sithread_establish(level);

	return si;
}

static struct softint_percpu *
sitosip(struct softint *si, struct cpu_info *ci)
{

	return &si->si_entry[ci->ci_index];
}

/*
 * Soft interrupts offer two modes of operation.  If we are running
 * with thread support enabled, defer execution; otherwise execute
 * the handler in place.
 */
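/*
 * Note that in the threaded case scheduling is coalescing per CPU:
 * if the softint is already on this CPU's pending list (sip_onlist),
 * a further softint_schedule() is a no-op, so multiple schedules
 * before the handler thread dequeues the entry result in a single
 * invocation of si_func.
 */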

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct cpu_info *ci = curcpu();
	struct softint_percpu *sip = sitosip(si, ci);
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!sip->sip_onlist) {
			TAILQ_INSERT_TAIL(&si_lvl[si->si_level].si_pending,
			    sip, sip_entries);
			sip->sip_onlist = true;
		}
	}
}

/*
 * Like softint_schedule(), except schedule the softint to be handled
 * on the core designated by ci_tgt instead of the core the call is
 * made on.
 *
 * Unlike with softint_schedule(), performance is not critical here
 * (unless ci_tgt == curcpu): high-performance rump kernel I/O stacks
 * should arrange for the data to already be on the right core at the
 * driver layer.
 */
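/*
 * Schematically, the cross-CPU case bounces as follows (assuming the
 * caller runs on cpu0 and targets cpu1):
 *
 *	softint_schedule_cpu(si, cpu1)
 *	    -> enqueue si->si_entry[1] on sicpupending, signal sicpucv
 *	sithread_cpu_bouncer: migrates itself to cpu1, then
 *	    -> softint_schedule(si)	(now curcpu() == cpu1)
 *	sithread on cpu1
 *	    -> si_func(si_arg)
 */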
void
softint_schedule_cpu(void *arg, struct cpu_info *ci_tgt)
{
	struct softint *si = arg;
	struct cpu_info *ci_cur = curcpu();
	struct softint_percpu *sip;

	KASSERT(rump_threads);

	/* preferred case (which can be optimized some day) */
	if (ci_cur == ci_tgt) {
		softint_schedule(si);
		return;
	}

	/*
	 * no?  then it's softint turtles all the way down
	 */

	sip = sitosip(si, ci_tgt);
	rumpuser_mutex_enter_nowrap(sicpumtx);
	if (sip->sip_onlist_cpu) {
		rumpuser_mutex_exit(sicpumtx);
		return;
	}
	TAILQ_INSERT_TAIL(&sicpupending, sip, sip_entries_cpu);
	sip->sip_onlist_cpu = true;
	rumpuser_cv_signal(sicpucv);
	rumpuser_mutex_exit(sicpumtx);
}

/*
 * flimsy disestablish: should wait for softints to finish.
 */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;
	int i;

	for (i = 0; i < ncpu_final; i++) {
		struct softint_percpu *sip;

		sip = &si->si_entry[i];
		if (sip->sip_onlist) {
			si->si_flags |= SI_KILLME;
			return;
		}
	}
	free(si->si_entry, M_TEMP);
	free(si, M_TEMP);
}

void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!TAILQ_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}