/*	$NetBSD: intr.c,v 1.51 2015/04/22 16:49:42 pooka Exp $	*/

/*
 * Copyright (c) 2008-2010, 2015 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.51 2015/04/22 16:49:42 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */

#define SI_MPSAFE 0x01
#define SI_KILLME 0x02

struct softint_percpu;
struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

struct softint_percpu {
	struct softint *sip_parent;
	bool sip_onlist;
	bool sip_onlist_cpu;

	TAILQ_ENTRY(softint_percpu) sip_entries;	/* scheduled */
	TAILQ_ENTRY(softint_percpu) sip_entries_cpu;	/* to be scheduled */
};

struct softint_lev {
	struct rumpuser_cv *si_cv;
	TAILQ_HEAD(, softint_percpu) si_pending;
};
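
/*
 * How the structures above fit together: each established softint owns
 * one softint_percpu slot per virtual CPU, so mapping a softint to its
 * per-CPU instance is plain array indexing, and the reverse mapping is
 * pointer arithmetic.  A minimal sketch (illustrative only; "si" and
 * "ci" stand for some softint and cpu_info):
 *
 *	struct softint_percpu *sip = &si->si_entry[ci->ci_index];
 *	unsigned int cidx = sip - si->si_entry;
 *
 * where the second line recovers the CPU index from the slot pointer.
 * This is precisely what sitosip() and sithread_cpu_bouncer() below do.
 */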

static TAILQ_HEAD(, softint_percpu) sicpupending \
    = TAILQ_HEAD_INITIALIZER(sicpupending);
static struct rumpuser_mtx *sicpumtx;
static struct rumpuser_cv *sicpucv;

kcondvar_t lbolt; /* Oh Kath Ra */

static int ncpu_final;

void noclock(void); void noclock(void) {return;}
__strong_alias(sched_schedclock,noclock);
__strong_alias(cpu_initclocks,noclock);
__strong_alias(addupc_intr,noclock);
__strong_alias(sched_tick,noclock);
__strong_alias(setstatclockrate,noclock);

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec thetick, curclock;
	struct clockframe frame;
	int64_t sec;
	long nsec;
	int error;
	struct cpu_info *ci = curcpu();

	error = rumpuser_clock_gettime(RUMPUSER_CLOCK_ABSMONO, &sec, &nsec);
	if (error)
		panic("clock: cannot get monotonic time");

	curclock.tv_sec = sec;
	curclock.tv_nsec = nsec;
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	/* not used, so doesn't matter what we pass in */
	memset(&frame, 0, sizeof(frame));
	/*
	 * lbolt_ticks must persist across loop iterations; declaring it
	 * inside the loop would reset it on every tick, and the
	 * once-per-second lbolt broadcast would never fire for hz > 1.
	 */
	int lbolt_ticks = 0;

	for (;;) {
		hardclock(&frame);
		if (CPU_IS_PRIMARY(ci)) {
			if (++lbolt_ticks >= hz) {
				lbolt_ticks = 0;
				cv_broadcast(&lbolt);
			}
		}

		error = rumpuser_clock_sleep(RUMPUSER_CLOCK_ABSMONO,
		    curclock.tv_sec, curclock.tv_nsec);
		KASSERT(!error);
		timespecadd(&curclock, &thetick, &curclock);
	}
}
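
/*
 * The loop above sleeps to an absolute deadline and then advances the
 * deadline by exactly one tick, so processing delays do not accumulate
 * as clock drift the way a relative 1/hz sleep would.  A minimal
 * userland sketch of the same technique, using POSIX clock_nanosleep()
 * in place of the rumpuser hypercalls (tick() and HZ are hypothetical
 * stand-ins):
 *
 *	struct timespec next;
 *	clock_gettime(CLOCK_MONOTONIC, &next);
 *	for (;;) {
 *		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *		    &next, NULL);
 *		tick();
 *		next.tv_nsec += 1000000000 / HZ;
 *		if (next.tv_nsec >= 1000000000) {
 *			next.tv_nsec -= 1000000000;
 *			next.tv_sec++;
 *		}
 *	}
 *
 * The kernel code above does the same normalization with timespecadd().
 */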

/*
 * Soft interrupt execution thread.  This thread is pinned to the same
 * CPU that scheduled the interrupt, so we do not need to lock against
 * si_lvl.
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!TAILQ_EMPTY(&si_lvl->si_pending)) {
			sip = TAILQ_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			TAILQ_REMOVE(&si_lvl->si_pending, sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				softint_disestablish(si);
				continue;
			}
		} else {
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}

/*
 * Helper for softint_schedule_cpu()
 */
static void
sithread_cpu_bouncer(void *arg)
{
	struct lwp *me;

	me = curlwp;
	me->l_pflag |= LP_BOUND;

	rump_unschedule();
	for (;;) {
		struct softint_percpu *sip;
		struct softint *si;
		struct cpu_info *ci;
		unsigned int cidx;

		rumpuser_mutex_enter_nowrap(sicpumtx);
		while (TAILQ_EMPTY(&sicpupending)) {
			rumpuser_cv_wait_nowrap(sicpucv, sicpumtx);
		}
		sip = TAILQ_FIRST(&sicpupending);
		TAILQ_REMOVE(&sicpupending, sip, sip_entries_cpu);
		sip->sip_onlist_cpu = false;
		rumpuser_mutex_exit(sicpumtx);

		/*
		 * ok, now figure out which cpu we need the softint to
		 * be handled on
		 */
		si = sip->sip_parent;
		cidx = sip - si->si_entry;
		ci = cpu_lookup(cidx);
		me->l_target_cpu = ci;

		/* schedule ourselves there, and then schedule the softint */
		rump_schedule();
		KASSERT(curcpu() == ci);
		softint_schedule(si);
		rump_unschedule();
	}
	panic("sithread_cpu_bouncer unreasonable");
}

static kmutex_t sithr_emtx;
static unsigned int sithr_est;
static int sithr_canest;

/*
 * Create softint handler threads when the softint for each respective
 * level is established for the first time.  Most rump kernels don't
 * need even half of the softint levels, so on-demand creation saves
 * bootstrap time and memory resources.  Note, though, that this
 * routine may be called before it's possible to call kthread_create().
 * Creation of those softints (SOFTINT_CLOCK, as of writing this) will
 * be deferred until softint_init() is called for the main CPU.
 */
static void
sithread_establish(int level)
{
	int docreate, rv;
	int lvlbit = 1<<level;
	int i;

	KASSERT((level & ~SOFTINT_LVLMASK) == 0);
	if (__predict_true(sithr_est & lvlbit))
		return;

	mutex_enter(&sithr_emtx);
	docreate = (sithr_est & lvlbit) == 0 && sithr_canest;
	sithr_est |= lvlbit;
	mutex_exit(&sithr_emtx);

	if (docreate) {
		for (i = 0; i < ncpu_final; i++) {
			if ((rv = kthread_create(PRI_NONE,
			    KTHREAD_MPSAFE | KTHREAD_INTR,
			    cpu_lookup(i), sithread, (void *)(uintptr_t)level,
			    NULL, "rsi%d/%d", i, level)) != 0)
				panic("softint thread create failed: %d", rv);
		}
	}
}
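
/*
 * sithread_establish() uses a check/lock/re-check idiom: the unlocked
 * __predict_true() test is a cheap fast path for the common case where
 * the level is already established, and the create decision is re-made
 * under sithr_emtx so that two concurrent first-time callers cannot
 * both create threads.  The shape of the idiom, as a generic sketch
 * (names are illustrative, not from this file):
 *
 *	if (established)		unlocked fast path
 *		return;
 *	lock();
 *	docreate = !established && allowed;
 *	established = true;
 *	unlock();
 *	if (docreate)
 *		create();		runs at most once per level
 */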

void
rump_intr_init(int numcpu)
{

	cv_init(&lbolt, "oh kath ra");
	mutex_init(&sithr_emtx, MUTEX_DEFAULT, IPL_NONE);
	ncpu_final = numcpu;
}

void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		TAILQ_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* overloaded global init ... */
	/* XXX: should be done the last time we are called */
	if (ci->ci_index == 0) {
		int sithr_swap;

		/* pretend that we have our own for these */
		stathz = 1;
		schedhz = 1;
		profhz = 1;

		initclocks();

		/* create deferred softint threads */
		mutex_enter(&sithr_emtx);
		sithr_swap = sithr_est;
		sithr_est = 0;
		sithr_canest = 1;
		mutex_exit(&sithr_emtx);
		for (i = 0; i < SOFTINT_COUNT; i++) {
			if (sithr_swap & (1<<i))
				sithread_establish(i);
		}

		/*
		 * Not a softint, but a helper for softint_schedule_cpu().
		 * It is global state, so set it up once here instead of
		 * on every per-CPU call to this routine.
		 */
		rumpuser_mutex_init(&sicpumtx, RUMPUSER_MTX_SPIN);
		rumpuser_cv_init(&sicpucv);
		if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
		    NULL, sithread_cpu_bouncer, NULL, NULL, "sipbnc")) != 0)
			panic("softint cpu bouncer creation failed: %d", rv);
	}

	/* well, not really a "soft" interrupt ... */
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index)) != 0)
		panic("clock thread creation failed: %d", rv);
}

void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;
	struct softint_percpu *sip;
	int level = flags & SOFTINT_LVLMASK;
	int i;

	si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = level;
	KASSERT(si->si_level < SOFTINT_COUNT);
	si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
	    M_TEMP, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpu_final; i++) {
		sip = &si->si_entry[i];
		sip->sip_parent = si;
	}
	sithread_establish(level);

	return si;
}
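
/*
 * Usage follows the standard softint(9) pattern: establish once, then
 * schedule as often as needed.  A hypothetical driver (the mydrv_*
 * names are illustrative):
 *
 *	sc->sc_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    mydrv_softintr, sc);
 *	...
 *	softint_schedule(sc->sc_si);	later runs mydrv_softintr(sc)
 *
 * Scheduling while already pending is a no-op: sip_onlist in
 * softint_schedule() below ensures an already-queued softint is not
 * queued twice.
 */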

static struct softint_percpu *
sitosip(struct softint *si, struct cpu_info *ci)
{

	return &si->si_entry[ci->ci_index];
}

/*
 * Soft interrupts present two choices.  If we are running with thread
 * support enabled, defer execution; otherwise execute in place.
 */

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct cpu_info *ci = curcpu();
	struct softint_percpu *sip = sitosip(si, ci);
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!sip->sip_onlist) {
			TAILQ_INSERT_TAIL(&si_lvl[si->si_level].si_pending,
			    sip, sip_entries);
			sip->sip_onlist = true;
		}
	}
}

/*
 * Like softint_schedule(), except the softint is scheduled to be
 * handled on the core designated by ci_tgt instead of the core the
 * call is made on.
 *
 * Unlike softint_schedule(), performance is not critical here
 * (unless ci_tgt == curcpu): high-performance rump kernel I/O stacks
 * should arrange for data to already be on the right core at the
 * driver layer.
 */
void
softint_schedule_cpu(void *arg, struct cpu_info *ci_tgt)
{
	struct softint *si = arg;
	struct cpu_info *ci_cur = curcpu();
	struct softint_percpu *sip;

	KASSERT(rump_threads);

	/* preferred case (which can be optimized some day) */
	if (ci_cur == ci_tgt) {
		softint_schedule(si);
		return;
	}

	/*
	 * no?  then it's softint turtles all the way down
	 */

	sip = sitosip(si, ci_tgt);
	rumpuser_mutex_enter_nowrap(sicpumtx);
	if (sip->sip_onlist_cpu) {
		rumpuser_mutex_exit(sicpumtx);
		return;
	}
	TAILQ_INSERT_TAIL(&sicpupending, sip, sip_entries_cpu);
	sip->sip_onlist_cpu = true;
	rumpuser_cv_signal(sicpucv);
	rumpuser_mutex_exit(sicpumtx);
}
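
/*
 * The cross-CPU path above is a two-stage handoff: the per-CPU entry
 * goes on the global sicpupending queue, the bouncer thread wakes up,
 * migrates itself to the target CPU, and only then performs a regular
 * softint_schedule() there.  Continuing the hypothetical driver
 * example from above, pushing work to virtual CPU 1 would look like:
 *
 *	softint_schedule_cpu(sc->sc_si, cpu_lookup(1));
 *
 * sip_onlist_cpu plays the same deduplication role on the bounce queue
 * that sip_onlist plays on the per-level pending queues.
 */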

/*
 * flimsy disestablish: should wait for softints to finish.
 */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;
	int i;

	for (i = 0; i < ncpu_final; i++) {
		struct softint_percpu *sip;

		sip = &si->si_entry[i];
		if (sip->sip_onlist) {
			si->si_flags |= SI_KILLME;
			return;
		}
	}
	free(si->si_entry, M_TEMP);
	free(si, M_TEMP);
}

void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!TAILQ_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}