intr.c revision 1.46.4.1 1 1.46.4.1 skrll /* $NetBSD: intr.c,v 1.46.4.1 2015/04/06 15:18:30 skrll Exp $ */
2 1.2 ad
3 1.5 pooka /*
4 1.46.4.1 skrll * Copyright (c) 2008-2010, 2015 Antti Kantee. All Rights Reserved.
5 1.2 ad *
6 1.2 ad * Redistribution and use in source and binary forms, with or without
7 1.2 ad * modification, are permitted provided that the following conditions
8 1.2 ad * are met:
9 1.2 ad * 1. Redistributions of source code must retain the above copyright
10 1.2 ad * notice, this list of conditions and the following disclaimer.
11 1.2 ad * 2. Redistributions in binary form must reproduce the above copyright
12 1.2 ad * notice, this list of conditions and the following disclaimer in the
13 1.2 ad * documentation and/or other materials provided with the distribution.
14 1.2 ad *
15 1.5 pooka * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 1.5 pooka * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 1.5 pooka * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 1.5 pooka * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 1.5 pooka * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 1.5 pooka * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 1.5 pooka * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 1.5 pooka * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 1.5 pooka * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 1.5 pooka * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 1.5 pooka * SUCH DAMAGE.
26 1.2 ad */
27 1.2 ad
28 1.11 pooka #include <sys/cdefs.h>
29 1.46.4.1 skrll __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.46.4.1 2015/04/06 15:18:30 skrll Exp $");
30 1.11 pooka
31 1.2 ad #include <sys/param.h>
32 1.29 martin #include <sys/atomic.h>
33 1.5 pooka #include <sys/cpu.h>
34 1.24 pooka #include <sys/kernel.h>
35 1.12 pooka #include <sys/kmem.h>
36 1.5 pooka #include <sys/kthread.h>
37 1.28 pooka #include <sys/malloc.h>
38 1.2 ad #include <sys/intr.h>
39 1.24 pooka #include <sys/timetc.h>
40 1.2 ad
41 1.5 pooka #include <rump/rumpuser.h>
42 1.5 pooka
43 1.5 pooka #include "rump_private.h"
44 1.5 pooka
45 1.5 pooka /*
46 1.5 pooka * Interrupt simulator. It executes hardclock() and softintrs.
47 1.5 pooka */
48 1.5 pooka
/* si_flags bits */
#define SI_MPSAFE 0x01		/* handler may run without the kernel lock */
#define SI_KILLME 0x02		/* disestablished while queued: free after final run */

struct softint_percpu;

/*
 * One handler registered via softint_establish().  The struct is
 * shared by all virtual cpus; per-cpu scheduling state lives in the
 * si_entry array.
 */
struct softint {
	void (*si_func)(void *);	/* handler function */
	void *si_arg;			/* opaque argument passed to si_func */
	int si_flags;			/* SI_* flags above */
	int si_level;			/* softint level, < SOFTINT_COUNT */

	struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

/* per-cpu scheduling state for one softint */
struct softint_percpu {
	struct softint *sip_parent;	/* back pointer to the softint */
	bool sip_onlist;		/* on this cpu's si_pending list? */
	bool sip_onlist_cpu;		/* on the global sicpupending list? */

	TAILQ_ENTRY(softint_percpu) sip_entries;	/* scheduled */
	TAILQ_ENTRY(softint_percpu) sip_entries_cpu;	/* to be scheduled */
};

/* per-cpu, per-level pending queue plus the wakeup cv of its thread */
struct softint_lev {
	struct rumpuser_cv *si_cv;
	TAILQ_HEAD(, softint_percpu) si_pending;
};

/* state for cross-cpu scheduling done by softint_schedule_cpu() */
static TAILQ_HEAD(, softint_percpu) sicpupending \
    = TAILQ_HEAD_INITIALIZER(sicpupending);
static struct rumpuser_mtx *sicpumtx;	/* protects sicpupending */
static struct rumpuser_cv *sicpucv;	/* wakes the cpu bouncer thread */

kcondvar_t lbolt; /* Oh Kath Ra */

static u_int ticks;	/* clock tick counter; incremented only on cpu0 */
static int ncpu_final;	/* number of cpus the rump kernel will have */
85 1.24 pooka
86 1.24 pooka static u_int
87 1.24 pooka rumptc_get(struct timecounter *tc)
88 1.14 pooka {
89 1.14 pooka
90 1.24 pooka KASSERT(rump_threads);
91 1.24 pooka return ticks;
92 1.16 pooka }
93 1.16 pooka
/* timecounter backed by the tick counter above */
static struct timecounter rumptc = {
	.tc_get_timecount = rumptc_get,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0,
	.tc_frequency = 0,	/* set to hz at init time, before tc_init() */
	.tc_name = "rumpclk",
	.tc_quality = 0,
};
102 1.14 pooka
/*
 * clock "interrupt": run hardclock callouts once per tick and, on
 * cpu0 only, advance the timecounter and wake lbolt sleepers.
 * One instance of this thread runs per virtual cpu.
 */
static void
doclock(void *noarg)
{
	struct timespec thetick, curclock;
	int64_t sec;
	long nsec;
	int error;
	int cpuindx = curcpu()->ci_index;
	extern int hz;

	error = rumpuser_clock_gettime(RUMPUSER_CLOCK_ABSMONO, &sec, &nsec);
	if (error)
		panic("clock: cannot get monotonic time");

	curclock.tv_sec = sec;
	curclock.tv_nsec = nsec;
	/* length of one tick: 1/hz seconds */
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	for (;;) {
		callout_hardclock();

		/*
		 * Sleep to the next absolute deadline and then advance
		 * the deadline by exactly one tick: absolute-time sleeps
		 * avoid accumulating scheduling-latency drift.
		 */
		error = rumpuser_clock_sleep(RUMPUSER_CLOCK_ABSMONO,
		    curclock.tv_sec, curclock.tv_nsec);
		KASSERT(!error);
		timespecadd(&curclock, &thetick, &curclock);

		/* global time bookkeeping is the primary cpu's job */
		if (cpuindx != 0)
			continue;

		/* once a second: wake "kernel second" sleepers */
		if ((++ticks % hz) == 0) {
			cv_broadcast(&lbolt);
		}
		tc_ticktock();
	}
}
142 1.8 pooka
/*
 * Soft interrupt execution thread.  This thread is pinned to the
 * same CPU that scheduled the interrupt, so we don't need to do
 * lock against si_lvl.  One such thread exists per (cpu, level).
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;	/* level was smuggled in the arg */
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!TAILQ_EMPTY(&si_lvl->si_pending)) {
			sip = TAILQ_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			/*
			 * Copy out handler and argument before dequeueing:
			 * a SI_KILLME softint is freed below, so si must
			 * not be touched after softint_disestablish().
			 */
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			TAILQ_REMOVE(&si_lvl->si_pending, sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				/* deferred disestablish: free, don't run */
				softint_disestablish(si);
				continue;
			}
		} else {
			/* release the rump cpu while waiting for work */
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		/* non-MPSAFE handlers run under the big kernel lock */
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}
192 1.2 ad
/*
 * Helper for softint_schedule_cpu(): a single global thread which
 * dequeues cross-cpu softint requests, migrates itself onto the
 * target virtual cpu, and schedules the softint there with the
 * regular (cpu-local) softint_schedule().
 */
static void
sithread_cpu_bouncer(void *arg)
{
	struct lwp *me;

	me = curlwp;
	/* bind to whatever cpu we set as l_target_cpu below */
	me->l_pflag |= LP_BOUND;

	rump_unschedule();
	for (;;) {
		struct softint_percpu *sip;
		struct softint *si;
		struct cpu_info *ci;
		unsigned int cidx;

		/* wait for a request; sicpumtx protects sicpupending */
		rumpuser_mutex_enter_nowrap(sicpumtx);
		while (TAILQ_EMPTY(&sicpupending)) {
			rumpuser_cv_wait_nowrap(sicpucv, sicpumtx);
		}
		sip = TAILQ_FIRST(&sicpupending);
		TAILQ_REMOVE(&sicpupending, sip, sip_entries_cpu);
		sip->sip_onlist_cpu = false;
		rumpuser_mutex_exit(sicpumtx);

		/*
		 * ok, now figure out which cpu we need the softint to
		 * be handled on
		 */
		si = sip->sip_parent;
		cidx = sip - si->si_entry;	/* array index == cpu index */
		ci = cpu_lookup(cidx);
		me->l_target_cpu = ci;

		/* schedule ourselves there, and then schedule the softint */
		rump_schedule();
		KASSERT(curcpu() == ci);
		softint_schedule(si);
		rump_unschedule();
	}
	panic("sithread_cpu_bouncer unreasonable");
}
237 1.46.4.1 skrll
static kmutex_t sithr_emtx;		/* protects sithr_est & sithr_canest */
static unsigned int sithr_est;		/* bitmask of levels with threads */
static int sithr_canest;		/* is kthread_create() usable yet? */

/*
 * Create softint handler threads when the softint for each respective
 * level is established for the first time.  Most rump kernels don't
 * need at least half of the softint levels, so on-demand saves bootstrap
 * time and memory resources.  Note, though, that this routine may be
 * called before it's possible to call kthread_create().  Creation of
 * those softints (SOFTINT_CLOCK, as of writing this) will be deferred
 * to until softint_init() is called for the main CPU.
 */
static void
sithread_establish(int level)
{
	int docreate, rv;
	int lvlbit = 1<<level;
	int i;

	KASSERT((level & ~SOFTINT_LVLMASK) == 0);
	/* unlocked fast path: a level's bit is never cleared once set */
	if (__predict_true(sithr_est & lvlbit))
		return;

	mutex_enter(&sithr_emtx);
	docreate = (sithr_est & lvlbit) == 0 && sithr_canest;
	sithr_est |= lvlbit;
	mutex_exit(&sithr_emtx);

	if (docreate) {
		/* one pinned handler thread per cpu for this level */
		for (i = 0; i < ncpu_final; i++) {
			if ((rv = kthread_create(PRI_NONE,
			    KTHREAD_MPSAFE | KTHREAD_INTR,
			    cpu_lookup(i), sithread, (void *)(uintptr_t)level,
			    NULL, "rsi%d/%d", i, level)) != 0)
				panic("softint thread create failed: %d", rv);
		}
	}
}
277 1.41 pooka
278 1.5 pooka void
279 1.34 pooka rump_intr_init(int numcpu)
280 1.22 pooka {
281 1.22 pooka
282 1.23 pooka cv_init(&lbolt, "oh kath ra");
283 1.41 pooka mutex_init(&sithr_emtx, MUTEX_DEFAULT, IPL_NONE);
284 1.34 pooka ncpu_final = numcpu;
285 1.22 pooka }
286 1.22 pooka
287 1.22 pooka void
288 1.5 pooka softint_init(struct cpu_info *ci)
289 1.2 ad {
290 1.22 pooka struct cpu_data *cd = &ci->ci_data;
291 1.22 pooka struct softint_lev *slev;
292 1.20 pooka int rv, i;
293 1.5 pooka
294 1.22 pooka if (!rump_threads)
295 1.22 pooka return;
296 1.22 pooka
297 1.46 pooka slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
298 1.46 pooka for (i = 0; i < SOFTINT_COUNT; i++) {
299 1.46 pooka rumpuser_cv_init(&slev[i].si_cv);
300 1.46.4.1 skrll TAILQ_INIT(&slev[i].si_pending);
301 1.46 pooka }
302 1.46 pooka cd->cpu_softcpu = slev;
303 1.46 pooka
304 1.41 pooka /* overloaded global init ... */
305 1.46 pooka /* XXX: should be done the last time we are called */
306 1.25 pooka if (ci->ci_index == 0) {
307 1.41 pooka int sithr_swap;
308 1.41 pooka
309 1.25 pooka rumptc.tc_frequency = hz;
310 1.25 pooka tc_init(&rumptc);
311 1.41 pooka
312 1.41 pooka /* create deferred softint threads */
313 1.41 pooka mutex_enter(&sithr_emtx);
314 1.41 pooka sithr_swap = sithr_est;
315 1.41 pooka sithr_est = 0;
316 1.41 pooka sithr_canest = 1;
317 1.41 pooka mutex_exit(&sithr_emtx);
318 1.41 pooka for (i = 0; i < SOFTINT_COUNT; i++) {
319 1.41 pooka if (sithr_swap & (1<<i))
320 1.41 pooka sithread_establish(i);
321 1.41 pooka }
322 1.25 pooka }
323 1.25 pooka
324 1.41 pooka /* well, not really a "soft" interrupt ... */
325 1.41 pooka if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
326 1.41 pooka ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index)) != 0)
327 1.25 pooka panic("clock thread creation failed: %d", rv);
328 1.46.4.1 skrll
329 1.46.4.1 skrll /* not one either, but at least a softint helper */
330 1.46.4.1 skrll rumpuser_mutex_init(&sicpumtx, RUMPUSER_MTX_SPIN);
331 1.46.4.1 skrll rumpuser_cv_init(&sicpucv);
332 1.46.4.1 skrll if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
333 1.46.4.1 skrll NULL, sithread_cpu_bouncer, NULL, NULL, "sipbnc")) != 0)
334 1.46.4.1 skrll panic("softint cpu bouncer creation failed: %d", rv);
335 1.2 ad }
336 1.2 ad
337 1.5 pooka void *
338 1.5 pooka softint_establish(u_int flags, void (*func)(void *), void *arg)
339 1.2 ad {
340 1.5 pooka struct softint *si;
341 1.32 pooka struct softint_percpu *sip;
342 1.41 pooka int level = flags & SOFTINT_LVLMASK;
343 1.32 pooka int i;
344 1.2 ad
345 1.28 pooka si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
346 1.5 pooka si->si_func = func;
347 1.5 pooka si->si_arg = arg;
348 1.18 pooka si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
349 1.41 pooka si->si_level = level;
350 1.20 pooka KASSERT(si->si_level < SOFTINT_COUNT);
351 1.34 pooka si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
352 1.32 pooka M_TEMP, M_WAITOK | M_ZERO);
353 1.34 pooka for (i = 0; i < ncpu_final; i++) {
354 1.32 pooka sip = &si->si_entry[i];
355 1.32 pooka sip->sip_parent = si;
356 1.32 pooka }
357 1.41 pooka sithread_establish(level);
358 1.5 pooka
359 1.5 pooka return si;
360 1.2 ad }
361 1.2 ad
362 1.46.4.1 skrll static struct softint_percpu *
363 1.46.4.1 skrll sitosip(struct softint *si, struct cpu_info *ci)
364 1.46.4.1 skrll {
365 1.46.4.1 skrll
366 1.46.4.1 skrll return &si->si_entry[ci->ci_index];
367 1.46.4.1 skrll }
368 1.46.4.1 skrll
369 1.33 pooka /*
370 1.33 pooka * Soft interrupts bring two choices. If we are running with thread
371 1.33 pooka * support enabled, defer execution, otherwise execute in place.
372 1.33 pooka */
373 1.33 pooka
374 1.2 ad void
375 1.2 ad softint_schedule(void *arg)
376 1.2 ad {
377 1.5 pooka struct softint *si = arg;
378 1.42 pooka struct cpu_info *ci = curcpu();
379 1.46.4.1 skrll struct softint_percpu *sip = sitosip(si, ci);
380 1.42 pooka struct cpu_data *cd = &ci->ci_data;
381 1.22 pooka struct softint_lev *si_lvl = cd->cpu_softcpu;
382 1.2 ad
383 1.5 pooka if (!rump_threads) {
384 1.5 pooka si->si_func(si->si_arg);
385 1.5 pooka } else {
386 1.32 pooka if (!sip->sip_onlist) {
387 1.46.4.1 skrll TAILQ_INSERT_TAIL(&si_lvl[si->si_level].si_pending,
388 1.32 pooka sip, sip_entries);
389 1.32 pooka sip->sip_onlist = true;
390 1.5 pooka }
391 1.5 pooka }
392 1.2 ad }
393 1.2 ad
/*
 * Like softint_schedule(), except schedule softint to be handled on
 * the core designated by ci_tgt instead of the core the call is made on.
 *
 * Unlike softint_schedule(), the performance is not important
 * (unless ci_tgt == curcpu): high-performance rump kernel I/O stacks
 * should arrange data to already be on the right core at the driver
 * layer.
 */
void
softint_schedule_cpu(void *arg, struct cpu_info *ci_tgt)
{
	struct softint *si = arg;
	struct cpu_info *ci_cur = curcpu();
	struct softint_percpu *sip;

	/* only supported with thread support; there is no bouncer otherwise */
	KASSERT(rump_threads);

	/* preferred case (which can be optimized some day) */
	if (ci_cur == ci_tgt) {
		softint_schedule(si);
		return;
	}

	/*
	 * no? then it's softint turtles all the way down
	 */

	/* queue the target cpu's entry for the bouncer thread */
	sip = sitosip(si, ci_tgt);
	rumpuser_mutex_enter_nowrap(sicpumtx);
	if (sip->sip_onlist_cpu) {
		/* already queued cross-cpu; nothing more to do */
		rumpuser_mutex_exit(sicpumtx);
		return;
	}
	TAILQ_INSERT_TAIL(&sicpupending, sip, sip_entries_cpu);
	sip->sip_onlist_cpu = true;
	rumpuser_cv_signal(sicpucv);
	rumpuser_mutex_exit(sicpumtx);
}
433 1.43 rmind
434 1.32 pooka /*
435 1.32 pooka * flimsy disestablish: should wait for softints to finish.
436 1.32 pooka */
437 1.18 pooka void
438 1.18 pooka softint_disestablish(void *cook)
439 1.18 pooka {
440 1.18 pooka struct softint *si = cook;
441 1.32 pooka int i;
442 1.18 pooka
443 1.34 pooka for (i = 0; i < ncpu_final; i++) {
444 1.32 pooka struct softint_percpu *sip;
445 1.32 pooka
446 1.32 pooka sip = &si->si_entry[i];
447 1.32 pooka if (sip->sip_onlist) {
448 1.32 pooka si->si_flags |= SI_KILLME;
449 1.32 pooka return;
450 1.32 pooka }
451 1.18 pooka }
452 1.32 pooka free(si->si_entry, M_TEMP);
453 1.28 pooka free(si, M_TEMP);
454 1.18 pooka }
455 1.18 pooka
456 1.20 pooka void
457 1.20 pooka rump_softint_run(struct cpu_info *ci)
458 1.20 pooka {
459 1.22 pooka struct cpu_data *cd = &ci->ci_data;
460 1.22 pooka struct softint_lev *si_lvl = cd->cpu_softcpu;
461 1.20 pooka int i;
462 1.20 pooka
463 1.22 pooka if (!rump_threads)
464 1.22 pooka return;
465 1.22 pooka
466 1.20 pooka for (i = 0; i < SOFTINT_COUNT; i++) {
467 1.46.4.1 skrll if (!TAILQ_EMPTY(&si_lvl[i].si_pending))
468 1.22 pooka rumpuser_cv_signal(si_lvl[i].si_cv);
469 1.20 pooka }
470 1.20 pooka }
471 1.20 pooka
/*
 * A rump kernel has no hard interrupt context, so this is
 * unconditionally false.
 */
bool
cpu_intr_p(void)
{

	return false;
}
478 1.19 pooka
479 1.19 pooka bool
480 1.19 pooka cpu_softintr_p(void)
481 1.19 pooka {
482 1.19 pooka
483 1.20 pooka return curlwp->l_pflag & LP_INTR;
484 1.19 pooka }
485