1 1.10 ad /* $NetBSD: kern_softint.c,v 1.10 2008/01/29 18:06:14 ad Exp $ */
2 1.2 ad
3 1.2 ad /*-
4 1.10 ad * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
5 1.2 ad * All rights reserved.
6 1.2 ad *
7 1.2 ad * This code is derived from software contributed to The NetBSD Foundation
8 1.2 ad * by Andrew Doran.
9 1.2 ad *
10 1.2 ad * Redistribution and use in source and binary forms, with or without
11 1.2 ad * modification, are permitted provided that the following conditions
12 1.2 ad * are met:
13 1.2 ad * 1. Redistributions of source code must retain the above copyright
14 1.2 ad * notice, this list of conditions and the following disclaimer.
15 1.2 ad * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 ad * notice, this list of conditions and the following disclaimer in the
17 1.2 ad * documentation and/or other materials provided with the distribution.
18 1.2 ad * 3. All advertising materials mentioning features or use of this software
19 1.2 ad * must display the following acknowledgement:
20 1.2 ad * This product includes software developed by the NetBSD
21 1.2 ad * Foundation, Inc. and its contributors.
22 1.2 ad * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.2 ad * contributors may be used to endorse or promote products derived
24 1.2 ad * from this software without specific prior written permission.
25 1.2 ad *
26 1.2 ad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.2 ad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.2 ad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.2 ad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.2 ad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.2 ad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.2 ad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.2 ad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.2 ad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.2 ad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.2 ad * POSSIBILITY OF SUCH DAMAGE.
37 1.2 ad */
38 1.2 ad
39 1.2 ad /*
40 1.5 ad * Generic software interrupt framework.
41 1.5 ad *
42 1.5 ad * Overview
43 1.5 ad *
44 1.5 ad * The soft interrupt framework provides a mechanism to schedule a
45 1.5 ad * low priority callback that runs with thread context. It allows
46 1.5 ad * for dynamic registration of software interrupts, and for fair
47 1.5 ad * queueing and prioritization of those interrupts. The callbacks
48 1.5 ad * can be scheduled to run from nearly any point in the kernel: by
49 1.5 ad * code running with thread context, by code running from a
50 1.5 ad * hardware interrupt handler, and at any interrupt priority
51 1.5 ad * level.
52 1.5 ad *
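 *	As a purely illustrative sketch (the handler name and argument
 *	below are hypothetical, not part of this file), a driver
 *	registers a handler once and later schedules it, typically from
 *	its hardware interrupt handler, whenever there is work to do:
 *
 *		void *cookie;
 *
 *		cookie = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *		    xyz_softintr, sc);
 *		...
 *		softint_schedule(cookie);
 *		...
 *		softint_disestablish(cookie);
 *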
53 1.5 ad * Priority levels
54 1.5 ad *
55 1.5 ad * Since soft interrupt dispatch can be tied to the underlying
56 1.5 ad * architecture's interrupt dispatch code, it can be limited
57 1.5 ad * both by the capabilities of the hardware and the capabilities
58 1.5 ad * of the interrupt dispatch code itself. The number of priority
59 1.5 ad * levels is restricted to four. In order of priority (lowest to
60 1.5 ad * highest) the levels are: clock, bio, net, serial.
61 1.5 ad *
62 1.5 ad * The names are symbolic and in isolation do not have any direct
63 1.5 ad * connection with a particular kind of device activity: they are
64 1.5 ad * only meant as a guide.
65 1.5 ad *
66 1.5 ad * The four priority levels map directly to scheduler priority
67 1.5 ad * levels, and where the architecture implements 'fast' software
68 1.5 ad * interrupts, they also map onto interrupt priorities. The
69 1.5 ad * interrupt priorities are intended to be hidden from machine
70 1.5 ad * independent code, which should use thread-safe mechanisms to
71 1.5 ad * synchronize with software interrupts (for example: mutexes).
72 1.5 ad *
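 *	For example (an illustrative sketch, not taken from this file),
 *	thread context code sharing data with a handler established at
 *	SOFTINT_NET could guard that data with a mutex initialized at
 *	the corresponding level:
 *
 *		mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
 *		...
 *		mutex_enter(&sc->sc_lock);
 *		... examine or update shared state ...
 *		mutex_exit(&sc->sc_lock);
 *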
73 1.5 ad * Capabilities
74 1.5 ad *
75 1.5 ad * Software interrupts run with limited machine context. In
76 1.5 ad * particular, they do not possess any address space context. They
77 1.5 ad * should not try to operate on user space addresses, or to use
78 1.5 ad * virtual memory facilities other than those noted as interrupt
79 1.5 ad * safe.
80 1.5 ad *
81 1.5 ad * Unlike hardware interrupts, software interrupts do have thread
82 1.5 ad * context. They may block on synchronization objects, sleep, and
83 1.5 ad * resume execution at a later time.
84 1.5 ad *
85 1.5 ad * Since software interrupts are a limited resource and run with
86 1.5 ad * higher priority than most other LWPs in the system, all
87 1.5 ad * block-and-resume activity by a software interrupt must be kept
88 1.5 ad * short to allow further processing at that level to continue. By
89 1.5 ad * extension, code running with process context must take care to
90 1.5 ad * ensure that any lock that may be taken from a software interrupt
91 1.5 ad * cannot be held for more than a short period of time.
92 1.5 ad *
93 1.5 ad * The kernel does not allow software interrupts to use facilities
94 1.5 ad * or perform actions that may block for a significant amount of
95 1.5 ad * time. This means that it's not valid for a software interrupt
96 1.10 ad * to sleep on condition variables or wait for resources to become
97 1.10 ad * available (for example, memory).
98 1.5 ad *
99 1.5 ad * Per-CPU operation
100 1.5 ad *
101 1.5 ad * If a soft interrupt is triggered on a CPU, it can only be
102 1.5 ad * dispatched on the same CPU. Each LWP dedicated to handling a
103 1.5 ad * soft interrupt is bound to its home CPU, so if the LWP blocks
104 1.5 ad * and needs to run again, it can only run there. Nearly all data
105 1.5 ad * structures used to manage software interrupts are per-CPU.
106 1.5 ad *
107 1.5 ad * The per-CPU requirement is intended to reduce "ping-pong" of
108 1.5 ad * cache lines between CPUs: lines occupied by data structures
109 1.5 ad * used to manage the soft interrupts, and lines occupied by data
110 1.5 ad * items being passed down to the soft interrupt. As a positive
111 1.5 ad * side effect, this also means that the soft interrupt dispatch
112 1.5 ad * code does not need to use spinlocks to synchronize.
113 1.5 ad *
114 1.5 ad * Generic implementation
115 1.5 ad *
116 1.5 ad * A generic, low performance implementation is provided that
117 1.5 ad * works across all architectures, with no machine-dependent
118 1.5 ad * modifications needed. This implementation uses the scheduler,
119 1.5 ad * and so has a number of restrictions:
120 1.5 ad *
121 1.5 ad * 1) The software interrupts are not currently preemptive, so they
122 1.5 ad * must wait for the currently executing LWP to yield the CPU.
123 1.5 ad * This can introduce latency.
124 1.5 ad *
125 1.5 ad * 2) An expensive context switch is required for a software
126 1.5 ad * interrupt to be handled.
127 1.5 ad *
128 1.5 ad * 'Fast' software interrupts
129 1.5 ad *
130 1.5 ad * If an architecture defines __HAVE_FAST_SOFTINTS, it implements
131 1.5 ad * the fast mechanism. Threads running either in the kernel or in
132 1.5 ad * userspace will be interrupted, but will not be preempted. When
133 1.5 ad * the soft interrupt completes execution, the interrupted LWP
134 1.5 ad * is resumed. Interrupt dispatch code must provide the minimum
135 1.5 ad * level of context necessary for the soft interrupt to block and
136 1.5 ad * be resumed at a later time. The machine-dependent dispatch
137 1.5 ad * path looks something like the following:
138 1.5 ad *
139 1.5 ad * softintr()
140 1.5 ad * {
141 1.5 ad * go to IPL_HIGH if necessary for switch;
142 1.5 ad * save any necessary registers in a format that can be
143 1.5 ad * restored by cpu_switchto if the softint blocks;
144 1.5 ad * arrange for cpu_switchto() to restore into the
145 1.5 ad * trampoline function;
146 1.5 ad * identify LWP to handle this interrupt;
147 1.5 ad * switch to the LWP's stack;
148 1.5 ad * switch register stacks, if necessary;
149 1.5 ad * assign new value of curlwp;
150 1.5 ad * call MI softint_dispatch, passing old curlwp and IPL
151 1.5 ad * to execute interrupt at;
152 1.5 ad * switch back to old stack;
153 1.5 ad * switch back to old register stack, if necessary;
154 1.5 ad * restore curlwp;
155 1.5 ad * return to interrupted LWP;
156 1.5 ad * }
157 1.5 ad *
158 1.5 ad * If the soft interrupt blocks, a trampoline function is returned
159 1.5 ad * to in the context of the interrupted LWP, as arranged for by
160 1.5 ad * softintr():
161 1.5 ad *
162 1.5 ad * softint_ret()
163 1.5 ad * {
164 1.5 ad * unlock soft interrupt LWP;
165 1.5 ad * resume interrupt processing, likely returning to
166 1.5 ad * interrupted LWP or dispatching another, different
167 1.5 ad * interrupt;
168 1.5 ad * }
169 1.5 ad *
170 1.5 ad * Once the soft interrupt has fired (and even if it has blocked),
171 1.5 ad * no further soft interrupts at that level will be triggered by
172 1.5 ad * MI code until the soft interrupt handler has ceased execution.
173 1.5 ad * If a soft interrupt handler blocks and is resumed, it resumes
174 1.5 ad * execution as a normal LWP (kthread) and gains VM context. Only
175 1.5 ad * when it has completed and is ready to fire again will it
176 1.5 ad * interrupt other threads.
177 1.5 ad *
178 1.5 ad * Future directions
179 1.5 ad *
180 1.5 ad * Provide a cheap way to direct software interrupts to remote
181 1.5 ad * CPUs. Provide a way to enqueue work items into the handler
182 1.5 ad * record, removing additional spl calls (see subr_workqueue.c).
183 1.2 ad */
184 1.2 ad
185 1.2 ad #include <sys/cdefs.h>
186 1.10 ad __KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.10 2008/01/29 18:06:14 ad Exp $");
187 1.2 ad
188 1.2 ad #include <sys/param.h>
189 1.5 ad #include <sys/malloc.h>
190 1.5 ad #include <sys/proc.h>
191 1.2 ad #include <sys/intr.h>
192 1.5 ad #include <sys/mutex.h>
193 1.5 ad #include <sys/kthread.h>
194 1.5 ad #include <sys/evcnt.h>
195 1.5 ad #include <sys/cpu.h>
196 1.5 ad
197 1.5 ad #include <net/netisr.h>
198 1.5 ad
199 1.5 ad #include <uvm/uvm_extern.h>
200 1.5 ad
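/*
 * Per-CPU state for one software interrupt level: the queue of pending
 * handlers, the LWP that services them, and associated event counters.
 */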
201 1.5 ad /* This could overlap with signal info in struct lwp. */
202 1.5 ad typedef struct softint {
203 1.5 ad SIMPLEQ_HEAD(, softhand) si_q;
204 1.5 ad struct lwp *si_lwp;
205 1.5 ad struct cpu_info *si_cpu;
206 1.5 ad uintptr_t si_machdep;
207 1.5 ad struct evcnt si_evcnt;
208 1.5 ad struct evcnt si_evcnt_block;
209 1.5 ad int si_active;
210 1.5 ad char si_name[8];
211 1.5 ad char si_name_block[8+6];
212 1.5 ad } softint_t;
213 1.5 ad
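/*
 * One registered handler.  A copy of each handler record exists on every
 * CPU; sh_isr points at that CPU's softint_t for the handler's level.
 */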
214 1.5 ad typedef struct softhand {
215 1.5 ad SIMPLEQ_ENTRY(softhand) sh_q;
216 1.5 ad void (*sh_func)(void *);
217 1.5 ad void *sh_arg;
218 1.5 ad softint_t *sh_isr;
219 1.5 ad u_int sh_pending;
220 1.5 ad u_int sh_flags;
221 1.5 ad } softhand_t;
222 1.5 ad
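/*
 * Per-CPU container: one softint_t per level, followed by a variable
 * length table of handler records (sized from softint_bytes).
 */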
223 1.5 ad typedef struct softcpu {
224 1.5 ad struct cpu_info *sc_cpu;
225 1.5 ad softint_t sc_int[SOFTINT_COUNT];
226 1.5 ad softhand_t sc_hand[1];
227 1.5 ad } softcpu_t;
228 1.5 ad
229 1.5 ad static void softint_thread(void *);
230 1.5 ad
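/* Bytes allocated per CPU for softcpu_t; bounds the number of handlers. */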
231 1.5 ad u_int softint_bytes = 8192;
232 1.5 ad u_int softint_timing;
233 1.5 ad static u_int softint_max;
234 1.5 ad static kmutex_t softint_lock;
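/* Cookies for the legacy netisr handlers, indexed by NETISR_* number. */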
235 1.5 ad static void *softint_netisrs[32];
236 1.2 ad
237 1.5 ad /*
238 1.5 ad * softint_init_isr:
239 1.5 ad *
240 1.5 ad * Initialize a single interrupt level for a single CPU.
241 1.5 ad */
242 1.5 ad static void
243 1.5 ad softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
244 1.5 ad {
245 1.5 ad struct cpu_info *ci;
246 1.5 ad softint_t *si;
247 1.5 ad int error;
248 1.5 ad
249 1.5 ad si = &sc->sc_int[level];
250 1.5 ad ci = sc->sc_cpu;
251 1.5 ad si->si_cpu = ci;
252 1.5 ad
253 1.5 ad SIMPLEQ_INIT(&si->si_q);
254 1.5 ad
255 1.5 ad error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
256 1.5 ad KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
257 1.5 ad "soft%s/%d", desc, (int)ci->ci_cpuid);
258 1.5 ad if (error != 0)
259 1.5 ad panic("softint_init_isr: error %d", error);
260 1.5 ad
261 1.5 ad snprintf(si->si_name, sizeof(si->si_name), "%s/%d", desc,
262 1.5 ad (int)ci->ci_cpuid);
263 1.5 ad evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_INTR, NULL,
264 1.5 ad "softint", si->si_name);
265 1.5 ad snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%d",
266 1.5 ad desc, (int)ci->ci_cpuid);
267 1.5 ad evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_INTR, NULL,
268 1.5 ad "softint", si->si_name_block);
269 1.3 ad
270 1.5 ad si->si_lwp->l_private = si;
271 1.5 ad softint_init_md(si->si_lwp, level, &si->si_machdep);
272 1.5 ad }
273 1.2 ad /*
274 1.2 ad * softint_init:
275 1.2 ad *
276 1.2 ad * Initialize per-CPU data structures. Called from mi_cpu_attach().
277 1.2 ad */
278 1.2 ad void
279 1.2 ad softint_init(struct cpu_info *ci)
280 1.2 ad {
281 1.5 ad static struct cpu_info *first;
282 1.5 ad softcpu_t *sc, *scfirst;
283 1.5 ad softhand_t *sh, *shmax;
284 1.5 ad
285 1.5 ad if (first == NULL) {
286 1.5 ad /* Boot CPU. */
287 1.5 ad first = ci;
288 1.5 ad mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
289 1.5 ad softint_bytes = round_page(softint_bytes);
290 1.5 ad softint_max = (softint_bytes - sizeof(softcpu_t)) /
291 1.5 ad sizeof(softhand_t);
292 1.5 ad }
293 1.2 ad
294 1.5 ad sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
295 1.5 ad UVM_KMF_WIRED | UVM_KMF_ZERO);
296 1.5 ad if (sc == NULL)
297 1.5 ad 	panic("softint_init: cannot allocate memory");
298 1.5 ad
299 1.5 ad ci->ci_data.cpu_softcpu = sc;
300 1.5 ad ci->ci_data.cpu_softints = 0;
301 1.5 ad sc->sc_cpu = ci;
302 1.5 ad
303 1.5 ad softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
304 1.5 ad softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
305 1.5 ad softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
306 1.5 ad softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);
307 1.5 ad
308 1.5 ad if (first != ci) {
309 1.5 ad mutex_enter(&softint_lock);
310 1.5 ad scfirst = first->ci_data.cpu_softcpu;
311 1.5 ad sh = sc->sc_hand;
312 1.5 ad memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
313 1.5 ad /* Update pointers for this CPU. */
314 1.5 ad for (shmax = sh + softint_max; sh < shmax; sh++) {
315 1.5 ad if (sh->sh_func == NULL)
316 1.5 ad continue;
317 1.5 ad sh->sh_isr =
318 1.5 ad &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
319 1.5 ad }
320 1.5 ad mutex_exit(&softint_lock);
321 1.5 ad } else {
322 1.5 ad /*
323 1.5 ad * Establish handlers for legacy net interrupts.
324 1.5 ad * XXX Needs to go away.
325 1.5 ad */
326 1.5 ad #define DONETISR(n, f) \
327 1.5 ad softint_netisrs[(n)] = \
328 1.5 ad softint_establish(SOFTINT_NET, (void (*)(void *))(f), NULL)
329 1.5 ad #include <net/netisr_dispatch.h>
330 1.5 ad }
331 1.2 ad }
332 1.2 ad
333 1.2 ad /*
334 1.2 ad * softint_establish:
335 1.2 ad *
336 1.2 ad * Register a software interrupt handler.
337 1.2 ad */
338 1.2 ad void *
339 1.2 ad softint_establish(u_int flags, void (*func)(void *), void *arg)
340 1.2 ad {
341 1.5 ad CPU_INFO_ITERATOR cii;
342 1.5 ad struct cpu_info *ci;
343 1.5 ad softcpu_t *sc;
344 1.5 ad softhand_t *sh;
345 1.5 ad u_int level, index;
346 1.2 ad
347 1.2 ad level = (flags & SOFTINT_LVLMASK);
348 1.2 ad KASSERT(level < SOFTINT_COUNT);
349 1.2 ad
350 1.5 ad mutex_enter(&softint_lock);
351 1.5 ad
352 1.5 ad /* Find a free slot. */
353 1.5 ad sc = curcpu()->ci_data.cpu_softcpu;
354 1.5 ad for (index = 1; index < softint_max; index++)
355 1.5 ad if (sc->sc_hand[index].sh_func == NULL)
356 1.5 ad break;
357 1.5 ad if (index == softint_max) {
358 1.5 ad mutex_exit(&softint_lock);
359 1.5 ad printf("WARNING: softint_establish: table full, "
360 1.5 ad "increase softint_bytes\n");
361 1.5 ad return NULL;
362 1.5 ad }
363 1.5 ad
364 1.5 ad /* Set up the handler on each CPU. */
365 1.8 ad if (ncpu < 2) {
366 1.7 ad /* XXX hack for machines with no CPU_INFO_FOREACH() early on */
367 1.7 ad sc = curcpu()->ci_data.cpu_softcpu;
368 1.7 ad sh = &sc->sc_hand[index];
369 1.7 ad sh->sh_isr = &sc->sc_int[level];
370 1.7 ad sh->sh_func = func;
371 1.7 ad sh->sh_arg = arg;
372 1.7 ad sh->sh_flags = flags;
373 1.7 ad sh->sh_pending = 0;
374 1.7 ad } else for (CPU_INFO_FOREACH(cii, ci)) {
375 1.5 ad sc = ci->ci_data.cpu_softcpu;
376 1.5 ad sh = &sc->sc_hand[index];
377 1.5 ad sh->sh_isr = &sc->sc_int[level];
378 1.5 ad sh->sh_func = func;
379 1.5 ad sh->sh_arg = arg;
380 1.5 ad sh->sh_flags = flags;
381 1.5 ad sh->sh_pending = 0;
382 1.2 ad }
383 1.2 ad
384 1.5 ad mutex_exit(&softint_lock);
385 1.5 ad
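	/*
	 * The cookie is the handler's byte offset within the per-CPU
	 * softcpu_t, so the same cookie selects the matching record on
	 * every CPU (see softint_schedule() and softint_disestablish()).
	 */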
386 1.5 ad return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
387 1.2 ad }
388 1.2 ad
389 1.2 ad /*
390 1.2 ad * softint_disestablish:
391 1.2 ad *
392 1.2 ad * Unregister a software interrupt handler.
393 1.2 ad */
394 1.2 ad void
395 1.2 ad softint_disestablish(void *arg)
396 1.2 ad {
397 1.5 ad CPU_INFO_ITERATOR cii;
398 1.5 ad struct cpu_info *ci;
399 1.5 ad softcpu_t *sc;
400 1.5 ad softhand_t *sh;
401 1.5 ad uintptr_t offset;
402 1.5 ad
403 1.5 ad offset = (uintptr_t)arg;
404 1.5 ad KASSERT(offset != 0 && offset < softint_bytes);
405 1.5 ad
406 1.5 ad mutex_enter(&softint_lock);
407 1.5 ad
408 1.5 ad /* Clear the handler on each CPU. */
409 1.5 ad for (CPU_INFO_FOREACH(cii, ci)) {
410 1.5 ad sc = ci->ci_data.cpu_softcpu;
411 1.5 ad sh = (softhand_t *)((uint8_t *)sc + offset);
412 1.5 ad KASSERT(sh->sh_func != NULL);
413 1.5 ad KASSERT(sh->sh_pending == 0);
414 1.5 ad sh->sh_func = NULL;
415 1.5 ad }
416 1.2 ad
417 1.5 ad mutex_exit(&softint_lock);
418 1.2 ad }
419 1.2 ad
420 1.2 ad /*
421 1.2 ad * softint_schedule:
422 1.2 ad *
423 1.2 ad * Trigger a software interrupt. Must be called from a hardware
424 1.2 ad * interrupt handler, or with preemption disabled (since we are
425 1.2 ad * using the value of curcpu()).
426 1.2 ad */
427 1.2 ad void
428 1.2 ad softint_schedule(void *arg)
429 1.2 ad {
430 1.5 ad softhand_t *sh;
431 1.5 ad softint_t *si;
432 1.5 ad uintptr_t offset;
433 1.5 ad int s;
434 1.5 ad
435 1.5 ad /* Find the handler record for this CPU. */
436 1.5 ad offset = (uintptr_t)arg;
437 1.5 ad KASSERT(offset != 0 && offset < softint_bytes);
438 1.5 ad sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);
439 1.5 ad
440 1.5 ad /* If it's already pending there's nothing to do. */
441 1.5 ad if (sh->sh_pending)
442 1.5 ad return;
443 1.5 ad
444 1.5 ad /*
445 1.5 ad * Enqueue the handler into the LWP's pending list.
446 1.5 ad * If the LWP is completely idle, then make it run.
447 1.5 ad */
448 1.5 ad s = splhigh();
449 1.5 ad if (!sh->sh_pending) {
450 1.5 ad si = sh->sh_isr;
451 1.5 ad sh->sh_pending = 1;
452 1.5 ad SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
453 1.5 ad if (si->si_active == 0) {
454 1.5 ad si->si_active = 1;
455 1.5 ad softint_trigger(si->si_machdep);
456 1.5 ad }
457 1.5 ad }
458 1.5 ad splx(s);
459 1.5 ad }
460 1.5 ad
461 1.5 ad /*
462 1.5 ad * softint_execute:
463 1.5 ad *
464 1.5 ad * Invoke handlers for the specified soft interrupt.
465 1.5 ad * Must be entered at splhigh. Will drop the priority
466 1.5 ad * to the level specified, but returns back at splhigh.
467 1.5 ad */
468 1.5 ad static inline void
469 1.5 ad softint_execute(softint_t *si, lwp_t *l, int s)
470 1.5 ad {
471 1.5 ad softhand_t *sh;
472 1.5 ad bool havelock;
473 1.5 ad
474 1.5 ad #ifdef __HAVE_FAST_SOFTINTS
475 1.5 ad KASSERT(si->si_lwp == curlwp);
476 1.5 ad #else
477 1.5 ad /* May be running in user context. */
478 1.5 ad #endif
479 1.5 ad KASSERT(si->si_cpu == curcpu());
480 1.5 ad KASSERT(si->si_lwp->l_wchan == NULL);
481 1.5 ad KASSERT(si->si_active);
482 1.5 ad
483 1.5 ad havelock = false;
484 1.5 ad
485 1.5 ad /*
486 1.5 ad * Note: due to priority inheritance we may have interrupted a
487 1.5 ad * higher priority LWP. Since the soft interrupt must be quick
488 1.5 ad * and is non-preemptable, we don't bother yielding.
489 1.5 ad */
490 1.5 ad
491 1.5 ad while (!SIMPLEQ_EMPTY(&si->si_q)) {
492 1.5 ad /*
493 1.5 ad * Pick the longest waiting handler to run. We block
494 1.5 ad * interrupts but do not lock in order to do this, as
495 1.5 ad * we are protecting against the local CPU only.
496 1.5 ad */
497 1.5 ad sh = SIMPLEQ_FIRST(&si->si_q);
498 1.5 ad SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
499 1.5 ad sh->sh_pending = 0;
500 1.5 ad splx(s);
501 1.5 ad
502 1.5 ad /* Run the handler. */
503 1.5 ad if ((sh->sh_flags & SOFTINT_MPSAFE) == 0 && !havelock) {
504 1.5 ad KERNEL_LOCK(1, l);
505 1.5 ad havelock = true;
506 1.5 ad }
507 1.5 ad (*sh->sh_func)(sh->sh_arg);
508 1.5 ad
509 1.5 ad (void)splhigh();
510 1.5 ad }
511 1.2 ad
512 1.5 ad if (havelock) {
513 1.5 ad KERNEL_UNLOCK_ONE(l);
514 1.5 ad }
515 1.5 ad
516 1.5 ad /*
517 1.5 ad * Unlocked, but only for statistics.
518 1.5 ad * Should be per-CPU to prevent cache ping-pong.
519 1.5 ad */
520 1.5 ad uvmexp.softs++;
521 1.5 ad
522 1.5 ad si->si_evcnt.ev_count++;
523 1.5 ad si->si_active = 0;
524 1.2 ad }
525 1.2 ad
526 1.2 ad /*
527 1.2 ad * softint_block:
528 1.2 ad *
529 1.2 ad * Update statistics when the soft interrupt blocks.
530 1.2 ad */
531 1.2 ad void
532 1.2 ad softint_block(lwp_t *l)
533 1.2 ad {
534 1.5 ad softint_t *si = l->l_private;
535 1.5 ad
536 1.5 ad KASSERT((l->l_pflag & LP_INTR) != 0);
537 1.5 ad si->si_evcnt_block.ev_count++;
538 1.5 ad }
539 1.5 ad
540 1.5 ad /*
541 1.5 ad * schednetisr:
542 1.5 ad *
543 1.5 ad * Trigger a legacy network interrupt. XXX Needs to go away.
544 1.5 ad */
545 1.5 ad void
546 1.5 ad schednetisr(int isr)
547 1.5 ad {
548 1.5 ad
549 1.5 ad softint_schedule(softint_netisrs[isr]);
550 1.5 ad }
551 1.5 ad
552 1.5 ad #ifndef __HAVE_FAST_SOFTINTS
553 1.5 ad
554 1.5 ad /*
555 1.5 ad * softint_init_md:
556 1.5 ad *
557 1.5 ad * Slow path: perform machine-dependent initialization.
558 1.5 ad */
559 1.5 ad void
560 1.5 ad softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
561 1.5 ad {
562 1.5 ad softint_t *si;
563 1.5 ad
564 1.5 ad *machdep = (1 << level);
565 1.5 ad si = l->l_private;
566 1.5 ad
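	/*
	 * Hand the LWP over to its CPU's run queue and mark it runnable,
	 * so that it starts up and parks itself in softint_thread().
	 */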
567 1.5 ad lwp_lock(l);
568 1.5 ad lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
569 1.5 ad lwp_lock(l);
570 1.5 ad /* Cheat and make the KASSERT in softint_thread() happy. */
571 1.5 ad si->si_active = 1;
572 1.5 ad l->l_stat = LSRUN;
573 1.5 ad sched_enqueue(l, false);
574 1.5 ad lwp_unlock(l);
575 1.5 ad }
576 1.5 ad
577 1.5 ad /*
578 1.5 ad * softint_trigger:
579 1.5 ad *
580 1.5 ad * Slow path: cause a soft interrupt handler to begin executing.
581 1.5 ad * Called at IPL_HIGH.
582 1.5 ad */
583 1.5 ad void
584 1.5 ad softint_trigger(uintptr_t machdep)
585 1.5 ad {
586 1.5 ad struct cpu_info *ci;
587 1.5 ad lwp_t *l;
588 1.2 ad
589 1.5 ad l = curlwp;
590 1.5 ad ci = l->l_cpu;
591 1.5 ad ci->ci_data.cpu_softints |= machdep;
592 1.5 ad if (l == ci->ci_data.cpu_idlelwp) {
593 1.5 ad cpu_need_resched(ci, 0);
594 1.5 ad } else {
595 1.5 ad /* MI equivalent of aston() */
596 1.5 ad cpu_signotify(l);
597 1.5 ad }
598 1.5 ad }
599 1.5 ad
600 1.5 ad /*
601 1.5 ad * softint_thread:
602 1.5 ad *
603 1.5 ad * Slow path: MI software interrupt dispatch.
604 1.5 ad */
605 1.5 ad void
606 1.5 ad softint_thread(void *cookie)
607 1.5 ad {
608 1.5 ad softint_t *si;
609 1.5 ad lwp_t *l;
610 1.5 ad int s;
611 1.5 ad
612 1.5 ad l = curlwp;
613 1.5 ad si = l->l_private;
614 1.5 ad
615 1.5 ad for (;;) {
616 1.5 ad /*
617 1.5 ad * Clear pending status and run it. We must drop the
618 1.5 ad * spl before mi_switch(), since IPL_HIGH may be higher
619 1.5 ad * than IPL_SCHED (and it is not safe to switch at a
620 1.5 ad * higher level).
621 1.5 ad */
622 1.5 ad s = splhigh();
623 1.5 ad l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
624 1.5 ad softint_execute(si, l, s);
625 1.5 ad splx(s);
626 1.5 ad
627 1.5 ad lwp_lock(l);
628 1.5 ad l->l_stat = LSIDL;
629 1.5 ad mi_switch(l);
630 1.5 ad }
631 1.2 ad }
632 1.4 ad
633 1.4 ad /*
634 1.4 ad * softint_picklwp:
635 1.4 ad *
636 1.4 ad * Slow path: called from mi_switch() to pick the highest priority
637 1.4 ad * soft interrupt LWP that needs to run.
638 1.4 ad */
639 1.4 ad lwp_t *
640 1.4 ad softint_picklwp(void)
641 1.4 ad {
642 1.5 ad struct cpu_info *ci;
643 1.5 ad u_int mask;
644 1.5 ad softint_t *si;
645 1.5 ad lwp_t *l;
646 1.5 ad
647 1.5 ad ci = curcpu();
648 1.5 ad si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
649 1.5 ad mask = ci->ci_data.cpu_softints;
650 1.5 ad
651 1.5 ad if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
652 1.5 ad l = si[SOFTINT_SERIAL].si_lwp;
653 1.5 ad } else if ((mask & (1 << SOFTINT_NET)) != 0) {
654 1.5 ad l = si[SOFTINT_NET].si_lwp;
655 1.5 ad } else if ((mask & (1 << SOFTINT_BIO)) != 0) {
656 1.5 ad l = si[SOFTINT_BIO].si_lwp;
657 1.5 ad } else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
658 1.5 ad l = si[SOFTINT_CLOCK].si_lwp;
659 1.5 ad } else {
660 1.5 ad panic("softint_picklwp");
661 1.5 ad }
662 1.4 ad
663 1.5 ad return l;
664 1.4 ad }
665 1.4 ad
666 1.4 ad /*
667 1.4 ad * softint_overlay:
668 1.4 ad *
669 1.4 ad * Slow path: called from lwp_userret() to run a soft interrupt
670 1.6 ad * within the context of a user thread.
671 1.4 ad */
672 1.4 ad void
673 1.4 ad softint_overlay(void)
674 1.4 ad {
675 1.5 ad struct cpu_info *ci;
676 1.5 ad u_int softints;
677 1.5 ad softint_t *si;
678 1.6 ad pri_t obase;
679 1.5 ad lwp_t *l;
680 1.5 ad int s;
681 1.5 ad
682 1.5 ad l = curlwp;
683 1.5 ad ci = l->l_cpu;
684 1.5 ad si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
685 1.5 ad
686 1.5 ad KASSERT((l->l_pflag & LP_INTR) == 0);
687 1.5 ad
688 1.6 ad /* Arrange to elevate priority if the LWP blocks. */
689 1.6 ad obase = l->l_kpribase;
690 1.6 ad l->l_kpribase = PRI_KERNEL_RT;
691 1.5 ad l->l_pflag |= LP_INTR;
692 1.5 ad s = splhigh();
693 1.5 ad while ((softints = ci->ci_data.cpu_softints) != 0) {
694 1.5 ad if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
695 1.5 ad ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
696 1.5 ad softint_execute(&si[SOFTINT_SERIAL], l, s);
697 1.5 ad continue;
698 1.5 ad }
699 1.5 ad if ((softints & (1 << SOFTINT_NET)) != 0) {
700 1.5 ad ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
701 1.5 ad softint_execute(&si[SOFTINT_NET], l, s);
702 1.5 ad continue;
703 1.5 ad }
704 1.5 ad if ((softints & (1 << SOFTINT_BIO)) != 0) {
705 1.5 ad ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
706 1.5 ad softint_execute(&si[SOFTINT_BIO], l, s);
707 1.5 ad continue;
708 1.5 ad }
709 1.5 ad if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
710 1.5 ad ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
711 1.5 ad softint_execute(&si[SOFTINT_CLOCK], l, s);
712 1.5 ad continue;
713 1.5 ad }
714 1.5 ad }
715 1.5 ad splx(s);
716 1.5 ad l->l_pflag &= ~LP_INTR;
717 1.6 ad l->l_kpribase = obase;
718 1.4 ad }
719 1.5 ad
720 1.5 ad #else /* !__HAVE_FAST_SOFTINTS */
721 1.5 ad
722 1.5 ad /*
723 1.5 ad * softint_thread:
724 1.5 ad *
725 1.5 ad * Fast path: the LWP is switched to without restoring any state,
726 1.5 ad * so we should not arrive here - there is a direct handoff between
727 1.5 ad * the interrupt stub and softint_dispatch().
728 1.5 ad */
729 1.5 ad void
730 1.5 ad softint_thread(void *cookie)
731 1.5 ad {
732 1.5 ad
733 1.5 ad panic("softint_thread");
734 1.5 ad }
735 1.5 ad
736 1.5 ad /*
737 1.5 ad * softint_dispatch:
738 1.5 ad *
739 1.5 ad * Fast path: entry point from machine-dependent code.
740 1.5 ad */
741 1.5 ad void
742 1.5 ad softint_dispatch(lwp_t *pinned, int s)
743 1.5 ad {
744 1.9 yamt struct bintime now;
745 1.5 ad softint_t *si;
746 1.5 ad u_int timing;
747 1.5 ad lwp_t *l;
748 1.5 ad
749 1.5 ad l = curlwp;
750 1.5 ad si = l->l_private;
751 1.5 ad
752 1.5 ad /*
753 1.5 ad * Note the interrupted LWP, and mark the current LWP as running
754 1.5 ad * before proceeding. Although this must as a rule be done with
755 1.5 ad * the LWP locked, at this point no external agents will want to
756 1.5 ad * modify the interrupt LWP's state.
757 1.5 ad */
758 1.5 ad timing = (softint_timing ? LW_TIMEINTR : 0);
759 1.5 ad l->l_switchto = pinned;
760 1.5 ad l->l_stat = LSONPROC;
761 1.5 ad l->l_flag |= (LW_RUNNING | timing);
762 1.5 ad
763 1.5 ad /*
764 1.5 ad * Dispatch the interrupt. If softints are being timed, charge
765 1.5 ad * for it.
766 1.5 ad */
767 1.5 ad if (timing)
768 1.9 yamt bintime(&l->l_stime);
769 1.5 ad softint_execute(si, l, s);
770 1.5 ad if (timing) {
771 1.9 yamt bintime(&now);
772 1.5 ad updatertime(l, &now);
773 1.5 ad l->l_flag &= ~LW_TIMEINTR;
774 1.5 ad }
775 1.5 ad
776 1.5 ad /*
777 1.5 ad * If we blocked while handling the interrupt, the pinned LWP is
778 1.5 ad * gone so switch to the idle LWP. It will select a new LWP to
779 1.5 ad * run.
780 1.5 ad *
781 1.5 ad * We must drop the priority level as switching at IPL_HIGH could
782 1.5 ad * deadlock the system. We have already set si->si_active = 0,
783 1.5 ad * which means another interrupt at this level can be triggered.
784 1.5 ad * That's not a problem: we are lowering to level 's' which will
785 1.5 ad * prevent softint_dispatch() from being reentered at level 's',
786 1.5 ad * until the priority is finally dropped to IPL_NONE on entry to
787 1.5 ad * the idle loop.
788 1.5 ad */
789 1.5 ad l->l_stat = LSIDL;
790 1.5 ad if (l->l_switchto == NULL) {
791 1.5 ad splx(s);
792 1.5 ad pmap_deactivate(l);
793 1.5 ad lwp_exit_switchaway(l);
794 1.5 ad /* NOTREACHED */
795 1.5 ad }
796 1.5 ad l->l_switchto = NULL;
797 1.5 ad l->l_flag &= ~LW_RUNNING;
798 1.5 ad }
799 1.5 ad
800 1.5 ad #endif /* !__HAVE_FAST_SOFTINTS */