/*	$NetBSD: intr.c,v 1.37 2025/02/17 11:14:49 jmcneill Exp $	*/

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.37 2025/02/17 11:14:49 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined (PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define	MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define	REORDER_PROTECT()	__asm volatile("sync" ::: "memory")
#else
#define	REORDER_PROTECT()	__asm volatile("sync; eieio" ::: "memory")
#endif
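
/*
 * Note: REORDER_PROTECT() is used below to bracket updates to ci_cpl and
 * the pending masks so stores to them cannot be reordered around device
 * accesses.  On 4xx parts other than the 440, eieio is implemented as
 * sync, so the plain sync form avoids issuing the barrier twice.
 */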

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
static imask_t imask[NIPL];
int primary_pic = 0;

static int fakeintr(void *);
static int mapirq(int);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

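/*
 * Interrupt sources are indexed by virtual IRQ (virq).  virq 0 serves as
 * the "not mapped" sentinel in virq_map[], and the assertions below
 * assume mapirq() never hands that slot out.
 */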
static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}
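
/*
 * Usage sketch (illustrative; "myboard_pic" is hypothetical): a port's
 * interrupt setup registers each controller once and uses the returned
 * base to turn controller pins into global hwirq numbers:
 *
 *	myboard_pic.pic_numintrs = 32;
 *	int base = pic_add(&myboard_pic);
 *	if (base < 0)
 *		panic("too many PICs");
 *
 * after which pin N of that controller is global hwirq (base + N).
 */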

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];
	const bool cascaded = ih_fun == pic_handle_intr;

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		is->is_cascaded = cascaded;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		}
		if (cascaded != is->is_cascaded) {
			panic("intr_establish: can't share cascaded with "
			    "non-cascaded interrupt");
		}
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_intrid, sizeof(is->is_intrid), "%s irq %d",
		    pic->pic_name, is->is_hwirq);
		snprintf(is->is_evname, sizeof(is->is_evname), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_evname);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}
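
/*
 * Usage sketch (illustrative; the "mydev" names are hypothetical): a
 * driver attach routine typically hooks its interrupt like
 *
 *	sc->sc_ih = intr_establish_xname(hwirq, IST_LEVEL_LOW, IPL_VM,
 *	    mydev_intr, sc, device_xname(self));
 *
 * and undoes it with intr_disestablish(sc->sc_ih) at detach time.
 */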

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map max_base irqs into 32 (bits).
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}
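
/*
 * Allocation sketch: set bits in virq_mask mark free virqs.  mapirq()
 * takes the most significant one (PIC_VIRQ_MS_PENDING), clears it with
 * PIC_VIRQ_TO_MASK(), and records the mapping in virq_map[];
 * intr_disestablish() sets the bit again once the last handler for the
 * source is gone, making the virq reusable.
 */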

static const char * const intr_typenames[] = {
	[IST_NONE]		= "none",
	[IST_PULSE]		= "pulsed",
	[IST_EDGE_FALLING]	= "falling edge triggered",
	[IST_EDGE_RISING]	= "rising edge triggered",
	[IST_LEVEL_LOW]		= "low level triggered",
	[IST_LEVEL_HIGH]	= "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
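
/*
 * Worked example (hypothetical virqs): with one handler at IPL_VM on
 * virq 3 and one at IPL_SCHED on virq 5, the first pass gives
 * newmask[IPL_VM] = PIC_VIRQ_TO_MASK(3) and
 * newmask[IPL_SCHED] = PIC_VIRQ_TO_MASK(5); the hierarchy pass then ORs
 * each level into the next, so IPL_SCHED (and everything above it) ends
 * up blocking both sources.
 */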

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

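/*
 * Run all handlers chained to an interrupt source.  Handlers registered
 * at IPL_VM run under the big kernel lock; handlers at any other IPL are
 * called without it.
 */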
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		    "NULL interrupt handler!\n", __func__,
		    virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Process any pending interrupts that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		if (!is->is_cascaded) {
			splraise(is->is_ipl);
			mtmsr(emsr);
		}
		intr_deliver(is, virq);
		if (!is->is_cascaded) {
			mtmsr(dmsr);
			ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */
		}

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	mtmsr(msr & ~PSL_EE);

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255) {
		mtmsr(msr);
		return 0;
	}

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			if (!is->is_cascaded) {
				splraise(is->is_ipl);
				mtmsr(msr | PSL_EE);
			}
			intr_deliver(is, virq);
			if (!is->is_cascaded) {
				mtmsr(msr & ~PSL_EE);
				ci->ci_cpl = pcpl;
			}

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
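
/*
 * Cascade sketch (illustrative; "childpic" is hypothetical): a secondary
 * controller wired to a pin of its parent is hooked by registering
 * pic_handle_intr itself as the handler for that pin:
 *
 *	pic_add(&childpic);
 *	intr_establish(parent_hwirq, IST_LEVEL_LOW, IPL_NONE,
 *	    pic_handle_intr, &childpic);
 *
 * intr_establish() recognizes pic_handle_intr and marks the source
 * cascaded, which is why the IPL raise/MSR toggle above is skipped for
 * such sources.
 */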

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);

	return;

}

int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}
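
/*
 * Usage sketch (the standard spl(9) pattern, not specific to this file):
 *
 *	int s = splraise(IPL_VM);
 *	... touch state shared with IPL_VM interrupt handlers ...
 *	splx(s);
 *
 * splx() drops back to the saved level and, via have_pending_intr_p(),
 * delivers any interrupts that became runnable.  Most callers use the
 * spl(9) wrappers (splvm(), splhigh(), ...) rather than calling
 * splraise() directly.
 */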

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			if (maybe_irq == -1) {
				maybe_irq = irq;
				continue;
			}
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif

static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_intrid) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct intr_source *is;

	/* XXX interrupt is always generated by CPU 0 */
	if (cpu_idx != 0)
		return 0;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_ev.ev_count;
	return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrhand *ih;

	if (len == 0)
		return;

	buf[0] = '\0';

	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
		if (buf[0] != '\0')
			strlcat(buf, ", ", len);
		strlcat(buf, ih->ih_xname, len);
	}
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0))	/* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Ignore sources established after "count" was taken. */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_intrid, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}
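
/*
 * Usage sketch (conventional consumer of the two routines above; error
 * handling omitted): construct the id list, walk it, then free it:
 *
 *	struct intrids_handler *iih;
 *
 *	iih = interrupt_construct_intrids(kcpuset_running);
 *	if (iih != NULL) {
 *		for (int n = 0; n < iih->iih_nids; n++)
 *			printf("%s\n", iih->iih_intrids[n]);
 *		interrupt_destruct_intrids(iih);
 *	}
 */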

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

#undef REORDER_PROTECT