intr.c revision 1.16 1 1.16 matt /* $NetBSD: intr.c,v 1.16 2011/06/20 06:23:52 matt Exp $ */
2 1.2 garbled
3 1.2 garbled /*-
4 1.2 garbled * Copyright (c) 2007 Michael Lorenz
5 1.2 garbled * All rights reserved.
6 1.2 garbled *
7 1.2 garbled * Redistribution and use in source and binary forms, with or without
8 1.2 garbled * modification, are permitted provided that the following conditions
9 1.2 garbled * are met:
10 1.2 garbled * 1. Redistributions of source code must retain the above copyright
11 1.2 garbled * notice, this list of conditions and the following disclaimer.
12 1.2 garbled * 2. Redistributions in binary form must reproduce the above copyright
13 1.2 garbled * notice, this list of conditions and the following disclaimer in the
14 1.2 garbled * documentation and/or other materials provided with the distribution.
15 1.2 garbled *
16 1.2 garbled * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 1.2 garbled * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 1.2 garbled * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 1.2 garbled * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 1.2 garbled * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 1.2 garbled * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 1.2 garbled * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 1.2 garbled * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 1.2 garbled * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 1.2 garbled * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 1.2 garbled * POSSIBILITY OF SUCH DAMAGE.
27 1.2 garbled */
28 1.2 garbled
29 1.2 garbled #include <sys/cdefs.h>
30 1.16 matt __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.16 2011/06/20 06:23:52 matt Exp $");
31 1.2 garbled
32 1.16 matt #include "opt_interrupt.h"
33 1.2 garbled #include "opt_multiprocessor.h"
34 1.16 matt #include "opt_pic.h"
35 1.2 garbled
36 1.12 macallan #define __INTR_PRIVATE
37 1.12 macallan
38 1.2 garbled #include <sys/param.h>
39 1.16 matt #include <sys/cpu.h>
40 1.16 matt #include <sys/kernel.h>
41 1.2 garbled #include <sys/malloc.h>
42 1.2 garbled
43 1.16 matt #include <powerpc/psl.h>
44 1.16 matt #include <powerpc/pic/picvar.h>
45 1.16 matt
46 1.2 garbled #if defined(PIC_I8259) || defined (PIC_PREPIVR)
47 1.2 garbled #include <machine/isa_machdep.h>
48 1.2 garbled #endif
49 1.2 garbled
50 1.2 garbled #ifdef MULTIPROCESSOR
51 1.16 matt #include <powerpc/pic/ipivar.h>
52 1.2 garbled #endif
53 1.2 garbled
54 1.12 macallan #ifdef __HAVE_FAST_SOFTINTS
55 1.12 macallan #include <powerpc/softint.h>
56 1.12 macallan #endif
57 1.12 macallan
58 1.2 garbled #define MAX_PICS 8 /* 8 PICs ought to be enough for everyone */
59 1.2 garbled
60 1.15 matt #define PIC_VIRQ_LEGAL_P(x) ((u_int)(x) < NVIRQ)
61 1.2 garbled
62 1.2 garbled struct pic_ops *pics[MAX_PICS];
63 1.2 garbled int num_pics = 0;
64 1.2 garbled int max_base = 0;
65 1.15 matt uint8_t virq_map[NIRQ];
66 1.15 matt imask_t virq_mask = HWIRQ_MASK;
67 1.8 kiyohara imask_t imask[NIPL];
68 1.2 garbled int primary_pic = 0;
69 1.2 garbled
70 1.2 garbled static int fakeintr(void *);
71 1.15 matt static int mapirq(int);
72 1.2 garbled static void intr_calculatemasks(void);
73 1.15 matt static struct pic_ops *find_pic_by_hwirq(int);
74 1.2 garbled
75 1.2 garbled static struct intr_source intrsources[NVIRQ];
76 1.2 garbled
/*
 * Prepare the interrupt subsystem.  All of our state lives in bss and
 * is therefore already zero-initialized, so nothing needs doing here.
 */
void
pic_init(void)
{
}
82 1.2 garbled
83 1.2 garbled int
84 1.2 garbled pic_add(struct pic_ops *pic)
85 1.2 garbled {
86 1.2 garbled
87 1.2 garbled if (num_pics >= MAX_PICS)
88 1.2 garbled return -1;
89 1.2 garbled
90 1.2 garbled pics[num_pics] = pic;
91 1.2 garbled pic->pic_intrbase = max_base;
92 1.2 garbled max_base += pic->pic_numintrs;
93 1.2 garbled num_pics++;
94 1.7 kiyohara
95 1.2 garbled return pic->pic_intrbase;
96 1.2 garbled }
97 1.2 garbled
98 1.2 garbled void
99 1.2 garbled pic_finish_setup(void)
100 1.2 garbled {
101 1.15 matt for (size_t i = 0; i < num_pics; i++) {
102 1.15 matt struct pic_ops * const pic = pics[i];
103 1.2 garbled if (pic->pic_finish_setup != NULL)
104 1.2 garbled pic->pic_finish_setup(pic);
105 1.2 garbled }
106 1.2 garbled }
107 1.2 garbled
108 1.2 garbled static struct pic_ops *
109 1.15 matt find_pic_by_hwirq(int hwirq)
110 1.2 garbled {
111 1.14 matt for (u_int base = 0; base < num_pics; base++) {
112 1.14 matt struct pic_ops * const pic = pics[base];
113 1.15 matt if (pic->pic_intrbase <= hwirq
114 1.15 matt && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
115 1.14 matt return pic;
116 1.2 garbled }
117 1.2 garbled }
118 1.2 garbled return NULL;
119 1.2 garbled }
120 1.2 garbled
/*
 * Placeholder handler installed while an interrupt source's masks are
 * being set up; it simply discards the interrupt.
 */
static int
fakeintr(void *arg)
{
	return 0;
}
127 1.2 garbled
/*
 * Register an interrupt handler.
 *
 * "hwirq" is a global hardware IRQ number (all PICs share one
 * pic_intrbase-relative namespace), "type" an IST_* trigger type and
 * "ipl" the priority at which ih_fun(ih_arg) should run.  Panics on a
 * bogus IRQ, unknown PIC, or incompatible sharing; returns an opaque
 * cookie for intr_disestablish().
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Allocate (or look up) the virtual IRQ slot for this hardware IRQ. */
	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	/* Sharing is only allowed between identical trigger types. */
	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		/* First handler on this source: attach its event counter. */
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();


	return ih;
}
236 1.2 garbled
/*
 * No-op pic_establish_irq hook for PICs that need no per-IRQ setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}
241 1.2 garbled
/*
 * Deregister an interrupt handler.
 *
 * "arg" is the cookie returned by intr_establish().  Recomputes the
 * source's IPL from the remaining handlers and, when the last handler
 * goes away, releases the source's virtual IRQ.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			/* Track the highest IPL among surviving handlers. */
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}
292 1.2 garbled
293 1.2 garbled /*
294 1.2 garbled * Map max_base irqs into 32 (bits).
295 1.2 garbled */
296 1.2 garbled static int
297 1.15 matt mapirq(int hwirq)
298 1.2 garbled {
299 1.2 garbled struct pic_ops *pic;
300 1.2 garbled
301 1.15 matt if (hwirq >= max_base)
302 1.15 matt panic("invalid irq %d", hwirq);
303 1.2 garbled
304 1.15 matt if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
305 1.15 matt panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);
306 1.2 garbled
307 1.15 matt if (virq_map[hwirq])
308 1.15 matt return virq_map[hwirq];
309 1.2 garbled
310 1.15 matt if (virq_mask == 0)
311 1.2 garbled panic("virq overflow");
312 1.2 garbled
313 1.15 matt const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
314 1.15 matt struct intr_source * const is = intrsources + virq;
315 1.15 matt
316 1.15 matt virq_mask &= ~PIC_VIRQ_TO_MASK(virq);
317 1.15 matt
318 1.15 matt is->is_hwirq = hwirq;
319 1.15 matt is->is_pic = pic;
320 1.15 matt virq_map[hwirq] = virq;
321 1.2 garbled #ifdef PIC_DEBUG
322 1.15 matt printf("mapping hwirq %d to virq %d\n", irq, virq);
323 1.2 garbled #endif
324 1.15 matt return virq;
325 1.2 garbled }
326 1.2 garbled
/* Printable names for the IST_* trigger types, indexed by type. */
static const char * const intr_typenames[] = {
	[IST_NONE]  = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE]  = "edge-triggered",
	[IST_LEVEL] = "level-triggered",
};

/*
 * Translate an IST_* constant into a human-readable name.
 * Asserts (DIAGNOSTIC kernels) that the type is in range and named.
 */
const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}
341 1.2 garbled
/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 *
 * Order matters below: all IRQs are disabled at the PICs before the
 * global imask[] table is swapped, then only in-use IRQs are re-enabled.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL] = { [IPL_NONE...IPL_HIGH] = 0 };
	struct intr_source *is;
	int irq;

	/* NOTE(review): redundant with the initializer above. */
	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		newmask[is->is_ipl] |= PIC_VIRQ_TO_MASK(irq);
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef DEBUG_IPL
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
409 1.2 garbled
410 1.2 garbled void
411 1.15 matt pic_enable_irq(int hwirq)
412 1.2 garbled {
413 1.15 matt struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
414 1.15 matt if (pic == NULL)
415 1.15 matt panic("%s: bogus IRQ %d", __func__, hwirq);
416 1.15 matt const int type = intrsources[virq_map[hwirq]].is_type;
417 1.15 matt (*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
418 1.2 garbled }
419 1.2 garbled
/*
 * Record a hardware interrupt as pending in ci_ipending so that
 * pic_do_pending_int() will deliver it later.
 */
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	/* Update ci_ipending with external interrupts disabled. */
	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}
434 1.2 garbled
/*
 * Run every handler chained on an interrupt source, then bump its
 * event counter.  Handlers at IPL_VM are called with the big kernel
 * lock held; the lock is dropped again before handlers at other IPLs.
 */
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    ("%s: irq %d, hwirq %d, is %p ih %p: "
			"NULL interrupt handler!\n", __func__,
			virq, is->is_hwirq, is, ih));
		if (ih->ih_ipl == IPL_VM) {
			/* Take the kernel lock once, keep it across
			 * consecutive IPL_VM handlers. */
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}
460 1.15 matt
/*
 * Deliver any pending interrupts that are unmasked at the current IPL.
 * Must be entered with external interrupts enabled (PSL_EE asserted);
 * they are disabled while ci_ipending is manipulated and re-enabled
 * around each handler invocation.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	/* Guard against recursion via splx()/handlers re-entering here. */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Do now unmasked pendings */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		/* Run the handlers at the source's IPL with interrupts on. */
		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = (ci->ci_data.cpu_softints << pcpl) & IPL_SOFTMASK;

	if (__predict_false(softints != 0)) {
		/* Run fast soft interrupts at IPL_HIGH, then recheck for
		 * hardware interrupts they may have unblocked. */
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}
524 1.2 garbled
/*
 * Main dispatch loop for one PIC: repeatedly ask the PIC for the next
 * IRQ and either deliver it (if unmasked at the current IPL) or mark
 * it pending and disable it, acking each one, until the PIC reports
 * no more work (255).  Registered with the PIC itself as the cookie.
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;	/* spurious: nothing to do */

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
#ifdef MULTIPROCESSOR
		/* THIS IS WRONG XXX */
		if (picirq == ipiops.ppc_ipi_vector) {
			ci->ci_cpl = IPL_HIGH;
			ipi_intr(NULL);
			ci->ci_cpl = pcpl;
			pic->pic_ack_irq(pic, picirq);
			continue;
		}
#endif

		const int virq = virq_map[picirq + pic->pic_intrbase];
		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			/* Deliver at the source's IPL with interrupts on. */
			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
583 1.2 garbled
584 1.2 garbled void
585 1.2 garbled pic_ext_intr(void)
586 1.2 garbled {
587 1.2 garbled
588 1.2 garbled KASSERT(pics[primary_pic] != NULL);
589 1.2 garbled pic_handle_intr(pics[primary_pic]);
590 1.2 garbled
591 1.2 garbled return;
592 1.2 garbled
593 1.2 garbled }
594 1.2 garbled
/*
 * Raise the current IPL to ncpl (never lowers it) and return the
 * previous IPL.  The sync/eieio pairs keep loads/stores from being
 * reordered across the IPL change.
 */
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	/* Fast path: already at the requested level. */
	if (ncpl == ci->ci_cpl) return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);	/* raise only, never lower */
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}
610 1.2 garbled
611 1.13 matt static inline bool
612 1.13 matt have_pending_intr_p(struct cpu_info *ci, int ncpl)
613 1.13 matt {
614 1.13 matt if (ci->ci_ipending & ~imask[ncpl])
615 1.13 matt return true;
616 1.13 matt #ifdef __HAVE_FAST_SOFTINTS
617 1.13 matt if ((ci->ci_data.cpu_softints << ncpl) & IPL_SOFTMASK)
618 1.13 matt return true;
619 1.13 matt #endif
620 1.13 matt return false;
621 1.13 matt }
622 1.13 matt
/*
 * Restore the IPL to ncpl and deliver any interrupts that the lower
 * level unmasks.
 */
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}
636 1.2 garbled
/*
 * Lower the IPL to ncpl, delivering any newly unmasked pending
 * interrupts, and return the previous IPL.
 */
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
652 1.2 garbled
653 1.2 garbled void
654 1.2 garbled genppc_cpu_configure(void)
655 1.2 garbled {
656 1.2 garbled aprint_normal("biomask %x netmask %x ttymask %x\n",
657 1.8 kiyohara (u_int)imask[IPL_BIO] & 0x1fffffff,
658 1.8 kiyohara (u_int)imask[IPL_NET] & 0x1fffffff,
659 1.8 kiyohara (u_int)imask[IPL_TTY] & 0x1fffffff);
660 1.2 garbled
661 1.2 garbled spl0();
662 1.2 garbled }
663 1.2 garbled
#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

/*
 * Choose an ISA IRQ for the caller from the candidate bitmap "mask"
 * on the given PIC: prefer a completely free IRQ, else fall back to
 * sharing a level-triggered one.  On success stores the IRQ in
 * *irq_p and returns 0; returns 1 if nothing suitable exists.
 */
int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
	int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			/* Never mapped: completely free. */
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			/* Mapped but unused: also free. */
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			if (maybe_irq == -1) {
				maybe_irq = irq;
				continue;
			}
			/*
			 * NOTE(review): shared_depth starts at 0 and the
			 * first candidate's chain depth is never recorded,
			 * so "depth < shared_depth" can never hold and the
			 * first shareable IRQ always wins.  Looks like a
			 * latent bug -- confirm intended behavior before
			 * changing.
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif
720