/*	$NetBSD: intr.c,v 1.16 2011/06/20 06:23:52 matt Exp $	*/

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.16 2011/06/20 06:23:52 matt Exp $");

#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define	MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t imask[NIPL];
int primary_pic = 0;

static int fakeintr(void *);
static int mapirq(int);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

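/*
 * Register a PIC with the framework and assign it a contiguous range
 * of hardware IRQ numbers.  Returns the base of that range, or -1 if
 * the PIC table is already full.
 */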
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

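/*
 * Once all PICs have been added, give each one a chance to run its
 * post-registration setup hook.
 */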
void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

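/*
 * Return the PIC whose global hwirq range contains the given hwirq,
 * or NULL if no registered PIC covers it.
 */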
static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

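/*
 * Placeholder handler, briefly installed while a real handler is being
 * established so that a stray interrupt does no harm.
 */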
static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * Now that the handler is established, we're ready to calculate
	 * the masks.
	 */
	intr_calculatemasks();

	return ih;
}

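/*
 * No-op pic_establish_irq hook for PICs that need no per-IRQ setup.
 */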
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a hardware IRQ (0..max_base-1) onto one of the NVIRQ (32)
 * virtual IRQ bits.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
	[IST_NONE]  = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE]  = "edge-triggered",
	[IST_LEVEL] = "level-triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL] = { [IPL_NONE...IPL_HIGH] = 0 };
	struct intr_source *is;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		newmask[is->is_ipl] |= PIC_VIRQ_TO_MASK(irq);
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef DEBUG_IPL
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

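/*
 * Enable a hardware IRQ at its PIC, using the trigger type recorded
 * for the corresponding virtual IRQ.
 */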
void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

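/*
 * Mark a hardware IRQ as pending on the current CPU.  External
 * interrupts are disabled around the ipending update so it cannot be
 * torn by a nested interrupt.
 */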
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

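/*
 * Run every handler chained to an interrupt source and count the
 * event.  Handlers at IPL_VM run under the big kernel lock;
 * higher-priority handlers run unlocked.
 */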
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    ("%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih));
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

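/*
 * Replay interrupts that were marked pending while masked, now that
 * the current IPL may allow them, then run any fast soft interrupts
 * that have become runnable.
 */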
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver interrupts that are pending and no longer masked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = (ci->ci_data.cpu_softints << pcpl) & IPL_SOFTMASK;

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

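/*
 * Main dispatcher, called with the PIC whose interrupt fired.  Drains
 * the PIC: interrupts masked at the current IPL are only marked
 * pending, unmasked ones are delivered at their IPL with PSL_EE
 * re-enabled.
 */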
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
#ifdef MULTIPROCESSOR
		/* THIS IS WRONG XXX */
		if (picirq == ipiops.ppc_ipi_vector) {
			ci->ci_cpl = IPL_HIGH;
			ipi_intr(NULL);
			ci->ci_cpl = pcpl;
			pic->pic_ack_irq(pic, picirq);
			continue;
		}
#endif

		const int virq = virq_map[picirq + pic->pic_intrbase];
		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

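/*
 * External interrupt entry point: hand off to the primary PIC.
 */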
void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);

	return;
}

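/*
 * Raise the current CPU's priority level to ncpl (never lowers it)
 * and return the previous level.
 */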
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}

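/*
 * True if some pending hard or soft interrupt would be unmasked at ncpl.
 */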
static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if ((ci->ci_data.cpu_softints << ncpl) & IPL_SOFTMASK)
		return true;
#endif
	return false;
}

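/*
 * Restore the priority level to ncpl and process anything that became
 * deliverable as a result.
 */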
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}

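/*
 * Lower the priority level to ncpl, processing newly unmasked pending
 * interrupts, and return the previous level.
 */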
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

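/*
 * Report the traditional spl masks and lower to spl0 at the end of
 * cpu configuration.
 */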
void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    (u_int)imask[IPL_BIO] & 0x1fffffff,
	    (u_int)imask[IPL_NET] & 0x1fffffff,
	    (u_int)imask[IPL_TTY] & 0x1fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/*
			 * Count the handlers already on this IRQ and
			 * remember the least-shared candidate.  (The
			 * original skipped counting the first candidate,
			 * so a less-loaded IRQ could never displace it.)
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif