/*	$NetBSD: intr.c,v 1.10 2010/12/20 00:25:41 matt Exp $	*/
2
3 /*-
4 * Copyright (c) 2007 Michael Lorenz
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.10 2010/12/20 00:25:41 matt Exp $");
31
32 #include "opt_multiprocessor.h"
33
34 #include <sys/param.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/cpu.h>
38
39 #include <arch/powerpc/pic/picvar.h>
40 #include "opt_pic.h"
41 #include "opt_interrupt.h"
42 #if defined(PIC_I8259) || defined (PIC_PREPIVR)
43 #include <machine/isa_machdep.h>
44 #endif
45
46 #ifdef MULTIPROCESSOR
47 #include <arch/powerpc/pic/ipivar.h>
48 #endif
49
#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

/* a virtual irq is valid if it lies in [0, NVIRQ) */
#define LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];	/* registered PICs, in pic_add() order */
int num_pics = 0;		/* number of entries used in pics[] */
int max_base = 0;		/* one past the highest hw irq number assigned */
uint8_t virq[NIRQ];		/* hw irq -> virtual irq map (0 = unmapped) */
int virq_max = 0;		/* highest virtual irq handed out so far */
imask_t imask[NIPL];		/* per-IPL mask of blocked virtual irqs */
int primary_pic = 0;		/* index into pics[] of the primary PIC */

static int fakeintr(void *);
static int mapirq(uint32_t);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

/* one interrupt source (handler chain, evcnt, type) per virtual irq */
static struct intr_source intrsources[NVIRQ];
68
69 void
70 pic_init(void)
71 {
72 int i;
73
74 for (i = 0; i < NIRQ; i++)
75 virq[i] = 0;
76 memset(intrsources, 0, sizeof(intrsources));
77 }
78
79 int
80 pic_add(struct pic_ops *pic)
81 {
82
83 if (num_pics >= MAX_PICS)
84 return -1;
85
86 pics[num_pics] = pic;
87 pic->pic_intrbase = max_base;
88 max_base += pic->pic_numintrs;
89 num_pics++;
90
91 return pic->pic_intrbase;
92 }
93
94 void
95 pic_finish_setup(void)
96 {
97 struct pic_ops *pic;
98 int i;
99
100 for (i = 0; i < num_pics; i++) {
101 pic = pics[i];
102 if (pic->pic_finish_setup != NULL)
103 pic->pic_finish_setup(pic);
104 }
105 }
106
107 static struct pic_ops *
108 find_pic_by_irq(int irq)
109 {
110 struct pic_ops *current;
111 int base = 0;
112
113 while (base < num_pics) {
114
115 current = pics[base];
116 if ((irq >= current->pic_intrbase) &&
117 (irq < (current->pic_intrbase + current->pic_numintrs))) {
118
119 return current;
120 }
121 base++;
122 }
123 return NULL;
124 }
125
/*
 * Placeholder handler installed while a real handler is being wired
 * up in intr_establish(); does nothing and claims nothing.
 */
static int
fakeintr(void *arg)
{

	return 0;
}
132
/*
 * Register an interrupt handler.
 *
 * hwirq is a global hardware irq number (the owning PIC's intrbase is
 * already folded in); "level" is the IPL at which ih_fun(ih_arg) runs.
 * Returns an opaque cookie for intr_disestablish().
 */
void *
intr_establish(int hwirq, int type, int level, int (*ih_fun)(void *),
	void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxlevel = level;

	/* IPL_NONE here means "no preference": treat as fully blocking */
	if (maxlevel == IPL_NONE)
		maxlevel = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* allocate (or look up) the virtual irq for this hw irq */
	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	/*
	 * Sharing rules: a source with no type yet adopts the caller's
	 * type; edge/level sources may only be shared with the same type.
	 */
	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH -- a mismatched share attempt panics below */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		/* first handler on this source: attach the event counter */
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {

		maxlevel = max(maxlevel, q->ih_level);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxlevel);

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();


	return ih;
}
237
/*
 * No-op pic_establish_irq hook for PICs that need no per-irq setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}
242
243 /*
244 * Deregister an interrupt handler.
245 */
246 void
247 intr_disestablish(void *arg)
248 {
249 struct intrhand *ih = arg;
250 int irq = ih->ih_irq;
251 struct intr_source *is = &intrsources[irq];
252 struct intrhand **p, *q;
253
254 if (!LEGAL_VIRQ(irq))
255 panic("intr_disestablish: bogus irq %d", irq);
256
257 /*
258 * Remove the handler from the chain.
259 * This is O(n^2), too.
260 */
261 for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
262 ;
263 if (q)
264 *p = q->ih_next;
265 else
266 panic("intr_disestablish: handler not registered");
267 free((void *)ih, M_DEVBUF);
268
269 intr_calculatemasks();
270
271 if (is->is_hand == NULL) {
272 is->is_type = IST_NONE;
273 evcnt_detach(&is->is_ev);
274 }
275 }
276
277 /*
278 * Map max_base irqs into 32 (bits).
279 */
280 static int
281 mapirq(uint32_t irq)
282 {
283 struct pic_ops *pic;
284 int v;
285
286 if (irq >= max_base)
287 panic("invalid irq %d", irq);
288
289 if ((pic = find_pic_by_irq(irq)) == NULL)
290 panic("%s: cannot find PIC for IRQ %d", __func__, irq);
291
292 if (virq[irq])
293 return virq[irq];
294
295 virq_max++;
296 v = virq_max;
297 if (v > HWIRQ_MAX)
298 panic("virq overflow");
299
300 intrsources[v].is_hwirq = irq;
301 intrsources[v].is_pic = pic;
302 virq[irq] = v;
303 #ifdef PIC_DEBUG
304 printf("mapping irq %d to virq %d\n", irq, v);
305 #endif
306 return v;
307 }
308
/* Human-readable names for the IST_* interrupt trigger types. */
static const char * const intr_typenames[] = {
	[IST_NONE]  = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE]  = "edge-triggered",
	[IST_LEVEL] = "level-triggered",
};

/* Return a printable name for interrupt type "type". */
const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}
323
/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	struct intr_source *is;
	struct intrhand *q;
	struct pic_ops *current;
	int irq, level, i, base;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int levels = 0;
		for (q = is->is_hand; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		is->is_level = levels;
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		register imask_t irqs = 0;
		for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++)
			if (is->is_level & (1 << level))
				irqs |= 1ULL << irq;
		imask[level] = irqs;
	}

	/*
	 * IPL_CLOCK should mask clock interrupt even if interrupt handler
	 * is not registered.
	 */
	imask[IPL_CLOCK] |= 1ULL << SPL_CLOCK;

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTCLOCK] = 1ULL << SIR_CLOCK;
	imask[IPL_SOFTNET] = 1ULL << SIR_NET;
	imask[IPL_SOFTSERIAL] = 1ULL << SIR_SERIAL;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	imask[IPL_NONE] = 0;

#ifdef SLOPPY_IPLS
	/*
	 * Enforce a sloppy hierarchy as in spl(9)
	 */
	/* everything above softclock must block softclock */
	for (i = IPL_SOFTCLOCK; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTCLOCK];

	/* everything above softnet must block softnet */
	for (i = IPL_SOFTNET; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTNET];

	/* IPL_TTY must block softserial */
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/* IPL_VM must block net, block IO and tty */
	imask[IPL_VM] |= (imask[IPL_NET] | imask[IPL_BIO] | imask[IPL_TTY]);

	/* IPL_SERIAL must block IPL_TTY */
	imask[IPL_SERIAL] |= imask[IPL_TTY];

	/* IPL_HIGH must block all other priority levels */
	for (i = IPL_NONE; i < IPL_HIGH; i++)
		imask[IPL_HIGH] |= imask[i];
#else /* !SLOPPY_IPLS */
	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (i = 1; i < NIPL; i++)
		imask[i] |= imask[i - 1];
#endif /* !SLOPPY_IPLS */

#ifdef DEBUG_IPL
	for (i = 0; i < NIPL; i++) {
		/* NOTE(review): %08x truncates if imask_t is 64-bit -- confirm */
		printf("%2d: %08x\n", i, imask[i]);
	}
#endif

	/* And eventually calculate the complete masks. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* each source blocks itself plus everything its IPLs block */
		register imask_t irqs = 1ULL << irq;
		for (q = is->is_hand; q; q = q->ih_next)
			irqs |= imask[q->ih_level];
		is->is_mask = irqs;
	}

	/* Lastly, enable IRQs actually in use. */
	for (base = 0; base < num_pics; base++) {
		current = pics[base];
		for (i = 0; i < current->pic_numintrs; i++)
			current->pic_disable_irq(current, i);
	}

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
433
434 void
435 pic_enable_irq(int num)
436 {
437 struct pic_ops *current;
438 int type;
439
440 current = find_pic_by_irq(num);
441 if (current == NULL)
442 panic("%s: bogus IRQ %d", __func__, num);
443 type = intrsources[virq[num]].is_type;
444 current->pic_enable_irq(current, num - current->pic_intrbase, type);
445 }
446
/*
 * Mark hw irq "irq" pending on the current CPU without running it.
 * External interrupts (PSL_EE) are disabled around the ci_ipending
 * update so the read-modify-write is atomic w.r.t. interrupt delivery.
 */
void
pic_mark_pending(int irq)
{
	struct cpu_info * const ci = curcpu();
	int v, msr;

	v = virq[irq];
	/* v == 0 means the irq was never mapped via intr_establish() */
	if (v == 0)
		printf("IRQ %d maps to 0\n", irq);

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= 1ULL << v;
	mtmsr(msr);
}
462
/*
 * Run any pending interrupts that are no longer masked at the current
 * IPL.  Called (e.g. from splx()/spllower()) when the level drops.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	imask_t hwpend;
	int emsr, dmsr;

	/* recursion guard: splraise/splx below could re-enter us */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;
	mtmsr(dmsr);		/* block external interrupts */

	pcpl = ci->ci_cpl;	/* entry IPL, restored after each handler run */
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Do now unmasked pendings */
	ci->ci_idepth++;
	while ((hwpend = (ci->ci_ipending & ~pcpl & HWIRQ_MASK)) != 0) {
		/* Get most significant pending bit */
		irq = MS_PENDING(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1ULL << irq);
		if (irq == 0) {
			/* virq 0 is "unmapped" and should never be pending */
			printf("VIRQ0");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		/* run the handler chain at the source's IPL, interrupts on */
		splraise(is->is_mask);
		mtmsr(emsr);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
				    irq, is->is_hwirq, is);
			}
#endif
			/* IPL_VM handlers still rely on the big kernel lock */
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
		}
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
	}
	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	/* soft interrupts, highest priority first; each run may post more,
	 * hence the goto back to the hardware-pending loop */
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1ULL << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_NET)) {
		ci->ci_ipending &= ~(1ULL << SIR_NET);
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1ULL << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#endif

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}
569
/*
 * Main dispatch loop for one PIC: fetch each asserted irq, run its
 * handler chain (or mark it pending if masked at the current IPL),
 * acknowledge it, and repeat until the PIC reports 255 ("none left").
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;
	int pcpl, msr, bail;
	imask_t r_imen;

	realirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (realirq == 255)
		return 0;	/* spurious: nothing pending */

	msr = mfmsr();
	pcpl = ci->ci_cpl;

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ppcipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	/* NOTE(review): irq == 0 (unmapped) is only caught under PIC_DEBUG */
	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1ULL << irq;
	is = &intrsources[irq];

	if ((pcpl & r_imen) != 0) {

		ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;
		ci->ci_idepth++;

		/* run the chain at the source's IPL with interrupts enabled */
		splraise(is->is_mask);
		mtmsr(msr | PSL_EE);
		ih = is->is_hand;
		bail = 0;
		/* bail caps each pass at 10 chained handlers */
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			/* IPL_VM handlers still need the big kernel lock */
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
			bail++;
		}
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		ci->ci_data.cpu_nintr++;
		is->is_ev.ev_count++;
		ci->ci_idepth--;
	}
#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	if (realirq != 255)
		goto start;

	/* lower back to the entry IPL; splx() runs any deferred pendings */
	mtmsr(msr | PSL_EE);
	splx(pcpl); /* Process pendings. */
	mtmsr(msr);

	return 0;
}
662
663 void
664 pic_ext_intr(void)
665 {
666
667 KASSERT(pics[primary_pic] != NULL);
668 pic_handle_intr(pics[primary_pic]);
669
670 return;
671
672 }
673
/*
 * Raise the current IPL by OR-ing "ncpl" into the cpl mask; never
 * lowers the level.  Returns the previous cpl for a later splx().
 */
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* don't reorder.... */

	ocpl = ci->ci_cpl;
	ci->ci_cpl = ocpl | ncpl;
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
687
/*
 * Restore the IPL to "ncpl" (typically a value saved by splraise())
 * and run any interrupts that became deliverable at the new level.
 */
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
}
699
/*
 * Set the IPL to "ncpl" unconditionally (unlike splraise, this may
 * lower it), running newly deliverable pending interrupts.  Returns
 * the previous cpl.
 */
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
714
/* Following code should be implemented with lwarx/stwcx to avoid
 * the disable/enable. i need to read the manual once more.... */

/*
 * Post a soft interrupt: set the ci_ipending bit for "ipl" with
 * external interrupts (PSL_EE) disabled so the read-modify-write is
 * atomic.  The soft interrupt runs when the IPL next drops below it.
 */
void
softintr(int ipl)
{
	struct cpu_info *ci = curcpu();
	int msrsave;

	msrsave = mfmsr();
	mtmsr(msrsave & ~PSL_EE);
	ci->ci_ipending |= 1ULL << ipl;
	mtmsr(msrsave);
}
728
729 void
730 genppc_cpu_configure(void)
731 {
732 aprint_normal("biomask %x netmask %x ttymask %x\n",
733 (u_int)imask[IPL_BIO] & 0x1fffffff,
734 (u_int)imask[IPL_NET] & 0x1fffffff,
735 (u_int)imask[IPL_TTY] & 0x1fffffff);
736
737 spl0();
738 }
739
740 #if defined(PIC_PREPIVR) || defined(PIC_I8259)
741 /*
742 * isa_intr_alloc needs to be done here, because it needs direct access to
743 * the various interrupt handler structures.
744 */
745
746 int
747 genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
748 int mask, int type, int *irq_p)
749 {
750 int irq, vi;
751 int maybe_irq = -1;
752 int shared_depth = 0;
753 struct intr_source *is;
754
755 if (pic == NULL)
756 return 1;
757
758 for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
759 mask >>= 1, irq++) {
760 if ((mask & 1) == 0)
761 continue;
762 vi = virq[irq + pic->pic_intrbase];
763 if (!vi) {
764 *irq_p = irq;
765 return 0;
766 }
767 is = &intrsources[vi];
768 if (is->is_type == IST_NONE) {
769 *irq_p = irq;
770 return 0;
771 }
772 /* Level interrupts can be shared */
773 if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
774 struct intrhand *ih = is->is_hand;
775 int depth;
776
777 if (maybe_irq == -1) {
778 maybe_irq = irq;
779 continue;
780 }
781 for (depth = 0; ih != NULL; ih = ih->ih_next)
782 depth++;
783 if (depth < shared_depth) {
784 maybe_irq = irq;
785 shared_depth = depth;
786 }
787 }
788 }
789 if (maybe_irq != -1) {
790 *irq_p = maybe_irq;
791 return 0;
792 }
793 return 1;
794 }
795 #endif
796