intr.c revision 1.8 1 /* $NetBSD: intr.c,v 1.8 2010/04/24 09:39:57 kiyohara Exp $ */
2
3 /*-
4 * Copyright (c) 2007 Michael Lorenz
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.8 2010/04/24 09:39:57 kiyohara Exp $");
31
32 #include "opt_multiprocessor.h"
33
34 #include <sys/param.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/cpu.h>
38
39 #include <uvm/uvm_extern.h>
40
41 #include <arch/powerpc/pic/picvar.h>
42 #include "opt_pic.h"
43 #include "opt_interrupt.h"
44 #if defined(PIC_I8259) || defined (PIC_PREPIVR)
45 #include <machine/isa_machdep.h>
46 #endif
47
48 #ifdef MULTIPROCESSOR
49 #include <arch/powerpc/pic/ipivar.h>
50 #endif
51
#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

/* A virtual IRQ number is valid if it lies in [0, NVIRQ). */
#define LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];	/* registered PICs, in pic_add() order */
int num_pics = 0;		/* number of valid entries in pics[] */
int max_base = 0;		/* one past the highest hw IRQ handed out */
uint8_t virq[NIRQ];		/* hw IRQ -> virtual IRQ; 0 means unmapped */
int virq_max = 0;		/* highest virtual IRQ allocated so far */
imask_t imask[NIPL];		/* per-IPL mask of virtual-IRQ bits */
int primary_pic = 0;		/* index into pics[] of the boot-time PIC */

static int fakeintr(void *);
static int mapirq(uint32_t);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

/* One record per virtual IRQ; slot 0 is the "unmapped" sentinel. */
static struct intr_source intrsources[NVIRQ];
70
71 void
72 pic_init(void)
73 {
74 int i;
75
76 for (i = 0; i < NIRQ; i++)
77 virq[i] = 0;
78 memset(intrsources, 0, sizeof(intrsources));
79 }
80
81 int
82 pic_add(struct pic_ops *pic)
83 {
84
85 if (num_pics >= MAX_PICS)
86 return -1;
87
88 pics[num_pics] = pic;
89 pic->pic_intrbase = max_base;
90 max_base += pic->pic_numintrs;
91 num_pics++;
92
93 return pic->pic_intrbase;
94 }
95
96 void
97 pic_finish_setup(void)
98 {
99 struct pic_ops *pic;
100 int i;
101
102 for (i = 0; i < num_pics; i++) {
103 pic = pics[i];
104 if (pic->pic_finish_setup != NULL)
105 pic->pic_finish_setup(pic);
106 }
107 }
108
109 static struct pic_ops *
110 find_pic_by_irq(int irq)
111 {
112 struct pic_ops *current;
113 int base = 0;
114
115 while (base < num_pics) {
116
117 current = pics[base];
118 if ((irq >= current->pic_intrbase) &&
119 (irq < (current->pic_intrbase + current->pic_numintrs))) {
120
121 return current;
122 }
123 base++;
124 }
125 return NULL;
126 }
127
/*
 * Placeholder handler installed momentarily by intr_establish() while
 * the interrupt masks are being recalculated; does nothing.
 */
static int
fakeintr(void *unused)
{

	return 0;
}
134
/*
 * Register an interrupt handler.
 *
 * hwirq is the global hardware IRQ number (pic_intrbase already folded
 * in), type is an IST_* trigger type, level is the IPL_* the handler
 * runs at, and ih_fun/ih_arg are the handler and its argument.
 * Panics on a bogus IRQ, a PIC-less IRQ, or an incompatible sharing
 * request.  Returns an opaque cookie for intr_disestablish().
 */
void *
intr_establish(int hwirq, int type, int level, int (*ih_fun)(void *),
	void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxlevel = level;

	/* IPL_NONE handlers still need a real mask level below. */
	if (maxlevel == IPL_NONE)
		maxlevel = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Allocate (or reuse) the virtual IRQ for this hardware IRQ. */
	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	/*
	 * Sharing rules: a fresh source takes the requested type; edge
	 * and level sources may only be shared with the same type
	 * (mismatches fall through to the panic); pulse is unsharable.
	 */
	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	/* First handler on this source: attach its event counter. */
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {

		maxlevel = max(maxlevel, q->ih_level);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	/* Let the PIC program trigger type/priority for its local pin. */
	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxlevel);

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();


	return ih;
}
239
/*
 * No-op pic_establish_irq hook for PICs that need no per-pin setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}
244
/*
 * Deregister an interrupt handler.
 *
 * arg is the cookie returned by intr_establish().  Unlinks the handler
 * from its source's chain, frees it, recomputes the masks, and — if the
 * source is now empty — resets its type and detaches its event counter.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intr_source *is = &intrsources[irq];
	struct intrhand **p, *q;

	if (!LEGAL_VIRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	intr_calculatemasks();

	/* Last handler gone: mark the source free again. */
	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
	}
}
278
279 /*
280 * Map max_base irqs into 32 (bits).
281 */
282 static int
283 mapirq(uint32_t irq)
284 {
285 struct pic_ops *pic;
286 int v;
287
288 if (irq >= max_base)
289 panic("invalid irq %d", irq);
290
291 if ((pic = find_pic_by_irq(irq)) == NULL)
292 panic("%s: cannot find PIC for IRQ %d", __func__, irq);
293
294 if (virq[irq])
295 return virq[irq];
296
297 virq_max++;
298 v = virq_max;
299 if (v > HWIRQ_MAX)
300 panic("virq overflow");
301
302 intrsources[v].is_hwirq = irq;
303 intrsources[v].is_pic = pic;
304 virq[irq] = v;
305 #ifdef PIC_DEBUG
306 printf("mapping irq %d to virq %d\n", irq, v);
307 #endif
308 return v;
309 }
310
/* Human-readable names for the IST_* trigger types, indexed by type. */
static const char * const intr_typenames[] = {
	[IST_NONE]  = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE]  = "edge-triggered",
	[IST_LEVEL] = "level-triggered",
};

/*
 * Return the printable name of an IST_* trigger type.  Asserts (under
 * DIAGNOSTIC) that the type is within the table and has an entry.
 */
const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}
325
326 /*
327 * Recalculate the interrupt masks from scratch.
328 * We could code special registry and deregistry versions of this function that
329 * would be faster, but the code would be nastier, and we don't expect this to
330 * happen very much anyway.
331 */
332 static void
333 intr_calculatemasks(void)
334 {
335 struct intr_source *is;
336 struct intrhand *q;
337 struct pic_ops *current;
338 int irq, level, i, base;
339
340 /* First, figure out which levels each IRQ uses. */
341 for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
342 register int levels = 0;
343 for (q = is->is_hand; q; q = q->ih_next)
344 levels |= 1 << q->ih_level;
345 is->is_level = levels;
346 }
347
348 /* Then figure out which IRQs use each level. */
349 for (level = 0; level < NIPL; level++) {
350 register imask_t irqs = 0;
351 for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++)
352 if (is->is_level & (1 << level))
353 irqs |= 1ULL << irq;
354 imask[level] = irqs;
355 }
356
357 /*
358 * IPL_CLOCK should mask clock interrupt even if interrupt handler
359 * is not registered.
360 */
361 imask[IPL_CLOCK] |= 1ULL << SPL_CLOCK;
362
363 /*
364 * Initialize soft interrupt masks to block themselves.
365 */
366 imask[IPL_SOFTCLOCK] = 1ULL << SIR_CLOCK;
367 imask[IPL_SOFTNET] = 1ULL << SIR_NET;
368 imask[IPL_SOFTSERIAL] = 1ULL << SIR_SERIAL;
369
370 /*
371 * IPL_NONE is used for hardware interrupts that are never blocked,
372 * and do not block anything else.
373 */
374 imask[IPL_NONE] = 0;
375
376 #ifdef SLOPPY_IPLS
377 /*
378 * Enforce a sloppy hierarchy as in spl(9)
379 */
380 /* everything above softclock must block softclock */
381 for (i = IPL_SOFTCLOCK; i < NIPL; i++)
382 imask[i] |= imask[IPL_SOFTCLOCK];
383
384 /* everything above softnet must block softnet */
385 for (i = IPL_SOFTNET; i < NIPL; i++)
386 imask[i] |= imask[IPL_SOFTNET];
387
388 /* IPL_TTY must block softserial */
389 imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];
390
391 /* IPL_VM must block net, block IO and tty */
392 imask[IPL_VM] |= (imask[IPL_NET] | imask[IPL_BIO] | imask[IPL_TTY]);
393
394 /* IPL_SERIAL must block IPL_TTY */
395 imask[IPL_SERIAL] |= imask[IPL_TTY];
396
397 /* IPL_HIGH must block all other priority levels */
398 for (i = IPL_NONE; i < IPL_HIGH; i++)
399 imask[IPL_HIGH] |= imask[i];
400 #else /* !SLOPPY_IPLS */
401 /*
402 * strict hierarchy - all IPLs block everything blocked by any lower
403 * IPL
404 */
405 for (i = 1; i < NIPL; i++)
406 imask[i] |= imask[i - 1];
407 #endif /* !SLOPPY_IPLS */
408
409 #ifdef DEBUG_IPL
410 for (i = 0; i < NIPL; i++) {
411 printf("%2d: %08x\n", i, imask[i]);
412 }
413 #endif
414
415 /* And eventually calculate the complete masks. */
416 for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
417 register imask_t irqs = 1ULL << irq;
418 for (q = is->is_hand; q; q = q->ih_next)
419 irqs |= imask[q->ih_level];
420 is->is_mask = irqs;
421 }
422
423 /* Lastly, enable IRQs actually in use. */
424 for (base = 0; base < num_pics; base++) {
425 current = pics[base];
426 for (i = 0; i < current->pic_numintrs; i++)
427 current->pic_disable_irq(current, i);
428 }
429
430 for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
431 if (is->is_hand)
432 pic_enable_irq(is->is_hwirq);
433 }
434 }
435
436 void
437 pic_enable_irq(int num)
438 {
439 struct pic_ops *current;
440 int type;
441
442 current = find_pic_by_irq(num);
443 if (current == NULL)
444 panic("%s: bogus IRQ %d", __func__, num);
445 type = intrsources[virq[num]].is_type;
446 current->pic_enable_irq(current, num - current->pic_intrbase, type);
447 }
448
449 void
450 pic_mark_pending(int irq)
451 {
452 struct cpu_info * const ci = curcpu();
453 int v, msr;
454
455 v = virq[irq];
456 if (v == 0)
457 printf("IRQ %d maps to 0\n", irq);
458
459 msr = mfmsr();
460 mtmsr(msr & ~PSL_EE);
461 ci->ci_ipending |= 1ULL << v;
462 mtmsr(msr);
463 }
464
/*
 * Deliver interrupts that were marked pending while blocked by the
 * current spl level.  Runs hardware IRQs highest-bit-first, then (with
 * fast softints) the soft interrupts.  Re-entry is prevented via
 * ci_iactive; must be entered with external interrupts enabled.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	imask_t hwpend;
	int emsr, dmsr;

	/* Already running on this CPU; the active instance will loop. */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();		/* MSR with PSL_EE set ("enabled") */
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;	/* MSR with PSL_EE clear ("disabled") */
	mtmsr(dmsr);

	pcpl = ci->ci_cpl;	/* spl level to restore between handlers */
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Do now unmasked pendings */
	ci->ci_idepth++;
	while ((hwpend = (ci->ci_ipending & ~pcpl & HWIRQ_MASK)) != 0) {
		/* Get most significant pending bit */
		irq = MS_PENDING(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1ULL << irq);
		/* virq 0 is the "unmapped" sentinel; nothing to run. */
		if (irq == 0) {
			printf("VIRQ0");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		/* Raise to the source's mask, then run with EE on. */
		splraise(is->is_mask);
		mtmsr(emsr);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
				    irq, is->is_hwirq, is);
			}
#endif
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
		}
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		/* Let the PIC unmask/re-arm the source we just serviced. */
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
	}
	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * Soft interrupts, highest priority first; each run may have
	 * raised new pendings, so loop back to "again" after each.
	 */
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1ULL << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_NET)) {
		ci->ci_ipending &= ~(1ULL << SIR_NET);
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1ULL << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#endif

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}
571
/*
 * Main interrupt dispatch for one PIC.  Drains the PIC: for each IRQ it
 * reports, either run the handler chain (if unmasked at the current
 * spl) or record it as pending and mask it at the PIC.  "cookie" is the
 * struct pic_ops * registered for this PIC.  255 from pic_get_irq means
 * "no interrupt pending".  Always returns 0.
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;
	int pcpl, msr, bail;
	imask_t r_imen;

	realirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (realirq == 255)
		return 0;

	msr = mfmsr();
	pcpl = ci->ci_cpl;

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ppcipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	/* realirq is PIC-local; fold in pic_intrbase to look up the virq. */
	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1ULL << irq;
	is = &intrsources[irq];

	if ((pcpl & r_imen) != 0) {

		ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;
		ci->ci_idepth++;

		/* Run the chain at the source's mask, with EE re-enabled. */
		splraise(is->is_mask);
		mtmsr(msr | PSL_EE);
		ih = is->is_hand;
		bail = 0;
		/* bail guards against a corrupt/looping handler chain. */
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
			bail++;
		}
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		uvmexp.intrs++;
		is->is_ev.ev_count++;
		ci->ci_idepth--;
	}
#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	if (realirq != 255)
		goto start;

	/* Drained; run anything we deferred, then restore the MSR. */
	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
664
665 void
666 pic_ext_intr(void)
667 {
668
669 KASSERT(pics[primary_pic] != NULL);
670 pic_handle_intr(pics[primary_pic]);
671
672 return;
673
674 }
675
/*
 * Raise the current CPU's spl to ncpl and return the previous level.
 * NOTE(review): no check that ncpl is actually higher — caller's duty.
 */
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* don't reorder.... */

	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
689
/*
 * Restore the spl level to ncpl and deliver any interrupts that were
 * held pending while it was raised.
 */
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	/* Anything pending that the new (lower) level unmasks? */
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
}
701
/*
 * Lower the spl level to ncpl, deliver newly-unmasked pending
 * interrupts, and return the previous level.
 */
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	/* Anything pending that the new (lower) level unmasks? */
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
716
717 /* Following code should be implemented with lwarx/stwcx to avoid
718 * the disable/enable. i need to read the manual once more.... */
719 void
720 softintr(int ipl)
721 {
722 int msrsave;
723
724 msrsave = mfmsr();
725 mtmsr(msrsave & ~PSL_EE);
726 curcpu()->ci_ipending |= 1ULL << ipl;
727 mtmsr(msrsave);
728 }
729
/*
 * Late autoconfiguration hook: report the classic interrupt masks and
 * drop to spl0, enabling interrupt delivery.
 */
void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    (u_int)imask[IPL_BIO] & 0x1fffffff,
	    (u_int)imask[IPL_NET] & 0x1fffffff,
	    (u_int)imask[IPL_TTY] & 0x1fffffff);

	spl0();
}
740
#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

/*
 * Pick an IRQ for an ISA device from the candidate bitmask "mask".
 * Prefers a completely free IRQ; failing that, the least-shared
 * level-triggered IRQ compatible with "type".  On success stores the
 * chosen (PIC-local) IRQ in *irq_p and returns 0; returns 1 on failure.
 */
int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq[irq + pic->pic_intrbase];
		/* Never mapped at all: best possible choice. */
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		/* Mapped but no handler type yet: also free. */
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/* Count how many handlers already share this IRQ. */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			/*
			 * Remember the least-shared candidate.  The old
			 * code never recorded the first candidate's
			 * depth, so shared_depth stayed 0 and the
			 * "less shared" comparison could never fire.
			 */
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif
797