/*	$NetBSD: intr.c,v 1.25 2016/10/19 00:08:42 nonaka Exp $	*/

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.25 2016/10/19 00:08:42 nonaka Exp $");

#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define	MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

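/*
 * Hardware IRQs from all registered PICs are mapped onto a small set of
 * "virtual" IRQs, one bit each in an imask_t, so that pending and masked
 * interrupts can be tracked with cheap bitmask operations: virq_map[]
 * translates a hardware IRQ to its virtual IRQ, virq_mask tracks which
 * virtual IRQs are still free, and imask[] holds the cumulative set of
 * virtual IRQs blocked at each IPL.
 */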
struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t imask[NIPL];
int primary_pic = 0;

static int fakeintr(void *);
static int mapirq(int);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* Everything is in BSS; no need to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}
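
/*
 * Registration sketch (hypothetical names: a board's attach code fills in
 * a struct pic_ops with its callbacks before calling pic_add()):
 *
 *	extern struct pic_ops my_openpic;
 *
 *	int intrbase = pic_add(&my_openpic);
 *	if (intrbase == -1)
 *		panic("too many PICs");
 *
 * The new PIC's hardware IRQs then occupy the global range
 * [intrbase, intrbase + pic_numintrs).
 */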

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);
	/* While cold, nothing can free memory, so there is no point in sleeping. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * Walking the chain is O(N) per registration (O(N^2) over all
	 * registrations), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Install a fake handler temporarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine
	 * called until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * Now that the handler is established, we're ready to
	 * recalculate the masks.
	 */
	intr_calculatemasks();

	return ih;
}
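
/*
 * Typical use from a driver attach routine (a sketch: the IRQ number,
 * handler, and softc are hypothetical, not part of this file):
 *
 *	sc->sc_ih = intr_establish_xname(17, IST_LEVEL_LOW, IPL_BIO,
 *	    mydev_intr, sc, device_xname(sc->sc_dev));
 *
 * Handlers may be chained on one IRQ as long as their trigger types are
 * compatible; see the switch on is->is_type above.
 */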

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain, recomputing the highest IPL
	 * among the remaining handlers (a single O(n) walk).
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a hardware IRQ (0 .. max_base - 1) onto one of the NVIRQ virtual
 * IRQ bits, allocating a new virtual IRQ if this hwirq has none yet.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}
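
/*
 * Note on allocation order: PIC_VIRQ_MS_PENDING() returns the most
 * significant set bit of its argument, so free virtual IRQs are handed
 * out from the top of virq_mask (initially HWIRQ_MASK) downward, each
 * allocation clearing its bit, until the mask is exhausted and mapirq()
 * panics with "virq overflow".
 */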

static const char * const intr_typenames[] = {
	[IST_NONE] = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE_FALLING] = "falling edge triggered",
	[IST_EDGE_RISING] = "rising edge triggered",
	[IST_LEVEL_LOW] = "low level triggered",
	[IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function
 * that would be faster, but the code would be nastier, and we don't expect
 * this to happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * Strict hierarchy: every IPL blocks everything blocked by any
	 * lower IPL.
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
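
/*
 * Worked example of the hierarchy pass: if virq 5 is used at IPL_VM and
 * virq 9 at IPL_SCHED (hypothetical assignments), then imask[IPL_VM]
 * contains bit 5 while imask[IPL_SCHED] contains bits 5 and 9, so raising
 * to IPL_SCHED also blocks everything blocked at IPL_VM.
 */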

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

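/*
 * Run every handler chained on an interrupt source.  Handlers registered
 * at IPL_VM run holding the big kernel lock; the lock is taken and dropped
 * lazily as the chain alternates between IPL_VM and higher (MP-safe)
 * handlers.
 */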
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		    "NULL interrupt handler!\n", __func__,
		    virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

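/*
 * Process any pending hardware interrupts (and, with __HAVE_FAST_SOFTINTS,
 * soft interrupts) that are no longer masked at the current IPL.  Runs
 * with PSL_EE cleared while it manipulates ci_ipending, re-enabling
 * external interrupts around each handler chain; ci_iactive prevents
 * recursive entry.
 */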
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Process pending interrupts that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get the most significant pending bit. */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
	    (IPL_SOFTMASK << pcpl);

	/* Make sure no bits outside IPL_SOFTMASK can break the shift above. */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending. */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* This interrupt is no longer pending. */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
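
/*
 * Typical spl protocol (a sketch; the state being protected is
 * hypothetical):
 *
 *	int s = splraise(IPL_VM);
 *	... touch state shared with an IPL_VM interrupt handler ...
 *	splx(s);
 *
 * splx() both restores the previous level and, via pic_do_pending_int(),
 * runs anything that the lower level just unmasked.
 */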

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared. */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/*
			 * Prefer the candidate with the fewest handlers
			 * already chained.  (The old code took the first
			 * candidate unconditionally and never recorded its
			 * depth, so later, shallower candidates could
			 * never win.)
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif
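
/*
 * Allocation sketch (hypothetical mask): to request one of ISA IRQs 5, 9,
 * 10 or 11, pass mask = (1 << 5) | (1 << 9) | (1 << 10) | (1 << 11):
 *
 *	int irq;
 *	if (genppc_isa_intr_alloc(ic, pic, 0x0e20, IST_LEVEL, &irq) == 0)
 *		... irq now holds a free or shareable ISA IRQ ...
 *
 * It returns 0 on success, 1 if nothing in the mask is usable.
 */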

static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_source) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct intr_source *is;

	/* XXX interrupts are always handled by CPU 0 */
	if (cpu_idx != 0)
		return 0;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_ev.ev_count;
	return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrhand *ih;

	if (len == 0)
		return;

	buf[0] = '\0';

	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
		if (buf[0] != '\0')
			strlcat(buf, ", ", len);
		strlcat(buf, ih->ih_xname, len);
	}
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0))	/* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Stop if more handlers were attached after we counted. */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_source, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}