/*	$NetBSD: intr.c,v 1.29 2020/07/06 10:31:23 rin Exp $	*/

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.29 2020/07/06 10:31:23 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined (PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define REORDER_PROTECT() __asm volatile("sync")
#else
#define REORDER_PROTECT() __asm volatile("sync; eieio")
#endif

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t imask[NIPL];
int primary_pic = 0;

static int fakeintr(void *);
static int mapirq(int);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

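/*
 * Register a PIC with the framework and assign it a contiguous range of
 * hardware IRQ numbers starting at the current max_base.  Returns the
 * assigned base, or -1 if the PIC table is already full.
 */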
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

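/*
 * Return the PIC whose [pic_intrbase, pic_intrbase + pic_numintrs) range
 * covers the given hardware IRQ, or NULL if no registered PIC owns it.
 */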
static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
	void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}

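/*
 * No-op pic_establish_irq hook for PIC drivers that need no per-IRQ setup.
 */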
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a hardware IRQ number (0..max_base-1) to a virtual IRQ, i.e. a bit
 * position in the 32-bit pending/mask words.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
	[IST_NONE] = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE_FALLING] = "falling edge triggered",
	[IST_EDGE_RISING] = "rising edge triggered",
	[IST_LEVEL_LOW] = "low level triggered",
	[IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

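/*
 * Enable a hardware IRQ on its owning PIC, using the trigger type recorded
 * for the corresponding interrupt source.
 */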
void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

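/*
 * Mark a hardware IRQ as pending in the per-CPU pending mask.  External
 * interrupts are disabled around the update so a nested interrupt cannot
 * clobber the read-modify-write of ci_ipending.
 */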
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

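/*
 * Run every handler on the source's chain.  IPL_VM handlers run with the
 * big kernel lock held; higher-priority handlers run unlocked.  The lock is
 * taken and dropped as the chain switches between the two kinds.
 */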
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		    "NULL interrupt handler!\n", __func__,
		    virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

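/*
 * Replay interrupts that were marked pending while they were masked.
 * Called with external interrupts enabled (e.g. from splx()); each pending
 * source whose IPL is no longer blocked is delivered at its own IPL, and
 * any unmasked fast soft interrupts are run afterwards.
 */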
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver all pending interrupts that are no longer masked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
	    (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

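/*
 * Main dispatch loop for a PIC: fetch pending IRQs from the hardware and
 * either deliver them immediately (at their IPL, with interrupts re-enabled)
 * or, if the current IPL masks them, record them as pending and disable
 * them at the PIC until the IPL drops.
 */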
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

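/*
 * External interrupt entry point: dispatch through the primary PIC.
 */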
void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);

	return;

}

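/*
 * Raise the current CPU's interrupt priority level to ncpl (never lowers
 * it) and return the previous level.
 */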
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}

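/*
 * True if any hardware interrupt (or, with fast soft interrupts, any soft
 * interrupt) is pending that would be unmasked at the given level.
 */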
static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

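/*
 * Restore the interrupt priority level to ncpl and replay anything that
 * became deliverable while it was raised.
 */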
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}

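/*
 * Lower the interrupt priority level to ncpl, replay newly unmasked pending
 * interrupts, and return the previous level.
 */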
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/*
			 * Prefer the shareable candidate with the fewest
			 * handlers already attached.
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif

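/*
 * Support for the machine-independent interrupt_*() API: interrupt sources
 * are identified by their event-counter name ("irq N").
 */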
static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_source) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct intr_source *is;

	/* XXX interrupt is always generated by CPU 0 */
	if (cpu_idx != 0)
		return 0;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_ev.ev_count;
	return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrhand *ih;

	if (len == 0)
		return;

	buf[0] = '\0';

	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
		if (buf[0] != '\0')
			strlcat(buf, ", ", len);
		strlcat(buf, ih->ih_xname, len);
	}
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0))	/* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Ignore devices attached after counting "count". */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_source, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

#undef REORDER_PROTECT