Home | History | Annotate | Line # | Download | only in pic
      1  1.38  macallan /*	$NetBSD: intr.c,v 1.38 2025/07/05 15:11:05 macallan Exp $ */
      2   1.2   garbled 
      3   1.2   garbled /*-
      4   1.2   garbled  * Copyright (c) 2007 Michael Lorenz
      5   1.2   garbled  * All rights reserved.
      6   1.2   garbled  *
      7   1.2   garbled  * Redistribution and use in source and binary forms, with or without
      8   1.2   garbled  * modification, are permitted provided that the following conditions
      9   1.2   garbled  * are met:
     10   1.2   garbled  * 1. Redistributions of source code must retain the above copyright
     11   1.2   garbled  *    notice, this list of conditions and the following disclaimer.
     12   1.2   garbled  * 2. Redistributions in binary form must reproduce the above copyright
     13   1.2   garbled  *    notice, this list of conditions and the following disclaimer in the
     14   1.2   garbled  *    documentation and/or other materials provided with the distribution.
     15   1.2   garbled  *
     16   1.2   garbled  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17   1.2   garbled  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18   1.2   garbled  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19   1.2   garbled  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20   1.2   garbled  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21   1.2   garbled  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22   1.2   garbled  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23   1.2   garbled  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24   1.2   garbled  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25   1.2   garbled  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26   1.2   garbled  * POSSIBILITY OF SUCH DAMAGE.
     27   1.2   garbled  */
     28   1.2   garbled 
     29  1.28       rin #define __INTR_PRIVATE
     30  1.28       rin 
     31   1.2   garbled #include <sys/cdefs.h>
     32  1.38  macallan __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.38 2025/07/05 15:11:05 macallan Exp $");
     33   1.2   garbled 
     34  1.28       rin #ifdef _KERNEL_OPT
     35  1.16      matt #include "opt_interrupt.h"
     36   1.2   garbled #include "opt_multiprocessor.h"
     37  1.16      matt #include "opt_pic.h"
     38  1.29       rin #include "opt_ppcarch.h"
     39  1.28       rin #endif
     40  1.12  macallan 
     41   1.2   garbled #include <sys/param.h>
     42  1.16      matt #include <sys/cpu.h>
     43  1.16      matt #include <sys/kernel.h>
     44  1.20      matt #include <sys/kmem.h>
     45  1.25    nonaka #include <sys/interrupt.h>
     46   1.2   garbled 
     47  1.16      matt #include <powerpc/psl.h>
     48  1.16      matt #include <powerpc/pic/picvar.h>
     49  1.16      matt 
     50   1.2   garbled #if defined(PIC_I8259) || defined (PIC_PREPIVR)
     51   1.2   garbled #include <machine/isa_machdep.h>
     52   1.2   garbled #endif
     53   1.2   garbled 
     54   1.2   garbled #ifdef MULTIPROCESSOR
     55  1.16      matt #include <powerpc/pic/ipivar.h>
     56   1.2   garbled #endif
     57   1.2   garbled 
     58  1.12  macallan #ifdef __HAVE_FAST_SOFTINTS
     59  1.12  macallan #include <powerpc/softint.h>
     60  1.12  macallan #endif
     61  1.12  macallan 
     62   1.2   garbled #define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */
     63   1.2   garbled 
     64  1.15      matt #define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)
     65   1.2   garbled 
     66  1.27       rin #if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
     67  1.27       rin /* eieio is implemented as sync */
     68  1.34  riastrad #define REORDER_PROTECT() __asm volatile("sync" ::: "memory")
     69  1.27       rin #else
     70  1.34  riastrad #define REORDER_PROTECT() __asm volatile("sync; eieio" ::: "memory")
     71  1.27       rin #endif
     72  1.27       rin 
     73   1.2   garbled struct pic_ops *pics[MAX_PICS];
     74   1.2   garbled int num_pics = 0;
     75   1.2   garbled int max_base = 0;
     76  1.15      matt uint8_t	virq_map[NIRQ];
     77  1.15      matt imask_t virq_mask = HWIRQ_MASK;
     78  1.30       rin static imask_t imask[NIPL];
     79   1.2   garbled int	primary_pic = 0;
     80   1.2   garbled 
     81   1.2   garbled static int	fakeintr(void *);
     82  1.15      matt static int	mapirq(int);
     83   1.2   garbled static void	intr_calculatemasks(void);
     84  1.15      matt static struct pic_ops *find_pic_by_hwirq(int);
     85   1.2   garbled 
     86   1.2   garbled static struct intr_source intrsources[NVIRQ];
     87   1.2   garbled 
/*
 * One-time initialization hook for the PIC framework.  All framework
 * state (pics[], virq_map, virq_mask, intrsources, ...) lives in bss
 * and is therefore already zeroed at boot, so there is nothing to do.
 */
void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}
     93   1.2   garbled 
     94  1.38  macallan struct pic_ops *
     95  1.38  macallan find_pic_by_cookie(void *c)
     96  1.38  macallan {
     97  1.38  macallan 	int i = 0;
     98  1.38  macallan 	while (i < num_pics) {
     99  1.38  macallan 		if (pics[i]->pic_cookie == c)
    100  1.38  macallan 			return pics[i];
    101  1.38  macallan 		i++;
    102  1.38  macallan 	}
    103  1.38  macallan 	return NULL;
    104  1.38  macallan }
    105  1.38  macallan 
    106   1.2   garbled int
    107   1.2   garbled pic_add(struct pic_ops *pic)
    108   1.2   garbled {
    109   1.2   garbled 
    110   1.2   garbled 	if (num_pics >= MAX_PICS)
    111   1.2   garbled 		return -1;
    112   1.2   garbled 
    113   1.2   garbled 	pics[num_pics] = pic;
    114   1.2   garbled 	pic->pic_intrbase = max_base;
    115   1.2   garbled 	max_base += pic->pic_numintrs;
    116   1.2   garbled 	num_pics++;
    117   1.7  kiyohara 
    118   1.2   garbled 	return pic->pic_intrbase;
    119   1.2   garbled }
    120   1.2   garbled 
    121   1.2   garbled void
    122   1.2   garbled pic_finish_setup(void)
    123   1.2   garbled {
    124  1.15      matt 	for (size_t i = 0; i < num_pics; i++) {
    125  1.15      matt 		struct pic_ops * const pic = pics[i];
    126   1.2   garbled 		if (pic->pic_finish_setup != NULL)
    127   1.2   garbled 			pic->pic_finish_setup(pic);
    128   1.2   garbled 	}
    129   1.2   garbled }
    130   1.2   garbled 
    131   1.2   garbled static struct pic_ops *
    132  1.15      matt find_pic_by_hwirq(int hwirq)
    133   1.2   garbled {
    134  1.14      matt 	for (u_int base = 0; base < num_pics; base++) {
    135  1.14      matt 		struct pic_ops * const pic = pics[base];
    136  1.15      matt 		if (pic->pic_intrbase <= hwirq
    137  1.15      matt 		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
    138  1.14      matt 			return pic;
    139   1.2   garbled 		}
    140   1.2   garbled 	}
    141   1.2   garbled 	return NULL;
    142   1.2   garbled }
    143   1.2   garbled 
/*
 * Placeholder handler installed while a real handler's masks are still
 * being computed; ignores its argument and claims nothing.
 */
static int
fakeintr(void *arg)
{
	(void)arg;
	return 0;
}
    150   1.2   garbled 
    151   1.2   garbled /*
    152   1.2   garbled  * Register an interrupt handler.
    153   1.2   garbled  */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	/* Same as intr_establish_xname() with no handler name. */
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}
    160  1.25    nonaka 
/*
 * Register an interrupt handler for hwirq at the given ipl, with an
 * explicit name (xname) used for interrupt identification; xname may
 * be NULL ("unknown" is recorded instead).  Returns an opaque handle
 * for intr_disestablish().  Panics on an unknown hwirq, an illegal
 * virq, IST_NONE, or an incompatible attempt to share the source.
 */
void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	/* IPL_NONE handlers must never be blocked: mask as if IPL_HIGH. */
	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Allocate (or look up) the virtual IRQ for this hardware IRQ. */
	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];
	/* A handler that is itself pic_handle_intr() is a cascade. */
	const bool cascaded = ih_fun == pic_handle_intr;

	switch (is->is_type) {
	case IST_NONE:
		/* First handler on this source: it defines the type. */
		is->is_type = type;
		is->is_cascaded = cascaded;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		/* Sharing is only allowed with an identical trigger type. */
		if (type != IST_NONE) {
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		}
		if (cascaded != is->is_cascaded) {
			panic("intr_establish: can't share cascaded with "
			    "non-cascaded interrupt");
		}
		break;
	}
	if (is->is_hand == NULL) {
		/* First handler: attach the event counter for this source. */
		snprintf(is->is_intrid, sizeof(is->is_intrid), "%s irq %d",
		    pic->pic_name, is->is_hwirq);
		snprintf(is->is_evname, sizeof(is->is_evname), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_evname);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	/* Give the owning PIC a chance at per-IRQ setup (trigger, prio). */
	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}
    277   1.2   garbled 
/*
 * No-op pic_establish_irq hook for PICs that need no per-IRQ setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}
    282   1.2   garbled 
    283   1.2   garbled /*
    284   1.2   garbled  * Deregister an interrupt handler.
    285   1.2   garbled  */
void
intr_disestablish(void *arg)
{
	/* arg is the handle returned by intr_establish_xname(). */
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 * While walking, recompute the max IPL of the remaining handlers.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;	/* remember the link pointing at ih */
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		/* Last handler gone: retire the source entirely. */
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}
    333   1.2   garbled 
    334   1.2   garbled /*
    335   1.2   garbled  * Map max_base irqs into 32 (bits).
    336   1.2   garbled  */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	/* Already mapped?  Reuse the existing virtual IRQ. */
	if (virq_map[hwirq])
		return virq_map[hwirq];

	/* virq_mask has a bit set for each still-available virq. */
	if (virq_mask == 0)
		panic("virq overflow");

	/* Claim the most significant free bit as the new virq. */
	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}
    367   1.2   garbled 
/* Human-readable names for the IST_* interrupt trigger types. */
static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE_FALLING]  = "falling edge triggered",
   [IST_EDGE_RISING]  = "rising edge triggered",
   [IST_LEVEL_LOW] = "low level triggered",
   [IST_LEVEL_HIGH] = "high level triggered",
};
    376   1.2   garbled 
    377   1.2   garbled const char *
    378   1.2   garbled intr_typename(int type)
    379   1.2   garbled {
    380   1.2   garbled 	KASSERT((unsigned int) type < __arraycount(intr_typenames));
    381   1.2   garbled 	KASSERT(intr_typenames[type] != NULL);
    382   1.2   garbled 	return intr_typenames[type];
    383   1.2   garbled }
    384   1.2   garbled 
    385   1.2   garbled /*
    386   1.2   garbled  * Recalculate the interrupt masks from scratch.
    387   1.2   garbled  * We could code special registry and deregistry versions of this function that
    388   1.2   garbled  * would be faster, but the code would be nastier, and we don't expect this to
    389   1.2   garbled  * happen very much anyway.
    390   1.2   garbled  */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 * This avoids racing against an interrupt arriving while the
	 * imask[] table is mid-update.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
    455   1.2   garbled 
    456   1.2   garbled void
    457  1.15      matt pic_enable_irq(int hwirq)
    458   1.2   garbled {
    459  1.15      matt 	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
    460  1.15      matt 	if (pic == NULL)
    461  1.15      matt 		panic("%s: bogus IRQ %d", __func__, hwirq);
    462  1.15      matt 	const int type = intrsources[virq_map[hwirq]].is_type;
    463  1.15      matt 	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
    464   1.2   garbled }
    465   1.2   garbled 
/*
 * Mark a hardware IRQ as pending on the current CPU without handling
 * it now; it will be delivered later (see pic_do_pending_int()).
 */
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	/* Update ci_ipending with external interrupts disabled (PSL_EE). */
	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}
    480   1.2   garbled 
/*
 * Invoke every handler chained on an interrupt source and bump its
 * event counter.  Handlers at IPL_VM run with the big kernel lock
 * held; the lock is taken/dropped lazily as the chain is walked so
 * adjacent IPL_VM handlers share one lock acquisition.
 */
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}
    506  1.15      matt 
/*
 * Deliver interrupts that were marked pending while masked, now that
 * the current IPL allows them, then run any unblocked soft interrupts.
 * Called from splx()/spllower() when pending work exists.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	/* Guard against recursion: we may be re-entered via splx(). */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	/* emsr: MSR with external interrupts enabled; dmsr: disabled. */
	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Do now unmasked pendings */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		/*
		 * Non-cascaded sources are delivered at their own IPL
		 * with external interrupts re-enabled.
		 */
		if (!is->is_cascaded) {
			splraise(is->is_ipl);
			mtmsr(emsr);
		}
		intr_deliver(is, virq);
		if (!is->is_cascaded) {
			mtmsr(dmsr);
			ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */
		}

		/* Source handled: let the PIC unmask it again. */
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	/* Soft interrupts unblocked at the saved priority level. */
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		/* Softints may have made new hard interrupts deliverable. */
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}
    578   1.2   garbled 
/*
 * Main dispatch loop for one PIC: ask it for pending IRQs and either
 * deliver each one (if unmasked at the current IPL) or record it as
 * pending and mask it at the PIC.  Also used as the handler function
 * for cascaded PICs.  Returns 0 (the value 255 from pic_get_irq means
 * "no interrupt pending").
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	mtmsr(msr & ~PSL_EE);

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255) {
		/* Spurious: nothing pending at this PIC. */
		mtmsr(msr);
		return 0;
	}

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			/*
			 * Non-cascaded sources run at their own IPL with
			 * external interrupts re-enabled for the duration.
			 */
			if (!is->is_cascaded) {
				splraise(is->is_ipl);
				mtmsr(msr | PSL_EE);
			}
			intr_deliver(is, virq);
			if (!is->is_cascaded) {
				mtmsr(msr & ~PSL_EE);
				ci->ci_cpl = pcpl;
			}

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
    635   1.2   garbled 
    636   1.2   garbled void
    637   1.2   garbled pic_ext_intr(void)
    638   1.2   garbled {
    639   1.2   garbled 
    640   1.2   garbled 	KASSERT(pics[primary_pic] != NULL);
    641   1.2   garbled 	pic_handle_intr(pics[primary_pic]);
    642   1.2   garbled 
    643   1.2   garbled 	return;
    644   1.2   garbled 
    645   1.2   garbled }
    646   1.2   garbled 
/*
 * Raise the current CPU's interrupt priority level to ncpl (never
 * lowers it) and return the previous level.  REORDER_PROTECT fences
 * keep the ci_cpl update ordered with surrounding device accesses.
 */
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	/* Fast path: already at the requested level. */
	if (ncpl == ci->ci_cpl)
		return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}
    663   1.2   garbled 
/*
 * True if any hard interrupt (or, with fast softints, any soft
 * interrupt) is pending that would be deliverable at level ncpl.
 */
static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}
    675  1.13      matt 
/*
 * Restore the interrupt priority level to ncpl and deliver anything
 * that became unblocked by the drop.
 */
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}
    689   1.2   garbled 
/*
 * Lower the interrupt priority level to ncpl, process interrupts that
 * become deliverable, and return the previous level.
 */
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();	/* compiler barrier before lowering the IPL */
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}
    705   1.2   garbled 
    706   1.2   garbled void
    707   1.2   garbled genppc_cpu_configure(void)
    708   1.2   garbled {
    709  1.23  macallan 	aprint_normal("vmmask %x schedmask %x highmask %x\n",
    710  1.23  macallan 	    (u_int)imask[IPL_VM] & 0x7fffffff,
    711  1.23  macallan 	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
    712  1.23  macallan 	    (u_int)imask[IPL_HIGH] & 0x7fffffff);
    713   1.2   garbled 
    714   1.2   garbled 	spl0();
    715   1.2   garbled }
    716   1.2   garbled 
    717   1.2   garbled #if defined(PIC_PREPIVR) || defined(PIC_I8259)
    718   1.2   garbled /*
    719   1.2   garbled  * isa_intr_alloc needs to be done here, because it needs direct access to
    720   1.2   garbled  * the various interrupt handler structures.
    721   1.2   garbled  */
    722   1.2   garbled 
    723   1.2   garbled int
    724   1.2   garbled genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    725   1.2   garbled     int mask, int type, int *irq_p)
    726   1.2   garbled {
    727   1.2   garbled 	int irq, vi;
    728   1.2   garbled 	int maybe_irq = -1;
    729   1.2   garbled 	int shared_depth = 0;
    730   1.2   garbled 	struct intr_source *is;
    731   1.2   garbled 
    732   1.2   garbled 	if (pic == NULL)
    733   1.2   garbled 		return 1;
    734   1.2   garbled 
    735   1.2   garbled 	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
    736   1.2   garbled 	     mask >>= 1, irq++) {
    737   1.2   garbled 		if ((mask & 1) == 0)
    738   1.2   garbled 			continue;
    739  1.15      matt 		vi = virq_map[irq + pic->pic_intrbase];
    740   1.2   garbled 		if (!vi) {
    741   1.2   garbled 			*irq_p = irq;
    742   1.2   garbled 			return 0;
    743   1.2   garbled 		}
    744   1.2   garbled 		is = &intrsources[vi];
    745   1.2   garbled 		if (is->is_type == IST_NONE) {
    746   1.2   garbled 			*irq_p = irq;
    747   1.2   garbled 			return 0;
    748   1.2   garbled 		}
    749   1.2   garbled 		/* Level interrupts can be shared */
    750   1.2   garbled 		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
    751   1.2   garbled 			struct intrhand *ih = is->is_hand;
    752   1.2   garbled 			int depth;
    753   1.2   garbled 
    754   1.2   garbled 			if (maybe_irq == -1) {
    755   1.2   garbled 				maybe_irq = irq;
    756   1.2   garbled 				continue;
    757   1.2   garbled 			}
    758   1.2   garbled 			for (depth = 0; ih != NULL; ih = ih->ih_next)
    759   1.2   garbled 				depth++;
    760   1.2   garbled 			if (depth < shared_depth) {
    761   1.2   garbled 				maybe_irq = irq;
    762   1.2   garbled 				shared_depth = depth;
    763   1.2   garbled 			}
    764   1.2   garbled 		}
    765   1.2   garbled 	}
    766   1.2   garbled 	if (maybe_irq != -1) {
    767   1.2   garbled 		*irq_p = maybe_irq;
    768   1.2   garbled 		return 0;
    769   1.2   garbled 	}
    770   1.2   garbled 	return 1;
    771   1.2   garbled }
    772   1.2   garbled #endif
    773  1.25    nonaka 
    774  1.25    nonaka static struct intr_source *
    775  1.25    nonaka intr_get_source(const char *intrid)
    776  1.25    nonaka {
    777  1.25    nonaka 	struct intr_source *is;
    778  1.25    nonaka 	int irq;
    779  1.25    nonaka 
    780  1.25    nonaka 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
    781  1.32       rin 		if (strcmp(intrid, is->is_intrid) == 0)
    782  1.25    nonaka 			return is;
    783  1.25    nonaka 	}
    784  1.25    nonaka 	return NULL;
    785  1.25    nonaka }
    786  1.25    nonaka 
    787  1.25    nonaka static struct intrhand *
    788  1.25    nonaka intr_get_handler(const char *intrid)
    789  1.25    nonaka {
    790  1.25    nonaka 	struct intr_source *is;
    791  1.25    nonaka 
    792  1.25    nonaka 	is = intr_get_source(intrid);
    793  1.25    nonaka 	if (is != NULL)
    794  1.25    nonaka 		return is->is_hand;
    795  1.25    nonaka 	return NULL;
    796  1.25    nonaka }
    797  1.25    nonaka 
    798  1.25    nonaka uint64_t
    799  1.25    nonaka interrupt_get_count(const char *intrid, u_int cpu_idx)
    800  1.25    nonaka {
    801  1.25    nonaka 	struct intr_source *is;
    802  1.25    nonaka 
    803  1.25    nonaka 	/* XXX interrupt is always generated by CPU 0 */
    804  1.25    nonaka 	if (cpu_idx != 0)
    805  1.25    nonaka 		return 0;
    806  1.25    nonaka 
    807  1.25    nonaka 	is = intr_get_source(intrid);
    808  1.25    nonaka 	if (is != NULL)
    809  1.25    nonaka 		return is->is_ev.ev_count;
    810  1.25    nonaka 	return 0;
    811  1.25    nonaka }
    812  1.25    nonaka 
    813  1.25    nonaka void
    814  1.25    nonaka interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
    815  1.25    nonaka {
    816  1.25    nonaka 	struct intr_source *is;
    817  1.25    nonaka 
    818  1.25    nonaka 	kcpuset_zero(cpuset);
    819  1.25    nonaka 
    820  1.25    nonaka 	is = intr_get_source(intrid);
    821  1.25    nonaka 	if (is != NULL)
    822  1.25    nonaka 		kcpuset_set(cpuset, 0);	/* XXX */
    823  1.25    nonaka }
    824  1.25    nonaka 
    825  1.25    nonaka void
    826  1.25    nonaka interrupt_get_available(kcpuset_t *cpuset)
    827  1.25    nonaka {
    828  1.25    nonaka 	CPU_INFO_ITERATOR cii;
    829  1.25    nonaka 	struct cpu_info *ci;
    830  1.25    nonaka 
    831  1.25    nonaka 	kcpuset_zero(cpuset);
    832  1.25    nonaka 
    833  1.25    nonaka 	mutex_enter(&cpu_lock);
    834  1.25    nonaka 	for (CPU_INFO_FOREACH(cii, ci)) {
    835  1.25    nonaka 		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
    836  1.25    nonaka 			kcpuset_set(cpuset, cpu_index(ci));
    837  1.25    nonaka 	}
    838  1.25    nonaka 	mutex_exit(&cpu_lock);
    839  1.25    nonaka }
    840  1.25    nonaka 
    841  1.25    nonaka void
    842  1.25    nonaka interrupt_get_devname(const char *intrid, char *buf, size_t len)
    843  1.25    nonaka {
    844  1.25    nonaka 	struct intrhand *ih;
    845  1.25    nonaka 
    846  1.25    nonaka 	if (len == 0)
    847  1.25    nonaka 		return;
    848  1.25    nonaka 
    849  1.25    nonaka 	buf[0] = '\0';
    850  1.25    nonaka 
    851  1.25    nonaka 	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
    852  1.25    nonaka 		if (buf[0] != '\0')
    853  1.25    nonaka 			strlcat(buf, ", ", len);
    854  1.25    nonaka 		strlcat(buf, ih->ih_xname, len);
    855  1.25    nonaka 	}
    856  1.25    nonaka }
    857  1.25    nonaka 
    858  1.25    nonaka struct intrids_handler *
    859  1.25    nonaka interrupt_construct_intrids(const kcpuset_t *cpuset)
    860  1.25    nonaka {
    861  1.25    nonaka 	struct intr_source *is;
    862  1.25    nonaka 	struct intrids_handler *ii_handler;
    863  1.25    nonaka 	intrid_t *ids;
    864  1.25    nonaka 	int i, irq, count;
    865  1.25    nonaka 
    866  1.25    nonaka 	if (kcpuset_iszero(cpuset))
    867  1.25    nonaka 		return NULL;
    868  1.25    nonaka 	if (!kcpuset_isset(cpuset, 0))	/* XXX */
    869  1.25    nonaka 		return NULL;
    870  1.25    nonaka 
    871  1.25    nonaka 	count = 0;
    872  1.25    nonaka 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
    873  1.25    nonaka 		if (is->is_hand != NULL)
    874  1.25    nonaka 			count++;
    875  1.25    nonaka 	}
    876  1.25    nonaka 
    877  1.25    nonaka 	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
    878  1.25    nonaka 	    KM_SLEEP);
    879  1.25    nonaka 	if (ii_handler == NULL)
    880  1.25    nonaka 		return NULL;
    881  1.25    nonaka 	ii_handler->iih_nids = count;
    882  1.25    nonaka 	if (count == 0)
    883  1.25    nonaka 		return ii_handler;
    884  1.25    nonaka 
    885  1.25    nonaka 	ids = ii_handler->iih_intrids;
    886  1.25    nonaka 	i = 0;
    887  1.25    nonaka 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
    888  1.25    nonaka 		/* Ignore devices attached after counting "count". */
    889  1.25    nonaka 		if (i >= count)
    890  1.25    nonaka 			break;
    891  1.25    nonaka 
    892  1.25    nonaka 		if (is->is_hand == NULL)
    893  1.25    nonaka 			continue;
    894  1.25    nonaka 
    895  1.32       rin 		strncpy(ids[i], is->is_intrid, sizeof(intrid_t));
    896  1.25    nonaka 		i++;
    897  1.25    nonaka 	}
    898  1.25    nonaka 
    899  1.25    nonaka 	return ii_handler;
    900  1.25    nonaka }
    901  1.25    nonaka 
    902  1.25    nonaka void
    903  1.25    nonaka interrupt_destruct_intrids(struct intrids_handler *ii_handler)
    904  1.25    nonaka {
    905  1.25    nonaka 	size_t iih_size;
    906  1.25    nonaka 
    907  1.25    nonaka 	if (ii_handler == NULL)
    908  1.25    nonaka 		return;
    909  1.25    nonaka 
    910  1.25    nonaka 	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
    911  1.25    nonaka 	kmem_free(ii_handler, iih_size);
    912  1.25    nonaka }
    913  1.25    nonaka 
/*
 * Interrupt redistribution across CPUs is not supported here; all
 * interrupts go to CPU 0 (see the XXX notes above).
 */
int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}
    919  1.25    nonaka 
/*
 * Per-handler interrupt redistribution is likewise unsupported on
 * this platform.
 */
int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}
    926  1.27       rin 
    927  1.27       rin #undef REORDER_PROTECT
    928