/*	$NetBSD: intr.c,v 1.28 2020/07/06 09:34:18 rin Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.28 2020/07/06 09:34:18 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

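/*
 * REORDER_PROTECT is an ordering barrier: it keeps the CPU from moving
 * storage accesses across updates to ci_cpl and ci_ipending in the
 * spl functions below.
 */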
#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define REORDER_PROTECT() __asm volatile("sync")
#else
#define REORDER_PROTECT() __asm volatile("sync; eieio")
#endif

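/*
 * Global PIC state: the registered PICs, the hwirq -> virq mapping,
 * the bitmask of still-unassigned virtual IRQs, and the per-IPL
 * interrupt masks computed by intr_calculatemasks().
 */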
struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t	imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

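/*
 * Register a PIC and assign it the next contiguous range of hardware
 * IRQ numbers.  Returns the PIC's first hwirq (pic_intrbase), or -1
 * if the PIC table is already full.
 */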
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

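/*
 * Look up the PIC whose hwirq range contains the given hwirq, or
 * return NULL if no registered PIC covers it.
 */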
static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

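/*
 * Typical driver usage (a sketch; the device and handler names are
 * illustrative only):
 *
 *	sc->sc_ih = intr_establish_xname(hwirq, IST_LEVEL_LOW, IPL_VM,
 *	    mydev_intr, sc, device_xname(self));
 */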
void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a hardware IRQ (0..max_base-1) onto one of the NVIRQ virtual
 * IRQ bits, allocating a new virq on first use.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE_FALLING]  = "falling edge triggered",
   [IST_EDGE_RISING]  = "rising edge triggered",
   [IST_LEVEL_LOW] = "low level triggered",
   [IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

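/*
 * Mark a hardware interrupt as pending in ci_ipending, with external
 * interrupts briefly disabled so the read-modify-write is atomic with
 * respect to interrupt delivery on this CPU.
 */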
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

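/*
 * Call every handler attached to an interrupt source, taking the big
 * kernel lock around IPL_VM handlers and running the others unlocked,
 * then bump the source's event counter.
 */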
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

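/*
 * Replay interrupts that were marked pending while masked: deliver,
 * highest virq bit first, every pending interrupt that the current
 * IPL leaves unmasked, then run any unmasked soft interrupts.
 * ci_iactive guards against recursion.
 */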
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver pending interrupts that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

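/*
 * Main dispatch loop for a PIC: fetch and service interrupts until
 * the PIC reports none pending (255).  Interrupts masked at the
 * current IPL are recorded in ci_ipending and disabled at the PIC
 * instead of being delivered; pic_do_pending_int() replays them.
 */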
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

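/*
 * Raise the interrupt priority level to ncpl (never lowers it) and
 * return the previous level.
 */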
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl) return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}

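/*
 * True if any hard (or, with fast soft interrupts, soft) interrupt
 * is pending that the new level ncpl would unmask.
 */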
static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

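/*
 * Restore the interrupt priority level to ncpl and replay anything
 * that became deliverable at the lower level.
 */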
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}

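/*
 * Set the interrupt priority level to ncpl, returning the old level;
 * like splx() but reports what it replaced.
 */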
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/*
			 * Prefer the least-shared IRQ: count the
			 * handlers already attached and remember the
			 * shallowest chain seen so far.  (Record the
			 * depth of the first candidate as well, so a
			 * later, shallower chain can displace it.)
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif

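/*
 * Look up an interrupt source by its "irq N" identifier string.
 */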
static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_source) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

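/*
 * MI interrupt(9) interface.  All hardware interrupts are delivered
 * on CPU 0 here, so the per-CPU queries below only report that CPU.
 */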
    764 interrupt_get_count(const char *intrid, u_int cpu_idx)
    765 {
    766 	struct intr_source *is;
    767 
    768 	/* XXX interrupt is always generated by CPU 0 */
    769 	if (cpu_idx != 0)
    770 		return 0;
    771 
    772 	is = intr_get_source(intrid);
    773 	if (is != NULL)
    774 		return is->is_ev.ev_count;
    775 	return 0;
    776 }
    777 
    778 void
    779 interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
    780 {
    781 	struct intr_source *is;
    782 
    783 	kcpuset_zero(cpuset);
    784 
    785 	is = intr_get_source(intrid);
    786 	if (is != NULL)
    787 		kcpuset_set(cpuset, 0);	/* XXX */
    788 }
    789 
    790 void
    791 interrupt_get_available(kcpuset_t *cpuset)
    792 {
    793 	CPU_INFO_ITERATOR cii;
    794 	struct cpu_info *ci;
    795 
    796 	kcpuset_zero(cpuset);
    797 
    798 	mutex_enter(&cpu_lock);
    799 	for (CPU_INFO_FOREACH(cii, ci)) {
    800 		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
    801 			kcpuset_set(cpuset, cpu_index(ci));
    802 	}
    803 	mutex_exit(&cpu_lock);
    804 }
    805 
    806 void
    807 interrupt_get_devname(const char *intrid, char *buf, size_t len)
    808 {
    809 	struct intrhand *ih;
    810 
    811 	if (len == 0)
    812 		return;
    813 
    814 	buf[0] = '\0';
    815 
    816 	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
    817 		if (buf[0] != '\0')
    818 			strlcat(buf, ", ", len);
    819 		strlcat(buf, ih->ih_xname, len);
    820 	}
    821 }
    822 
    823 struct intrids_handler *
    824 interrupt_construct_intrids(const kcpuset_t *cpuset)
    825 {
    826 	struct intr_source *is;
    827 	struct intrids_handler *ii_handler;
    828 	intrid_t *ids;
    829 	int i, irq, count;
    830 
    831 	if (kcpuset_iszero(cpuset))
    832 		return NULL;
    833 	if (!kcpuset_isset(cpuset, 0))	/* XXX */
    834 		return NULL;
    835 
    836 	count = 0;
    837 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
    838 		if (is->is_hand != NULL)
    839 			count++;
    840 	}
    841 
    842 	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
    843 	    KM_SLEEP);
    844 	if (ii_handler == NULL)
    845 		return NULL;
    846 	ii_handler->iih_nids = count;
    847 	if (count == 0)
    848 		return ii_handler;
    849 
    850 	ids = ii_handler->iih_intrids;
    851 	i = 0;
    852 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Ignore sources attached after "count" was taken above. */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_source, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

#undef REORDER_PROTECT