/*
 * intr.c revision 1.14 -- NetBSD sys/arch/powerpc/pic interrupt dispatch.
 */
      1 /*	$NetBSD: intr.c,v 1.14 2011/06/17 05:15:23 matt Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2007 Michael Lorenz
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26  * POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.14 2011/06/17 05:15:23 matt Exp $");
     31 
     32 #include "opt_multiprocessor.h"
     33 
     34 #define __INTR_PRIVATE
     35 
     36 #include <sys/param.h>
     37 #include <sys/malloc.h>
     38 #include <sys/kernel.h>
     39 #include <sys/cpu.h>
     40 
     41 #include <arch/powerpc/pic/picvar.h>
     42 #include "opt_pic.h"
     43 #include "opt_interrupt.h"
     44 #if defined(PIC_I8259) || defined (PIC_PREPIVR)
     45 #include <machine/isa_machdep.h>
     46 #endif
     47 
     48 #ifdef MULTIPROCESSOR
     49 #include <arch/powerpc/pic/ipivar.h>
     50 #endif
     51 
     52 #ifdef __HAVE_FAST_SOFTINTS
     53 #include <powerpc/softint.h>
     54 #endif
     55 
#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

/* A virtual IRQ (virq) is valid when it lies within [0, NVIRQ). */
#define	LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];	/* all registered PICs, in pic_add() order */
int num_pics = 0;		/* number of entries used in pics[] */
int max_base = 0;		/* first hardware IRQ number past the last PIC */
uint8_t	virq[NIRQ];		/* hardware IRQ -> virtual IRQ; 0 = unmapped */
int	virq_max = 0;		/* highest virtual IRQ handed out by mapirq() */
imask_t	imask[NIPL];		/* per-IPL bitmask of blocked virtual IRQs */
int	primary_pic = 0;	/* index into pics[] used by pic_ext_intr() */

static int	fakeintr(void *);
static int	mapirq(uint32_t);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

/* Per-virtual-IRQ state: handler chain, type, IPL, event counter. */
static struct intr_source intrsources[NVIRQ];
     74 
     75 void
     76 pic_init(void)
     77 {
     78 	for (u_int i = 0; i < NIRQ; i++)
     79 		virq[i] = 0;
     80 	memset(intrsources, 0, sizeof(intrsources));
     81 }
     82 
     83 int
     84 pic_add(struct pic_ops *pic)
     85 {
     86 
     87 	if (num_pics >= MAX_PICS)
     88 		return -1;
     89 
     90 	pics[num_pics] = pic;
     91 	pic->pic_intrbase = max_base;
     92 	max_base += pic->pic_numintrs;
     93 	num_pics++;
     94 
     95 	return pic->pic_intrbase;
     96 }
     97 
     98 void
     99 pic_finish_setup(void)
    100 {
    101 	struct pic_ops *pic;
    102 	int i;
    103 
    104 	for (i = 0; i < num_pics; i++) {
    105 		pic = pics[i];
    106 		if (pic->pic_finish_setup != NULL)
    107 			pic->pic_finish_setup(pic);
    108 	}
    109 }
    110 
    111 static struct pic_ops *
    112 find_pic_by_irq(int irq)
    113 {
    114 	for (u_int base = 0; base < num_pics; base++) {
    115 		struct pic_ops * const pic = pics[base];
    116 		if (pic->pic_intrbase <= irq
    117 		    && irq < pic->pic_intrbase + pic->pic_numintrs) {
    118 			return pic;
    119 		}
    120 	}
    121 	return NULL;
    122 }
    123 
/*
 * Placeholder handler installed while the masks are being recomputed;
 * it ignores its argument and reports the interrupt as unhandled.
 */
static int
fakeintr(void *arg)
{
	(void)arg;
	return 0;
}
    130 
/*
 * Register an interrupt handler.
 *
 * hwirq  - global hardware IRQ number (the pic_intrbase-relative ranges
 *          of all registered PICs, concatenated).
 * type   - interrupt type (IST_*); edge/level sources only share with
 *          the same type.
 * ipl    - priority level for the handler; IPL_NONE is promoted to
 *          IPL_HIGH for mask computation.
 * ih_fun - handler function, invoked with ih_arg.
 *
 * Returns an opaque handle for intr_disestablish().  Panics on invalid
 * arguments instead of returning an error.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Allocate (or look up) the virtual IRQ for this hardware IRQ. */
	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	/* Enforce the sharing rules for this source's trigger type. */
	switch (is->is_type) {
	case IST_NONE:
		/* First user of the source decides the type. */
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH -- mismatched type is only OK if IST_NONE */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	/* First handler on this source: attach its event counter. */
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;
	*p = ih;

	/* Let the PIC program trigger type/priority for its local pin. */
	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();


	return ih;
}
    239 
/*
 * No-op pic_establish_irq hook for PICs that need no per-pin setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
	/* Nothing to program. */
}
    244 
/*
 * Deregister an interrupt handler.
 *
 * arg is the handle returned by intr_establish().  The handler is
 * unlinked from its source's chain and freed; the source's IPL and the
 * global masks are recomputed from the surviving handlers.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int irq = ih->ih_irq;	/* virtual IRQ the handler sits on */
	struct intr_source * const is = &intrsources[irq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!LEGAL_VIRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;	/* remember the link to unhook below */
		} else {
			/* recompute max IPL over the remaining handlers */
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	/* Last handler gone: mark the source free and drop its evcnt. */
	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
	}
}
    290 
    291 /*
    292  * Map max_base irqs into 32 (bits).
    293  */
    294 static int
    295 mapirq(uint32_t irq)
    296 {
    297 	struct pic_ops *pic;
    298 	int v;
    299 
    300 	if (irq >= max_base)
    301 		panic("invalid irq %d", irq);
    302 
    303 	if ((pic = find_pic_by_irq(irq)) == NULL)
    304 		panic("%s: cannot find PIC for IRQ %d", __func__, irq);
    305 
    306 	if (virq[irq])
    307 		return virq[irq];
    308 
    309 	virq_max++;
    310 	v = virq_max;
    311 	if (v > HWIRQ_MAX)
    312 		panic("virq overflow");
    313 
    314 	intrsources[v].is_hwirq = irq;
    315 	intrsources[v].is_pic = pic;
    316 	virq[irq] = v;
    317 #ifdef PIC_DEBUG
    318 	printf("mapping irq %d to virq %d\n", irq, v);
    319 #endif
    320 	return v;
    321 }
    322 
/* Printable names for the IST_* interrupt trigger types, indexed by type. */
static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE]  = "edge-triggered",
   [IST_LEVEL] = "level-triggered",
};
    329 
    330 const char *
    331 intr_typename(int type)
    332 {
    333 	KASSERT((unsigned int) type < __arraycount(intr_typenames));
    334 	KASSERT(intr_typenames[type] != NULL);
    335 	return intr_typenames[type];
    336 }
    337 
    338 /*
    339  * Recalculate the interrupt masks from scratch.
    340  * We could code special registry and deregistry versions of this function that
    341  * would be faster, but the code would be nastier, and we don't expect this to
    342  * happen very much anyway.
    343  */
    344 static void
    345 intr_calculatemasks(void)
    346 {
    347 	imask_t newmask[NIPL] = { [IPL_NONE...IPL_HIGH] = 0 };
    348 	struct intr_source *is;
    349 	int irq;
    350 
    351 	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
    352 		newmask[ipl] = 0;
    353 	}
    354 
    355 	/* First, figure out which ipl each IRQ uses. */
    356 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
    357 		newmask[is->is_ipl] |= 1ULL << irq;
    358 	}
    359 
    360 	/*
    361 	 * IPL_NONE is used for hardware interrupts that are never blocked,
    362 	 * and do not block anything else.
    363 	 */
    364 	newmask[IPL_NONE] = 0;
    365 
    366 	/*
    367 	 * strict hierarchy - all IPLs block everything blocked by any lower
    368 	 * IPL
    369 	 */
    370 	for (u_int ipl = 1; ipl < NIPL; ipl++) {
    371 		newmask[ipl] |= newmask[ipl - 1];
    372 	}
    373 
    374 #ifdef DEBUG_IPL
    375 	for (u_int ipl = 0; ipl < NIPL; ipl++) {
    376 		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
    377 	}
    378 #endif
    379 
    380 	/*
    381 	 * Disable all interrupts.
    382 	 */
    383 	for (u_int base = 0; base < num_pics; base++) {
    384 		struct pic_ops * const pic = pics[base];
    385 		for (u_int i = 0; i < pic->pic_numintrs; i++) {
    386 			pic->pic_disable_irq(pic, i);
    387 		}
    388 	}
    389 
    390 	/*
    391 	 * Now that all interrupts are disabled, update the ipl masks.
    392 	 */
    393 	for (u_int ipl = 0; ipl < NIPL; ipl++) {
    394 		imask[ipl] = newmask[ipl];
    395 	}
    396 
    397 	/*
    398 	 * Lastly, enable IRQs actually in use.
    399 	 */
    400 	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
    401 		if (is->is_hand)
    402 			pic_enable_irq(is->is_hwirq);
    403 	}
    404 }
    405 
    406 void
    407 pic_enable_irq(int num)
    408 {
    409 	struct pic_ops *current;
    410 	int type;
    411 
    412 	current = find_pic_by_irq(num);
    413 	if (current == NULL)
    414 		panic("%s: bogus IRQ %d", __func__, num);
    415 	type = intrsources[virq[num]].is_type;
    416 	current->pic_enable_irq(current, num - current->pic_intrbase, type);
    417 }
    418 
    419 void
    420 pic_mark_pending(int irq)
    421 {
    422 	struct cpu_info * const ci = curcpu();
    423 	int v, msr;
    424 
    425 	v = virq[irq];
    426 	if (v == 0)
    427 		printf("IRQ %d maps to 0\n", irq);
    428 
    429 	msr = mfmsr();
    430 	mtmsr(msr & ~PSL_EE);
    431 	ci->ci_ipending |= 1ULL << v;
    432 	mtmsr(msr);
    433 }
    434 
/*
 * Run all interrupts that were marked pending while masked and are now
 * deliverable at the current IPL.  Called from splx()/spllower() when
 * the IPL drops.  Hard interrupts are drained first (highest virq
 * first), then fast soft interrupts if configured.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	imask_t hwpend;
	int emsr, dmsr;

	/* Already draining on this CPU: avoid recursion via splx(). */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();		/* MSR with EE set (asserted below) */
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;	/* same MSR with external interrupts off */
	mtmsr(dmsr);

	pcpl = ci->ci_cpl;	/* IPL to restore when done */
#ifdef __HAVE_FAST_SOFTINTS
#if 0
again:
#endif
#endif

	/* Do now unmasked pendings */
	while ((hwpend = (ci->ci_ipending & ~imask[pcpl] & HWIRQ_MASK)) != 0) {
		ci->ci_idepth++;
		/* Get most significant pending bit */
		irq = MS_PENDING(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1ULL << irq);
		if (irq == 0) {
			/* virq 0 means "unmapped" and should never be set */
			printf("VIRQ0");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		/* Run the chain at the source's IPL with interrupts on. */
		splraise(is->is_ipl);
		mtmsr(emsr);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
					irq, is->is_hwirq, is);
			}
#endif
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_ipl == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
		}
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		/* The source was masked at the PIC while pending; unmask. */
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
#if 0
	/* Disabled: old hand-rolled softint dispatch, kept for reference. */
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1ULL << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_NET)) {
		ci->ci_ipending &= ~(1ULL << SIR_NET);
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1ULL << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1ULL << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#else
	/* Soft interrupts now deliverable at pcpl go through powerpc_softint. */
	const u_int softints = (ci->ci_data.cpu_softints << pcpl) & IPL_SOFTMASK;

	if (__predict_false(softints != 0)) {
		splhigh();
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		ci->ci_cpl = pcpl;
	}
#endif
#endif

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}
    554 
/*
 * Main interrupt dispatch for one PIC.  Reads IRQs from the PIC until
 * it reports 255 ("nothing pending").  IRQs masked at the current IPL
 * are recorded in ci_ipending and disabled at the PIC; deliverable
 * ones run their handler chain at the source's IPL with EE enabled.
 * Returns 0 always (also when nothing was pending).
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;	/* virtual IRQ / PIC-relative hardware IRQ */
	int pcpl, msr, bail;
	imask_t r_imen;

	realirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (realirq == 255)	/* 255 = spurious / nothing pending */
		return 0;

	msr = mfmsr();		/* entry MSR; EE re-enabled around handlers */
	pcpl = ci->ci_cpl;	/* IPL to restore at the end */

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1ULL << irq;
	is = &intrsources[irq];

	if ((imask[pcpl] & r_imen) != 0) {

		ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;
		ci->ci_idepth++;

		/* Run the chain at the source's IPL, interrupts enabled. */
		splraise(is->is_ipl);
		mtmsr(msr | PSL_EE);
		ih = is->is_hand;
		bail = 0;
		/*
		 * NOTE(review): the chain walk stops after 10 handlers
		 * (`bail') -- presumably a guard against a corrupted or
		 * looping list; chains longer than 10 would be cut short.
		 */
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_ipl == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
			bail++;
		}
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		ci->ci_data.cpu_nintr++;
		is->is_ev.ev_count++;
		ci->ci_idepth--;
	}
#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	if (realirq != 255)
		goto start;

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
    647 
    648 void
    649 pic_ext_intr(void)
    650 {
    651 
    652 	KASSERT(pics[primary_pic] != NULL);
    653 	pic_handle_intr(pics[primary_pic]);
    654 
    655 	return;
    656 
    657 }
    658 
/*
 * Raise the current CPU's IPL to `ncpl' (never lowers it) and return
 * the previous IPL so the caller can restore it with splx().
 */
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	/* Already at the requested level: nothing to do. */
	if (ncpl == ci->ci_cpl) return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);	/* only ever raise, never lower */
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}
    674 
    675 static inline bool
    676 have_pending_intr_p(struct cpu_info *ci, int ncpl)
    677 {
    678 	if (ci->ci_ipending & ~imask[ncpl])
    679 		return true;
    680 #ifdef __HAVE_FAST_SOFTINTS
    681 	if ((ci->ci_data.cpu_softints << ncpl) & IPL_SOFTMASK)
    682 		return true;
    683 #endif
    684 	return false;
    685 }
    686 
/*
 * Lower (or set) the current CPU's IPL to `ncpl' and dispatch any
 * interrupts that became deliverable at the new level.
 */
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	/* Drain whatever the new, lower level now allows. */
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}
    700 
/*
 * Set the current CPU's IPL to `ncpl', dispatch any interrupts now
 * deliverable, and return the previous IPL.
 */
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	/* Drain whatever the new level now allows. */
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
    716 
    717 void
    718 genppc_cpu_configure(void)
    719 {
    720 	aprint_normal("biomask %x netmask %x ttymask %x\n",
    721 	    (u_int)imask[IPL_BIO] & 0x1fffffff,
    722 	    (u_int)imask[IPL_NET] & 0x1fffffff,
    723 	    (u_int)imask[IPL_TTY] & 0x1fffffff);
    724 
    725 	spl0();
    726 }
    727 
#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

/*
 * Pick an ISA IRQ for an interrupt of type `type' from the candidate
 * bitmask `mask' (bit N = PIC-relative IRQ N).  Prefers a completely
 * unused IRQ; otherwise falls back to a shareable (level/level) one.
 * On success stores the PIC-relative IRQ in *irq_p and returns 0;
 * returns 1 if no suitable IRQ exists.
 */
int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;	/* best shareable candidate so far */
	int shared_depth = 0;	/* handler-chain length of that candidate */
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq[irq + pic->pic_intrbase];
		/* Never mapped: completely free, take it. */
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		/* Mapped but no longer in use: also free. */
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			if (maybe_irq == -1) {
				maybe_irq = irq;
				continue;
			}
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			/*
			 * NOTE(review): shared_depth starts at 0 and the
			 * first candidate's depth is never computed, so
			 * `depth < shared_depth' can never be true and
			 * shared_depth is never updated -- the first
			 * shareable IRQ always wins.  Confirm whether
			 * least-loaded selection was intended.
			 */
			if (depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	/* No free line; fall back to the shareable candidate, if any. */
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif
    784