/*	$NetBSD: intr.c,v 1.3 2007/12/03 15:34:14 ad Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.3 2007/12/03 15:34:14 ad Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <arch/powerpc/pic/picvar.h>
#include "opt_pic.h"
#include "opt_interrupt.h"
#if defined(PIC_I8259) || defined (PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <arch/powerpc/pic/ipivar.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define NVIRQ		32	/* 32 virtual IRQs */
#define NIRQ		128	/* up to 128 HW IRQs */

#define HWIRQ_MAX	(NVIRQ - 4 - 1)
#define HWIRQ_MASK	0x0fffffff
#define	LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)
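
/*
 * Layout of the 32-bit pending/mask words, as implied by the constants
 * above and by intr_calculatemasks() below: the low bits (up to
 * HWIRQ_MAX) carry mapped hardware virqs, with virq 0 reserved as the
 * "not mapped" marker, while the top bits hold SPL_CLOCK and the soft
 * interrupt bits (SIR_*), which HWIRQ_MASK filters out.
 */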

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq[NIRQ];
int	virq_max = 0;
int	imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(uint32_t);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	int i;

	for (i = 0; i < NIRQ; i++)
		virq[i] = 0;
	memset(intrsources, 0, sizeof(intrsources));
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}
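
/*
 * The hwirq number space is simply the concatenation of the registered
 * PICs: for instance, if the first pic_add() call registers a PIC with
 * 64 sources and the second one a PIC with 16 (hypothetical sizes),
 * their pins show up here as hwirqs 0..63 and 64..79 respectively,
 * which follows directly from the arithmetic in pic_add() above.
 */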

void
pic_finish_setup(void)
{
	struct pic_ops *pic;
	int i;

	for (i = 0; i < num_pics; i++) {
		pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_irq(int irq)
{
	struct pic_ops *current;
	int base = 0;

	while (base < num_pics) {

		current = pics[base];
		if ((irq >= current->pic_intrbase) &&
		    (irq < (current->pic_intrbase + current->pic_numintrs))) {

			return current;
		}
		base++;
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int level, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxlevel = level;

	if (maxlevel == IPL_NONE)
		maxlevel = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {

		maxlevel = max(maxlevel, q->ih_level);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxlevel);

	/*
	 * Now that the handler is established we're ready to recalculate
	 * the masks.
	 */
	intr_calculatemasks();

	return ih;
}
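
/*
 * Illustrative use only (hypothetical driver, names invented here): a
 * driver would typically hook its handler at attach time and tear it
 * down at detach time, e.g.
 *
 *	sc->sc_ih = intr_establish(hwirq, IST_LEVEL, IPL_BIO, xx_intr, sc);
 *	...
 *	intr_disestablish(sc->sc_ih);
 *
 * where xx_intr() is an int (*)(void *), conventionally returning
 * nonzero if it handled the interrupt (the return value is not examined
 * here).
 */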

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intr_source *is = &intrsources[irq];
	struct intrhand **p, *q;

	if (!LEGAL_VIRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is a linear scan of the (short) handler list.
	 */
	for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
	}
}

/*
 * Map the max_base hardware IRQ numbers onto the 32 virtual IRQ bits.
 */
static int
mapirq(uint32_t irq)
{
	struct pic_ops *pic;
	int v;

	if (irq >= max_base)
		panic("invalid irq %u", irq);

	if ((pic = find_pic_by_irq(irq)) == NULL)
		panic("%s: cannot find PIC for IRQ %u", __func__, irq);

	if (virq[irq])
		return virq[irq];

	virq_max++;
	v = virq_max;
	if (v > HWIRQ_MAX)
		panic("virq overflow");

	intrsources[v].is_hwirq = irq;
	intrsources[v].is_pic = pic;
	virq[irq] = v;
#ifdef PIC_DEBUG
	printf("mapping irq %u to virq %d\n", irq, v);
#endif
	return v;
}
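
/*
 * For example (hypothetical numbers): the first hardware IRQ ever
 * established, say hwirq 73, gets virq 1, the next distinct one gets
 * virq 2, and so on; from then on bit 1 in ci_ipending, imask[] and
 * is_mask stands for hwirq 73.  virq 0 is never handed out, so a zero
 * entry in virq[] means "not mapped yet".
 */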

static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE]  = "edge-triggered",
   [IST_LEVEL] = "level-triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	struct intr_source *is;
	struct intrhand *q;
	struct pic_ops *current;
	int irq, level, i, base;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int levels = 0;
		for (q = is->is_hand; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		is->is_level = levels;
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		register int irqs = 0;
		for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++)
			if (is->is_level & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs;
	}

	/*
	 * IPL_CLOCK should mask the clock interrupt even if no handler
	 * is registered for it.
	 */
	imask[IPL_CLOCK] |= 1 << SPL_CLOCK;

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTCLOCK] = 1 << SIR_CLOCK;
	imask[IPL_SOFTNET] = 1 << SIR_NET;
	imask[IPL_SOFTSERIAL] = 1 << SIR_SERIAL;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	imask[IPL_NONE] = 0;

#ifdef SLOPPY_IPLS
	/*
	 * Enforce a sloppy hierarchy as in spl(9).
	 */
	/* everything above softclock must block softclock */
	for (i = IPL_SOFTCLOCK; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTCLOCK];

	/* everything above softnet must block softnet */
	for (i = IPL_SOFTNET; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTNET];

	/* IPL_TTY must block softserial */
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/* IPL_VM must block net, block I/O and tty */
	imask[IPL_VM] |= (imask[IPL_NET] | imask[IPL_BIO] | imask[IPL_TTY]);

	/* IPL_SERIAL must block IPL_TTY */
	imask[IPL_SERIAL] |= imask[IPL_TTY];

	/* IPL_HIGH must block all other priority levels */
	for (i = IPL_NONE; i < IPL_HIGH; i++)
		imask[IPL_HIGH] |= imask[i];
#else	/* !SLOPPY_IPLS */
	/*
	 * Strict hierarchy: every IPL blocks everything blocked by any
	 * lower IPL.
	 */
	for (i = 1; i < NIPL; i++)
		imask[i] |= imask[i - 1];
#endif	/* !SLOPPY_IPLS */

#ifdef DEBUG_IPL
	for (i = 0; i < NIPL; i++) {
		printf("%2d: %08x\n", i, imask[i]);
	}
#endif

	/* Finally, calculate the complete mask for each interrupt source. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int irqs = 1 << irq;
		for (q = is->is_hand; q; q = q->ih_next)
			irqs |= imask[q->ih_level];
		is->is_mask = irqs;
	}

	/* Lastly, disable all IRQs and re-enable those actually in use. */
	for (base = 0; base < num_pics; base++) {
		current = pics[base];
		for (i = 0; i < current->pic_numintrs; i++)
			current->pic_disable_irq(current, i);
	}

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
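
/*
 * Worked example (hypothetical, with the default strict hierarchy): if
 * virq 3 has its only handler at IPL_BIO, then is_level for virq 3 is
 * (1 << IPL_BIO), bit 3 is ORed into imask[IPL_BIO] and, via the
 * hierarchy pass, into every higher imask[] as well, and is_mask for
 * virq 3 becomes (1 << 3) | imask[IPL_BIO]; raising the priority to
 * IPL_BIO or above therefore defers virq 3.
 */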

void
pic_enable_irq(int num)
{
	struct pic_ops *current;
	int type;

	current = find_pic_by_irq(num);
	if (current == NULL)
		panic("%s: bogus IRQ %d", __func__, num);
	type = intrsources[virq[num]].is_type;
	current->pic_enable_irq(current, num - current->pic_intrbase, type);
}

void
pic_mark_pending(int irq)
{
	struct cpu_info * const ci = curcpu();
	int v, msr;

	v = virq[irq];
	if (v == 0)
		printf("IRQ %d maps to 0\n", irq);

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= 1 << v;
	mtmsr(msr);
}

void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	int hwpend;
	int emsr, dmsr;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;
	mtmsr(dmsr);

	pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Run the pending hardware interrupts that are now unmasked. */
	ci->ci_idepth++;
	while ((hwpend = (ci->ci_ipending & ~pcpl & HWIRQ_MASK)) != 0) {
		irq = 31 - cntlzw(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1 << irq);
		if (irq == 0) {
			printf("VIRQ0\n");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		splraise(is->is_mask);
		mtmsr(emsr);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
					irq, is->is_hwirq, is);
			}
#endif
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
		}
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
	}
	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1 << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_NET)) {
		ci->ci_ipending &= ~(1 << SIR_NET);
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1 << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#endif

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;
	int pcpl, msr, r_imen, bail;

	realirq = pic->pic_get_irq(pic);
	if (realirq == 255)
		return 0;

	msr = mfmsr();
	pcpl = ci->ci_cpl;

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ppcipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1 << irq;
	is = &intrsources[irq];

	if ((pcpl & r_imen) != 0) {

		ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;
		ci->ci_idepth++;

		splraise(is->is_mask);
		mtmsr(msr | PSL_EE);
		ih = is->is_hand;
		bail = 0;
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
			bail++;
		}
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		uvmexp.intrs++;
		is->is_ev.ev_count++;
		ci->ci_idepth--;
	}
#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic);
	if (realirq != 255)
		goto start;

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

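/*
 * Note that in this scheme ci_cpl is not a numeric priority level but a
 * mask of blocked virq/soft-interrupt bits (typically one of the imask[]
 * words or an is_mask), so splraise() simply ORs the new mask in and
 * splx()/spllower() restore a previously saved mask.
 */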
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* don't reorder.... */

	ocpl = ci->ci_cpl;
	ci->ci_cpl = ocpl | ncpl;
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

/* The following code should be implemented with lwarx/stwcx. to avoid
 * the MSR disable/enable. I need to read the manual once more.... */
void
softintr(int ipl)
{
	int msrsave;

	msrsave = mfmsr();
	mtmsr(msrsave & ~PSL_EE);
	curcpu()->ci_ipending |= 1 << ipl;
	mtmsr(msrsave);
}
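
/*
 * Untested sketch of the lwarx/stwcx. variant suggested above: an atomic
 * "or" into ci_ipending would make toggling PSL_EE unnecessary.  Kept
 * under #if 0 since it has not been compiled or run here.
 */
#if 0
void
softintr(int ipl)
{
	volatile int *p = &curcpu()->ci_ipending;
	int bit = 1 << ipl, tmp;

	__asm volatile(
	    "1:	lwarx	%0,0,%1\n"
	    "	or	%0,%0,%2\n"
	    "	stwcx.	%0,0,%1\n"
	    "	bne-	1b"
	    : "=&r"(tmp)
	    : "r"(p), "r"(bit)
	    : "cc", "memory");
}
#endif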

void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    imask[IPL_BIO] & 0x1fffffff,
	    imask[IPL_NET] & 0x1fffffff,
	    imask[IPL_TTY] & 0x1fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/*
			 * Prefer the shareable IRQ with the fewest handlers
			 * already attached; record the first candidate's
			 * depth so that later, shallower chains can win.
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif