/*	$NetBSD: intr.c,v 1.37 2025/02/17 11:14:49 jmcneill Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.37 2025/02/17 11:14:49 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined (PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

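/*
 * REORDER_PROTECT() is both a hardware memory barrier and a compiler
 * barrier; the spl*() functions below use it to keep ci_cpl updates
 * ordered with respect to surrounding device and PIC accesses.
 */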
#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define REORDER_PROTECT() __asm volatile("sync" ::: "memory")
#else
#define REORDER_PROTECT() __asm volatile("sync; eieio" ::: "memory")
#endif

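/*
 * Global PIC state.  Each hwirq in use is assigned a "virtual IRQ"
 * (virq) bit so that the pending interrupts of all PICs fit into a
 * single imask_t; virq_map[] holds the hwirq -> virq translation and
 * virq_mask tracks which virq bits are still free.
 */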
struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
static imask_t imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

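/*
 * Register a PIC.  It is assigned the global hwirq range
 * [pic_intrbase, pic_intrbase + pic_numintrs); returns pic_intrbase
 * on success or -1 if the PIC table is full.
 */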
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

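/*
 * Return the PIC whose hwirq range contains the given hwirq, or NULL
 * if no registered PIC covers it.
 */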
static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

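/*
 * As intr_establish(), but with an identifying name for the handler.
 * A typical call from a (hypothetical) driver attach function:
 *
 *	sc->sc_ih = intr_establish_xname(hwirq, IST_LEVEL_LOW, IPL_VM,
 *	    xxx_intr, sc, device_xname(self));
 */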
void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];
	const bool cascaded = ih_fun == pic_handle_intr;

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		is->is_cascaded = cascaded;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		}
		if (cascaded != is->is_cascaded) {
			panic("intr_establish: can't share cascaded with "
			    "non-cascaded interrupt");
		}
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_intrid, sizeof(is->is_intrid), "%s irq %d",
		    pic->pic_name, is->is_hwirq);
		snprintf(is->is_evname, sizeof(is->is_evname), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_evname);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a hwirq in [0, max_base) onto a bit in the NVIRQ-bit virtual IRQ
 * space.  virq 0 is never allocated, so virq_map[hwirq] == 0 means
 * "not mapped yet".
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE_FALLING]  = "falling edge triggered",
   [IST_EDGE_RISING]  = "rising edge triggered",
   [IST_LEVEL_LOW] = "low level triggered",
   [IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

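/*
 * Mark a hwirq pending in ci_ipending without delivering it; it will
 * be replayed by pic_do_pending_int() when the IPL drops far enough.
 * External interrupts are disabled around the read-modify-write of
 * ci_ipending.
 */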
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

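/*
 * Call every handler on the source's chain.  Handlers at IPL_VM run
 * with the kernel lock held; all others run unlocked.
 */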
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

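/*
 * Replay interrupts that were marked pending while they were masked.
 * Runs with external interrupts disabled, repeatedly taking the most
 * significant pending virq no longer masked at the current IPL; for
 * non-cascaded sources the IPL is raised and interrupts re-enabled
 * around delivery.
 */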
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver pending interrupts that the current IPL no longer masks. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		if (!is->is_cascaded) {
			splraise(is->is_ipl);
			mtmsr(emsr);
		}
		intr_deliver(is, virq);
		if (!is->is_cascaded) {
			mtmsr(dmsr);
			ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */
		}

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure no stray bits can leak into the mask computed above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

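/*
 * Main dispatch loop for one PIC: fetch IRQs until the PIC reports
 * none pending (the 255 sentinel).  IRQs masked at the current IPL are
 * recorded in ci_ipending and disabled at the PIC; the rest are
 * delivered immediately.  This is also the handler installed for
 * cascaded PICs.
 */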
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	mtmsr(msr & ~PSL_EE);

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255) {
		mtmsr(msr);
		return 0;
	}

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			if (!is->is_cascaded) {
				splraise(is->is_ipl);
				mtmsr(msr | PSL_EE);
			}
			intr_deliver(is, virq);
			if (!is->is_cascaded) {
				mtmsr(msr & ~PSL_EE);
				ci->ci_cpl = pcpl;
			}

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

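/*
 * spl primitives.  ci_cpl holds an IPL, not a mask; imask[] translates
 * an IPL into the set of virqs it blocks.
 */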
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

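/*
 * Restore the IPL and replay anything that the new level unmasks.
 */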
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			if (maybe_irq == -1) {
				maybe_irq = irq;
				continue;
			}
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif

static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_intrid) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct intr_source *is;

	/* XXX interrupt is always generated by CPU 0 */
	if (cpu_idx != 0)
		return 0;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_ev.ev_count;
	return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrhand *ih;

	if (len == 0)
		return;

	buf[0] = '\0';

	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
		if (buf[0] != '\0')
			strlcat(buf, ", ", len);
		strlcat(buf, ih->ih_xname, len);
	}
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0))	/* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Ignore sources added after "count" was computed. */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_intrid, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

#undef REORDER_PROTECT