/*	$NetBSD: intr.c,v 1.38 2025/07/05 15:11:05 macallan Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.38 2025/07/05 15:11:05 macallan Exp $");

#ifdef _KERNEL_OPT
#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)
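
/*
 * All registered PICs share one global hwirq numbering in [0, max_base).
 * Each hwirq that is actually in use is mapped (see mapirq()) onto a
 * "virtual" IRQ in [0, NVIRQ) so that a full set of pending or masked
 * interrupts fits in a single imask_t and can be manipulated with plain
 * bit operations.
 */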

#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define REORDER_PROTECT() __asm volatile("sync" ::: "memory")
#else
#define REORDER_PROTECT() __asm volatile("sync; eieio" ::: "memory")
#endif

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
static imask_t imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

struct pic_ops *
find_pic_by_cookie(void *c)
{
	for (int i = 0; i < num_pics; i++) {
		if (pics[i]->pic_cookie == c)
			return pics[i];
	}
	return NULL;
}

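/*
 * Register a PIC: it is assigned the next free range of global hwirq
 * numbers, [pic_intrbase, pic_intrbase + pic_numintrs).  Returns the
 * assigned pic_intrbase, or -1 if the PIC table is full.
 */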
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];
	const bool cascaded = ih_fun == pic_handle_intr;

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		is->is_cascaded = cascaded;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		}
		if (cascaded != is->is_cascaded) {
			panic("intr_establish: can't share cascaded with "
			    "non-cascaded interrupt");
		}
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_intrid, sizeof(is->is_intrid), "%s irq %d",
		    pic->pic_name, is->is_hwirq);
		snprintf(is->is_evname, sizeof(is->is_evname), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_evname);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by any handler on this source.
	 */
	is->is_ipl = maxipl;

	/*
	 * Now that the handler is established we're actually ready to
	 * calculate the masks.
	 */
	intr_calculatemasks();

	return ih;
}
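
/*
 * Typical use from a driver's attach routine (illustrative sketch only;
 * "foo_intr", "sc" and the choice of IST_LEVEL/IPL_VM are hypothetical):
 *
 *	sc->sc_ih = intr_establish_xname(sc->sc_hwirq, IST_LEVEL, IPL_VM,
 *	    foo_intr, sc, device_xname(sc->sc_dev));
 *
 * The returned cookie is later passed to intr_disestablish().
 */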

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain, recomputing the highest
	 * IPL among the handlers that remain.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a global hwirq number (0 .. max_base - 1) onto a free slot in the
 * NVIRQ-entry virtual IRQ space, so that each active interrupt source
 * owns one bit of an imask_t.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
	[IST_NONE]		= "none",
	[IST_PULSE]		= "pulsed",
	[IST_EDGE_FALLING]	= "falling edge triggered",
	[IST_EDGE_RISING]	= "rising edge triggered",
	[IST_LEVEL_LOW]		= "low level triggered",
	[IST_LEVEL_HIGH]	= "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
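/*
 * The rebuild runs in four steps: collect the virq bits in use at each
 * IPL, force the masks into a strict hierarchy (each IPL blocks at least
 * whatever every lower IPL blocks), disable all interrupts while the new
 * masks are copied into imask[], then re-enable the sources that still
 * have handlers attached.
 */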
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * Strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL.
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

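/*
 * Record a hwirq as pending without dispatching it; it will be replayed
 * from ci_ipending by pic_do_pending_int().  PSL_EE is cleared around
 * the update so the read-modify-write of ci_ipending is not preempted
 * by another interrupt.
 */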
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

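/*
 * Run every handler registered on a source.  Handlers at IPL_VM run
 * holding the big kernel lock; the lock is taken and dropped lazily
 * while walking the chain, so consecutive IPL_VM handlers share a
 * single acquisition.
 */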
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

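/*
 * Replay interrupts that were marked pending while masked.  Called with
 * interrupts enabled, typically via splx()/spllower() when the priority
 * level drops; the ci_iactive flag keeps the replay from recursing.
 */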
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Do now unmasked pendings */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		if (!is->is_cascaded) {
			splraise(is->is_ipl);
			mtmsr(emsr);
		}
		intr_deliver(is, virq);
		if (!is->is_cascaded) {
			mtmsr(dmsr);
			/* Don't use splx... we are here already! */
			ci->ci_cpl = pcpl;
		}

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

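/*
 * Main dispatch loop; also serves as the handler installed for cascaded
 * PICs.  A source that is masked at the current IPL is recorded in
 * ci_ipending and disabled at the PIC; anything else is delivered
 * immediately.  pic_get_irq() returns 255 for "nothing pending".
 */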
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	mtmsr(msr & ~PSL_EE);

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255) {
		mtmsr(msr);
		return 0;
	}

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			/* Masked! Mark this as pending. */
			ci->ci_ipending |= v_imen;
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			if (!is->is_cascaded) {
				splraise(is->is_ipl);
				mtmsr(msr | PSL_EE);
			}
			intr_deliver(is, virq);
			if (!is->is_cascaded) {
				mtmsr(msr & ~PSL_EE);
				ci->ci_cpl = pcpl;
			}

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

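/*
 * Raise the current IPL to ncpl (never lower it) and return the previous
 * level.  REORDER_PROTECT() is a sync/eieio barrier that keeps the CPU
 * from reordering device accesses around the priority change.
 */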
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/*
		 * Level interrupts can be shared; prefer the candidate
		 * with the shortest handler chain.
		 */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif

static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_intrid) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct intr_source *is;

	/* XXX interrupt is always generated by CPU 0 */
	if (cpu_idx != 0)
		return 0;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_ev.ev_count;
	return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrhand *ih;

	if (len == 0)
		return;

	buf[0] = '\0';

	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
		if (buf[0] != '\0')
			strlcat(buf, ", ", len);
		strlcat(buf, ih->ih_xname, len);
	}
}

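/*
 * Build the list of intrids for all sources that currently have a
 * handler attached.  The result is laid out as an int count followed by
 * that many intrid_t entries and must be freed with
 * interrupt_destruct_intrids().
 */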
struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0))	/* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Ignore sources that attached after "count" was taken. */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_intrid, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

#undef REORDER_PROTECT