Home | History | Annotate | Line # | Download | only in cavium
octeon_intr.c revision 1.13
      1 /*	$NetBSD: octeon_intr.c,v 1.13 2020/06/20 18:48:28 riastradh Exp $	*/
      2 /*
      3  * Copyright 2001, 2002 Wasabi Systems, Inc.
      4  * All rights reserved.
      5  *
      6  * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *      This product includes software developed for the NetBSD Project by
     19  *      Wasabi Systems, Inc.
     20  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     21  *    or promote products derived from this software without specific prior
     22  *    written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34  * POSSIBILITY OF SUCH DAMAGE.
     35  */
     36 
/*
 * Platform-specific interrupt support for Cavium Octeon processors.
 */
     40 
     41 #include "opt_octeon.h"
     42 #include "opt_multiprocessor.h"
     43 
     44 #include "cpunode.h"
     45 #define __INTR_PRIVATE
     46 
     47 #include <sys/cdefs.h>
     48 __KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.13 2020/06/20 18:48:28 riastradh Exp $");
     49 
     50 #include <sys/param.h>
     51 #include <sys/cpu.h>
     52 #include <sys/systm.h>
     53 #include <sys/device.h>
     54 #include <sys/intr.h>
     55 #include <sys/kernel.h>
     56 #include <sys/kmem.h>
     57 #include <sys/atomic.h>
     58 
     59 #include <lib/libkern/libkern.h>
     60 
     61 #include <mips/locore.h>
     62 
     63 #include <mips/cavium/dev/octeon_ciureg.h>
     64 #include <mips/cavium/octeonvar.h>
     65 
/*
 * This is a mask of bits to clear in the CP0 status register (SR) when
 * we go to a given hardware interrupt priority level: interrupt lines
 * whose bits are listed here are blocked at that IPL and above.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};
     83 
/*
 * Human-readable names for each CIU interrupt source, indexed by IRQ
 * number; used as evcnt(9) names in octeon_intr_init().
 */
const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};
    150 
/*
 * Per-IRQ interrupt handler record; one per established source,
 * stored in octciu_intrs[].
 */
struct octeon_intrhand {
	int (*ih_func)(void *);	/* handler function */
	void *ih_arg;		/* argument passed to ih_func */
	int ih_irq;		/* CIU interrupt number (0..NIRQS-1) */
	int ih_ipl;		/* IPL the source was established at */
};
    157 
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

/*
 * Statically-wired handlers for the two CIU mailbox interrupts used
 * for IPIs.  Mailbox bits 0-15 are serviced at IPL_SCHED; bits 16-31
 * (used by octeon_send_ipi() for IPI_SUSPEND/IPI_WDOG) at IPL_HIGH.
 * The ih_arg encodes which half of the mailbox word to service.
 */
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = CIU_INT_MBOX_31_16,
		.ih_ipl = IPL_HIGH,
	},
};
#endif
    177 
/*
 * Active handler for each CIU IRQ; NULL when not established.  The
 * IPI mailbox slots are populated statically at build time.
 */
struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
	[CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};
    184 
/* Serializes updates to octciu_intrs[] and the shadow enable masks. */
kmutex_t octeon_intr_lock;

/* Map a CIU register physical address to an uncached XKPHYS address. */
#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

/*
 * CIU register addresses for cpu0.  int0/int1/int2 feed MIPS hard
 * interrupt lines 0/1/2 and carry IPL_VM, IPL_SCHED, and
 * IPL_DDB/IPL_HIGH sources respectively (see octeon_intr_establish()
 * and octeon_iointr()).
 */
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};
    213 
#ifdef MULTIPROCESSOR
/*
 * CIU register addresses for cpu1.
 * XXX limit of two CPUs ...
 */
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG(1)),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif
    238 
#ifdef DEBUG
/*
 * Sanity-check the mailbox hardware at boot: verify that a bit set in
 * cpu0's MBOX_SET register reads back, raises the expected mailbox
 * summary bit (low half vs. high half) on cpu0 only, and clears via
 * MBOX_CLR.  Runs on cpu0 before IPIs are enabled.
 */
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(CIU_INT_MBOX_15_0);
	const uint64_t sum_mbox_hi = __BIT(CIU_INT_MBOX_31_16);

	/* Start from a clean slate: clear all mailbox bits on both CPUs. */
	mips3_sd(mbox_clr0, ~0ULL);
	mips3_sd(mbox_clr1, ~0ULL);

	uint32_t mbox0 = mips3_ld(mbox_set0);
	uint32_t mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	/* Set a low-half bit on cpu0; only cpu0's low summary should rise. */
	mips3_sd(mbox_set0, __BIT(0));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips3_ld(int_sum0);
	uint64_t sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	/* Writing the same bit to MBOX_CLR must clear it again. */
	mips3_sd(mbox_clr0, mbox0);
	mbox0 = mips3_ld(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	/* Repeat with a high-half bit: only the high summary should rise. */
	mips3_sd(mbox_set0, __BIT(16));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips3_ld(int_sum0);
	sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif
    300 
    301 #undef X
    302 
/*
 * Per-CPU interrupt initialization, called once for each CPU as it
 * comes up.  cpu0 additionally installs the global IPL/SR map, the
 * intr lock, and (MP) the IPI send hook.  Programs the CIU enable
 * registers from the softc's shadow masks and attaches per-IRQ event
 * counters.
 */
void
octeon_intr_init(struct cpu_info *ci)
{
#ifdef DIAGNOSTIC
	const int cpunum = cpu_index(ci);
#endif
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;


	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		/* Only two CPUs are supported (see octeon_cpu1_softc). */
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs: low mailbox half on int1 (IPL_SCHED), high
	// half on int2 (IPL_HIGH), matching ipi_intrhands[].
	cpu->cpu_int1_enable0 |= __BIT(CIU_INT_MBOX_15_0);
	cpu->cpu_int2_enable0 |= __BIT(CIU_INT_MBOX_31_16);
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0,
		    cpu->cpu_int2_enable0);

	/* Push the shadow enable masks out to the CIU. */
	mips3_sd(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips3_sd(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips3_sd(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int32_en, 0);

	mips3_sd(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips3_sd(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips3_sd(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	/* Discard any stale mailbox bits before taking IPIs. */
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}
    361 
    362 void
    363 octeon_cal_timer(int corefreq)
    364 {
    365 	/* Compute the number of cycles per second. */
    366 	curcpu()->ci_cpu_freq = corefreq;
    367 
    368 	/* Compute the number of ticks for hz. */
    369 	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
    370 
    371 	/* Compute the delay divisor and reciprical. */
    372 	curcpu()->ci_divisor_delay =
    373 	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
    374 #if 0
    375 	MIPS_SET_CI_RECIPRICAL(curcpu());
    376 #endif
    377 
    378 	mips3_cp0_count_write(0);
    379 	mips3_cp0_compare_write(0);
    380 }
    381 
    382 void *
    383 octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
    384 {
    385 	struct octeon_intrhand *ih;
    386 
    387 	if (irq >= NIRQS)
    388 		panic("octeon_intr_establish: bogus IRQ %d", irq);
    389 	if (ipl < IPL_VM)
    390 		panic("octeon_intr_establish: bogus IPL %d", ipl);
    391 
    392 	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
    393 	if (ih == NULL)
    394 		return (NULL);
    395 
    396 	ih->ih_func = func;
    397 	ih->ih_arg = arg;
    398 	ih->ih_irq = irq;
    399 	ih->ih_ipl = ipl;
    400 
    401 	mutex_enter(&octeon_intr_lock);
    402 
    403 	/*
    404 	 * First, make it known.
    405 	 */
    406 	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
    407 	    irq, octciu_intrs[irq]);
    408 
    409 	octciu_intrs[irq] = ih;
    410 	membar_producer();
    411 
    412 	/*
    413 	 * Now enable it.
    414 	 */
    415 	const uint64_t irq_mask = __BIT(irq);
    416 	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
    417 #if MULTIPROCESSOR
    418 	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
    419 #endif
    420 
    421 	switch (ipl) {
    422 	case IPL_VM:
    423 		cpu0->cpu_int0_enable0 |= irq_mask;
    424 		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
    425 		break;
    426 
    427 	case IPL_SCHED:
    428 		cpu0->cpu_int1_enable0 |= irq_mask;
    429 		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
    430 #ifdef MULTIPROCESSOR
    431 		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
    432 		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
    433 #endif
    434 		break;
    435 
    436 	case IPL_DDB:
    437 	case IPL_HIGH:
    438 		cpu0->cpu_int2_enable0 |= irq_mask;
    439 		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
    440 #ifdef MULTIPROCESSOR
    441 		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
    442 		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
    443 #endif
    444 		break;
    445 	}
    446 
    447 	mutex_exit(&octeon_intr_lock);
    448 
    449 	return ih;
    450 }
    451 
    452 void
    453 octeon_intr_disestablish(void *cookie)
    454 {
    455 	struct octeon_intrhand * const ih = cookie;
    456 	const int irq = ih->ih_irq & (NIRQS-1);
    457 	const int ipl = ih->ih_ipl;
    458 
    459 	mutex_enter(&octeon_intr_lock);
    460 
    461 	/*
    462 	 * First disable it.
    463 	 */
    464 	const uint64_t irq_mask = ~__BIT(irq);
    465 	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
    466 #if MULTIPROCESSOR
    467 	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
    468 #endif
    469 
    470 	switch (ipl) {
    471 	case IPL_VM:
    472 		cpu0->cpu_int0_enable0 &= ~irq_mask;
    473 		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
    474 		break;
    475 
    476 	case IPL_SCHED:
    477 		cpu0->cpu_int1_enable0 &= ~irq_mask;
    478 		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
    479 #ifdef MULTIPROCESSOR
    480 		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
    481 		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
    482 #endif
    483 		break;
    484 
    485 	case IPL_DDB:
    486 	case IPL_HIGH:
    487 		cpu0->cpu_int2_enable0 &= ~irq_mask;
    488 		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
    489 #ifdef MULTIPROCESSOR
    490 		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
    491 		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
    492 #endif
    493 		break;
    494 	}
    495 
    496 	/*
    497 	 * Now remove it since we shouldn't get interrupts for it.
    498 	 */
    499 	octciu_intrs[irq] = NULL;
    500 
    501 	mutex_exit(&octeon_intr_lock);
    502 
    503 	kmem_free(ih, sizeof(*ih));
    504 }
    505 
/*
 * Hardware interrupt dispatch, entered from the MIPS interrupt glue
 * with `ipl' the priority now being serviced and `ipending' the CP0
 * hard-interrupt cause bits.  Reads the CIU summary register for the
 * highest pending line, masks it with our shadow enable bits, and
 * calls each established handler.
 */
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	/* Service the highest-priority line first: int2 > int1 > int0. */
	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips3_ld(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips3_ld(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips3_ld(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		/* Lowest set bit = lowest-numbered pending IRQ. */
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
			/*
			 * NOTE: the braces below balance only after
			 * preprocessing.  In MULTIPROCESSOR kernels,
			 * IPL_VM handlers run under the big kernel lock.
			 */
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
    552 
    553 #ifdef MULTIPROCESSOR
/* Each IPI must fit in one 16-bit half of the mailbox word. */
__CTASSERT(NIPIS < 16);

/*
 * Mailbox (IPI) interrupt handler.  `arg' is the mask of mailbox bits
 * this instance services (low half at IPL_SCHED, high half at
 * IPL_HIGH; see ipi_intrhands[]).  Acknowledges the pending bits in
 * the mailbox and processes the corresponding IPI requests.
 * Returns nonzero iff any IPI was handled.
 */
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	/* Which of our mailbox bits are actually pending? */
	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	/* Acknowledge them in the hardware. */
	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	/* Fold the high (urgent) half onto the low half: both halves
	 * encode the same IPI numbers (see octeon_send_ipi()). */
	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
    602 
/*
 * Send IPI `req' to CPU `ci', or broadcast it to every other CPU when
 * `ci' is NULL.  Records the request in ci_request_ipis, then rings
 * the target's mailbox: ordinary IPIs use the low 16 mailbox bits
 * (serviced at IPL_SCHED); IPI_SUSPEND/IPI_WDOG are shifted into the
 * high half (serviced at IPL_HIGH).  Returns 0 on success, -1 if the
 * target CPU is not yet initialized.
 */
int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		/* Broadcast: recurse for every CPU but ourselves. */
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = __BIT(req);

	atomic_or_64(&ci->ci_request_ipis, ipi_mask);
	if (req == IPI_SUSPEND || req == IPI_WDOG) {
		/* Urgent IPIs go to the high mailbox half (IPL_HIGH). */
		ipi_mask <<= 16;
	}

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);
	return 0;
}
    631 #endif	/* MULTIPROCESSOR */
    632