Home | History | Annotate | Line # | Download | only in cavium
octeon_intr.c revision 1.11
      1 /*	$NetBSD: octeon_intr.c,v 1.11 2020/05/31 06:27:06 simonb Exp $	*/
      2 /*
      3  * Copyright 2001, 2002 Wasabi Systems, Inc.
      4  * All rights reserved.
      5  *
      6  * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *      This product includes software developed for the NetBSD Project by
     19  *      Wasabi Systems, Inc.
     20  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     21  *    or promote products derived from this software without specific prior
     22  *    written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34  * POSSIBILITY OF SUCH DAMAGE.
     35  */
     36 
/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */
     40 
     41 #include "opt_octeon.h"
     42 #include "opt_multiprocessor.h"
     43 
     44 #include "cpunode.h"
     45 #define __INTR_PRIVATE
     46 
     47 #include <sys/cdefs.h>
     48 __KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.11 2020/05/31 06:27:06 simonb Exp $");
     49 
     50 #include <sys/param.h>
     51 #include <sys/cpu.h>
     52 #include <sys/systm.h>
     53 #include <sys/device.h>
     54 #include <sys/intr.h>
     55 #include <sys/kernel.h>
     56 #include <sys/kmem.h>
     57 #include <sys/atomic.h>
     58 
     59 #include <lib/libkern/libkern.h>
     60 
     61 #include <mips/locore.h>
     62 
     63 #include <mips/cavium/dev/octeon_ciureg.h>
     64 #include <mips/cavium/octeonvar.h>
     65 
     66 /*
     67  * This is a mask of bits to clear in the SR when we go to a
     68  * given hardware interrupt priority level.
     69  */
/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 *
 * Mapping visible in the table below:
 *   MIPS_INT_MASK_0 -> IPL_VM (CIU int0 summary)
 *   MIPS_INT_MASK_1 -> IPL_SCHED (CIU int1 summary)
 *   MIPS_INT_MASK_5 -> IPL_SCHED (CP0 count/compare timer)
 * IPL_DDB masks the same set as IPL_SCHED; IPL_HIGH masks everything.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};
     83 
/*
 * Human-readable names for the NIRQS CIU interrupt sources, indexed by
 * bit position in the CIU_INTx_SUM0 registers; used as evcnt(9) names
 * in octeon_intr_init().  Order must match the hardware bit layout in
 * octeon_ciureg.h.
 */
const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};
    150 
/*
 * One registered interrupt handler.  Allocated by
 * octeon_intr_establish() and freed by octeon_intr_disestablish();
 * the IPI entries in ipi_intrhands are statically allocated.
 */
struct octeon_intrhand {
	int (*ih_func)(void *);	/* handler; returns nonzero if claimed */
	void *ih_arg;		/* opaque argument passed to ih_func */
	int ih_irq;		/* CIU sum-register bit number (0..NIRQS-1) */
	int ih_ipl;		/* IPL the handler was established at */
};
    157 
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

/*
 * Statically wired handlers for the two CIU mailbox interrupt lines.
 * The low 16 mailbox bits interrupt at IPL_SCHED, the high 16 at
 * IPL_HIGH (used for IPI_SUSPEND/IPI_WDOG; see octeon_send_ipi()).
 * ih_arg encodes the mailbox bits each instance services.
 */
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif
    177 
/*
 * Dispatch table, indexed by CIU bit number.  A NULL slot means the
 * source has no handler.  Written under octeon_intr_lock; read
 * lock-free from interrupt context in octeon_iointr() (paired with
 * the membar_producer() in octeon_intr_establish()).
 */
struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};
    184 
/* Serializes establish/disestablish and the shared enable masks. */
kmutex_t octeon_intr_lock;

/* Uncached XKPHYS view of a CIU register's physical address. */
#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

/*
 * Per-core CIU register addresses for core 0.  The CIU exposes three
 * summary/enable banks per core, used here as IPL_VM (int0),
 * IPL_SCHED (int1) and IPL_HIGH (int2/int4) levels.
 */
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};
    213 
#ifdef MULTIPROCESSOR
/*
 * CIU register addresses for core 1 (mirror of octeon_cpu0_softc,
 * pointing at the second core's register bank).  cpu_ci is filled in
 * when the secondary CPU attaches.
 */
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG1),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif
    237 
#ifdef DEBUG
/*
 * Sanity-check the CIU mailbox hardware at boot (DEBUG kernels only,
 * called from octeon_intr_init() on the primary CPU):
 *
 *  1. clear both cores' mailboxes and verify they read back as 0;
 *  2. set bit 0 of core 0's mailbox and verify only core 0's low
 *     mailbox summary bit asserts;
 *  3. clear it and verify it reads back as 0;
 *  4. set bit 16 and verify only core 0's high mailbox summary bit
 *     asserts.
 *
 * Note: the high-half bit (16) set in step 4 is intentionally left
 * set on return; octeon_intr_init() clears all 32 mailbox bits
 * afterwards in the MULTIPROCESSOR case.
 */
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	const uint64_t sum_mbox_hi = __BIT(_CIU_INT_MBOX_31_16_SHIFT);

	/* Writing 1s to the CLR register clears the mailbox bits. */
	mips3_sd(mbox_clr0, ~0ULL);
	mips3_sd(mbox_clr1, ~0ULL);

	/* Reading the SET register returns the current mailbox state. */
	uint32_t mbox0 = mips3_ld(mbox_set0);
	uint32_t mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	mips3_sd(mbox_set0, __BIT(0));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	/* Bit must appear on core 0 only. */
	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips3_ld(int_sum0);
	uint64_t sum1 = mips3_ld(int_sum1);

	/* A low-half mailbox bit asserts only the low summary bit. */
	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	mips3_sd(mbox_clr0, mbox0);
	mbox0 = mips3_ld(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	mips3_sd(mbox_set0, __BIT(16));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips3_ld(int_sum0);
	sum1 = mips3_ld(int_sum1);

	/* A high-half mailbox bit asserts only the high summary bit. */
	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif
    299 
    300 #undef X
    301 
/*
 * Per-CPU interrupt initialization, called once per core.  On the
 * primary CPU this also installs the global IPL->SR map, initializes
 * octeon_intr_lock and (MULTIPROCESSOR) hooks the IPI send function.
 * Finally it programs the core's CIU enable registers from the
 * cached software masks and attaches per-source event counters.
 *
 * NOTE(review): only two cores are supported here (KASSERT(cpunum == 1)
 * for non-primary CPUs).
 */
void
octeon_intr_init(struct cpu_info *ci)
{
#ifdef DIAGNOSTIC
	const int cpunum = cpu_index(ci);
#endif
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;


	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_int1_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0,
		    cpu->cpu_int2_enable0);

	/* Push the cached software masks out to the CIU. */
	mips3_sd(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips3_sd(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips3_sd(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int32_en, 0);

	/* No EN1 (watchdog-summary) sources enabled at any level. */
	mips3_sd(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips3_sd(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips3_sd(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	/* Start with an empty mailbox (octeon_mbox_test leaves bit 16 set). */
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	/* One event counter per CIU source, named after the source. */
	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}
    360 
    361 void
    362 octeon_cal_timer(int corefreq)
    363 {
    364 	/* Compute the number of cycles per second. */
    365 	curcpu()->ci_cpu_freq = corefreq;
    366 
    367 	/* Compute the number of ticks for hz. */
    368 	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
    369 
    370 	/* Compute the delay divisor and reciprical. */
    371 	curcpu()->ci_divisor_delay =
    372 	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
    373 #if 0
    374 	MIPS_SET_CI_RECIPRICAL(curcpu());
    375 #endif
    376 
    377 	mips3_cp0_count_write(0);
    378 	mips3_cp0_compare_write(0);
    379 }
    380 
    381 void *
    382 octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
    383 {
    384 	struct octeon_intrhand *ih;
    385 
    386 	if (irq >= NIRQS)
    387 		panic("octeon_intr_establish: bogus IRQ %d", irq);
    388 	if (ipl < IPL_VM)
    389 		panic("octeon_intr_establish: bogus IPL %d", ipl);
    390 
    391 	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
    392 	if (ih == NULL)
    393 		return (NULL);
    394 
    395 	ih->ih_func = func;
    396 	ih->ih_arg = arg;
    397 	ih->ih_irq = irq;
    398 	ih->ih_ipl = ipl;
    399 
    400 	mutex_enter(&octeon_intr_lock);
    401 
    402 	/*
    403 	 * First, make it known.
    404 	 */
    405 	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
    406 	    irq, octciu_intrs[irq]);
    407 
    408 	octciu_intrs[irq] = ih;
    409 	membar_producer();
    410 
    411 	/*
    412 	 * Now enable it.
    413 	 */
    414 	const uint64_t irq_mask = __BIT(irq);
    415 	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
    416 #if MULTIPROCESSOR
    417 	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
    418 #endif
    419 
    420 	switch (ipl) {
    421 	case IPL_VM:
    422 		cpu0->cpu_int0_enable0 |= irq_mask;
    423 		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
    424 		break;
    425 
    426 	case IPL_SCHED:
    427 		cpu0->cpu_int1_enable0 |= irq_mask;
    428 		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
    429 #ifdef MULTIPROCESSOR
    430 		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
    431 		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
    432 #endif
    433 		break;
    434 
    435 	case IPL_DDB:
    436 	case IPL_HIGH:
    437 		cpu0->cpu_int2_enable0 |= irq_mask;
    438 		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
    439 #ifdef MULTIPROCESSOR
    440 		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
    441 		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
    442 #endif
    443 		break;
    444 	}
    445 
    446 	mutex_exit(&octeon_intr_lock);
    447 
    448 	return ih;
    449 }
    450 
    451 void
    452 octeon_intr_disestablish(void *cookie)
    453 {
    454 	struct octeon_intrhand * const ih = cookie;
    455 	const int irq = ih->ih_irq & (NIRQS-1);
    456 	const int ipl = ih->ih_ipl;
    457 
    458 	mutex_enter(&octeon_intr_lock);
    459 
    460 	/*
    461 	 * First disable it.
    462 	 */
    463 	const uint64_t irq_mask = ~__BIT(irq);
    464 	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
    465 #if MULTIPROCESSOR
    466 	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
    467 #endif
    468 
    469 	switch (ipl) {
    470 	case IPL_VM:
    471 		cpu0->cpu_int0_enable0 &= ~irq_mask;
    472 		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
    473 		break;
    474 
    475 	case IPL_SCHED:
    476 		cpu0->cpu_int1_enable0 &= ~irq_mask;
    477 		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
    478 #ifdef MULTIPROCESSOR
    479 		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
    480 		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
    481 #endif
    482 		break;
    483 
    484 	case IPL_DDB:
    485 	case IPL_HIGH:
    486 		cpu0->cpu_int2_enable0 &= ~irq_mask;
    487 		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
    488 #ifdef MULTIPROCESSOR
    489 		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
    490 		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
    491 #endif
    492 		break;
    493 	}
    494 
    495 	/*
    496 	 * Now remove it since we shouldn't get interrupts for it.
    497 	 */
    498 	octciu_intrs[irq] = NULL;
    499 
    500 	mutex_exit(&octeon_intr_lock);
    501 
    502 	kmem_free(ih, sizeof(*ih));
    503 }
    504 
/*
 * Hardware interrupt dispatch, called from the MIPS interrupt
 * trampoline with the pending CP0 cause bits in "ipending".  Reads
 * the CIU summary register for the highest-priority pending line
 * (int2 > int1 > int0), masks it with the software enable mask, and
 * calls each enabled source's handler in ascending bit order.
 */
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	/* Highest-priority CIU bank pending wins; handle one bank per call. */
	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips3_ld(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips3_ld(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips3_ld(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
			/*
			 * The interleaved #ifdefs below select between
			 * two shapes of the same call: on MULTIPROCESSOR
			 * kernels IPL_VM handlers run under the big
			 * kernel lock, everything else is called bare;
			 * on uniprocessor kernels the handler is simply
			 * called directly.
			 */
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
    551 
    552 #ifdef MULTIPROCESSOR
    553 __CTASSERT(NIPIS < 16);
    554 
/*
 * IPI mailbox interrupt handler.  "arg" is the mailbox bit subset
 * this instance services (__BITS(15,0) at IPL_SCHED or __BITS(31,16)
 * at IPL_HIGH; see ipi_intrhands).  Reads and acknowledges the
 * pending mailbox bits, folds the high half down onto the low half
 * so each bit is an IPI number, then processes the requests.
 * Returns 1 if any IPI was handled, 0 otherwise.
 */
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	/* High-half mailbox bits must only be handled at >= IPL_SCHED. */
	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	/* Reading the SET register yields the currently pending bits. */
	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	/* Acknowledge in hardware before processing. */
	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	/* Fold the high half down: bit n and bit n+16 both mean IPI n. */
	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	/* Mark active before clearing the request, mirroring send order. */
	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
    601 
    602 int
    603 octeon_send_ipi(struct cpu_info *ci, int req)
    604 {
    605 	KASSERT(req < NIPIS);
    606 	if (ci == NULL) {
    607 		CPU_INFO_ITERATOR cii;
    608 		for (CPU_INFO_FOREACH(cii, ci)) {
    609 			if (ci != curcpu()) {
    610 				octeon_send_ipi(ci, req);
    611 			}
    612 		}
    613 		return 0;
    614 	}
    615 	KASSERT(cold || ci->ci_softc != NULL);
    616 	if (ci->ci_softc == NULL)
    617 		return -1;
    618 
    619 	struct cpu_softc * const cpu = ci->ci_softc;
    620 	uint64_t ipi_mask = __BIT(req);
    621 
    622 	atomic_or_64(&ci->ci_request_ipis, ipi_mask);
    623 	if (req == IPI_SUSPEND || req == IPI_WDOG) {
    624 		ipi_mask <<= 16;
    625 	}
    626 
    627 	mips3_sd(cpu->cpu_mbox_set, ipi_mask);
    628 	return 0;
    629 }
    630 #endif	/* MULTIPROCESSOR */
    631