/*	$NetBSD: octeon_intr.c,v 1.6 2016/07/12 06:13:39 skrll Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_octeon.h"
#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.6 2016/07/12 06:13:39 skrll Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};

const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

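/*
 * Per-established-interrupt state: the handler function and argument,
 * the CIU summary bit (irq) it is hooked to, and the IPL it was
 * established at.
 */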
struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif

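/*
 * Established handlers, indexed by CIU summary-register bit number.
 * The mailbox (IPI) slots are pre-wired when MULTIPROCESSOR.
 */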
struct octeon_intrhand *octeon_ciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};

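/*
 * Serializes updates to octeon_ciu_intrs[] and to the cached CIU enable
 * masks in octeon_intr_establish()/octeon_intr_disestablish().
 */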
kmutex_t octeon_intr_lock;

#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};

#ifdef MULTIPROCESSOR
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG1),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif

#ifdef DEBUG
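/*
 * Sanity-check the CIU mailbox registers: clear both cores' mailboxes,
 * set single bits in the low and high halves of core 0's mailbox, and
 * verify that the corresponding summary bits appear only where expected.
 */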
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	const uint64_t sum_mbox_hi = __BIT(_CIU_INT_MBOX_31_16_SHIFT);

	mips3_sd(mbox_clr0, ~0ULL);
	mips3_sd(mbox_clr1, ~0ULL);

	uint32_t mbox0 = mips3_ld(mbox_set0);
	uint32_t mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	mips3_sd(mbox_set0, __BIT(0));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips3_ld(int_sum0);
	uint64_t sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	mips3_sd(mbox_clr0, mbox0);
	mbox0 = mips3_ld(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	mips3_sd(mbox_set0, __BIT(16));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips3_ld(int_sum0);
	sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif

#undef X

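/*
 * Per-CPU interrupt setup.  On the boot CPU this also installs the
 * IPL-to-SR map, initializes octeon_intr_lock, and (for MULTIPROCESSOR)
 * hooks octeon_send_ipi into mips_locoresw.  The cached enable masks
 * are then written to the CIU enable registers and per-IRQ event
 * counters are attached.
 */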
void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;

	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_int0_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips3_sd(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips3_sd(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int32_en, 0);

	mips3_sd(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips3_sd(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips3_sd(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

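/*
 * Record the core clock frequency and derive the per-hz tick count and
 * microsecond delay divisor, then reset the CP0 count/compare registers.
 */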
void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

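/*
 * Establish a handler for the given CIU irq at the given IPL (IPL_VM or
 * above) and enable it in the matching CIU enable register; IPL_SCHED
 * and higher interrupts are enabled on both cores when MULTIPROCESSOR.
 * Returns an opaque cookie for octeon_intr_disestablish(), or NULL if
 * the handler could not be allocated.
 *
 * Typical use from a driver attach routine might look like the sketch
 * below (the irq macro, handler, and softc names are hypothetical):
 *
 *	sc->sc_ih = octeon_intr_establish(CIU_INT_UART_0, IPL_VM,
 *	    mydrv_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "unable to establish interrupt\n");
 */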
void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octeon_ciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octeon_ciu_intrs[irq]);

	octeon_ciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 |= irq_mask;
		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 |= irq_mask;
		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 |= irq_mask;
		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}

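/*
 * Disable the interrupt in the CIU enable register(s) for its IPL,
 * unhook the handler from octeon_ciu_intrs[], and free the cookie
 * returned by octeon_intr_establish().
 */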
void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 &= ~irq_mask;
		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 &= ~irq_mask;
		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 &= ~irq_mask;
		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octeon_ciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

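/*
 * Hardware interrupt dispatch: select the CIU summary register that
 * matches the pending CP0 interrupt line (int2 for IPL_DDB/IPL_HIGH,
 * int1 for IPL_SCHED, int0 for IPL_VM handlers), mask it with the
 * enabled bits, and run the established handler for each set bit,
 * taking the kernel lock around IPL_VM handlers on MULTIPROCESSOR.
 */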
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips3_ld(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips3_ld(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips3_ld(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octeon_ciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

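/*
 * Mailbox (IPI) interrupt handler.  The argument selects which half of
 * the 32-bit mailbox this handler services; pending bits are
 * acknowledged by writing them to the mailbox-clear register, folded
 * down to IPI numbers, and passed to ipi_process() (watchdog IPIs are
 * handled locally via a softint).
 */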
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

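/*
 * Send an IPI by setting a bit in the target CPU's mailbox-set register.
 * A NULL cpu_info broadcasts to every other CPU.  IPI_SUSPEND and
 * IPI_WDOG are posted in the upper 16 mailbox bits so they are taken at
 * the higher IPL.
 */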
int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = __BIT(req);

	if (req == IPI_SUSPEND || req == IPI_WDOG) {
		ipi_mask <<= 16;
	}

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);
	return 0;
}
#endif	/* MULTIPROCESSOR */