/*	$NetBSD: octeon_intr.c,v 1.22 2020/08/05 04:47:35 simonb Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.22 2020/08/05 04:47:35 simonb Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * XXX:
 * Force all interrupts (except clock intrs and IPIs) to be routed
 * through cpu0 until MP on MIPS is more stable.
 */
#define	OCTEON_CPU0_INTERRUPTS


/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};
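
/*
 * MIPS_INT_MASK_0/1/2 are the status/cause bits for the CIU outputs
 * wired to IP2/IP3/IP4 (see octeon_iointr() below); MIPS_INT_MASK_5
 * is the on-core count/compare timer.  So IPL_VM blocks the IP2
 * (device) interrupts, IPL_SCHED and IPL_DDB additionally block IP3
 * and the clock, and only IPL_HIGH blocks the IP4 interrupts used
 * for IPIs and other IPL_HIGH handlers.
 */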

static const char * octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

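/*
 * The mailbox (IPI) handler is wired up statically at IPL_HIGH and
 * pre-entered in octciu_intrs[] below, so IPIs work without going
 * through octeon_intr_establish().  ih_arg carries the 16-bit
 * mailbox mask (bits 15..0) that octeon_ipi_intr() services.
 */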
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

static struct octeon_intrhand ipi_intrhands[1] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_HIGH,
	},
};
#endif

static struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
#endif
};

static kmutex_t octeon_intr_lock;

#if defined(MULTIPROCESSOR)
#define	OCTEON_NCPU	MAXCPUS
#else
#define	OCTEON_NCPU	1
#endif

struct cpu_softc octeon_cpu_softc[OCTEON_NCPU];

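/*
 * Precompute the XKPHYS addresses of each core's CIU summary, enable,
 * watchdog, poke and mailbox registers so the interrupt paths can
 * reach them directly with mips3_ld()/mips3_sd().
 */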
static void
octeon_intr_setup(void)
{
	struct cpu_softc *cpu;
	int cpunum;

#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

	for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
		cpu = &octeon_cpu_softc[cpunum];

		cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum));
		cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum));
		cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum));

		cpu->cpu_int_sum1 = X(CIU_INT_SUM1);

		cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum));
		cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum));
		cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum));

		cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum));
		cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum));
		cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum));

		cpu->cpu_wdog = X(CIU_WDOG(cpunum));
		cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum));

#ifdef MULTIPROCESSOR
		cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum));
		cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum));
#endif
	}

#undef X

}

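/*
 * Per-CPU interrupt initialization.  The boot CPU installs the
 * IPL-to-SR map, the interrupt lock, the IPI hook and the CIU
 * register addresses; every CPU then programs its current enable
 * masks, clears its mailbox (on MP kernels) and attaches per-IRQ
 * event counters.
 */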
void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	struct cpu_softc *cpu = &octeon_cpu_softc[cpunum];
	const char * const xname = cpu_name(ci);
	int bank;

	cpu->cpu_ci = ci;
	ci->ci_softc = cpu;

	KASSERT(cpunum == ci->ci_cpuid);

	if (ci->ci_cpuid == 0) {
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif

		octeon_intr_setup();
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_15_0);
#endif

	if (ci->ci_dev) {
		for (bank = 0; bank < NBANKS; bank++) {
			aprint_verbose_dev(ci->ci_dev,
			    "enabling intr masks %u "
			    " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
			    bank,
			    cpu->cpu_ip2_enable[bank],
			    cpu->cpu_ip3_enable[bank],
			    cpu->cpu_ip4_enable[bank]);
		}
	}

	for (bank = 0; bank < NBANKS; bank++) {
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
	}

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (int i = 0; i < NIRQS; i++) {
		if (octeon_intrnames[i] == NULL)
			octeon_intrnames[i] = kmem_asprintf("irq %d", i);
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
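
	/*
	 * For example, with a 500 MHz core clock and hz = 100 this
	 * yields ci_cycles_per_hz = 5,000,000 and ci_divisor_delay =
	 * 500 (cycles per microsecond).
	 */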
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

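/*
 * Establish a handler for a CIU interrupt source.  The handler is
 * recorded in octciu_intrs[] and the source's bit is set in the CIU
 * enable register matching the requested priority: IPL_VM sources go
 * to cpu0's IP2 enables, IPL_SCHED to IP3 and IPL_DDB/IPL_HIGH to
 * IP4; with OCTEON_CPU0_INTERRUPTS defined (the default above) the
 * latter two are also restricted to cpu0, otherwise they are enabled
 * on every attached CPU.  Returns an opaque cookie for
 * octeon_intr_disestablish(), or NULL if the handler cannot be
 * allocated.
 *
 * Illustrative use only (the irq number comes from octeon_ciureg.h
 * and the handler/softc are the caller's own):
 *
 *	sc->sc_ih = octeon_intr_establish(irq, IPL_VM, mydrv_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error(": failed to establish interrupt\n");
 */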
void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;
	struct cpu_softc *cpu;
#ifndef OCTEON_CPU0_INTERRUPTS
	int cpunum;
#endif

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octciu_intrs[irq]);

	octciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
#ifdef OCTEON_CPU0_INTERRUPTS
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip3_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
#else	/* OCTEON_CPU0_INTERRUPTS */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
#endif	/* OCTEON_CPU0_INTERRUPTS */
		break;

	case IPL_DDB:
	case IPL_HIGH:
#ifdef OCTEON_CPU0_INTERRUPTS
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip4_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
#else	/* OCTEON_CPU0_INTERRUPTS */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
#endif	/* OCTEON_CPU0_INTERRUPTS */
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}

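/*
 * Undo octeon_intr_establish(): clear the source's enable bit (on
 * every attached CPU for the IPL_SCHED and IPL_DDB/IPL_HIGH cases),
 * remove the handler from octciu_intrs[] and free it.
 */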
void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	struct cpu_softc *cpu;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;
	int cpunum;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] &= ~irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

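/*
 * CIU interrupt dispatch.  The highest pending line in 'ipending'
 * (IP4 via MIPS_INT_MASK_2, then IP3, then IP2) selects which
 * summary/enable pair to consult; every enabled, pending source is
 * counted and its handler run.  On MP kernels IPL_VM handlers are
 * called with the kernel lock held.
 */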
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	int bank;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend[2] = { 0, 0 };

	const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);

	if (ipending & MIPS_INT_MASK_2) {
		hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0)
		    & cpu->cpu_ip4_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip4_enable[1];
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0)
		    & cpu->cpu_ip3_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip3_enable[1];
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0)
		    & cpu->cpu_ip2_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip2_enable[1];
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	for (bank = 0; bank <= 1; bank++) {
		while (hwpend[bank] != 0) {
			const int bit = ffs64(hwpend[bank]) - 1;
			const int irq = (bank * 64) + bit;
			hwpend[bank] &= ~__BIT(bit);

			struct octeon_intrhand * const ih = octciu_intrs[irq];
			cpu->cpu_intr_evs[irq].ev_count++;
			if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
				if (ipl == IPL_VM) {
					KERNEL_LOCK(1, NULL);
#endif
					(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
					KERNEL_UNLOCK_ONE(NULL);
				} else {
					(*ih->ih_func)(ih->ih_arg);
				}
#endif
				KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
			}
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

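/*
 * Mailbox (IPI) interrupt handler, run at IPL_HIGH.  It reads and
 * acknowledges this CPU's pending mailbox bits, handles the watchdog
 * IPI via a softint (when wdog is configured), and hands any
 * remaining requests to ipi_process().
 */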
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	KASSERTMSG(ci->ci_cpl == IPL_HIGH,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

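/*
 * Send an IPI: record the request in ci_request_ipis and write the
 * target CPU's mailbox-set register, which raises its IP4 mailbox
 * interrupt.  A NULL 'ci' broadcasts to every CPU but the caller.
 */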
int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	const uint32_t ipi_mask = __BIT(req);

	atomic_or_64(&ci->ci_request_ipis, ipi_mask);

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);

	return 0;
}
#endif	/* MULTIPROCESSOR */