/*	$NetBSD: octeon_intr.c,v 1.9 2016/11/28 04:18:08 mrg Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_octeon.h"
#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.9 2016/11/28 04:18:08 mrg Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
	    [IPL_NONE] = 0,
	    [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
	    [IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
	    [IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	    [IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		| MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	    [IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		| MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	    [IPL_HIGH] = MIPS_INT_MASK,
	},
};

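/*
 * Names for the CIU interrupt sources, indexed by their bit position in
 * the CIU_INT*_SUM0 registers; used only as event-counter labels below.
 */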
const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

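/*
 * IPIs arrive through the two CIU mailbox summary sources: mailbox bits
 * 0-15 are taken at IPL_SCHED and bits 16-31 at IPL_HIGH.  The ih_arg
 * of each handler is the mask of mailbox bits that instance services.
 */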
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif

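/*
 * One handler slot per CIU interrupt source, indexed by that source's
 * bit position in the SUM0/EN0 registers.  The mailbox slots are wired
 * to the IPI handlers at compile time; everything else is filled in by
 * octeon_intr_establish().
 */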
struct octeon_intrhand *octeon_ciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};

kmutex_t octeon_intr_lock;

#define	X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

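/*
 * Per-core view of the CIU: the summary and enable registers this core
 * uses for each of its three hardware interrupt lines (int0/int1/int2),
 * plus its watchdog, poke and (on MULTIPROCESSOR kernels) mailbox
 * registers.  Which CIU_INT*_ registers belong to which core follows
 * the assignments below.
 */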
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};

#ifdef MULTIPROCESSOR
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG1),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif

#ifdef DEBUG
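/*
 * Sanity check of the CIU mailboxes, run once from octeon_intr_init()
 * on the primary core: verify that setting and clearing mailbox bits
 * for core 0 shows up in the expected MBOX summary bits of core 0's
 * SUM0 register and does not leak into core 1's.
 */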
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	const uint64_t sum_mbox_hi = __BIT(_CIU_INT_MBOX_31_16_SHIFT);

	mips3_sd(mbox_clr0, ~0ULL);
	mips3_sd(mbox_clr1, ~0ULL);

	uint32_t mbox0 = mips3_ld(mbox_set0);
	uint32_t mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	mips3_sd(mbox_set0, __BIT(0));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips3_ld(int_sum0);
	uint64_t sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	mips3_sd(mbox_clr0, mbox0);
	mbox0 = mips3_ld(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	mips3_sd(mbox_set0, __BIT(16));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips3_ld(int_sum0);
	sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif

#undef X

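/*
 * Per-core interrupt initialization, called once on each CPU.  The
 * primary core additionally installs the IPL-to-SR map, the interrupt
 * mutex and the IPI hook.  The core's three EN0 registers are then
 * loaded with its current enable masks (just the mailbox bits on
 * MULTIPROCESSOR kernels), the EN1 (watchdog) registers are cleared,
 * and an event counter is attached for every CIU source.
 */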
void
octeon_intr_init(struct cpu_info *ci)
{
#ifdef DIAGNOSTIC
	const int cpunum = cpu_index(ci);
#endif
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;

	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_int1_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0,
		    cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips3_sd(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips3_sd(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int32_en, 0);

	mips3_sd(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips3_sd(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips3_sd(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

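/*
 * Record the core clock frequency and derive the CP0 count/compare
 * parameters from it (ticks per hz, delay divisor), then reset the
 * count and compare registers.
 */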
void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

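/*
 * Establish a handler for CIU source 'irq' at priority 'ipl' (IPL_VM,
 * IPL_SCHED, or IPL_DDB/IPL_HIGH).  The handler is recorded in
 * octeon_ciu_intrs[] and the source is enabled in the EN0 register of
 * the interrupt line corresponding to that priority; IPL_SCHED and
 * higher sources are enabled on both cores on MULTIPROCESSOR kernels.
 */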
void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octeon_ciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octeon_ciu_intrs[irq]);

	octeon_ciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 |= irq_mask;
		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 |= irq_mask;
		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 |= irq_mask;
		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}

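/*
 * Disestablish a handler: disable its source in the EN0 register(s) it
 * was enabled in, then clear its octeon_ciu_intrs[] slot and free it.
 */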
void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 &= ~irq_mask;
		mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 &= ~irq_mask;
		mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 &= ~irq_mask;
		mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octeon_ciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

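/*
 * Hardware interrupt dispatch.  The pending CP0 interrupt line selects
 * which of this core's CIU summary registers to read; each pending,
 * enabled source is counted and its handler run (under the kernel lock
 * for IPL_VM handlers on MULTIPROCESSOR kernels).
 */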
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips3_ld(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips3_ld(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips3_ld(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octeon_ciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

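/*
 * Mailbox (IPI) interrupt handler.  'arg' is the mask of mailbox bits
 * this instance services (0-15 at IPL_SCHED, 16-31 at IPL_HIGH).  The
 * pending bits are acknowledged in the mailbox, folded down into IPI
 * numbers and handed to ipi_process(); watchdog IPIs are handled here
 * by scheduling the per-cpu watchdog soft interrupt.
 */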
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

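/*
 * Send IPI 'req' to cpu 'ci', or to every other cpu if 'ci' is NULL.
 * The request is recorded in ci_request_ipis and the matching mailbox
 * bit is set on the target core; IPI_SUSPEND and IPI_WDOG use the
 * high-half mailbox bits so they are taken at IPL_HIGH.
 */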
int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = __BIT(req);

	atomic_or_64(&ci->ci_request_ipis, ipi_mask);
	if (req == IPI_SUSPEND || req == IPI_WDOG) {
		ipi_mask <<= 16;
	}

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);
	return 0;
}
#endif /* MULTIPROCESSOR */