/*	$NetBSD: octeon_intr.c,v 1.3.2.3 2015/09/22 12:05:47 skrll Exp $	*/
2 /*
3 * Copyright 2001, 2002 Wasabi Systems, Inc.
4 * All rights reserved.
5 *
6 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed for the NetBSD Project by
19 * Wasabi Systems, Inc.
20 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
21 * or promote products derived from this software without specific prior
22 * written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */
40
41 #include "opt_octeon.h"
42 #include "cpunode.h"
43 #define __INTR_PRIVATE
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.3.2.3 2015/09/22 12:05:47 skrll Exp $");
47
48 #include <sys/param.h>
49 #include <sys/cpu.h>
50 #include <sys/systm.h>
51 #include <sys/device.h>
52 #include <sys/intr.h>
53 #include <sys/kernel.h>
54 #include <sys/kmem.h>
55 #include <sys/atomic.h>
56
57 #include <lib/libkern/libkern.h>
58
59 #include <mips/locore.h>
60
61 #include <mips/cavium/dev/octeon_ciureg.h>
62 #include <mips/cavium/octeonvar.h>
63
/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 * Indexed by IPL; each entry is ORed into the set of interrupt
 * lines blocked at that level (higher IPL == more bits masked).
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
	    [IPL_NONE] =		0,
	    [IPL_SOFTCLOCK] =		MIPS_SOFT_INT_MASK_0,
	    [IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	    /* INT0 carries the IPL_VM CIU sources */
	    [IPL_VM] =			MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	    [IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
					    | MIPS_INT_MASK_5,
	    [IPL_DDB] =			MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
					    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	    [IPL_HIGH] =		MIPS_INT_MASK,
	},
};
81
/*
 * Human-readable names for the CIU interrupt sources, indexed by CIU
 * summary-register bit number; used to label the per-CPU event counters
 * attached in octeon_intr_init().
 */
const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};
148
/*
 * Per-IRQ handler registration, created by octeon_intr_establish()
 * and freed by octeon_intr_disestablish().
 */
struct octeon_intrhand {
	int (*ih_func)(void *);		/* handler function */
	void *ih_arg;			/* argument passed to ih_func */
	int ih_irq;			/* CIU summary bit number */
	int ih_ipl;			/* IPL the handler runs at */
};
155
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

/*
 * Statically-registered handlers for the two CIU mailbox summary bits.
 * The low half (mbox bits 15..0) is serviced at IPL_SCHED, the high
 * half (bits 31..16) at IPL_HIGH; ih_arg carries the half of the mbox
 * register each instance owns.
 */
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif
175
/*
 * Installed handlers, indexed by CIU summary bit.  Written under
 * octeon_intr_lock; read lock-free from octeon_iointr() (publication
 * is ordered with membar_producer() in octeon_intr_establish()).
 */
struct octeon_intrhand *octeon_ciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};

/* Serializes establish/disestablish and the software enable masks. */
kmutex_t octeon_intr_lock;
184
/* Map a CIU physical register address to an uncached XKPHYS vaddr. */
#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

/*
 * CIU register addresses for core 0.  INT0/INT1/INT4 register banks
 * feed this core's MIPS interrupt lines 0/1/2 respectively.
 */
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};
211
#ifdef MULTIPROCESSOR
/*
 * CIU register addresses for core 1 (INT2/INT3/INT4 banks).
 * .cpu_ci is filled in when the secondary CPU attaches.
 */
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG1),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif
235
#ifdef DEBUG
/*
 * Boot-time sanity check of the CIU mailbox registers: verify that
 * setting bits in core 0's mbox SET register is reflected when reading
 * the SET address back, that the corresponding summary bits appear only
 * in core 0's summary register (not core 1's), and that writes to the
 * CLR address really clear them.  Runs on the boot CPU before IPIs are
 * enabled; asserts only (no state is left behind in the low half).
 */
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	const uint64_t sum_mbox_hi = __BIT(_CIU_INT_MBOX_31_16_SHIFT);

	/* Start from a clean slate on both cores. */
	mips64_sd_a64(mbox_clr0, ~0ULL);
	mips64_sd_a64(mbox_clr1, ~0ULL);

	uint32_t mbox0 = mips64_ld_a64(mbox_set0);
	uint32_t mbox1 = mips64_ld_a64(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	/* Set one low-half bit on core 0 only. */
	mips64_sd_a64(mbox_set0, __BIT(0));

	mbox0 = mips64_ld_a64(mbox_set0);
	mbox1 = mips64_ld_a64(mbox_set1);

	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips64_ld_a64(int_sum0);
	uint64_t sum1 = mips64_ld_a64(int_sum1);

	/* Low-half summary bit must be pending on core 0 only. */
	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	/* Clearing via the CLR address must take the bit away again. */
	mips64_sd_a64(mbox_clr0, mbox0);
	mbox0 = mips64_ld_a64(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	/* Repeat with a high-half bit (bit 16). */
	mips64_sd_a64(mbox_set0, __BIT(16));

	mbox0 = mips64_ld_a64(mbox_set0);
	mbox1 = mips64_ld_a64(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips64_ld_a64(int_sum0);
	sum1 = mips64_ld_a64(int_sum1);

	/* High-half summary bit pending on core 0 only. */
	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
	/* NOTE(review): the high-half bit set above is not cleared here
	 * before returning — presumably harmless before IPIs are enabled,
	 * but worth confirming. */
}
#endif
297
298 #undef X
299
300 void
301 octeon_intr_init(struct cpu_info *ci)
302 {
303 const int cpunum = cpu_index(ci);
304 const char * const xname = cpu_name(ci);
305 struct cpu_softc *cpu = ci->ci_softc;
306
307
308 if (ci->ci_cpuid == 0) {
309 KASSERT(ci->ci_softc == &octeon_cpu0_softc);
310 ipl_sr_map = octeon_ipl_sr_map;
311 mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
312 #ifdef MULTIPROCESSOR
313 mips_locoresw.lsw_send_ipi = octeon_send_ipi;
314 #endif
315 #ifdef DEBUG
316 octeon_mbox_test();
317 #endif
318 } else {
319 KASSERT(cpunum == 1);
320 #ifdef MULTIPROCESSOR
321 KASSERT(ci->ci_softc == &octeon_cpu1_softc);
322 #endif
323 }
324
325 #ifdef MULTIPROCESSOR
326 // Enable the IPIs
327 cpu->cpu_int0_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
328 cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
329 #endif
330
331 if (ci->ci_dev)
332 aprint_verbose_dev(ci->ci_dev,
333 "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
334 cpu->cpu_int0_enable0, cpu->cpu_int1_enable0, cpu->cpu_int2_enable0);
335
336 mips64_sd_a64(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
337 mips64_sd_a64(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
338 mips64_sd_a64(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);
339
340 mips64_sd_a64(cpu->cpu_int32_en, 0);
341
342 mips64_sd_a64(cpu->cpu_int0_en1, 0); // WDOG IPL2
343 mips64_sd_a64(cpu->cpu_int1_en1, 0); // WDOG IPL3
344 mips64_sd_a64(cpu->cpu_int2_en1, 0); // WDOG IPL4
345
346 #ifdef MULTIPROCESSOR
347 mips64_sd_a64(cpu->cpu_mbox_clr, __BITS(31,0));
348 #endif
349
350 for (size_t i = 0; i < NIRQS; i++) {
351 evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
352 EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
353 }
354 }
355
356 void
357 octeon_cal_timer(int corefreq)
358 {
359 /* Compute the number of cycles per second. */
360 curcpu()->ci_cpu_freq = corefreq;
361
362 /* Compute the number of ticks for hz. */
363 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
364
365 /* Compute the delay divisor and reciprical. */
366 curcpu()->ci_divisor_delay =
367 ((curcpu()->ci_cpu_freq + 500000) / 1000000);
368 #if 0
369 MIPS_SET_CI_RECIPRICAL(curcpu());
370 #endif
371
372 mips3_cp0_count_write(0);
373 mips3_cp0_compare_write(0);
374 }
375
376 void *
377 octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
378 {
379 struct octeon_intrhand *ih;
380
381 if (irq >= NIRQS)
382 panic("octeon_intr_establish: bogus IRQ %d", irq);
383 if (ipl < IPL_VM)
384 panic("octeon_intr_establish: bogus IPL %d", ipl);
385
386 ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
387 if (ih == NULL)
388 return (NULL);
389
390 ih->ih_func = func;
391 ih->ih_arg = arg;
392 ih->ih_irq = irq;
393 ih->ih_ipl = ipl;
394
395 mutex_enter(&octeon_intr_lock);
396
397 /*
398 * First, make it known.
399 */
400 KASSERTMSG(octeon_ciu_intrs[irq] == NULL, "irq %d in use! (%p)",
401 irq, octeon_ciu_intrs[irq]);
402
403 octeon_ciu_intrs[irq] = ih;
404 membar_producer();
405
406 /*
407 * Now enable it.
408 */
409 const uint64_t irq_mask = __BIT(irq);
410 struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
411 #if MULTIPROCESSOR
412 struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
413 #endif
414
415 switch (ipl) {
416 case IPL_VM:
417 cpu0->cpu_int0_enable0 |= irq_mask;
418 mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
419 break;
420
421 case IPL_SCHED:
422 cpu0->cpu_int1_enable0 |= irq_mask;
423 mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
424 #ifdef MULTIPROCESSOR
425 cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
426 mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
427 #endif
428 break;
429
430 case IPL_DDB:
431 case IPL_HIGH:
432 cpu0->cpu_int2_enable0 |= irq_mask;
433 mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
434 #ifdef MULTIPROCESSOR
435 cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
436 mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
437 #endif
438 break;
439 }
440
441 mutex_exit(&octeon_intr_lock);
442
443 return ih;
444 }
445
446 void
447 octeon_intr_disestablish(void *cookie)
448 {
449 struct octeon_intrhand * const ih = cookie;
450 const int irq = ih->ih_irq & (NIRQS-1);
451 const int ipl = ih->ih_ipl;
452
453 mutex_enter(&octeon_intr_lock);
454
455 /*
456 * First disable it.
457 */
458 const uint64_t irq_mask = ~__BIT(irq);
459 struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
460 #if MULTIPROCESSOR
461 struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
462 #endif
463
464 switch (ipl) {
465 case IPL_VM:
466 cpu0->cpu_int0_enable0 &= ~irq_mask;
467 mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
468 break;
469
470 case IPL_SCHED:
471 cpu0->cpu_int1_enable0 &= ~irq_mask;
472 mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
473 #ifdef MULTIPROCESSOR
474 cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
475 mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
476 #endif
477 break;
478
479 case IPL_DDB:
480 case IPL_HIGH:
481 cpu0->cpu_int2_enable0 &= ~irq_mask;
482 mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
483 #ifdef MULTIPROCESSOR
484 cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
485 mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
486 #endif
487 break;
488 }
489
490 /*
491 * Now remove it since we shouldn't get interrupts for it.
492 */
493 octeon_ciu_intrs[irq] = NULL;
494
495 mutex_exit(&octeon_intr_lock);
496
497 kmem_free(ih, sizeof(*ih));
498 }
499
/*
 * octeon_iointr:
 *
 *	Hardware interrupt dispatch, called from the MIPS interrupt glue
 *	with the IPL being serviced, the interrupted PC and the pending
 *	CAUSE bits (hardware interrupt lines only).  Reads the CIU
 *	summary register for the highest pending line, masks it with the
 *	software enable copy, and runs each pending source's handler.
 */
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	/* INT2 (IPL_DDB/HIGH) > INT1 (IPL_SCHED) > INT0 (IPL_VM). */
	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips64_ld_a64(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips64_ld_a64(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips64_ld_a64(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octeon_ciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
			/*
			 * On MP kernels, IPL_VM handlers run under the
			 * big kernel lock; higher-IPL handlers do not.
			 * The interleaved #ifdefs reduce the whole
			 * construct to a bare call on non-MP kernels.
			 */
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
546
547 #ifdef MULTIPROCESSOR
548 __CTASSERT(NIPIS < 16);
549
/*
 * octeon_ipi_intr:
 *
 *	Mailbox (IPI) interrupt handler.  `arg' is the half of the mbox
 *	register this instance owns: bits 15..0 (IPL_SCHED instance) or
 *	bits 31..16 (IPL_HIGH instance); see ipi_intrhands[].  Reads and
 *	acknowledges the pending mailbox bits, folds the high half onto
 *	the low half (one bit per IPI type), and processes the requests.
 *	Returns nonzero iff any IPI was handled.
 */
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	/* High-half mailbox bits must only be serviced at >= IPL_SCHED. */
	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	/* Reading the SET address returns the currently pending bits. */
	ipi_mask &= mips64_ld_a64(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	/* Acknowledge them in the CIU before processing. */
	mips64_sd_a64(cpu->cpu_mbox_clr, ipi_mask);

	/* Fold the high half down so each IPI is a single low bit. */
	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
596
597 int
598 octeon_send_ipi(struct cpu_info *ci, int req)
599 {
600 KASSERT(req < NIPIS);
601 if (ci == NULL) {
602 CPU_INFO_ITERATOR cii;
603 for (CPU_INFO_FOREACH(cii, ci)) {
604 if (ci != curcpu()) {
605 octeon_send_ipi(ci, req);
606 }
607 }
608 return 0;
609 }
610 KASSERT(cold || ci->ci_softc != NULL);
611 if (ci->ci_softc == NULL)
612 return -1;
613
614 struct cpu_softc * const cpu = ci->ci_softc;
615 uint64_t ipi_mask = __BIT(req);
616
617 if (__BIT(req) == (__BIT(IPI_SUSPEND)|__BIT(IPI_WDOG))) {
618 ipi_mask <<= 16;
619 }
620
621 mips64_sd_a64(cpu->cpu_mbox_set, ipi_mask);
622 return 0;
623 }
624 #endif /* MULTIPROCESSOR */
625