/*	$NetBSD: octeon_intr.c,v 1.23 2020/08/17 21:00:29 jmcneill Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.23 2020/08/17 21:00:29 jmcneill Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * XXX:
 * Force all interrupts (except clock intrs and IPIs) to be routed
 * through cpu0 until MP on MIPS is more stable.
 */
#define OCTEON_CPU0_INTERRUPTS


/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
		[IPL_NONE] = 0,
		[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
		[IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
		[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
		[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_HIGH] = MIPS_INT_MASK,
	},
};
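
/*
 * For example, raising to IPL_VM masks the two soft interrupt lines
 * plus hardware line 0 (the CIU IP2 output), while IPL_HIGH masks
 * every interrupt line.
 */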

static const char * octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

static struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_HIGH,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = CIU_INT_MBOX_31_16,
		.ih_ipl = IPL_SCHED,
	},
};

static int ipi_prio[NIPIS] = {
	[IPI_NOP] = IPL_HIGH,
	[IPI_AST] = IPL_HIGH,
	[IPI_SHOOTDOWN] = IPL_SCHED,
	[IPI_SYNCICACHE] = IPL_HIGH,
	[IPI_KPREEMPT] = IPL_HIGH,
	[IPI_SUSPEND] = IPL_HIGH,
	[IPI_HALT] = IPL_HIGH,
	[IPI_XCALL] = IPL_HIGH,
	[IPI_GENERIC] = IPL_HIGH,
	[IPI_WDOG] = IPL_HIGH,
};

#endif

static struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
	[CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};

static kmutex_t octeon_intr_lock;

#if defined(MULTIPROCESSOR)
#define	OCTEON_NCPU	MAXCPUS
#else
#define	OCTEON_NCPU	1
#endif

struct cpu_softc octeon_cpu_softc[OCTEON_NCPU];

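/*
 * Precompute the XKPHYS addresses of each CPU's CIU summary, enable,
 * watchdog and mailbox registers so the interrupt paths below can
 * reach them with plain mips3_ld()/mips3_sd() accesses.
 */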
static void
octeon_intr_setup(void)
{
	struct cpu_softc *cpu;
	int cpunum;

#define	X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

	for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
		cpu = &octeon_cpu_softc[cpunum];

		cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum));
		cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum));
		cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum));

		cpu->cpu_int_sum1 = X(CIU_INT_SUM1);

		cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum));
		cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum));
		cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum));

		cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum));
		cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum));
		cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum));

		cpu->cpu_wdog = X(CIU_WDOG(cpunum));
		cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum));

#ifdef MULTIPROCESSOR
		cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum));
		cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum));
#endif
	}

#undef X

}

void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	struct cpu_softc *cpu = &octeon_cpu_softc[cpunum];
	const char * const xname = cpu_name(ci);
	int bank;

	cpu->cpu_ci = ci;
	ci->ci_softc = cpu;

	KASSERT(cpunum == ci->ci_cpuid);

	if (ci->ci_cpuid == 0) {
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif

		octeon_intr_setup();
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_15_0);
	cpu->cpu_ip3_enable[0] |= __BIT(CIU_INT_MBOX_31_16);
#endif

	if (ci->ci_dev) {
		for (bank = 0; bank < NBANKS; bank++) {
			aprint_verbose_dev(ci->ci_dev,
			    "enabling intr masks %u "
			    " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
			    bank,
			    cpu->cpu_ip2_enable[bank],
			    cpu->cpu_ip3_enable[bank],
			    cpu->cpu_ip4_enable[bank]);
		}
	}

	for (bank = 0; bank < NBANKS; bank++) {
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
	}

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (int i = 0; i < NIRQS; i++) {
		if (octeon_intrnames[i] == NULL)
			octeon_intrnames[i] = kmem_asprintf("irq %d", i);
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

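/*
 * For example, a 500 MHz core clock with hz = 100 gives
 * ci_cycles_per_hz = 5000000 and ci_divisor_delay = 500
 * (cycles per microsecond, rounded to nearest).
 */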
void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

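/*
 * Register a handler for a CIU interrupt and enable it: IPL_VM uses
 * the IP2 enables, IPL_SCHED the IP3 enables, and IPL_DDB/IPL_HIGH
 * the IP4 enables.  With OCTEON_CPU0_INTERRUPTS defined (see above),
 * the interrupt is routed to cpu0 only.
 */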
void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;
	struct cpu_softc *cpu;
#ifndef OCTEON_CPU0_INTERRUPTS
	int cpunum;
#endif

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octciu_intrs[irq]);

	octciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
#ifdef OCTEON_CPU0_INTERRUPTS
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip3_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
#else	/* OCTEON_CPU0_INTERRUPTS */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
#endif	/* OCTEON_CPU0_INTERRUPTS */
		break;

	case IPL_DDB:
	case IPL_HIGH:
#ifdef OCTEON_CPU0_INTERRUPTS
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip4_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
#else	/* OCTEON_CPU0_INTERRUPTS */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
#endif	/* OCTEON_CPU0_INTERRUPTS */
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}
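
/*
 * A typical caller looks roughly like this (a sketch only; the irq
 * constant, handler, and softc names are illustrative, not taken from
 * this file):
 *
 *	sc->sc_ih = octeon_intr_establish(CIU_INT_UART_0, IPL_VM,
 *	    mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 */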

void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	struct cpu_softc *cpu;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;
	int cpunum;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] &= ~irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

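/*
 * Dispatch pending CIU interrupts: "ipending" holds the CP0 cause
 * bits, so INT2 selects the IP4 (IPL_HIGH) summaries, INT1 the IP3
 * (IPL_SCHED) summaries and INT0 the IP2 (IPL_VM) summaries; each
 * pending, enabled bit is then handed to its registered handler.
 */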
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	int bank;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend[2] = { 0, 0 };

	const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);

	if (ipending & MIPS_INT_MASK_2) {
		hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0)
		    & cpu->cpu_ip4_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip4_enable[1];
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0)
		    & cpu->cpu_ip3_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip3_enable[1];
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0)
		    & cpu->cpu_ip2_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip2_enable[1];
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	for (bank = 0; bank <= 1; bank++) {
		while (hwpend[bank] != 0) {
			const int bit = ffs64(hwpend[bank]) - 1;
			const int irq = (bank * 64) + bit;
			hwpend[bank] &= ~__BIT(bit);

			struct octeon_intrhand * const ih = octciu_intrs[irq];
			cpu->cpu_intr_evs[irq].ev_count++;
			if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
				if (ipl == IPL_VM) {
					KERNEL_LOCK(1, NULL);
#endif
					(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
					KERNEL_UNLOCK_ONE(NULL);
				} else {
					(*ih->ih_func)(ih->ih_arg);
				}
#endif
				KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
			}
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);
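
/*
 * Mailbox (IPI) interrupt handler.  The 32-bit mailbox is split in
 * half: bits 15..0 carry IPL_HIGH IPIs and bits 31..16 carry
 * IPL_SCHED IPIs (see ipi_intrhands above), so every IPI number must
 * fit in a 16-bit half; "arg" is the mask selecting the half this
 * handler services.
 */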

int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	const uint32_t mbox_mask = (uintptr_t) arg;
	uint32_t ipi_mask = mbox_mask;

	KASSERTMSG((mbox_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "mbox_mask %#"PRIx32" cpl %d", mbox_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	KASSERT(__SHIFTOUT(ipi_mask, mbox_mask) < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, __SHIFTOUT(ipi_mask, mbox_mask));

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

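/*
 * Post IPI "req" to "ci", or broadcast it to every other CPU when
 * "ci" is NULL: the request is recorded in ci_request_ipis and the
 * matching mailbox bit is set to raise the interrupt on the target.
 */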
int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	const u_int ipi_shift = ipi_prio[req] == IPL_SCHED ? 16 : 0;
	const uint32_t ipi_mask = __BIT(req + ipi_shift);

	atomic_or_64(&ci->ci_request_ipis, ipi_mask);

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);

	return 0;
}
#endif	/* MULTIPROCESSOR */