/*	$NetBSD: octeon_intr.c,v 1.18 2020/07/17 21:59:30 jmcneill Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.18 2020/07/17 21:59:30 jmcneill Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
		[IPL_NONE] = 0,
		[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
		[IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
		[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
		[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_HIGH] = MIPS_INT_MASK,
	},
};
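
/*
 * This table is copied into the global ipl_sr_map at attach time (see
 * octeon_intr_init() below); the MIPS spl*() primitives then clear the
 * listed Status-register bits whenever the system raises to that IPL,
 * masking the corresponding interrupt sources.
 */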

const char * octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

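/*
 * One handler is registered per CIU interrupt source; the pointer to it
 * also serves as the opaque cookie returned by octeon_intr_establish().
 */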
struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = CIU_INT_MBOX_31_16,
		.ih_ipl = IPL_HIGH,
	},
};

#define	OCTEON_IPI_SCHED(n)	__BIT((n) + 0)
#define	OCTEON_IPI_HIGH(n)	__BIT((n) + 16)

static uint64_t octeon_ipi_mask[NIPIS] = {
	[IPI_NOP] = OCTEON_IPI_SCHED(IPI_NOP),
	[IPI_AST] = OCTEON_IPI_SCHED(IPI_AST),
	[IPI_SHOOTDOWN] = OCTEON_IPI_SCHED(IPI_SHOOTDOWN),
	[IPI_SYNCICACHE] = OCTEON_IPI_SCHED(IPI_SYNCICACHE),
	[IPI_KPREEMPT] = OCTEON_IPI_SCHED(IPI_KPREEMPT),
	[IPI_SUSPEND] = OCTEON_IPI_HIGH(IPI_SUSPEND),
	[IPI_HALT] = OCTEON_IPI_HIGH(IPI_HALT),
	[IPI_XCALL] = OCTEON_IPI_HIGH(IPI_XCALL),
	[IPI_GENERIC] = OCTEON_IPI_HIGH(IPI_GENERIC),
	[IPI_WDOG] = OCTEON_IPI_HIGH(IPI_WDOG),
};
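
/*
 * Each IPI type owns one mailbox bit.  Types that can be handled at
 * IPL_SCHED use bits 0-15 (delivered as CIU_INT_MBOX_15_0); urgent
 * types use bits 16-31 (CIU_INT_MBOX_31_16, taken at IPL_HIGH), per
 * the ipi_intrhands[] table above.
 */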
#endif

struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
	[CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};

kmutex_t octeon_intr_lock;

#if defined(MULTIPROCESSOR)
#define	OCTEON_NCPU	MAXCPUS
#else
#define	OCTEON_NCPU	1
#endif

struct cpu_softc octeon_cpu_softc[OCTEON_NCPU];

static void
octeon_intr_setup(void)
{
	struct cpu_softc *cpu;
	int cpunum;

#define	X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))
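
	/*
	 * The CIU registers are reached through XKPHYS direct-mapped
	 * addresses (OCTEON_CCA_NONE, i.e. uncached); X() converts a
	 * physical register address into one, which mips3_ld() and
	 * mips3_sd() then access directly.
	 */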

	for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
		cpu = &octeon_cpu_softc[cpunum];

		cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum));
		cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum));
		cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum));

		cpu->cpu_int_sum1 = X(CIU_INT_SUM1);

		cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum));
		cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum));
		cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum));

		cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum));
		cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum));
		cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum));

		cpu->cpu_wdog = X(CIU_WDOG(cpunum));
		cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum));

#ifdef MULTIPROCESSOR
		cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum));
		cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum));
#endif
	}

#undef X
}

void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	struct cpu_softc *cpu = &octeon_cpu_softc[cpunum];
	const char * const xname = cpu_name(ci);
	int bank;

	cpu->cpu_ci = ci;
	ci->ci_softc = cpu;

	KASSERT(cpunum == ci->ci_cpuid);

	if (ci->ci_cpuid == 0) {
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif

		octeon_intr_setup();
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_ip3_enable[0] |= __BIT(CIU_INT_MBOX_15_0);
	cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_31_16);
#endif

	if (ci->ci_dev) {
		for (bank = 0; bank < NBANKS; bank++) {
			aprint_verbose_dev(ci->ci_dev,
			    "enabling intr masks %u "
			    " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
			    bank,
			    cpu->cpu_ip2_enable[bank],
			    cpu->cpu_ip3_enable[bank],
			    cpu->cpu_ip4_enable[bank]);
		}
	}

	for (bank = 0; bank < NBANKS; bank++) {
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
	}

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (int i = 0; i < NIRQS; i++) {
		if (octeon_intrnames[i] == NULL)
			octeon_intrnames[i] = kmem_asprintf("irq %d", i);
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
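	/*
	 * For example, a 500 MHz core with hz = 100 yields
	 * ci_cycles_per_hz = 5000000 and ci_divisor_delay = 500
	 * (cycles per microsecond).
	 */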
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;
	struct cpu_softc *cpu;
	int cpunum;

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octciu_intrs[irq]);

	octciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}
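
/*
 * Example usage (a minimal sketch; "sc", foo_intr(), and the IRQ
 * constant are illustrative names, not definitions from this file):
 *
 *	sc->sc_ih = octeon_intr_establish(CIU_INT_UART_0, IPL_VM,
 *	    foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error(": couldn't establish interrupt\n");
 */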

void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	struct cpu_softc *cpu;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;
	int cpunum;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] &= ~irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	int bank;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend[2] = { 0, 0 };

	const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);

	if (ipending & MIPS_INT_MASK_2) {
		hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0)
		    & cpu->cpu_ip4_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip4_enable[1];
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0)
		    & cpu->cpu_ip3_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip3_enable[1];
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0)
		    & cpu->cpu_ip2_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip2_enable[1];
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
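
	/*
	 * Bank 0 covers CIU sources 0-63 (SUM0), bank 1 sources 64-127
	 * (SUM1).  Dispatch every source that is both pending and
	 * enabled, lowest bit first.
	 */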
	for (bank = 0; bank <= 1; bank++) {
		while (hwpend[bank] != 0) {
			const int bit = ffs64(hwpend[bank]) - 1;
			const int irq = (bank * 64) + bit;
			hwpend[bank] &= ~__BIT(bit);

			struct octeon_intrhand * const ih = octciu_intrs[irq];
			cpu->cpu_intr_evs[irq].ev_count++;
			if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
				if (ipl == IPL_VM) {
					KERNEL_LOCK(1, NULL);
#endif
					(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
					KERNEL_UNLOCK_ONE(NULL);
				} else {
					(*ih->ih_func)(ih->ih_arg);
				}
#endif
				KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
			}
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

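	/*
	 * Clear the mailbox bits before running the handlers so that an
	 * IPI posted while we are processing re-raises the interrupt
	 * rather than being lost.
	 */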
	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
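	/* A NULL ci means broadcast: send to every CPU except ourselves. */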
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	const uint64_t ipi_mask = octeon_ipi_mask[req];

	atomic_or_64(&ci->ci_request_ipis, ipi_mask);

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);

	return 0;
}
#endif	/* MULTIPROCESSOR */