/* $NetBSD: octeon_intr.c,v 1.26 2022/03/26 19:38:00 riastradh Exp $ */
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for Cavium Octeon processors.
 */

#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.26 2022/03/26 19:38:00 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/xcall.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * XXX:
 * Force all interrupts (except clock intrs and IPIs) to be routed
 * through cpu0 until MP on MIPS is more stable.
 */
#define OCTEON_CPU0_INTERRUPTS


/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
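/*
 * The masks are cumulative: IPL_VM additionally blocks the CIU IP2
 * output (CPU INT0), IPL_SCHED the CIU IP3 output (INT1) plus the
 * CP0 count/compare clock (INT5), and IPL_HIGH all interrupt lines.
 */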
static const struct ipl_sr_map octeon_ipl_sr_map = {
        .sr_bits = {
            [IPL_NONE] = 0,
            [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
            [IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
            [IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
            [IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
                | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
            [IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
                | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
            [IPL_HIGH] = MIPS_INT_MASK,
        },
};

static const char * octeon_intrnames[NIRQS] = {
        "workq 0",
        "workq 1",
        "workq 2",
        "workq 3",
        "workq 4",
        "workq 5",
        "workq 6",
        "workq 7",
        "workq 8",
        "workq 9",
        "workq 10",
        "workq 11",
        "workq 12",
        "workq 13",
        "workq 14",
        "workq 15",
        "gpio 0",
        "gpio 1",
        "gpio 2",
        "gpio 3",
        "gpio 4",
        "gpio 5",
        "gpio 6",
        "gpio 7",
        "gpio 8",
        "gpio 9",
        "gpio 10",
        "gpio 11",
        "gpio 12",
        "gpio 13",
        "gpio 14",
        "gpio 15",
        "mbox 0-15",
        "mbox 16-31",
        "uart 0",
        "uart 1",
        "pci inta",
        "pci intb",
        "pci intc",
        "pci intd",
        "pci msi 0-15",
        "pci msi 16-31",
        "pci msi 32-47",
        "pci msi 48-63",
        "wdog summary",
        "twsi",
        "rml",
        "trace",
        "gmx drop",
        "reserved",
        "ipd drop",
        "reserved",
        "timer 0",
        "timer 1",
        "timer 2",
        "timer 3",
        "usb",
        "pcm/tdm",
        "mpi/spi",
        "reserved",
        "reserved",
        "reserved",
        "reserved",
        "reserved",
};

struct octeon_intrhand {
        int (*ih_func)(void *);
        void *ih_arg;
        int ih_irq;
        int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

static struct octeon_intrhand ipi_intrhands[2] = {
        [0] = {
                .ih_func = octeon_ipi_intr,
                .ih_arg = (void *)(uintptr_t)__BITS(15,0),
                .ih_irq = CIU_INT_MBOX_15_0,
                .ih_ipl = IPL_HIGH,
        },
        [1] = {
                .ih_func = octeon_ipi_intr,
                .ih_arg = (void *)(uintptr_t)__BITS(31,16),
                .ih_irq = CIU_INT_MBOX_31_16,
                .ih_ipl = IPL_SCHED,
        },
};

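/*
 * Priority at which each IPI type is delivered: IPL_SCHED IPIs travel
 * in mailbox bits 31:16 and are serviced at IP3; everything else uses
 * bits 15:0 and is serviced at IP4 (IPL_HIGH).
 */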
static int ipi_prio[NIPIS] = {
        [IPI_NOP] = IPL_HIGH,
        [IPI_AST] = IPL_HIGH,
        [IPI_SHOOTDOWN] = IPL_SCHED,
        [IPI_SYNCICACHE] = IPL_HIGH,
        [IPI_KPREEMPT] = IPL_HIGH,
        [IPI_SUSPEND] = IPL_HIGH,
        [IPI_HALT] = IPL_HIGH,
        [IPI_XCALL] = IPL_HIGH,
        [IPI_GENERIC] = IPL_HIGH,
        [IPI_WDOG] = IPL_HIGH,
};

#endif

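/*
 * Installed interrupt handlers, indexed by CIU irq number.  Writers
 * serialize on octeon_intr_lock; octeon_iointr() reads entries
 * locklessly via atomic_load_consume().
 */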
static struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
        [CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
        [CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};

static kmutex_t octeon_intr_lock;

#if defined(MULTIPROCESSOR)
#define OCTEON_NCPU     MAXCPUS
#else
#define OCTEON_NCPU     1
#endif

struct cpu_softc octeon_cpu_softc[OCTEON_NCPU];

static void
octeon_intr_setup(void)
{
        struct cpu_softc *cpu;
        int cpunum;

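/*
 * X(a) maps a CIU register's physical address into the XKPHYS segment
 * so the register can be accessed directly with mips3_ld()/mips3_sd().
 */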
#define X(a)    MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

        for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
                cpu = &octeon_cpu_softc[cpunum];

                cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum));
                cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum));
                cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum));

                cpu->cpu_int_sum1 = X(CIU_INT_SUM1);

                cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum));
                cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum));
                cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum));

                cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum));
                cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum));
                cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum));

                cpu->cpu_wdog = X(CIU_WDOG(cpunum));
                cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum));

#ifdef MULTIPROCESSOR
                cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum));
                cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum));
#endif
        }

#undef X
}

void
octeon_intr_init(struct cpu_info *ci)
{
        const int cpunum = cpu_index(ci);
        struct cpu_softc *cpu = &octeon_cpu_softc[cpunum];
        const char * const xname = cpu_name(ci);
        int bank;

        cpu->cpu_ci = ci;
        ci->ci_softc = cpu;

        KASSERT(cpunum == ci->ci_cpuid);

        if (ci->ci_cpuid == 0) {
                ipl_sr_map = octeon_ipl_sr_map;
                mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
                mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif

                octeon_intr_setup();
        }

#ifdef MULTIPROCESSOR
        // Enable the IPIs
        cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_15_0);
        cpu->cpu_ip3_enable[0] |= __BIT(CIU_INT_MBOX_31_16);
#endif

        if (ci->ci_dev) {
                for (bank = 0; bank < NBANKS; bank++) {
                        aprint_verbose_dev(ci->ci_dev,
287 1.16 jmcneill "enabling intr masks %u "
288 1.16 jmcneill " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
                            bank,
                            cpu->cpu_ip2_enable[bank],
                            cpu->cpu_ip3_enable[bank],
                            cpu->cpu_ip4_enable[bank]);
                }
        }

        for (bank = 0; bank < NBANKS; bank++) {
                mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
                mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
                mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
        }

#ifdef MULTIPROCESSOR
        mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

        for (int i = 0; i < NIRQS; i++) {
                if (octeon_intrnames[i] == NULL)
                        octeon_intrnames[i] = kmem_asprintf("irq %d", i);
                evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
                    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
        }
}

void
octeon_cal_timer(int corefreq)
{
        /* Compute the number of cycles per second. */
        curcpu()->ci_cpu_freq = corefreq;

        /* Compute the number of ticks for hz. */
        curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

        /* Compute the delay divisor and reciprocal. */
        curcpu()->ci_divisor_delay =
            ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
        MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

        mips3_cp0_count_write(0);
        mips3_cp0_compare_write(0);
}

void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
        struct octeon_intrhand *ih;
        struct cpu_softc *cpu;
#ifndef OCTEON_CPU0_INTERRUPTS
        int cpunum;
#endif

        if (irq >= NIRQS)
                panic("octeon_intr_establish: bogus IRQ %d", irq);
        if (ipl < IPL_VM)
                panic("octeon_intr_establish: bogus IPL %d", ipl);

        ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
        if (ih == NULL)
                return (NULL);

        ih->ih_func = func;
        ih->ih_arg = arg;
        ih->ih_irq = irq;
        ih->ih_ipl = ipl;

        mutex_enter(&octeon_intr_lock);

        /*
         * First, make it known.
         */
        KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
            irq, octciu_intrs[irq]);

        atomic_store_release(&octciu_intrs[irq], ih);

        /*
         * Now enable it.
         */
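        /*
         * irqs 0..63 live in CIU bank 0 (the EN0 registers), irqs
         * 64..127 in bank 1 (the EN1 registers); irq_mask selects
         * the bit within that bank.
         */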
        const int bank = irq / 64;
        const uint64_t irq_mask = __BIT(irq % 64);

        switch (ipl) {
        case IPL_VM:
                cpu = &octeon_cpu_softc[0];
                cpu->cpu_ip2_enable[bank] |= irq_mask;
                mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
                break;

        case IPL_SCHED:
#ifdef OCTEON_CPU0_INTERRUPTS
                cpu = &octeon_cpu_softc[0];
                cpu->cpu_ip3_enable[bank] |= irq_mask;
                mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
#else   /* OCTEON_CPU0_INTERRUPTS */
                for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
                        cpu = &octeon_cpu_softc[cpunum];
                        if (cpu->cpu_ci == NULL)
                                break;
                        cpu->cpu_ip3_enable[bank] |= irq_mask;
                        mips3_sd(cpu->cpu_ip3_en[bank],
                            cpu->cpu_ip3_enable[bank]);
                }
#endif  /* OCTEON_CPU0_INTERRUPTS */
                break;

        case IPL_DDB:
        case IPL_HIGH:
#ifdef OCTEON_CPU0_INTERRUPTS
                cpu = &octeon_cpu_softc[0];
                cpu->cpu_ip4_enable[bank] |= irq_mask;
                mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
#else   /* OCTEON_CPU0_INTERRUPTS */
                for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
                        cpu = &octeon_cpu_softc[cpunum];
                        if (cpu->cpu_ci == NULL)
                                break;
                        cpu->cpu_ip4_enable[bank] |= irq_mask;
                        mips3_sd(cpu->cpu_ip4_en[bank],
                            cpu->cpu_ip4_enable[bank]);
                }
#endif  /* OCTEON_CPU0_INTERRUPTS */
                break;
        }

        mutex_exit(&octeon_intr_lock);

        return ih;
}

void
octeon_intr_disestablish(void *cookie)
{
        struct octeon_intrhand * const ih = cookie;
        struct cpu_softc *cpu;
        const int irq = ih->ih_irq & (NIRQS-1);
        const int ipl = ih->ih_ipl;
        int cpunum;

        mutex_enter(&octeon_intr_lock);

        /*
         * First disable it.
         */
        const int bank = irq / 64;
        const uint64_t irq_mask = __BIT(irq % 64);

        switch (ipl) {
        case IPL_VM:
                cpu = &octeon_cpu_softc[0];
                cpu->cpu_ip2_enable[bank] &= ~irq_mask;
                mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
                break;

        case IPL_SCHED:
                for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
                        cpu = &octeon_cpu_softc[cpunum];
                        if (cpu->cpu_ci == NULL)
                                break;
                        cpu->cpu_ip3_enable[bank] &= ~irq_mask;
                        mips3_sd(cpu->cpu_ip3_en[bank],
                            cpu->cpu_ip3_enable[bank]);
                }
                break;

        case IPL_DDB:
        case IPL_HIGH:
                for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
                        cpu = &octeon_cpu_softc[cpunum];
                        if (cpu->cpu_ci == NULL)
                                break;
                        cpu->cpu_ip4_enable[bank] &= ~irq_mask;
                        mips3_sd(cpu->cpu_ip4_en[bank],
                            cpu->cpu_ip4_enable[bank]);
                }
                break;
        }

        atomic_store_relaxed(&octciu_intrs[irq], NULL);

        mutex_exit(&octeon_intr_lock);

        /*
         * Wait until the interrupt handler is no longer running on all
         * CPUs before freeing ih and returning.
         */
        xc_barrier(0);
        kmem_free(ih, sizeof(*ih));
}

void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
        struct cpu_info * const ci = curcpu();
        struct cpu_softc * const cpu = ci->ci_softc;
        int bank;

        KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
        KASSERT((ipending & ~MIPS_INT_MASK) == 0);
        KASSERT(ipending & MIPS_HARD_INT_MASK);
        uint64_t hwpend[2] = { 0, 0 };

        const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);

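        /*
         * CPU INT2 carries the CIU IP4 output (IPL_HIGH), INT1 the IP3
         * output (IPL_SCHED) and INT0 the IP2 output (IPL_VM).  Gather
         * the pending bits for the highest asserted line, masked by
         * what we enabled at that level.
         */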
        if (ipending & MIPS_INT_MASK_2) {
                hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0)
                    & cpu->cpu_ip4_enable[0];
                hwpend[1] = sum1 & cpu->cpu_ip4_enable[1];
        } else if (ipending & MIPS_INT_MASK_1) {
                hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0)
                    & cpu->cpu_ip3_enable[0];
                hwpend[1] = sum1 & cpu->cpu_ip3_enable[1];
        } else if (ipending & MIPS_INT_MASK_0) {
                hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0)
                    & cpu->cpu_ip2_enable[0];
                hwpend[1] = sum1 & cpu->cpu_ip2_enable[1];
        } else {
                panic("octeon_iointr: unexpected ipending %#x", ipending);
        }
        for (bank = 0; bank <= 1; bank++) {
                while (hwpend[bank] != 0) {
                        const int bit = ffs64(hwpend[bank]) - 1;
                        const int irq = (bank * 64) + bit;
                        hwpend[bank] &= ~__BIT(bit);

                        struct octeon_intrhand * const ih =
                            atomic_load_consume(&octciu_intrs[irq]);
                        cpu->cpu_intr_evs[irq].ev_count++;
                        if (__predict_true(ih != NULL)) {
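                                /*
                                 * On MP kernels, IPL_VM handlers run
                                 * under the big kernel lock; IPL_SCHED
                                 * and higher handlers run unlocked.
                                 */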
#ifdef MULTIPROCESSOR
                                if (ipl == IPL_VM) {
                                        KERNEL_LOCK(1, NULL);
#endif
                                        (*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
                                        KERNEL_UNLOCK_ONE(NULL);
                                } else {
                                        (*ih->ih_func)(ih->ih_arg);
                                }
#endif
                                KDASSERT(mips_cp0_status_read() &
                                    MIPS_SR_INT_IE);
                        }
                }
        }
        KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

int
octeon_ipi_intr(void *arg)
{
        struct cpu_info * const ci = curcpu();
        struct cpu_softc * const cpu = ci->ci_softc;
        const uint32_t mbox_mask = (uintptr_t)arg;
        uint32_t ipi_mask = mbox_mask;

        KASSERTMSG((mbox_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
            "mbox_mask %#"PRIx32" cpl %d", mbox_mask, ci->ci_cpl);

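        /*
         * mbox_mask selects the mailbox half this handler services:
         * bits 15:0 for IPL_HIGH IPIs, bits 31:16 for IPL_SCHED IPIs.
         * Reading the set register yields the pending bits; ack only
         * the ones we take here.
         */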
        ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
        if (ipi_mask == 0)
                return 0;

        mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

        KASSERT(__SHIFTOUT(ipi_mask, mbox_mask) < __BIT(NIPIS));

#if NWDOG > 0
        // Handle WDOG requests ourselves.
        if (ipi_mask & __BIT(IPI_WDOG)) {
                softint_schedule(cpu->cpu_wdog_sih);
                atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
                ipi_mask &= ~__BIT(IPI_WDOG);
                ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
                if (__predict_true(ipi_mask == 0))
                        return 1;
        }
#endif

        /* if the request is clear, it was previously processed */
        if ((ci->ci_request_ipis & ipi_mask) == 0)
                return 0;

        atomic_or_64(&ci->ci_active_ipis, ipi_mask);
        atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

        ipi_process(ci, __SHIFTOUT(ipi_mask, mbox_mask));

        atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

        return 1;
}

int
octeon_send_ipi(struct cpu_info *ci, int req)
{
        KASSERT(req < NIPIS);
        if (ci == NULL) {
                CPU_INFO_ITERATOR cii;
                for (CPU_INFO_FOREACH(cii, ci)) {
                        if (ci != curcpu()) {
                                octeon_send_ipi(ci, req);
                        }
                }
                return 0;
        }
        KASSERT(cold || ci->ci_softc != NULL);
        if (ci->ci_softc == NULL)
                return -1;

        struct cpu_softc * const cpu = ci->ci_softc;
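        /*
         * Place IPL_SCHED IPIs in the upper mailbox half (bits 31:16)
         * and all others in the lower half, matching the two mailbox
         * interrupt handlers registered above.
         */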
        const u_int ipi_shift = ipi_prio[req] == IPL_SCHED ? 16 : 0;
        const uint32_t ipi_mask = __BIT(req + ipi_shift);

        atomic_or_64(&ci->ci_request_ipis, ipi_mask);

        mips3_sd(cpu->cpu_mbox_set, ipi_mask);

        return 0;
}
#endif  /* MULTIPROCESSOR */