octeon_intr.c revision 1.13 1 1.13 riastrad /* $NetBSD: octeon_intr.c,v 1.13 2020/06/20 18:48:28 riastradh Exp $ */
2 1.1 hikaru /*
3 1.1 hikaru * Copyright 2001, 2002 Wasabi Systems, Inc.
4 1.1 hikaru * All rights reserved.
5 1.1 hikaru *
6 1.1 hikaru * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
7 1.1 hikaru *
8 1.1 hikaru * Redistribution and use in source and binary forms, with or without
9 1.1 hikaru * modification, are permitted provided that the following conditions
10 1.1 hikaru * are met:
11 1.1 hikaru * 1. Redistributions of source code must retain the above copyright
12 1.1 hikaru * notice, this list of conditions and the following disclaimer.
13 1.1 hikaru * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 hikaru * notice, this list of conditions and the following disclaimer in the
15 1.1 hikaru * documentation and/or other materials provided with the distribution.
16 1.1 hikaru * 3. All advertising materials mentioning features or use of this software
17 1.1 hikaru * must display the following acknowledgement:
18 1.1 hikaru * This product includes software developed for the NetBSD Project by
19 1.1 hikaru * Wasabi Systems, Inc.
20 1.1 hikaru * 4. The name of Wasabi Systems, Inc. may not be used to endorse
21 1.1 hikaru * or promote products derived from this software without specific prior
22 1.1 hikaru * written permission.
23 1.1 hikaru *
24 1.1 hikaru * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
25 1.1 hikaru * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 1.1 hikaru * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 1.1 hikaru * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
28 1.1 hikaru * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 1.1 hikaru * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 1.1 hikaru * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 1.1 hikaru * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 1.1 hikaru * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 1.1 hikaru * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 1.1 hikaru * POSSIBILITY OF SUCH DAMAGE.
35 1.1 hikaru */
36 1.1 hikaru
/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */
40 1.1 hikaru
41 1.1 hikaru #include "opt_octeon.h"
42 1.6 skrll #include "opt_multiprocessor.h"
43 1.6 skrll
44 1.4 matt #include "cpunode.h"
45 1.1 hikaru #define __INTR_PRIVATE
46 1.1 hikaru
47 1.1 hikaru #include <sys/cdefs.h>
48 1.13 riastrad __KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.13 2020/06/20 18:48:28 riastradh Exp $");
49 1.1 hikaru
50 1.1 hikaru #include <sys/param.h>
51 1.1 hikaru #include <sys/cpu.h>
52 1.1 hikaru #include <sys/systm.h>
53 1.1 hikaru #include <sys/device.h>
54 1.1 hikaru #include <sys/intr.h>
55 1.1 hikaru #include <sys/kernel.h>
56 1.3 matt #include <sys/kmem.h>
57 1.3 matt #include <sys/atomic.h>
58 1.1 hikaru
59 1.1 hikaru #include <lib/libkern/libkern.h>
60 1.1 hikaru
61 1.1 hikaru #include <mips/locore.h>
62 1.1 hikaru
63 1.1 hikaru #include <mips/cavium/dev/octeon_ciureg.h>
64 1.1 hikaru #include <mips/cavium/octeonvar.h>
65 1.1 hikaru
/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 * Copied into the global ipl_sr_map by octeon_intr_init() on cpu0.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
		[IPL_NONE] = 0,
		[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
		[IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
		/* IPL_VM additionally blocks hardware INT0. */
		[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
		/* IPL_SCHED and IPL_DDB also block INT1 and the CP0 timer (INT5). */
		[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		/* IPL_HIGH blocks every interrupt source. */
		[IPL_HIGH] = MIPS_INT_MASK,
	},
};
83 1.1 hikaru
/*
 * Human-readable names for the CIU interrupt sources, indexed by CIU
 * interrupt number.  Used to label the per-CPU event counters attached
 * in octeon_intr_init().
 */
const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};
150 1.1 hikaru
/*
 * An established interrupt handler: the callback, its argument, and
 * the CIU interrupt number and IPL it was registered with.
 */
struct octeon_intrhand {
	int (*ih_func)(void *);	/* handler function */
	void *ih_arg;		/* argument passed to ih_func */
	int ih_irq;		/* CIU interrupt number */
	int ih_ipl;		/* IPL_* level it was established at */
};
157 1.1 hikaru
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

/*
 * Statically established handlers for the two CIU mailbox (IPI)
 * interrupts: mailbox bits 15..0 are taken at IPL_SCHED and bits
 * 31..16 at IPL_HIGH.  ih_arg encodes which mailbox bits the handler
 * instance services (see octeon_ipi_intr()).
 */
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = CIU_INT_MBOX_31_16,
		.ih_ipl = IPL_HIGH,
	},
};
#endif
177 1.1 hikaru
/*
 * Established handlers, indexed by CIU interrupt number.  Guarded by
 * octeon_intr_lock; the mailbox (IPI) slots are wired up statically
 * when MULTIPROCESSOR.
 */
struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
	[CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};
184 1.1 hikaru
/* Serializes octciu_intrs[] updates and CIU enable-register writes. */
kmutex_t octeon_intr_lock;

/* XKPHYS (OCTEON_CCA_NONE) address for a CIU register's physical address. */
#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

/*
 * CIU register addresses for CPU 0.  The int0/int1/int2 register
 * groups serve IPL_VM, IPL_SCHED and IPL_DDB/IPL_HIGH respectively
 * (see the switch in octeon_intr_establish()).
 */
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};
213 1.1 hikaru
#ifdef MULTIPROCESSOR
/* XXX limit of two CPUs ... */
/*
 * CIU register addresses for CPU 1 — same layout as octeon_cpu0_softc
 * but pointing at the second CPU's register instances.
 */
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG(1)),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif
238 1.1 hikaru
#ifdef DEBUG
/*
 * Boot-time sanity check of the CIU mailbox registers: verify that
 * mailbox bits can be set and cleared, that a low (bit 0) mailbox bit
 * raises only CPU 0's "mbox 15..0" summary bit, and that a high
 * (bit 16) mailbox bit raises only CPU 0's "mbox 31..16" summary bit.
 * Called from octeon_intr_init() on the boot CPU.
 */
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);	/* CPU 1's int0 summary */
	const uint64_t sum_mbox_lo = __BIT(CIU_INT_MBOX_15_0);
	const uint64_t sum_mbox_hi = __BIT(CIU_INT_MBOX_31_16);

	/* Clear every mailbox bit on both CPUs. */
	mips3_sd(mbox_clr0, ~0ULL);
	mips3_sd(mbox_clr1, ~0ULL);

	/* A read of the SET register is expected to return the mailbox state. */
	uint32_t mbox0 = mips3_ld(mbox_set0);
	uint32_t mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	/* Set a low-half mailbox bit on CPU 0 ... */
	mips3_sd(mbox_set0, __BIT(0));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	/* ... it must be visible on CPU 0 only ... */
	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips3_ld(int_sum0);
	uint64_t sum1 = mips3_ld(int_sum1);

	/* ... and raise only CPU 0's "mbox 15..0" summary bit. */
	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	/* Clearing the bit must take it away again. */
	mips3_sd(mbox_clr0, mbox0);
	mbox0 = mips3_ld(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	/* Set a high-half mailbox bit on CPU 0 ... */
	mips3_sd(mbox_set0, __BIT(16));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips3_ld(int_sum0);
	sum1 = mips3_ld(int_sum1);

	/* ... and raise only CPU 0's "mbox 31..16" summary bit. */
	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif

#undef X
302 1.1 hikaru
/*
 * Per-CPU interrupt initialization.  On the boot CPU this installs the
 * IPL->SR map, initializes the interrupt lock and (when MULTIPROCESSOR)
 * the IPI hook.  For every CPU it programs the CIU enable registers
 * from the software copies in the cpu_softc and attaches one event
 * counter per CIU interrupt source.
 */
void
octeon_intr_init(struct cpu_info *ci)
{
#ifdef DIAGNOSTIC
	const int cpunum = cpu_index(ci);
#endif
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;


	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		/* Only a second CPU is supported (see octeon_cpu1_softc). */
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_int1_enable0 |= __BIT(CIU_INT_MBOX_15_0);	/* IPL_SCHED */
	cpu->cpu_int2_enable0 |= __BIT(CIU_INT_MBOX_31_16);	/* IPL_HIGH */
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0,
		    cpu->cpu_int2_enable0);

	/* Push the software enable masks out to the CIU. */
	mips3_sd(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips3_sd(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips3_sd(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int32_en, 0);

	mips3_sd(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips3_sd(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips3_sd(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	/* Discard any stale mailbox bits before IPIs are taken. */
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	/* One event counter per CIU interrupt source, labelled by name. */
	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}
361 1.1 hikaru
362 1.1 hikaru void
363 1.1 hikaru octeon_cal_timer(int corefreq)
364 1.1 hikaru {
365 1.1 hikaru /* Compute the number of cycles per second. */
366 1.1 hikaru curcpu()->ci_cpu_freq = corefreq;
367 1.1 hikaru
368 1.1 hikaru /* Compute the number of ticks for hz. */
369 1.1 hikaru curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
370 1.1 hikaru
371 1.1 hikaru /* Compute the delay divisor and reciprical. */
372 1.1 hikaru curcpu()->ci_divisor_delay =
373 1.1 hikaru ((curcpu()->ci_cpu_freq + 500000) / 1000000);
374 1.1 hikaru #if 0
375 1.1 hikaru MIPS_SET_CI_RECIPRICAL(curcpu());
376 1.1 hikaru #endif
377 1.1 hikaru
378 1.1 hikaru mips3_cp0_count_write(0);
379 1.1 hikaru mips3_cp0_compare_write(0);
380 1.1 hikaru }
381 1.1 hikaru
382 1.1 hikaru void *
383 1.3 matt octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
384 1.1 hikaru {
385 1.1 hikaru struct octeon_intrhand *ih;
386 1.1 hikaru
387 1.1 hikaru if (irq >= NIRQS)
388 1.1 hikaru panic("octeon_intr_establish: bogus IRQ %d", irq);
389 1.3 matt if (ipl < IPL_VM)
390 1.3 matt panic("octeon_intr_establish: bogus IPL %d", ipl);
391 1.1 hikaru
392 1.3 matt ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
393 1.1 hikaru if (ih == NULL)
394 1.1 hikaru return (NULL);
395 1.1 hikaru
396 1.1 hikaru ih->ih_func = func;
397 1.1 hikaru ih->ih_arg = arg;
398 1.1 hikaru ih->ih_irq = irq;
399 1.3 matt ih->ih_ipl = ipl;
400 1.1 hikaru
401 1.3 matt mutex_enter(&octeon_intr_lock);
402 1.1 hikaru
403 1.1 hikaru /*
404 1.3 matt * First, make it known.
405 1.1 hikaru */
406 1.11 simonb KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
407 1.11 simonb irq, octciu_intrs[irq]);
408 1.3 matt
409 1.11 simonb octciu_intrs[irq] = ih;
410 1.3 matt membar_producer();
411 1.1 hikaru
412 1.1 hikaru /*
413 1.1 hikaru * Now enable it.
414 1.1 hikaru */
415 1.3 matt const uint64_t irq_mask = __BIT(irq);
416 1.3 matt struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
417 1.3 matt #if MULTIPROCESSOR
418 1.3 matt struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
419 1.3 matt #endif
420 1.3 matt
421 1.3 matt switch (ipl) {
422 1.3 matt case IPL_VM:
423 1.3 matt cpu0->cpu_int0_enable0 |= irq_mask;
424 1.5 matt mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
425 1.3 matt break;
426 1.1 hikaru
427 1.3 matt case IPL_SCHED:
428 1.3 matt cpu0->cpu_int1_enable0 |= irq_mask;
429 1.5 matt mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
430 1.3 matt #ifdef MULTIPROCESSOR
431 1.3 matt cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
432 1.5 matt mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
433 1.3 matt #endif
434 1.3 matt break;
435 1.3 matt
436 1.3 matt case IPL_DDB:
437 1.3 matt case IPL_HIGH:
438 1.3 matt cpu0->cpu_int2_enable0 |= irq_mask;
439 1.5 matt mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
440 1.3 matt #ifdef MULTIPROCESSOR
441 1.3 matt cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
442 1.5 matt mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
443 1.3 matt #endif
444 1.3 matt break;
445 1.1 hikaru }
446 1.1 hikaru
447 1.3 matt mutex_exit(&octeon_intr_lock);
448 1.3 matt
449 1.3 matt return ih;
450 1.1 hikaru }
451 1.1 hikaru
452 1.1 hikaru void
453 1.1 hikaru octeon_intr_disestablish(void *cookie)
454 1.1 hikaru {
455 1.3 matt struct octeon_intrhand * const ih = cookie;
456 1.3 matt const int irq = ih->ih_irq & (NIRQS-1);
457 1.3 matt const int ipl = ih->ih_ipl;
458 1.1 hikaru
459 1.3 matt mutex_enter(&octeon_intr_lock);
460 1.1 hikaru
461 1.1 hikaru /*
462 1.3 matt * First disable it.
463 1.1 hikaru */
464 1.3 matt const uint64_t irq_mask = ~__BIT(irq);
465 1.3 matt struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
466 1.3 matt #if MULTIPROCESSOR
467 1.3 matt struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
468 1.3 matt #endif
469 1.3 matt
470 1.3 matt switch (ipl) {
471 1.3 matt case IPL_VM:
472 1.3 matt cpu0->cpu_int0_enable0 &= ~irq_mask;
473 1.5 matt mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
474 1.3 matt break;
475 1.3 matt
476 1.3 matt case IPL_SCHED:
477 1.3 matt cpu0->cpu_int1_enable0 &= ~irq_mask;
478 1.5 matt mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
479 1.3 matt #ifdef MULTIPROCESSOR
480 1.3 matt cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
481 1.5 matt mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
482 1.3 matt #endif
483 1.3 matt break;
484 1.3 matt
485 1.3 matt case IPL_DDB:
486 1.3 matt case IPL_HIGH:
487 1.3 matt cpu0->cpu_int2_enable0 &= ~irq_mask;
488 1.5 matt mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
489 1.3 matt #ifdef MULTIPROCESSOR
490 1.3 matt cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
491 1.5 matt mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
492 1.3 matt #endif
493 1.3 matt break;
494 1.3 matt }
495 1.1 hikaru
496 1.1 hikaru /*
497 1.3 matt * Now remove it since we shouldn't get interrupts for it.
498 1.1 hikaru */
499 1.11 simonb octciu_intrs[irq] = NULL;
500 1.3 matt
501 1.3 matt mutex_exit(&octeon_intr_lock);
502 1.1 hikaru
503 1.3 matt kmem_free(ih, sizeof(*ih));
504 1.1 hikaru }
505 1.1 hikaru
/*
 * Hardware interrupt dispatch.  Reads the CIU summary register for the
 * highest pending hardware interrupt level, masks it with the software
 * copy of the enable bits, and calls the established handler for each
 * pending source, counting it in the per-CPU event counters.
 */
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	/* Service the highest pending level first: INT2, then INT1, INT0. */
	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips3_ld(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips3_ld(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips3_ld(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		/* Dispatch pending sources lowest-numbered first. */
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
			/* Only IPL_VM handlers run under the kernel lock. */
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			/* Handlers must not return with interrupts disabled. */
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
552 1.3 matt
#ifdef MULTIPROCESSOR
/* The high mailbox half (16 bits) must be able to carry every IPI. */
__CTASSERT(NIPIS < 16);

/*
 * Mailbox (IPI) interrupt handler.  arg selects which half of the
 * 32-bit mailbox this instance services: bits 15..0 (IPL_SCHED) or
 * bits 31..16 (IPL_HIGH); see ipi_intrhands[].
 */
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	/* High-half mailbox bits may only be handled at >= IPL_SCHED. */
	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	/* Keep only the mailbox bits that are actually raised. */
	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	/* Acknowledge them in the CIU before processing. */
	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	/* Fold the high half (bits 31..16) down onto the IPI numbers. */
	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	/* Mark active, clear the requests, process, then mark idle. */
	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
602 1.1 hikaru
603 1.3 matt int
604 1.3 matt octeon_send_ipi(struct cpu_info *ci, int req)
605 1.3 matt {
606 1.3 matt KASSERT(req < NIPIS);
607 1.3 matt if (ci == NULL) {
608 1.4 matt CPU_INFO_ITERATOR cii;
609 1.4 matt for (CPU_INFO_FOREACH(cii, ci)) {
610 1.4 matt if (ci != curcpu()) {
611 1.4 matt octeon_send_ipi(ci, req);
612 1.4 matt }
613 1.4 matt }
614 1.4 matt return 0;
615 1.1 hikaru }
616 1.4 matt KASSERT(cold || ci->ci_softc != NULL);
617 1.4 matt if (ci->ci_softc == NULL)
618 1.4 matt return -1;
619 1.3 matt
620 1.3 matt struct cpu_softc * const cpu = ci->ci_softc;
621 1.3 matt uint64_t ipi_mask = __BIT(req);
622 1.3 matt
623 1.7 skrll atomic_or_64(&ci->ci_request_ipis, ipi_mask);
624 1.7 skrll if (req == IPI_SUSPEND || req == IPI_WDOG) {
625 1.3 matt ipi_mask <<= 16;
626 1.1 hikaru }
627 1.3 matt
628 1.5 matt mips3_sd(cpu->cpu_mbox_set, ipi_mask);
629 1.3 matt return 0;
630 1.1 hikaru }
631 1.3 matt #endif /* MULTIPROCESSOR */
632