/*	$NetBSD: octeon_intr.c,v 1.4 2015/06/06 20:52:16 matt Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for Cavium Octeon.
 */

#include "opt_octeon.h"
#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.4 2015/06/06 20:52:16 matt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.  Each level must mask the
 * MIPS interrupt line its own handlers are dispatched from (see
 * octeon_intr_establish/octeon_iointr): INT0 carries IPL_VM, INT1
 * carries IPL_SCHED, INT2 carries IPL_DDB/IPL_HIGH.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
		[IPL_NONE] = 0,
		[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
		[IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
		[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
		[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		    | MIPS_INT_MASK_1 | MIPS_INT_MASK_2 | MIPS_INT_MASK_5,
		[IPL_HIGH] = MIPS_INT_MASK,
	},
};

81 1.1 hikaru
82 1.2 matt const char * const octeon_intrnames[NIRQS] = {
83 1.1 hikaru "workq 0",
84 1.1 hikaru "workq 1",
85 1.1 hikaru "workq 2",
86 1.1 hikaru "workq 3",
87 1.1 hikaru "workq 4",
88 1.1 hikaru "workq 5",
89 1.1 hikaru "workq 6",
90 1.1 hikaru "workq 7",
91 1.1 hikaru "workq 8",
92 1.1 hikaru "workq 9",
93 1.1 hikaru "workq 10",
94 1.1 hikaru "workq 11",
95 1.1 hikaru "workq 12",
96 1.1 hikaru "workq 13",
97 1.1 hikaru "workq 14",
98 1.1 hikaru "workq 15",
99 1.1 hikaru "gpio 0",
100 1.1 hikaru "gpio 1",
101 1.1 hikaru "gpio 2",
102 1.1 hikaru "gpio 3",
103 1.1 hikaru "gpio 4",
104 1.1 hikaru "gpio 5",
105 1.1 hikaru "gpio 6",
106 1.1 hikaru "gpio 7",
107 1.1 hikaru "gpio 8",
108 1.1 hikaru "gpio 9",
109 1.1 hikaru "gpio 10",
110 1.1 hikaru "gpio 11",
111 1.1 hikaru "gpio 12",
112 1.1 hikaru "gpio 13",
113 1.1 hikaru "gpio 14",
114 1.1 hikaru "gpio 15",
115 1.1 hikaru "mbox 0-15",
116 1.1 hikaru "mbox 16-31",
117 1.1 hikaru "uart 0",
118 1.1 hikaru "uart 1",
119 1.1 hikaru "pci inta",
120 1.1 hikaru "pci intb",
121 1.1 hikaru "pci intc",
122 1.1 hikaru "pci intd",
123 1.1 hikaru "pci msi 0-15",
124 1.1 hikaru "pci msi 16-31",
125 1.1 hikaru "pci msi 32-47",
126 1.1 hikaru "pci msi 48-63",
127 1.1 hikaru "wdog summary",
128 1.1 hikaru "twsi",
129 1.1 hikaru "rml",
130 1.1 hikaru "trace",
131 1.1 hikaru "gmx drop",
132 1.1 hikaru "reserved",
133 1.1 hikaru "ipd drop",
134 1.1 hikaru "reserved",
135 1.1 hikaru "timer 0",
136 1.1 hikaru "timer 1",
137 1.1 hikaru "timer 2",
138 1.1 hikaru "timer 3",
139 1.1 hikaru "usb",
140 1.1 hikaru "pcm/tdm",
141 1.1 hikaru "mpi/spi",
142 1.1 hikaru "reserved",
143 1.1 hikaru "reserved",
144 1.1 hikaru "reserved",
145 1.1 hikaru "reserved",
146 1.1 hikaru "reserved",
147 1.1 hikaru };

struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};
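
/*
 * Typical usage from a driver (hypothetical sketch; "sc", "foo_intr"
 * and the irq value are illustrative, not part of this file):
 *
 *	sc->sc_ih = octeon_intr_establish(irq, IPL_VM, foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "unable to establish interrupt\n");
 *	...
 *	octeon_intr_disestablish(sc->sc_ih);
 */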
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

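/*
 * The CIU reports the 32 mailbox bits as two summary bits, one for
 * mailbox bits 15..0 and one for bits 31..16.  The low half is taken
 * at IPL_SCHED and the high half at IPL_HIGH, so a fixed handler is
 * registered for each half.
 */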
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif

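/*
 * Table of established handlers, indexed by CIU bit number.  The IPI
 * slots are wired in at compile time; the rest is filled in by
 * octeon_intr_establish().
 */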
struct octeon_intrhand *octeon_ciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};

kmutex_t octeon_intr_lock;

#define	X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

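/*
 * Each core has its own bank of CIU summary and enable registers (and,
 * under MULTIPROCESSOR, its own mailbox set/clear pair).  The per-core
 * register addresses are collected in a cpu_softc so the dispatch and
 * enable paths below need not know which core they run on.
 */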
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};

#ifdef MULTIPROCESSOR
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG1),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif

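/*
 * DEBUG-only self-test: pulse bits in cpu0's mailbox and verify that
 * the low and high halves raise the expected summary bits in cpu0's
 * interrupt summary register while cpu1's summary stays clear.
 */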
#ifdef DEBUG
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	const uint64_t sum_mbox_hi = __BIT(_CIU_INT_MBOX_31_16_SHIFT);

	mips64_sd_a64(mbox_clr0, ~0ULL);
	mips64_sd_a64(mbox_clr1, ~0ULL);

	uint32_t mbox0 = mips64_ld_a64(mbox_set0);
	uint32_t mbox1 = mips64_ld_a64(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	mips64_sd_a64(mbox_set0, __BIT(0));

	mbox0 = mips64_ld_a64(mbox_set0);
	mbox1 = mips64_ld_a64(mbox_set1);

	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips64_ld_a64(int_sum0);
	uint64_t sum1 = mips64_ld_a64(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	mips64_sd_a64(mbox_clr0, mbox0);
	mbox0 = mips64_ld_a64(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	mips64_sd_a64(mbox_set0, __BIT(16));

	mbox0 = mips64_ld_a64(mbox_set0);
	mbox1 = mips64_ld_a64(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips64_ld_a64(int_sum0);
	sum1 = mips64_ld_a64(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif

#undef X

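/*
 * Per-CPU interrupt initialization.  The boot CPU also sets up the
 * shared state (spl map, lock, IPI hook); each CPU then programs its
 * own CIU enable registers and attaches its event counters.
 */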
void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;

	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs (low half at IPL_SCHED, high half at IPL_HIGH).
	cpu->cpu_int1_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0,
		    cpu->cpu_int2_enable0);

	mips64_sd_a64(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips64_sd_a64(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips64_sd_a64(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips64_sd_a64(cpu->cpu_int32_en, 0);

	mips64_sd_a64(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips64_sd_a64(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips64_sd_a64(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	mips64_sd_a64(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octeon_ciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octeon_ciu_intrs[irq]);

	octeon_ciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 |= irq_mask;
		mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 |= irq_mask;
		mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 |= irq_mask;
		mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}

void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 &= ~irq_mask;
		mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 &= ~irq_mask;
		mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 &= ~irq_mask;
		mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octeon_ciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

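	/*
	 * Read the summary register for the highest pending MIPS
	 * hardware interrupt line: INT2 (IPL_DDB/IPL_HIGH) beats INT1
	 * (IPL_SCHED), which beats INT0 (IPL_VM).  Only bits we have
	 * enabled on this core are considered.
	 */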
	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips64_ld_a64(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips64_ld_a64(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips64_ld_a64(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octeon_ciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
			KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t)arg;

	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	ipi_mask &= mips64_ld_a64(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	mips64_sd_a64(cpu->cpu_mbox_clr, ipi_mask);

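	/*
	 * Requests sent via the high mailbox (bits 31..16) carry the
	 * same IPI numbers shifted up by 16; fold them back down so
	 * each IPI is represented by a single low bit.
	 */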
	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = __BIT(req);

	/*
	 * IPI_SUSPEND and IPI_WDOG must be taken at IPL_HIGH, so they
	 * travel via the high mailbox (bits 31..16).
	 */
	if (__BIT(req) & (__BIT(IPI_SUSPEND)|__BIT(IPI_WDOG))) {
		ipi_mask <<= 16;
	}

	mips64_sd_a64(cpu->cpu_mbox_set, ipi_mask);
	return 0;
}
#endif /* MULTIPROCESSOR */