1 1.8.30.2 skrll /* $NetBSD: rmixl_intr.c,v 1.8.30.2 2016/10/05 20:55:32 skrll Exp $ */
2 1.2 matt
3 1.2 matt /*-
4 1.2 matt * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
5 1.2 matt * All rights reserved.
6 1.2 matt *
7 1.2 matt * Redistribution and use in source and binary forms, with or
8 1.2 matt * without modification, are permitted provided that the following
9 1.2 matt * conditions are met:
10 1.2 matt * 1. Redistributions of source code must retain the above copyright
11 1.2 matt * notice, this list of conditions and the following disclaimer.
12 1.2 matt * 2. Redistributions in binary form must reproduce the above
13 1.2 matt * copyright notice, this list of conditions and the following
14 1.2 matt * disclaimer in the documentation and/or other materials provided
15 1.2 matt * with the distribution.
16 1.2 matt * 3. The names of the authors may not be used to endorse or promote
17 1.2 matt * products derived from this software without specific prior
18 1.2 matt * written permission.
19 1.2 matt *
20 1.2 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
21 1.2 matt * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 1.2 matt * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23 1.2 matt * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
24 1.2 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
25 1.2 matt * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 1.2 matt * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27 1.2 matt * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 1.2 matt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29 1.2 matt * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 1.2 matt * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
31 1.2 matt * OF SUCH DAMAGE.
32 1.2 matt */
33 1.2 matt /*-
34 1.2 matt * Copyright (c) 2001 The NetBSD Foundation, Inc.
35 1.2 matt * All rights reserved.
36 1.2 matt *
37 1.2 matt * This code is derived from software contributed to The NetBSD Foundation
38 1.2 matt * by Jason R. Thorpe.
39 1.2 matt *
40 1.2 matt * Redistribution and use in source and binary forms, with or without
41 1.2 matt * modification, are permitted provided that the following conditions
42 1.2 matt * are met:
43 1.2 matt * 1. Redistributions of source code must retain the above copyright
44 1.2 matt * notice, this list of conditions and the following disclaimer.
45 1.2 matt * 2. Redistributions in binary form must reproduce the above copyright
46 1.2 matt * notice, this list of conditions and the following disclaimer in the
47 1.2 matt * documentation and/or other materials provided with the distribution.
48 1.2 matt *
49 1.2 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 1.2 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 1.2 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 1.2 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 1.2 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 1.2 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 1.2 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 1.2 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 1.2 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 1.2 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 1.2 matt * POSSIBILITY OF SUCH DAMAGE.
60 1.2 matt */
61 1.2 matt
62 1.2 matt /*
63 1.2 matt * Platform-specific interrupt support for the RMI XLP, XLR, XLS
64 1.2 matt */
65 1.2 matt
66 1.2 matt #include <sys/cdefs.h>
67 1.8.30.2 skrll __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.8.30.2 2016/10/05 20:55:32 skrll Exp $");
68 1.2 matt
69 1.2 matt #include "opt_ddb.h"
70 1.3 matt #include "opt_multiprocessor.h"
71 1.3 matt #define __INTR_PRIVATE
72 1.2 matt
73 1.2 matt #include <sys/param.h>
74 1.7 matt #include <sys/atomic.h>
75 1.7 matt #include <sys/bus.h>
76 1.7 matt #include <sys/cpu.h>
77 1.2 matt #include <sys/device.h>
78 1.7 matt #include <sys/intr.h>
79 1.2 matt #include <sys/kernel.h>
80 1.7 matt #include <sys/malloc.h>
81 1.3 matt #include <sys/mutex.h>
82 1.7 matt #include <sys/systm.h>
83 1.2 matt
84 1.2 matt #include <mips/locore.h>
85 1.2 matt
86 1.2 matt #include <mips/rmi/rmixlreg.h>
87 1.2 matt #include <mips/rmi/rmixlvar.h>
88 1.2 matt
89 1.3 matt #include <mips/rmi/rmixl_cpuvar.h>
90 1.3 matt #include <mips/rmi/rmixl_intr.h>
91 1.3 matt
92 1.2 matt #include <dev/pci/pcireg.h>
93 1.2 matt #include <dev/pci/pcivar.h>
94 1.2 matt
95 1.3 matt //#define IOINTR_DEBUG 1
96 1.2 matt #ifdef IOINTR_DEBUG
97 1.2 matt int iointr_debug = IOINTR_DEBUG;
98 1.2 matt # define DPRINTF(x) do { if (iointr_debug) printf x ; } while(0)
99 1.2 matt #else
100 1.2 matt # define DPRINTF(x)
101 1.2 matt #endif
102 1.2 matt
103 1.2 matt #define RMIXL_PICREG_READ(off) \
104 1.2 matt RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
105 1.2 matt #define RMIXL_PICREG_WRITE(off, val) \
106 1.2 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))
107 1.2 matt
108 1.2 matt /*
109 1.3 matt * do not clear these when acking EIRR
110 1.3 matt * (otherwise they get lost)
111 1.2 matt */
112 1.3 matt #define RMIXL_EIRR_PRESERVE_MASK \
113 1.3 matt ((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
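/*
 * note: the low 8 EIRR bits mirror the CP0 Cause IP bits (Cause bits 8..15),
 * hence the ">> 8" above; evbmips_iointr() below excludes the same bits
 * when it scans the EIRR.
 */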
114 1.2 matt
115 1.2 matt /*
 116 1.3 matt  * IRT assignments depend on the RMI chip family
 117 1.3 matt  * (XLS1xx vs. XLS2xx vs. XLS4xx vs. XLS6xx);
118 1.3 matt * use the right display string table for the CPU that's running.
119 1.3 matt */
120 1.2 matt
121 1.2 matt /*
122 1.3 matt * rmixl_irtnames_xlrxxx
123 1.3 matt * - use for XLRxxx
124 1.2 matt */
125 1.3 matt static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
126 1.3 matt "pic int 0 (watchdog)", /* 0 */
127 1.3 matt "pic int 1 (timer0)", /* 1 */
128 1.3 matt "pic int 2 (timer1)", /* 2 */
129 1.3 matt "pic int 3 (timer2)", /* 3 */
130 1.3 matt "pic int 4 (timer3)", /* 4 */
131 1.3 matt "pic int 5 (timer4)", /* 5 */
132 1.3 matt "pic int 6 (timer5)", /* 6 */
133 1.3 matt "pic int 7 (timer6)", /* 7 */
134 1.3 matt "pic int 8 (timer7)", /* 8 */
135 1.3 matt "pic int 9 (uart0)", /* 9 */
136 1.3 matt "pic int 10 (uart1)", /* 10 */
137 1.3 matt "pic int 11 (i2c0)", /* 11 */
138 1.3 matt "pic int 12 (i2c1)", /* 12 */
139 1.3 matt "pic int 13 (pcmcia)", /* 13 */
140 1.3 matt "pic int 14 (gpio)", /* 14 */
141 1.3 matt "pic int 15 (hyper)", /* 15 */
142 1.3 matt "pic int 16 (pcix)", /* 16 */
143 1.3 matt "pic int 17 (gmac0)", /* 17 */
144 1.3 matt "pic int 18 (gmac1)", /* 18 */
145 1.3 matt "pic int 19 (gmac2)", /* 19 */
146 1.3 matt "pic int 20 (gmac3)", /* 20 */
147 1.3 matt "pic int 21 (xgs0)", /* 21 */
148 1.3 matt "pic int 22 (xgs1)", /* 22 */
149 1.3 matt "pic int 23 (irq23)", /* 23 */
150 1.3 matt "pic int 24 (hyper_fatal)", /* 24 */
151 1.3 matt "pic int 25 (bridge_aerr)", /* 25 */
152 1.3 matt "pic int 26 (bridge_berr)", /* 26 */
153 1.3 matt "pic int 27 (bridge_tb)", /* 27 */
154 1.3 matt "pic int 28 (bridge_nmi)", /* 28 */
155 1.3 matt "pic int 29 (bridge_sram_derr)",/* 29 */
156 1.3 matt "pic int 30 (gpio_fatal)", /* 30 */
157 1.3 matt "pic int 31 (reserved)", /* 31 */
158 1.2 matt };
159 1.2 matt
160 1.2 matt /*
161 1.3 matt * rmixl_irtnames_xls2xx
162 1.3 matt * - use for XLS2xx
163 1.2 matt */
164 1.3 matt static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
165 1.3 matt "pic int 0 (watchdog)", /* 0 */
166 1.3 matt "pic int 1 (timer0)", /* 1 */
167 1.3 matt "pic int 2 (timer1)", /* 2 */
168 1.3 matt "pic int 3 (timer2)", /* 3 */
169 1.3 matt "pic int 4 (timer3)", /* 4 */
170 1.3 matt "pic int 5 (timer4)", /* 5 */
171 1.3 matt "pic int 6 (timer5)", /* 6 */
172 1.3 matt "pic int 7 (timer6)", /* 7 */
173 1.3 matt "pic int 8 (timer7)", /* 8 */
174 1.3 matt "pic int 9 (uart0)", /* 9 */
175 1.3 matt "pic int 10 (uart1)", /* 10 */
176 1.3 matt "pic int 11 (i2c0)", /* 11 */
177 1.3 matt "pic int 12 (i2c1)", /* 12 */
178 1.3 matt "pic int 13 (pcmcia)", /* 13 */
179 1.3 matt "pic int 14 (gpio_a)", /* 14 */
180 1.3 matt "pic int 15 (irq15)", /* 15 */
181 1.3 matt "pic int 16 (bridge_tb)", /* 16 */
182 1.3 matt "pic int 17 (gmac0)", /* 17 */
183 1.3 matt "pic int 18 (gmac1)", /* 18 */
184 1.3 matt "pic int 19 (gmac2)", /* 19 */
185 1.3 matt "pic int 20 (gmac3)", /* 20 */
186 1.3 matt "pic int 21 (irq21)", /* 21 */
187 1.3 matt "pic int 22 (irq22)", /* 22 */
188 1.3 matt "pic int 23 (pcie_link2)", /* 23 */
189 1.3 matt "pic int 24 (pcie_link3)", /* 24 */
190 1.3 matt "pic int 25 (bridge_err)", /* 25 */
191 1.3 matt "pic int 26 (pcie_link0)", /* 26 */
192 1.3 matt "pic int 27 (pcie_link1)", /* 27 */
193 1.3 matt "pic int 28 (irq28)", /* 28 */
194 1.3 matt "pic int 29 (pcie_err)", /* 29 */
195 1.3 matt "pic int 30 (gpio_b)", /* 30 */
196 1.3 matt "pic int 31 (usb)", /* 31 */
197 1.2 matt };
198 1.2 matt
199 1.2 matt /*
200 1.3 matt * rmixl_irtnames_xls1xx
201 1.3 matt * - use for XLS1xx, XLS4xx-Lite
202 1.2 matt */
203 1.3 matt static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
204 1.3 matt "pic int 0 (watchdog)", /* 0 */
205 1.3 matt "pic int 1 (timer0)", /* 1 */
206 1.3 matt "pic int 2 (timer1)", /* 2 */
207 1.3 matt "pic int 3 (timer2)", /* 3 */
208 1.3 matt "pic int 4 (timer3)", /* 4 */
209 1.3 matt "pic int 5 (timer4)", /* 5 */
210 1.3 matt "pic int 6 (timer5)", /* 6 */
211 1.3 matt "pic int 7 (timer6)", /* 7 */
212 1.3 matt "pic int 8 (timer7)", /* 8 */
213 1.3 matt "pic int 9 (uart0)", /* 9 */
214 1.3 matt "pic int 10 (uart1)", /* 10 */
215 1.3 matt "pic int 11 (i2c0)", /* 11 */
216 1.3 matt "pic int 12 (i2c1)", /* 12 */
217 1.3 matt "pic int 13 (pcmcia)", /* 13 */
218 1.3 matt "pic int 14 (gpio_a)", /* 14 */
219 1.3 matt "pic int 15 (irq15)", /* 15 */
220 1.3 matt "pic int 16 (bridge_tb)", /* 16 */
221 1.3 matt "pic int 17 (gmac0)", /* 17 */
222 1.3 matt "pic int 18 (gmac1)", /* 18 */
223 1.3 matt "pic int 19 (gmac2)", /* 19 */
224 1.3 matt "pic int 20 (gmac3)", /* 20 */
225 1.3 matt "pic int 21 (irq21)", /* 21 */
226 1.3 matt "pic int 22 (irq22)", /* 22 */
227 1.3 matt "pic int 23 (irq23)", /* 23 */
228 1.3 matt "pic int 24 (irq24)", /* 24 */
229 1.3 matt "pic int 25 (bridge_err)", /* 25 */
230 1.3 matt "pic int 26 (pcie_link0)", /* 26 */
231 1.3 matt "pic int 27 (pcie_link1)", /* 27 */
232 1.3 matt "pic int 28 (irq28)", /* 28 */
233 1.3 matt "pic int 29 (pcie_err)", /* 29 */
234 1.3 matt "pic int 30 (gpio_b)", /* 30 */
235 1.3 matt "pic int 31 (usb)", /* 31 */
236 1.2 matt };
237 1.2 matt
238 1.2 matt /*
239 1.3 matt * rmixl_irtnames_xls4xx:
240 1.3 matt * - use for XLS4xx, XLS6xx
241 1.2 matt */
242 1.3 matt static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
243 1.3 matt "pic int 0 (watchdog)", /* 0 */
244 1.3 matt "pic int 1 (timer0)", /* 1 */
245 1.3 matt "pic int 2 (timer1)", /* 2 */
246 1.3 matt "pic int 3 (timer2)", /* 3 */
247 1.3 matt "pic int 4 (timer3)", /* 4 */
248 1.3 matt "pic int 5 (timer4)", /* 5 */
249 1.3 matt "pic int 6 (timer5)", /* 6 */
250 1.3 matt "pic int 7 (timer6)", /* 7 */
251 1.3 matt "pic int 8 (timer7)", /* 8 */
252 1.3 matt "pic int 9 (uart0)", /* 9 */
253 1.3 matt "pic int 10 (uart1)", /* 10 */
254 1.3 matt "pic int 11 (i2c0)", /* 11 */
255 1.3 matt "pic int 12 (i2c1)", /* 12 */
256 1.3 matt "pic int 13 (pcmcia)", /* 13 */
257 1.3 matt "pic int 14 (gpio_a)", /* 14 */
258 1.3 matt "pic int 15 (irq15)", /* 15 */
259 1.3 matt "pic int 16 (bridge_tb)", /* 16 */
260 1.3 matt "pic int 17 (gmac0)", /* 17 */
261 1.3 matt "pic int 18 (gmac1)", /* 18 */
262 1.3 matt "pic int 19 (gmac2)", /* 19 */
263 1.3 matt "pic int 20 (gmac3)", /* 20 */
264 1.3 matt "pic int 21 (irq21)", /* 21 */
265 1.3 matt "pic int 22 (irq22)", /* 22 */
266 1.3 matt "pic int 23 (irq23)", /* 23 */
267 1.3 matt "pic int 24 (irq24)", /* 24 */
268 1.3 matt "pic int 25 (bridge_err)", /* 25 */
269 1.3 matt "pic int 26 (pcie_link0)", /* 26 */
270 1.3 matt "pic int 27 (pcie_link1)", /* 27 */
271 1.3 matt "pic int 28 (pcie_link2)", /* 28 */
272 1.3 matt "pic int 29 (pcie_link3)", /* 29 */
273 1.3 matt "pic int 30 (gpio_b)", /* 30 */
274 1.3 matt "pic int 31 (usb)", /* 31 */
275 1.3 matt };
276 1.2 matt
277 1.2 matt /*
278 1.3 matt * rmixl_vecnames_common:
279 1.3 matt * - use for unknown cpu implementation
280 1.3 matt * - covers all vectors, not just IRT intrs
281 1.2 matt */
282 1.3 matt static const char * const rmixl_vecnames_common[NINTRVECS] = {
283 1.3 matt "vec 0", /* 0 */
284 1.3 matt "vec 1", /* 1 */
285 1.3 matt "vec 2", /* 2 */
286 1.3 matt "vec 3", /* 3 */
287 1.3 matt "vec 4", /* 4 */
288 1.3 matt "vec 5", /* 5 */
289 1.3 matt "vec 6", /* 6 */
290 1.3 matt "vec 7", /* 7 */
291 1.4 cliff "vec 8 (ipi 0)", /* 8 */
292 1.4 cliff "vec 9 (ipi 1)", /* 9 */
293 1.4 cliff "vec 10 (ipi 2)", /* 10 */
294 1.4 cliff "vec 11 (ipi 3)", /* 11 */
295 1.4 cliff "vec 12 (ipi 4)", /* 12 */
296 1.4 cliff "vec 13 (ipi 5)", /* 13 */
297 1.4 cliff "vec 14 (ipi 6)", /* 14 */
298 1.4 cliff "vec 15 (fmn)", /* 15 */
299 1.4 cliff "vec 16", /* 16 */
300 1.3 matt "vec 17", /* 17 */
301 1.3 matt "vec 18", /* 18 */
302 1.3 matt "vec 19", /* 19 */
303 1.3 matt "vec 20", /* 20 */
304 1.3 matt "vec 21", /* 21 */
305 1.3 matt "vec 22", /* 22 */
306 1.3 matt "vec 23", /* 23 */
307 1.3 matt "vec 24", /* 24 */
308 1.3 matt "vec 25", /* 25 */
309 1.3 matt "vec 26", /* 26 */
310 1.3 matt "vec 27", /* 27 */
311 1.3 matt "vec 28", /* 28 */
312 1.3 matt "vec 29", /* 29 */
313 1.3 matt "vec 30", /* 30 */
314 1.3 matt "vec 31", /* 31 */
315 1.3 matt "vec 32", /* 32 */
316 1.3 matt "vec 33", /* 33 */
317 1.3 matt "vec 34", /* 34 */
318 1.3 matt "vec 35", /* 35 */
319 1.3 matt "vec 36", /* 36 */
320 1.3 matt "vec 37", /* 37 */
321 1.3 matt "vec 38", /* 38 */
322 1.3 matt "vec 39", /* 39 */
323 1.3 matt "vec 40", /* 40 */
324 1.3 matt "vec 41", /* 41 */
325 1.3 matt "vec 42", /* 42 */
326 1.3 matt "vec 43", /* 43 */
327 1.3 matt "vec 44", /* 44 */
328 1.3 matt "vec 45", /* 45 */
329 1.3 matt "vec 46", /* 46 */
330 1.3 matt "vec 47", /* 47 */
331 1.3 matt "vec 48", /* 48 */
332 1.3 matt "vec 49", /* 49 */
333 1.3 matt "vec 50", /* 50 */
334 1.3 matt "vec 51", /* 51 */
335 1.3 matt "vec 52", /* 52 */
336 1.3 matt "vec 53", /* 53 */
337 1.3 matt "vec 54", /* 54 */
338 1.3 matt "vec 55", /* 55 */
339 1.3 matt "vec 56", /* 56 */
340 1.3 matt "vec 57", /* 57 */
341 1.3 matt "vec 58", /* 58 */
342 1.3 matt "vec 59", /* 59 */
343 1.3 matt "vec 60", /* 60 */
344 1.3 matt "vec 61", /* 61 */
345 1.3 matt "vec 62", /* 63 */
346 1.3 matt "vec 63", /* 63 */
347 1.2 matt };
348 1.2 matt
349 1.2 matt /*
350 1.3 matt * mask of CPUs attached
 351 1.3 matt  * once all CPUs are attached, this var is read-only and therefore MP-safe
352 1.2 matt */
353 1.3 matt static uint32_t cpu_present_mask;
354 1.3 matt
355 1.3 matt kmutex_t rmixl_ipi_lock __cacheline_aligned;
356 1.3 matt /* covers RMIXL_PIC_IPIBASE */
357 1.3 matt kmutex_t rmixl_intr_lock __cacheline_aligned;
358 1.3 matt /* covers rest of PIC, and rmixl_intrhand[] */
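/* one handler slot per EIRR vector */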
359 1.3 matt rmixl_intrhand_t rmixl_intrhand[NINTRVECS];
360 1.2 matt
361 1.2 matt #ifdef DIAGNOSTIC
362 1.3 matt static int rmixl_pic_init_done;
363 1.2 matt #endif
364 1.2 matt
365 1.2 matt
366 1.3 matt static const char *rmixl_intr_string_xlr(int);
367 1.3 matt static const char *rmixl_intr_string_xls(int);
368 1.3 matt static uint32_t rmixl_irt_thread_mask(int);
369 1.3 matt static void rmixl_irt_init(int);
370 1.3 matt static void rmixl_irt_disestablish(int);
371 1.3 matt static void rmixl_irt_establish(int, int, int,
372 1.3 matt rmixl_intr_trigger_t, rmixl_intr_polarity_t);
373 1.3 matt
374 1.3 matt #ifdef MULTIPROCESSOR
375 1.3 matt static int rmixl_send_ipi(struct cpu_info *, int);
376 1.3 matt static int rmixl_ipi_intr(void *);
377 1.3 matt #endif
378 1.3 matt
379 1.3 matt #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
380 1.3 matt int rmixl_intrhand_print_subr(int);
381 1.3 matt int rmixl_intrhand_print(void);
382 1.3 matt int rmixl_irt_print(void);
383 1.3 matt void rmixl_ipl_eimr_map_print(void);
384 1.3 matt #endif
385 1.2 matt
386 1.2 matt
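/*
 * dclz: count leading zeros in a 64-bit value using the MIPS64
 * "dclz" instruction; evbmips_iointr() uses (63 - dclz(eirr)) to
 * find the highest-numbered pending vector.
 */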
387 1.3 matt static inline u_int
388 1.3 matt dclz(uint64_t val)
389 1.2 matt {
390 1.3 matt int nlz;
391 1.2 matt
392 1.3 matt asm volatile("dclz %0, %1;"
393 1.3 matt : "=r"(nlz) : "r"(val));
394 1.8.30.2 skrll
395 1.3 matt return nlz;
396 1.2 matt }
397 1.2 matt
398 1.2 matt void
399 1.2 matt evbmips_intr_init(void)
400 1.2 matt {
401 1.2 matt uint32_t r;
402 1.2 matt
403 1.3 matt KASSERT(cpu_rmixlr(mips_options.mips_cpu)
404 1.3 matt || cpu_rmixls(mips_options.mips_cpu));
405 1.3 matt
406 1.2 matt
407 1.2 matt #ifdef DIAGNOSTIC
408 1.3 matt if (rmixl_pic_init_done != 0)
409 1.3 matt panic("%s: rmixl_pic_init_done %d",
410 1.3 matt __func__, rmixl_pic_init_done);
411 1.2 matt #endif
412 1.2 matt
413 1.3 matt mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
414 1.3 matt mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
415 1.3 matt
416 1.3 matt mutex_enter(&rmixl_intr_lock);
417 1.2 matt
418 1.3 matt /*
419 1.3 matt * initialize (zero) all IRT Entries in the PIC
420 1.3 matt */
421 1.3 matt for (u_int i = 0; i < NIRTS; i++) {
422 1.3 matt rmixl_irt_init(i);
423 1.2 matt }
424 1.2 matt
425 1.2 matt /*
426 1.2 matt * disable watchdog NMI, timers
427 1.2 matt *
428 1.2 matt * XXX
429 1.2 matt * WATCHDOG_ENB is preserved because clearing it causes
 430 1.2 matt 	 * a hang on the XLS616 (but not on the XLS408)
431 1.2 matt */
432 1.2 matt r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
433 1.2 matt r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
434 1.2 matt RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);
435 1.2 matt
436 1.3 matt #ifdef DIAGNOSTIC
437 1.3 matt rmixl_pic_init_done = 1;
438 1.3 matt #endif
439 1.3 matt mutex_exit(&rmixl_intr_lock);
440 1.3 matt
441 1.3 matt }
442 1.3 matt
443 1.3 matt /*
 444 1.3 matt  * establish a vector for the mips3 count/compare clock interrupt;
 445 1.3 matt  * this ensures it is enabled in the EIRR,
 446 1.3 matt  * even though cpu_intr() handles the interrupt.
 447 1.3 matt  * note: the 'mpsafe' arg here is a placeholder only.
448 1.3 matt */
449 1.3 matt void
450 1.3 matt rmixl_intr_init_clk(void)
451 1.3 matt {
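	/* with the standard Cause register layout this yields EIRR vector 7 */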
452 1.3 matt const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;
453 1.3 matt
454 1.3 matt mutex_enter(&rmixl_intr_lock);
455 1.3 matt
456 1.3 matt void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
457 1.3 matt if (ih == NULL)
458 1.3 matt panic("%s: establish vec %d failed", __func__, vec);
459 1.3 matt
460 1.3 matt mutex_exit(&rmixl_intr_lock);
461 1.3 matt }
462 1.3 matt
463 1.3 matt #ifdef MULTIPROCESSOR
464 1.3 matt /*
465 1.3 matt * establish IPI interrupt and send function
466 1.3 matt */
467 1.3 matt void
468 1.3 matt rmixl_intr_init_ipi(void)
469 1.3 matt {
470 1.3 matt mutex_enter(&rmixl_intr_lock);
471 1.3 matt
472 1.3 matt for (u_int ipi = 0; ipi < NIPIS; ipi++) {
473 1.3 matt const u_int vec = RMIXL_INTRVEC_IPI + ipi;
474 1.3 matt void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
475 1.3 matt rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
476 1.3 matt if (ih == NULL)
477 1.3 matt panic("%s: establish ipi %d at vec %d failed",
478 1.3 matt __func__, ipi, vec);
479 1.3 matt }
480 1.5 matt
481 1.3 matt mips_locoresw.lsw_send_ipi = rmixl_send_ipi;
482 1.5 matt
483 1.3 matt mutex_exit(&rmixl_intr_lock);
484 1.3 matt }
485 1.3 matt #endif /* MULTIPROCESSOR */
486 1.3 matt
487 1.3 matt /*
 488 1.3 matt  * initialize per-cpu interrupt state in the softc;
 489 1.3 matt  * accumulate per-cpu bits in 'cpu_present_mask'
490 1.3 matt */
491 1.3 matt void
492 1.3 matt rmixl_intr_init_cpu(struct cpu_info *ci)
493 1.3 matt {
494 1.3 matt struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;
495 1.3 matt
496 1.3 matt KASSERT(sc != NULL);
497 1.2 matt
498 1.3 matt for (int vec=0; vec < NINTRVECS; vec++)
499 1.3 matt evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
500 1.3 matt EVCNT_TYPE_INTR, NULL,
501 1.3 matt device_xname(sc->sc_dev),
502 1.3 matt rmixl_intr_string(vec));
503 1.2 matt
504 1.3 matt KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
505 1.3 matt atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
506 1.2 matt }
507 1.2 matt
508 1.3 matt /*
509 1.3 matt * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
510 1.3 matt */
511 1.2 matt const char *
512 1.3 matt rmixl_intr_string(int vec)
513 1.3 matt {
514 1.3 matt int irt;
515 1.3 matt
516 1.3 matt if (vec < 0 || vec >= NINTRVECS)
517 1.3 matt panic("%s: vec index %d out of range, max %d",
518 1.3 matt __func__, vec, NINTRVECS - 1);
519 1.3 matt
520 1.3 matt if (! RMIXL_VECTOR_IS_IRT(vec))
521 1.3 matt return rmixl_vecnames_common[vec];
522 1.3 matt
523 1.3 matt irt = RMIXL_VECTOR_IRT(vec);
524 1.3 matt switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
525 1.3 matt case CIDFL_RMI_TYPE_XLR:
526 1.3 matt return rmixl_intr_string_xlr(irt);
527 1.3 matt case CIDFL_RMI_TYPE_XLS:
528 1.3 matt return rmixl_intr_string_xls(irt);
529 1.3 matt case CIDFL_RMI_TYPE_XLP:
530 1.3 matt panic("%s: RMI XLP not yet supported", __func__);
531 1.3 matt }
532 1.3 matt
533 1.3 matt return "undefined"; /* appease gcc */
534 1.3 matt }
535 1.3 matt
536 1.3 matt static const char *
537 1.3 matt rmixl_intr_string_xlr(int irt)
538 1.3 matt {
539 1.3 matt return rmixl_irtnames_xlrxxx[irt];
540 1.3 matt }
541 1.3 matt
542 1.3 matt static const char *
543 1.3 matt rmixl_intr_string_xls(int irt)
544 1.2 matt {
545 1.2 matt const char *name;
546 1.2 matt
547 1.3 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
548 1.2 matt case MIPS_XLS104:
549 1.2 matt case MIPS_XLS108:
550 1.3 matt case MIPS_XLS404LITE:
551 1.3 matt case MIPS_XLS408LITE:
552 1.3 matt name = rmixl_irtnames_xls1xx[irt];
553 1.3 matt break;
554 1.2 matt case MIPS_XLS204:
555 1.2 matt case MIPS_XLS208:
556 1.3 matt name = rmixl_irtnames_xls2xx[irt];
557 1.3 matt break;
558 1.3 matt case MIPS_XLS404:
559 1.3 matt case MIPS_XLS408:
560 1.3 matt case MIPS_XLS416:
561 1.3 matt case MIPS_XLS608:
562 1.3 matt case MIPS_XLS616:
563 1.3 matt name = rmixl_irtnames_xls4xx[irt];
564 1.3 matt break;
565 1.3 matt default:
566 1.3 matt name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
567 1.3 matt break;
568 1.3 matt }
569 1.3 matt
570 1.3 matt return name;
571 1.3 matt }
572 1.3 matt
573 1.3 matt /*
574 1.3 matt * rmixl_irt_thread_mask
575 1.3 matt *
 576 1.3 matt  * given a bitmask of cpus, return an IRT thread mask
577 1.3 matt */
578 1.3 matt static uint32_t
579 1.3 matt rmixl_irt_thread_mask(int cpumask)
580 1.3 matt {
581 1.3 matt uint32_t irtc0;
582 1.3 matt
583 1.3 matt #if defined(MULTIPROCESSOR)
584 1.3 matt #ifndef NOTYET
585 1.3 matt if (cpumask == -1)
586 1.3 matt return 1; /* XXX TMP FIXME */
587 1.3 matt #endif
588 1.3 matt
589 1.3 matt /*
590 1.3 matt * discount cpus not present
591 1.3 matt */
592 1.3 matt cpumask &= cpu_present_mask;
593 1.8.30.2 skrll
594 1.3 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
595 1.3 matt case MIPS_XLS104:
596 1.3 matt case MIPS_XLS204:
597 1.3 matt case MIPS_XLS404:
598 1.2 matt case MIPS_XLS404LITE:
599 1.3 matt irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
600 1.3 matt irtc0 &= (__BITS(5,4) | __BITS(1,0));
601 1.3 matt break;
602 1.3 matt case MIPS_XLS108:
603 1.3 matt case MIPS_XLS208:
604 1.3 matt case MIPS_XLS408:
605 1.2 matt case MIPS_XLS408LITE:
606 1.3 matt case MIPS_XLS608:
607 1.3 matt irtc0 = cpumask & __BITS(7,0);
608 1.2 matt break;
609 1.3 matt case MIPS_XLS416:
610 1.3 matt case MIPS_XLS616:
611 1.3 matt irtc0 = cpumask & __BITS(15,0);
612 1.2 matt break;
613 1.2 matt default:
614 1.3 matt panic("%s: unknown cpu ID %#x\n", __func__,
615 1.3 matt mips_options.mips_cpu_id);
616 1.2 matt }
617 1.3 matt #else
618 1.3 matt irtc0 = 1;
619 1.3 matt #endif /* MULTIPROCESSOR */
620 1.2 matt
621 1.3 matt return irtc0;
622 1.2 matt }
623 1.2 matt
624 1.2 matt /*
625 1.3 matt * rmixl_irt_init
626 1.3 matt * - initialize IRT Entry for given index
 627 1.2 matt  *	- zero both words, leaving the entry invalid and all threads masked
628 1.2 matt */
629 1.2 matt static void
630 1.3 matt rmixl_irt_init(int irt)
631 1.2 matt {
632 1.3 matt KASSERT(irt < NIRTS);
633 1.3 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0); /* high word */
634 1.3 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0); /* low word */
635 1.2 matt }
636 1.2 matt
637 1.2 matt /*
638 1.3 matt * rmixl_irt_disestablish
639 1.3 matt * - invalidate IRT Entry for given index
640 1.2 matt */
641 1.2 matt static void
642 1.3 matt rmixl_irt_disestablish(int irt)
643 1.2 matt {
644 1.3 matt KASSERT(mutex_owned(&rmixl_intr_lock));
645 1.3 matt DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
646 1.3 matt rmixl_irt_init(irt);
647 1.2 matt }
648 1.2 matt
649 1.2 matt /*
650 1.3 matt * rmixl_irt_establish
651 1.3 matt * - construct an IRT Entry for irt and write to PIC
652 1.2 matt */
653 1.2 matt static void
654 1.3 matt rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
655 1.3 matt rmixl_intr_polarity_t polarity)
656 1.2 matt {
657 1.2 matt uint32_t irtc1;
658 1.3 matt uint32_t irtc0;
659 1.3 matt
660 1.3 matt KASSERT(mutex_owned(&rmixl_intr_lock));
661 1.3 matt
662 1.3 matt if (irt >= NIRTS)
663 1.3 matt panic("%s: bad irt %d\n", __func__, irt);
664 1.3 matt
665 1.3 matt if (! RMIXL_VECTOR_IS_IRT(vec))
666 1.3 matt panic("%s: bad vec %d\n", __func__, vec);
667 1.3 matt
668 1.3 matt switch (trigger) {
669 1.3 matt case RMIXL_TRIG_EDGE:
670 1.3 matt case RMIXL_TRIG_LEVEL:
671 1.3 matt break;
672 1.3 matt default:
673 1.3 matt panic("%s: bad trigger %d\n", __func__, trigger);
674 1.3 matt }
675 1.3 matt
676 1.3 matt switch (polarity) {
677 1.3 matt case RMIXL_POLR_RISING:
678 1.3 matt case RMIXL_POLR_HIGH:
679 1.3 matt case RMIXL_POLR_FALLING:
680 1.3 matt case RMIXL_POLR_LOW:
681 1.3 matt break;
682 1.3 matt default:
683 1.3 matt panic("%s: bad polarity %d\n", __func__, polarity);
684 1.3 matt }
685 1.3 matt
686 1.3 matt /*
687 1.3 matt * XXX IRT entries are not shared
688 1.3 matt */
689 1.3 matt KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
690 1.3 matt KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);
691 1.3 matt
692 1.3 matt irtc0 = rmixl_irt_thread_mask(cpumask);
693 1.2 matt
694 1.2 matt irtc1 = RMIXL_PIC_IRTENTRYC1_VALID;
695 1.2 matt irtc1 |= RMIXL_PIC_IRTENTRYC1_GL; /* local */
696 1.3 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
697 1.2 matt
698 1.3 matt if (trigger == RMIXL_TRIG_LEVEL)
699 1.2 matt irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
700 1.3 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
701 1.2 matt
702 1.3 matt if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
703 1.2 matt irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
704 1.3 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
705 1.2 matt
706 1.3 matt irtc1 |= vec; /* vector in EIRR */
707 1.3 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
708 1.2 matt
709 1.2 matt /*
710 1.3 matt * write IRT Entry to PIC
711 1.2 matt */
712 1.3 matt DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n",
713 1.3 matt __func__, vec, vec, irt, irtc0, irtc1));
714 1.3 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0); /* low word */
715 1.3 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1); /* high word */
716 1.2 matt }
717 1.2 matt
718 1.2 matt void *
719 1.3 matt rmixl_vec_establish(int vec, int cpumask, int ipl,
720 1.3 matt int (*func)(void *), void *arg, bool mpsafe)
721 1.2 matt {
722 1.3 matt rmixl_intrhand_t *ih;
723 1.3 matt uint64_t eimr_bit;
724 1.2 matt int s;
725 1.2 matt
726 1.3 matt KASSERT(mutex_owned(&rmixl_intr_lock));
727 1.3 matt
728 1.5 matt DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n",
729 1.3 matt __func__, vec, cpumask, ipl, func, arg, mpsafe));
730 1.2 matt #ifdef DIAGNOSTIC
731 1.3 matt if (rmixl_pic_init_done == 0)
732 1.2 matt panic("%s: called before evbmips_intr_init", __func__);
733 1.2 matt #endif
734 1.2 matt
735 1.2 matt /*
736 1.3 matt * check args
737 1.2 matt */
738 1.3 matt if (vec < 0 || vec >= NINTRVECS)
739 1.3 matt panic("%s: vec %d out of range, max %d",
740 1.3 matt __func__, vec, NINTRVECS - 1);
741 1.2 matt if (ipl <= 0 || ipl >= _IPL_N)
742 1.2 matt panic("%s: ipl %d out of range, min %d, max %d",
743 1.2 matt __func__, ipl, 1, _IPL_N - 1);
744 1.2 matt
745 1.3 matt s = splhigh();
746 1.3 matt
747 1.3 matt ih = &rmixl_intrhand[vec];
748 1.3 matt if (ih->ih_func != NULL) {
749 1.3 matt #ifdef DIAGNOSTIC
750 1.3 matt printf("%s: intrhand[%d] busy\n", __func__, vec);
751 1.3 matt #endif
752 1.3 matt splx(s);
753 1.3 matt return NULL;
754 1.2 matt }
755 1.2 matt
756 1.3 matt ih->ih_arg = arg;
757 1.3 matt ih->ih_mpsafe = mpsafe;
758 1.3 matt ih->ih_vec = vec;
759 1.3 matt ih->ih_ipl = ipl;
760 1.3 matt ih->ih_cpumask = cpumask;
761 1.3 matt
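	/*
	 * ipl_eimr_map[i] holds the vectors enabled while running at
	 * IPL i: set this vector's bit at every IPL below its own, so
	 * it stays masked at and above ih_ipl.
	 */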
762 1.3 matt eimr_bit = (uint64_t)1 << vec;
763 1.3 matt for (int i=ih->ih_ipl; --i >= 0; ) {
764 1.3 matt KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
765 1.3 matt ipl_eimr_map[i] |= eimr_bit;
766 1.2 matt }
767 1.2 matt
768 1.3 matt ih->ih_func = func; /* do this last */
769 1.3 matt
770 1.3 matt splx(s);
771 1.3 matt
772 1.3 matt return ih;
773 1.3 matt }
774 1.2 matt
775 1.3 matt /*
776 1.3 matt * rmixl_intr_establish
777 1.3 matt * - used to establish an IRT-based interrupt only
778 1.3 matt */
779 1.3 matt void *
780 1.3 matt rmixl_intr_establish(int irt, int cpumask, int ipl,
781 1.3 matt rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
782 1.3 matt int (*func)(void *), void *arg, bool mpsafe)
783 1.3 matt {
784 1.3 matt rmixl_intrhand_t *ih;
785 1.3 matt int vec;
786 1.2 matt
787 1.2 matt #ifdef DIAGNOSTIC
788 1.3 matt if (rmixl_pic_init_done == 0)
789 1.3 matt panic("%s: called before rmixl_pic_init_done", __func__);
790 1.2 matt #endif
791 1.2 matt
792 1.2 matt /*
793 1.3 matt * check args
794 1.2 matt */
795 1.3 matt if (irt < 0 || irt >= NIRTS)
796 1.3 matt panic("%s: irt %d out of range, max %d",
797 1.3 matt __func__, irt, NIRTS - 1);
798 1.3 matt if (ipl <= 0 || ipl >= _IPL_N)
799 1.3 matt panic("%s: ipl %d out of range, min %d, max %d",
800 1.3 matt __func__, ipl, 1, _IPL_N - 1);
801 1.3 matt
802 1.3 matt vec = RMIXL_IRT_VECTOR(irt);
803 1.2 matt
804 1.3 matt DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));
805 1.2 matt
806 1.3 matt mutex_enter(&rmixl_intr_lock);
807 1.2 matt
808 1.2 matt /*
809 1.3 matt * establish vector
810 1.2 matt */
811 1.3 matt ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);
812 1.2 matt
813 1.2 matt /*
814 1.2 matt * establish IRT Entry
815 1.2 matt */
816 1.3 matt rmixl_irt_establish(irt, vec, cpumask, trigger, polarity);
817 1.2 matt
818 1.3 matt mutex_exit(&rmixl_intr_lock);
819 1.2 matt
820 1.2 matt return ih;
821 1.2 matt }
822 1.2 matt
823 1.2 matt void
824 1.3 matt rmixl_vec_disestablish(void *cookie)
825 1.3 matt {
826 1.3 matt rmixl_intrhand_t *ih = cookie;
827 1.3 matt uint64_t eimr_bit;
828 1.3 matt
829 1.3 matt KASSERT(mutex_owned(&rmixl_intr_lock));
830 1.3 matt KASSERT(ih->ih_vec < NINTRVECS);
831 1.3 matt KASSERT(ih == &rmixl_intrhand[ih->ih_vec]);
832 1.3 matt
833 1.3 matt ih->ih_func = NULL; /* do this first */
834 1.3 matt
835 1.3 matt eimr_bit = (uint64_t)1 << ih->ih_vec;
836 1.3 matt for (int i=ih->ih_ipl; --i >= 0; ) {
837 1.3 matt KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
838 1.3 matt ipl_eimr_map[i] ^= eimr_bit;
839 1.3 matt }
840 1.3 matt }
841 1.3 matt
842 1.3 matt void
843 1.2 matt rmixl_intr_disestablish(void *cookie)
844 1.2 matt {
845 1.3 matt rmixl_intrhand_t *ih = cookie;
846 1.5 matt const int vec = ih->ih_vec;
847 1.3 matt
848 1.3 matt KASSERT(vec < NINTRVECS);
849 1.3 matt KASSERT(ih == &rmixl_intrhand[vec]);
850 1.2 matt
851 1.3 matt mutex_enter(&rmixl_intr_lock);
852 1.2 matt
853 1.2 matt /*
854 1.3 matt * disable/invalidate the IRT Entry if needed
855 1.2 matt */
856 1.3 matt if (RMIXL_VECTOR_IS_IRT(vec))
 857 1.3 matt 		rmixl_irt_disestablish(RMIXL_VECTOR_IRT(vec));
858 1.2 matt
859 1.2 matt /*
 860 1.3 matt 	 * disassociate from the vector and release the handler
861 1.2 matt */
862 1.3 matt rmixl_vec_disestablish(cookie);
863 1.3 matt
864 1.3 matt mutex_exit(&rmixl_intr_lock);
865 1.3 matt }
866 1.3 matt
867 1.3 matt void
868 1.8.30.2 skrll evbmips_iointr(int ipl, uint32_t pending, struct clockframe *cf)
869 1.3 matt {
870 1.3 matt struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc;
871 1.2 matt
872 1.5 matt DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
873 1.8.30.2 skrll __func__, cpu_number(), ipl, cf->pc, pending));
874 1.3 matt
875 1.3 matt /*
 876 1.3 matt 	 * the 'pending' arg only indicates that there is something to do;
 877 1.3 matt 	 * the real pending status is obtained from the EIRR
878 1.2 matt */
879 1.3 matt KASSERT(pending == MIPS_INT_MASK_1);
880 1.2 matt
881 1.3 matt for (;;) {
882 1.3 matt rmixl_intrhand_t *ih;
883 1.3 matt uint64_t eirr;
884 1.3 matt uint64_t eimr;
885 1.3 matt uint64_t vecbit;
886 1.3 matt int vec;
887 1.3 matt
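		/* EIRR and EIMR are CP0 reg 9, selects 6 and 7 (RMI's extended interrupt registers) */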
888 1.3 matt asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
889 1.3 matt asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));
890 1.3 matt
891 1.3 matt #ifdef IOINTR_DEBUG
892 1.5 matt printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
893 1.3 matt __func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
894 1.3 matt #endif /* IOINTR_DEBUG */
895 1.3 matt
896 1.3 matt /*
897 1.3 matt * reduce eirr to
 898 1.3 matt 		 *  - ints established at exactly this ipl (enabled below it, masked at it)
899 1.3 matt * - exclude count/compare clock and soft ints
900 1.3 matt * they are handled elsewhere
901 1.3 matt */
902 1.3 matt eirr &= ipl_eimr_map[ipl-1];
903 1.3 matt eirr &= ~ipl_eimr_map[ipl];
904 1.3 matt eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
905 1.3 matt if (eirr == 0)
906 1.3 matt break;
907 1.3 matt
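		/* take the highest-numbered pending vector first */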
908 1.3 matt vec = 63 - dclz(eirr);
909 1.3 matt ih = &rmixl_intrhand[vec];
910 1.3 matt vecbit = 1ULL << vec;
911 1.3 matt KASSERT (ih->ih_ipl == ipl);
912 1.3 matt KASSERT ((vecbit & eimr) == 0);
913 1.3 matt KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);
914 1.3 matt
915 1.3 matt /*
 916 1.4 cliff 		 * ack the irq we are about to handle in the EIRR,
 917 1.4 cliff 		 * and in the PIC if needed
918 1.3 matt */
919 1.4 cliff rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
920 1.3 matt if (RMIXL_VECTOR_IS_IRT(vec))
921 1.3 matt RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
922 1.3 matt 1 << RMIXL_VECTOR_IRT(vec));
923 1.2 matt
924 1.3 matt if (ih->ih_func != NULL) {
925 1.3 matt #ifdef MULTIPROCESSOR
926 1.3 matt if (ih->ih_mpsafe) {
927 1.3 matt (void)(*ih->ih_func)(ih->ih_arg);
928 1.3 matt } else {
929 1.3 matt KASSERTMSG(ipl == IPL_VM,
930 1.8 jym "%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
931 1.3 matt __func__, sc->sc_vec_evcnts[vec].ev_name,
932 1.8 jym ipl);
933 1.3 matt KERNEL_LOCK(1, NULL);
934 1.3 matt (void)(*ih->ih_func)(ih->ih_arg);
935 1.3 matt KERNEL_UNLOCK_ONE(NULL);
936 1.3 matt }
937 1.3 matt #else
938 1.3 matt (void)(*ih->ih_func)(ih->ih_arg);
939 1.3 matt #endif /* MULTIPROCESSOR */
940 1.3 matt }
941 1.3 matt KASSERT(ipl == ih->ih_ipl);
942 1.3 matt KASSERTMSG(curcpu()->ci_cpl >= ipl,
943 1.8 jym "%s: after %s: cpl (%d) < ipl %d",
944 1.3 matt __func__, sc->sc_vec_evcnts[vec].ev_name,
 945 1.8 jym 		    curcpu()->ci_cpl, ipl);
946 1.3 matt sc->sc_vec_evcnts[vec].ev_count++;
947 1.3 matt }
948 1.2 matt }
949 1.2 matt
950 1.3 matt #ifdef MULTIPROCESSOR
951 1.3 matt static int
952 1.3 matt rmixl_send_ipi(struct cpu_info *ci, int tag)
953 1.2 matt {
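	/*
	 * the hardware cpuid encodes the thread in its low 2 bits and
	 * the core above them; the PIC IPI register takes them as
	 * separate fields.
	 */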
954 1.3 matt const cpuid_t cpuid = ci->ci_cpuid;
955 1.3 matt uint32_t core = (uint32_t)(cpuid >> 2);
956 1.3 matt uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
957 1.3 matt uint64_t req = 1 << tag;
958 1.2 matt uint32_t r;
959 1.3 matt
960 1.8.30.1 skrll if (!kcpuset_isset(cpus_running, cpu_index(ci)))
961 1.3 matt return -1;
962 1.3 matt
963 1.3 matt KASSERT((tag >= 0) && (tag < NIPIS));
964 1.3 matt
965 1.3 matt r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT)
966 1.3 matt | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT)
967 1.3 matt | (RMIXL_INTRVEC_IPI + tag);
968 1.3 matt
969 1.3 matt mutex_enter(&rmixl_ipi_lock);
970 1.3 matt atomic_or_64(&ci->ci_request_ipis, req);
971 1.3 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
972 1.3 matt mutex_exit(&rmixl_ipi_lock);
973 1.3 matt
974 1.3 matt return 0;
975 1.2 matt }
976 1.2 matt
977 1.3 matt static int
978 1.3 matt rmixl_ipi_intr(void *arg)
979 1.2 matt {
980 1.3 matt struct cpu_info * const ci = curcpu();
981 1.8.30.2 skrll const uint64_t ipi_mask = 1ULL << (uintptr_t)arg;
982 1.2 matt
983 1.3 matt KASSERT(ci->ci_cpl >= IPL_SCHED);
984 1.4 cliff KASSERT((uintptr_t)arg < NIPIS);
985 1.2 matt
986 1.4 cliff /* if the request is clear, it was previously processed */
987 1.4 cliff if ((ci->ci_request_ipis & ipi_mask) == 0)
988 1.4 cliff return 0;
989 1.2 matt
990 1.3 matt atomic_or_64(&ci->ci_active_ipis, ipi_mask);
991 1.3 matt atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
992 1.2 matt
993 1.3 matt ipi_process(ci, ipi_mask);
994 1.2 matt
995 1.3 matt atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
996 1.2 matt
997 1.3 matt return 1;
998 1.3 matt }
999 1.3 matt #endif /* MULTIPROCESSOR */
1000 1.2 matt
1001 1.3 matt #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
1002 1.3 matt int
1003 1.3 matt rmixl_intrhand_print_subr(int vec)
1004 1.3 matt {
1005 1.3 matt rmixl_intrhand_t *ih = &rmixl_intrhand[vec];
1006 1.3 matt printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n",
1007 1.3 matt vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl,
1008 1.3 matt ih->ih_cpumask);
1009 1.3 matt return 0;
1010 1.3 matt }
1011 1.3 matt int
1012 1.3 matt rmixl_intrhand_print(void)
1013 1.3 matt {
1014 1.3 matt for (int vec=0; vec < NINTRVECS ; vec++)
1015 1.3 matt rmixl_intrhand_print_subr(vec);
1016 1.3 matt return 0;
1017 1.3 matt }
1018 1.2 matt
1019 1.3 matt static inline void
1020 1.3 matt rmixl_irt_entry_print(u_int irt)
1021 1.3 matt {
1022 1.3 matt uint32_t c0, c1;
1023 1.2 matt
 1024 1.3 matt 	if (irt >= NIRTS)
1025 1.3 matt return;
1026 1.3 matt c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
1027 1.3 matt c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
1028 1.3 matt printf("irt[%d]: %#x, %#x\n", irt, c0, c1);
1029 1.2 matt }
1030 1.2 matt
1031 1.2 matt int
1032 1.3 matt rmixl_irt_print(void)
1033 1.2 matt {
1034 1.3 matt printf("%s:\n", __func__);
1035 1.3 matt for (int irt=0; irt < NIRTS ; irt++)
1036 1.3 matt rmixl_irt_entry_print(irt);
1037 1.3 matt return 0;
1038 1.3 matt }
1039 1.2 matt
1040 1.3 matt void
1041 1.3 matt rmixl_ipl_eimr_map_print(void)
1042 1.3 matt {
1043 1.3 matt printf("IPL_NONE=%d, mask %#"PRIx64"\n",
1044 1.3 matt IPL_NONE, ipl_eimr_map[IPL_NONE]);
1045 1.3 matt printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
1046 1.3 matt IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
1047 1.3 matt printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
1048 1.3 matt IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
1049 1.3 matt printf("IPL_VM=%d, mask %#"PRIx64"\n",
1050 1.3 matt IPL_VM, ipl_eimr_map[IPL_VM]);
1051 1.3 matt printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
1052 1.3 matt IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
1053 1.3 matt printf("IPL_DDB=%d, mask %#"PRIx64"\n",
1054 1.3 matt IPL_DDB, ipl_eimr_map[IPL_DDB]);
1055 1.3 matt printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
1056 1.3 matt IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
1057 1.2 matt }
1058 1.3 matt
1059 1.2 matt #endif
1060