/*	$NetBSD: rmixl_intr.c,v 1.1.2.31 2011/12/24 01:57:54 matt Exp $	*/
2
3 /*-
4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or
8 * without modification, are permitted provided that the following
9 * conditions are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following
14 * disclaimer in the documentation and/or other materials provided
15 * with the distribution.
16 * 3. The names of the authors may not be used to endorse or promote
17 * products derived from this software without specific prior
18 * written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
31 * OF SUCH DAMAGE.
32 */
33 /*-
34 * Copyright (c) 2001 The NetBSD Foundation, Inc.
35 * All rights reserved.
36 *
37 * This code is derived from software contributed to The NetBSD Foundation
38 * by Jason R. Thorpe.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 /*
63 * Platform-specific interrupt support for the RMI XLP, XLR, XLS
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.31 2011/12/24 01:57:54 matt Exp $");
68
69 #include "opt_ddb.h"
70 #include "opt_multiprocessor.h"
71 #define __INTR_PRIVATE
72
73 #include <sys/param.h>
74 #include <sys/queue.h>
75 #include <sys/malloc.h>
76 #include <sys/systm.h>
77 #include <sys/device.h>
78 #include <sys/kernel.h>
79 #include <sys/atomic.h>
80 #include <sys/mutex.h>
81 #include <sys/cpu.h>
82
83 #include <machine/bus.h>
84 #include <machine/intr.h>
85
86 #include <mips/cpu.h>
87 #include <mips/cpuset.h>
88 #include <mips/locore.h>
89
90 #include <mips/rmi/rmixlreg.h>
91 #include <mips/rmi/rmixlvar.h>
92
93 #include <mips/rmi/rmixl_cpuvar.h>
94 #include <mips/rmi/rmixl_intr.h>
95
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98
99 //#define IOINTR_DEBUG 1
100 #ifdef IOINTR_DEBUG
101 int iointr_debug = IOINTR_DEBUG;
102 # define DPRINTF(x) do { if (iointr_debug) printf x ; } while(0)
103 #else
104 # define DPRINTF(x)
105 #endif
106
107 #define RMIXL_PICREG_READ(off) \
108 RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
109 #define RMIXL_PICREG_WRITE(off, val) \
110 RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))
111
112 /* XXX this will need to deal with node */
113 #define RMIXLP_PICREG_READ(off) \
114 rmixlp_read_8(RMIXL_PIC_PCITAG, (off))
#define RMIXLP_PICREG_WRITE(off, val) \
	rmixlp_write_8(RMIXL_PIC_PCITAG, (off), (val))
117
118 /*
119 * do not clear these when acking EIRR
120 * (otherwise they get lost)
121 */
122 #define RMIXL_EIRR_PRESERVE_MASK \
123 ((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
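
/*
 * Illustrative note (bit values assumed from the standard NetBSD/mips
 * definitions): MIPS_INT_MASK_5 is Cause bit 15 and MIPS_SOFT_INT_MASK
 * is Cause bits 9:8, so the mask above covers EIRR bits 7, 1 and 0,
 * i.e. the count/compare clock vector and the two soft-interrupt
 * vectors that evbmips_iointr() deliberately skips.
 */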
124
/*
 * IRT assignments depend on the RMI chip family
 * (XLR vs. XLS1xx vs. XLS2xx vs. XLS4xx/6xx vs. XLP);
 * use the right display string table for the CPU that's running.
 */
130
131 /*
132 * rmixl_irtnames_xlrxxx
133 * - use for XLRxxx
134 */
135 static const char * const rmixl_irtnames_xlrxxx[RMIXLR_NIRTS] = {
136 "pic int 0 (watchdog)", /* 0 */
137 "pic int 1 (timer0)", /* 1 */
138 "pic int 2 (timer1)", /* 2 */
139 "pic int 3 (timer2)", /* 3 */
140 "pic int 4 (timer3)", /* 4 */
141 "pic int 5 (timer4)", /* 5 */
142 "pic int 6 (timer5)", /* 6 */
143 "pic int 7 (timer6)", /* 7 */
144 "pic int 8 (timer7)", /* 8 */
145 "pic int 9 (uart0)", /* 9 */
146 "pic int 10 (uart1)", /* 10 */
147 "pic int 11 (i2c0)", /* 11 */
148 "pic int 12 (i2c1)", /* 12 */
149 "pic int 13 (pcmcia)", /* 13 */
150 "pic int 14 (gpio)", /* 14 */
151 "pic int 15 (hyper)", /* 15 */
152 "pic int 16 (pcix)", /* 16 */
153 "pic int 17 (gmac0)", /* 17 */
154 "pic int 18 (gmac1)", /* 18 */
155 "pic int 19 (gmac2)", /* 19 */
156 "pic int 20 (gmac3)", /* 20 */
157 "pic int 21 (xgs0)", /* 21 */
158 "pic int 22 (xgs1)", /* 22 */
159 "pic int 23 (irq23)", /* 23 */
160 "pic int 24 (hyper_fatal)", /* 24 */
161 "pic int 25 (bridge_aerr)", /* 25 */
162 "pic int 26 (bridge_berr)", /* 26 */
163 "pic int 27 (bridge_tb)", /* 27 */
164 "pic int 28 (bridge_nmi)", /* 28 */
165 "pic int 29 (bridge_sram_derr)",/* 29 */
166 "pic int 30 (gpio_fatal)", /* 30 */
167 "pic int 31 (reserved)", /* 31 */
168 };
169
170 /*
171 * rmixl_irtnames_xls2xx
172 * - use for XLS2xx
173 */
174 static const char * const rmixl_irtnames_xls2xx[RMIXLS_NIRTS] = {
175 "pic int 0 (watchdog)", /* 0 */
176 "pic int 1 (timer0)", /* 1 */
177 "pic int 2 (timer1)", /* 2 */
178 "pic int 3 (timer2)", /* 3 */
179 "pic int 4 (timer3)", /* 4 */
180 "pic int 5 (timer4)", /* 5 */
181 "pic int 6 (timer5)", /* 6 */
182 "pic int 7 (timer6)", /* 7 */
183 "pic int 8 (timer7)", /* 8 */
184 "pic int 9 (uart0)", /* 9 */
185 "pic int 10 (uart1)", /* 10 */
186 "pic int 11 (i2c0)", /* 11 */
187 "pic int 12 (i2c1)", /* 12 */
188 "pic int 13 (pcmcia)", /* 13 */
189 "pic int 14 (gpio_a)", /* 14 */
190 "pic int 15 (irq15)", /* 15 */
191 "pic int 16 (bridge_tb)", /* 16 */
192 "pic int 17 (gmac0)", /* 17 */
193 "pic int 18 (gmac1)", /* 18 */
194 "pic int 19 (gmac2)", /* 19 */
195 "pic int 20 (gmac3)", /* 20 */
196 "pic int 21 (irq21)", /* 21 */
197 "pic int 22 (irq22)", /* 22 */
198 "pic int 23 (pcie_link2)", /* 23 */
199 "pic int 24 (pcie_link3)", /* 24 */
200 "pic int 25 (bridge_err)", /* 25 */
201 "pic int 26 (pcie_link0)", /* 26 */
202 "pic int 27 (pcie_link1)", /* 27 */
203 "pic int 28 (irq28)", /* 28 */
204 "pic int 29 (pcie_err)", /* 29 */
205 "pic int 30 (gpio_b)", /* 30 */
206 "pic int 31 (usb)", /* 31 */
207 };
208
209 /*
210 * rmixl_irtnames_xls1xx
211 * - use for XLS1xx, XLS4xx-Lite
212 */
213 static const char * const rmixl_irtnames_xls1xx[RMIXLS_NIRTS] = {
214 "pic int 0 (watchdog)", /* 0 */
215 "pic int 1 (timer0)", /* 1 */
216 "pic int 2 (timer1)", /* 2 */
217 "pic int 3 (timer2)", /* 3 */
218 "pic int 4 (timer3)", /* 4 */
219 "pic int 5 (timer4)", /* 5 */
220 "pic int 6 (timer5)", /* 6 */
221 "pic int 7 (timer6)", /* 7 */
222 "pic int 8 (timer7)", /* 8 */
223 "pic int 9 (uart0)", /* 9 */
224 "pic int 10 (uart1)", /* 10 */
225 "pic int 11 (i2c0)", /* 11 */
226 "pic int 12 (i2c1)", /* 12 */
227 "pic int 13 (pcmcia)", /* 13 */
228 "pic int 14 (gpio_a)", /* 14 */
229 "pic int 15 (irq15)", /* 15 */
230 "pic int 16 (bridge_tb)", /* 16 */
231 "pic int 17 (gmac0)", /* 17 */
232 "pic int 18 (gmac1)", /* 18 */
233 "pic int 19 (gmac2)", /* 19 */
234 "pic int 20 (gmac3)", /* 20 */
235 "pic int 21 (irq21)", /* 21 */
236 "pic int 22 (irq22)", /* 22 */
237 "pic int 23 (irq23)", /* 23 */
238 "pic int 24 (irq24)", /* 24 */
239 "pic int 25 (bridge_err)", /* 25 */
240 "pic int 26 (pcie_link0)", /* 26 */
241 "pic int 27 (pcie_link1)", /* 27 */
242 "pic int 28 (irq28)", /* 28 */
243 "pic int 29 (pcie_err)", /* 29 */
244 "pic int 30 (gpio_b)", /* 30 */
245 "pic int 31 (usb)", /* 31 */
246 };
247
248 /*
249 * rmixl_irtnames_xls4xx:
250 * - use for XLS4xx, XLS6xx
251 */
252 static const char * const rmixl_irtnames_xls4xx[RMIXLS_NIRTS] = {
253 "pic int 0 (watchdog)", /* 0 */
254 "pic int 1 (timer0)", /* 1 */
255 "pic int 2 (timer1)", /* 2 */
256 "pic int 3 (timer2)", /* 3 */
257 "pic int 4 (timer3)", /* 4 */
258 "pic int 5 (timer4)", /* 5 */
259 "pic int 6 (timer5)", /* 6 */
260 "pic int 7 (timer6)", /* 7 */
261 "pic int 8 (timer7)", /* 8 */
262 "pic int 9 (uart0)", /* 9 */
263 "pic int 10 (uart1)", /* 10 */
264 "pic int 11 (i2c0)", /* 11 */
265 "pic int 12 (i2c1)", /* 12 */
266 "pic int 13 (pcmcia)", /* 13 */
267 "pic int 14 (gpio_a)", /* 14 */
268 "pic int 15 (irq15)", /* 15 */
269 "pic int 16 (bridge_tb)", /* 16 */
270 "pic int 17 (gmac0)", /* 17 */
271 "pic int 18 (gmac1)", /* 18 */
272 "pic int 19 (gmac2)", /* 19 */
273 "pic int 20 (gmac3)", /* 20 */
274 "pic int 21 (irq21)", /* 21 */
275 "pic int 22 (irq22)", /* 22 */
276 "pic int 23 (irq23)", /* 23 */
277 "pic int 24 (irq24)", /* 24 */
278 "pic int 25 (bridge_err)", /* 25 */
279 "pic int 26 (pcie_link0)", /* 26 */
280 "pic int 27 (pcie_link1)", /* 27 */
281 "pic int 28 (pcie_link2)", /* 28 */
282 "pic int 29 (pcie_link3)", /* 29 */
283 "pic int 30 (gpio_b)", /* 30 */
284 "pic int 31 (usb)", /* 31 */
285 };
286
287 /*
 * rmixl_irtnames_xlpxxx:
289 * - use for XLP
290 */
291 static const char * const rmixl_irtnames_xlpxxx[RMIXLP_NIRTS] = {
292 [ 0] = "pic int 0 (watchdog0)",
293 [ 1] = "pic int 1 (watchdog1)",
294 [ 2] = "pic int 2 (watchdogNMI0)",
295 [ 3] = "pic int 3 (watchdogNMI1)",
296 [ 4] = "pic int 4 (timer0)",
297 [ 5] = "pic int 5 (timer1)",
298 [ 6] = "pic int 6 (timer2)",
299 [ 7] = "pic int 7 (timer3)",
300 [ 8] = "pic int 8 (timer4)",
301 [ 9] = "pic int 9 (timer5)",
302 [ 10] = "pic int 10 (timer6)",
303 [ 11] = "pic int 11 (timer7)",
304 [ 12] = "pic int 12 (fmn0)",
305 [ 13] = "pic int 13 (fmn1)",
306 [ 14] = "pic int 14 (fmn2)",
307 [ 15] = "pic int 15 (fmn3)",
308 [ 16] = "pic int 16 (fmn4)",
309 [ 17] = "pic int 17 (fmn5)",
310 [ 18] = "pic int 18 (fmn6)",
311 [ 19] = "pic int 19 (fmn7)",
312 [ 20] = "pic int 20 (fmn8)",
313 [ 21] = "pic int 21 (fmn9)",
314 [ 22] = "pic int 22 (fmn10)",
315 [ 23] = "pic int 23 (fmn11)",
316 [ 24] = "pic int 24 (fmn12)",
317 [ 25] = "pic int 25 (fmn13)",
318 [ 26] = "pic int 26 (fmn14)",
319 [ 27] = "pic int 27 (fmn15)",
320 [ 28] = "pic int 28 (fmn16)",
321 [ 29] = "pic int 29 (fmn17)",
322 [ 30] = "pic int 30 (fmn18)",
323 [ 31] = "pic int 31 (fmn19)",
	[ 32] = "pic int 32 (fmn20)",
	[ 33] = "pic int 33 (fmn21)",
	[ 34] = "pic int 34 (fmn22)",
	[ 35] = "pic int 35 (fmn23)",
	[ 36] = "pic int 36 (fmn24)",
	[ 37] = "pic int 37 (fmn25)",
	[ 38] = "pic int 38 (fmn26)",
	[ 39] = "pic int 39 (fmn27)",
	[ 40] = "pic int 40 (fmn28)",
	[ 41] = "pic int 41 (fmn29)",
334 [ 42] = "pic int 42 (fmn30)",
335 [ 43] = "pic int 43 (fmn31)",
336 [ 44] = "pic int 44 (message0)",
337 [ 45] = "pic int 45 (message1)",
338 [ 46] = "pic int 46 (pcie_msix0)",
339 [ 47] = "pic int 47 (pcie_msix1)",
340 [ 48] = "pic int 48 (pcie_msix2)",
341 [ 49] = "pic int 49 (pcie_msix3)",
342 [ 50] = "pic int 50 (pcie_msix4)",
343 [ 51] = "pic int 51 (pcie_msix5)",
344 [ 52] = "pic int 52 (pcie_msix6)",
345 [ 53] = "pic int 53 (pcie_msix7)",
346 [ 54] = "pic int 54 (pcie_msix8)",
347 [ 55] = "pic int 55 (pcie_msix9)",
348 [ 56] = "pic int 56 (pcie_msix10)",
349 [ 57] = "pic int 57 (pcie_msix11)",
350 [ 58] = "pic int 58 (pcie_msix12)",
351 [ 59] = "pic int 59 (pcie_msix13)",
352 [ 60] = "pic int 60 (pcie_msix14)",
353 [ 61] = "pic int 61 (pcie_msix15)",
354 [ 62] = "pic int 62 (pcie_msix16)",
355 [ 63] = "pic int 63 (pcie_msix17)",
356 [ 64] = "pic int 64 (pcie_msix18)",
357 [ 65] = "pic int 65 (pcie_msix19)",
358 [ 66] = "pic int 66 (pcie_msix20)",
359 [ 67] = "pic int 67 (pcie_msix21)",
360 [ 68] = "pic int 68 (pcie_msix22)",
361 [ 69] = "pic int 69 (pcie_msix23)",
362 [ 70] = "pic int 70 (pcie_msix24)",
363 [ 71] = "pic int 71 (pcie_msix25)",
364 [ 72] = "pic int 72 (pcie_msix26)",
365 [ 73] = "pic int 73 (pcie_msix27)",
366 [ 74] = "pic int 74 (pcie_msix28)",
367 [ 75] = "pic int 75 (pcie_msix29)",
368 [ 76] = "pic int 76 (pcie_msix30)",
369 [ 77] = "pic int 77 (pcie_msix31)",
370 [ 78] = "pic int 78 (pcie_link0)",
371 [ 79] = "pic int 79 (pcie_link1)",
372 [ 80] = "pic int 80 (pcie_link2)",
373 [ 81] = "pic int 81 (pcie_link3)",
374 [ 82] = "pic int 82 (na0)",
375 [ 83] = "pic int 83 (na1)",
376 [ 84] = "pic int 84 (na2)",
377 [ 85] = "pic int 85 (na3)",
378 [ 86] = "pic int 86 (na4)",
379 [ 87] = "pic int 87 (na5)",
380 [ 88] = "pic int 88 (na6)",
381 [ 89] = "pic int 89 (na7)",
382 [ 90] = "pic int 90 (na8)",
383 [ 91] = "pic int 91 (na9)",
384 [ 92] = "pic int 92 (na10)",
385 [ 93] = "pic int 93 (na11)",
386 [ 94] = "pic int 94 (na12)",
387 [ 95] = "pic int 95 (na13)",
388 [ 96] = "pic int 96 (na14)",
389 [ 97] = "pic int 97 (na15)",
390 [ 98] = "pic int 98 (na16)",
391 [ 99] = "pic int 99 (na17)",
392 [100] = "pic int 100 (na18)",
393 [101] = "pic int 101 (na19)",
394 [102] = "pic int 102 (na20)",
395 [103] = "pic int 103 (na21)",
396 [104] = "pic int 104 (na22)",
397 [105] = "pic int 105 (na23)",
398 [106] = "pic int 106 (na24)",
399 [107] = "pic int 107 (na25)",
400 [108] = "pic int 108 (na26)",
401 [109] = "pic int 109 (na27)",
	[110] = "pic int 110 (na28)",
403 [111] = "pic int 111 (na29)",
404 [112] = "pic int 112 (na30)",
405 [113] = "pic int 113 (na31)",
406 [114] = "pic int 114 (poe)",
407 [115] = "pic int 115 (ehci0)",
408 [116] = "pic int 116 (ohci0)",
409 [117] = "pic int 117 (ohci1)",
410 [118] = "pic int 118 (ehci1)",
411 [119] = "pic int 119 (ohci2)",
412 [120] = "pic int 120 (ohci3)",
413 [121] = "pic int 121 (data/raid)",
414 [122] = "pic int 122 (security)",
415 [123] = "pic int 123 (rsa/ecc)",
416 [124] = "pic int 124 (compression0)",
417 [125] = "pic int 125 (compression1)",
418 [126] = "pic int 126 (compression2)",
419 [127] = "pic int 127 (compression3)",
420 [128] = "pic int 128 (irq128)",
421 [129] = "pic int 129 (icici0)",
422 [130] = "pic int 130 (icici1)",
423 [131] = "pic int 131 (icici2)",
424 [132] = "pic int 132 (kbp)",
425 [133] = "pic int 133 (uart0)",
426 [134] = "pic int 134 (uart1)",
427 [135] = "pic int 135 (i2c0)",
428 [136] = "pic int 136 (i2c1)",
429 [137] = "pic int 137 (sysmgt0)",
430 [138] = "pic int 138 (sysmgt1)",
431 [139] = "pic int 139 (jtag)",
432 [140] = "pic int 140 (pic)",
433 [141] = "pic int 141 (irq141)",
434 [142] = "pic int 142 (irq142)",
435 [143] = "pic int 143 (irq143)",
436 [144] = "pic int 144 (irq144)",
437 [145] = "pic int 145 (irq145)",
438 [146] = "pic int 146 (gpio0)",
439 [147] = "pic int 147 (gpio1)",
440 [148] = "pic int 148 (gpio2)",
441 [149] = "pic int 149 (gpio3)",
442 [150] = "pic int 150 (norflash)",
443 [151] = "pic int 151 (nandflash)",
444 [152] = "pic int 152 (spi)",
445 [153] = "pic int 153 (mmc/sd)",
446 [154] = "pic int 154 (mem-io-bridge)",
447 [155] = "pic int 155 (l3)",
448 [156] = "pic int 156 (gcu)",
449 [157] = "pic int 157 (dram3_0)",
450 [158] = "pic int 158 (dram3_1)",
451 [159] = "pic int 159 (tracebuf)",
452 };
453 /*
454 * rmixl_vecnames_common:
455 * - use for unknown cpu implementation
456 * - covers all vectors, not just IRT intrs
457 */
458 static const char * const rmixl_vecnames_common[NINTRVECS] = {
459 "vec 0 (sw0)", /* 0 */
460 "vec 1 (sw1)", /* 1 */
461 "vec 2 (hw2)", /* 2 */
462 "vec 3 (hw3)", /* 3 */
463 "vec 4 (hw4)", /* 4 */
464 "vec 5 (hw5)", /* 5 */
465 "vec 6 (hw6)", /* 6 */
466 "vec 7 (hw7)", /* 7 */
467 "vec 8", /* 8 */
468 "vec 9", /* 9 */
469 "vec 10", /* 10 */
470 "vec 11", /* 11 */
471 "vec 12", /* 12 */
472 "vec 13", /* 13 */
473 "vec 14", /* 14 */
474 "vec 15", /* 15 */
475 "vec 16", /* 16 */
476 "vec 17", /* 17 */
477 "vec 18", /* 18 */
478 "vec 19", /* 19 */
479 "vec 20", /* 20 */
480 "vec 21", /* 21 */
481 "vec 22", /* 22 */
482 "vec 23", /* 23 */
483 "vec 24", /* 24 */
484 "vec 25", /* 25 */
485 "vec 26", /* 26 */
486 "vec 27", /* 27 */
487 "vec 28", /* 28 */
488 "vec 29", /* 29 */
489 "vec 30", /* 30 */
490 "vec 31", /* 31 */
491 "vec 32", /* 32 */
492 "vec 33", /* 33 */
493 "vec 34", /* 34 */
494 "vec 35", /* 35 */
495 "vec 36", /* 36 */
496 "vec 37", /* 37 */
497 "vec 38", /* 38 */
498 "vec 39", /* 39 */
499 "vec 40", /* 40 */
500 "vec 41", /* 41 */
501 "vec 42", /* 42 */
502 "vec 43", /* 43 */
503 "vec 44", /* 44 */
504 "vec 45", /* 45 */
505 "vec 46", /* 46 */
506 "vec 47", /* 47 */
507 "vec 48", /* 48 */
508 "vec 49", /* 49 */
509 "vec 50", /* 50 */
510 "vec 51", /* 51 */
511 "vec 52", /* 52 */
512 "vec 53", /* 53 */
513 "vec 54", /* 54 */
514 "vec 55", /* 55 */
515 "vec 56", /* 56 */
516 "vec 57", /* 57 */
517 "vec 58", /* 58 */
518 "vec 59", /* 59 */
519 "vec 60", /* 60 */
520 "vec 61", /* 61 */
521 "vec 62", /* 63 */
522 "vec 63", /* 63 */
523 };
524
525 /*
526 * mask of CPUs attached
527 * once they are attached, this var is read-only so mp safe
528 */
529 static __cpuset_t cpu_present_mask;
530
531 kmutex_t *rmixl_ipi_lock; /* covers RMIXL_PIC_IPIBASE */
532 kmutex_t *rmixl_intr_lock; /* covers rest of PIC, and rmixl_intrhand[] */
533 rmixl_intrvecq_t rmixl_intrvec_lruq[_IPL_N] = {
534 [IPL_NONE] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_NONE]),
535 [IPL_SOFTCLOCK] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_SOFTCLOCK]),
536 [IPL_SOFTNET] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_SOFTNET]),
537 [IPL_VM] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_VM]),
538 [IPL_SCHED] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_SCHED]),
539 [IPL_DDB] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_DDB]),
540 [IPL_HIGH] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_HIGH]),
541 };
542 rmixl_intrvec_t rmixl_intrvec[NINTRVECS];
543 rmixl_intrhand_t rmixl_irt_intrhands[MAX(MAX(RMIXLR_NIRTS,RMIXLS_NIRTS), RMIXLP_NIRTS)];
544 static u_int rmixl_nirts;
545 const char * const *rmixl_irtnames;
546
547 #ifdef DIAGNOSTIC
548 static int rmixl_pic_init_done;
549 #endif
550
551
552 static uint32_t rmixl_irt_thread_mask(__cpuset_t);
553 static void rmixl_irt_init(size_t);
554 static void rmixl_irt_disestablish(size_t);
555 static void rmixl_irt_establish(size_t, size_t,
556 rmixl_intr_trigger_t, rmixl_intr_polarity_t);
557 static size_t rmixl_intr_get_vec(int);
558
559 #ifdef MULTIPROCESSOR
560 static int rmixl_send_ipi(struct cpu_info *, int);
561 static int rmixl_ipi_intr(void *);
562 #endif
563
564 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
565 int rmixl_intrvec_print_subr(size_t);
566 int rmixl_intrhand_print(void);
567 int rmixl_irt_print(void);
568 void rmixl_ipl_eimr_map_print(void);
569 #endif
570
571
572 static inline u_int
573 dclz(uint64_t val)
574 {
575 u_int nlz;
576
577 __asm volatile("dclz %0, %1" : "=r"(nlz) : "r"(val));
578
579 return nlz;
580 }
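
/*
 * Usage sketch (illustrative only): dclz() counts the leading zeroes
 * of a 64-bit value, so the highest pending vector in an EIRR image
 * can be located with
 *
 *	vec = 63 - dclz(eirr);
 *
 * e.g. dclz(0x10) == 59, giving vec == 4.  evbmips_iointr() below uses
 * exactly this to pick the next vector to service.
 */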
581
582 void
583 evbmips_intr_init(void)
584 {
585 const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
586 const bool is_xlr_p = cpu_rmixlr(mips_options.mips_cpu);
587 const bool is_xls_p = cpu_rmixls(mips_options.mips_cpu);
588
589 KASSERT(is_xlp_p || is_xlr_p || is_xls_p);
590
591 /*
	 * The number of IRT entries is different for XLP vs. XLR/XLS.
593 */
594 if (is_xlp_p) {
595 rmixl_irtnames = rmixl_irtnames_xlpxxx;
596 rmixl_nirts = __arraycount(rmixl_irtnames_xlpxxx);
597 } else if (is_xlr_p) {
598 rmixl_irtnames = rmixl_irtnames_xlrxxx;
599 rmixl_nirts = __arraycount(rmixl_irtnames_xlrxxx);
600 } else if (is_xls_p) {
601 switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
602 case MIPS_XLS104:
603 case MIPS_XLS108:
604 case MIPS_XLS404LITE:
605 case MIPS_XLS408LITE:
606 rmixl_irtnames = rmixl_irtnames_xls1xx;
607 rmixl_nirts = __arraycount(rmixl_irtnames_xls1xx);
608 break;
609 case MIPS_XLS204:
610 case MIPS_XLS208:
611 rmixl_irtnames = rmixl_irtnames_xls2xx;
612 rmixl_nirts = __arraycount(rmixl_irtnames_xls2xx);
613 break;
614 case MIPS_XLS404:
615 case MIPS_XLS408:
616 case MIPS_XLS416:
617 case MIPS_XLS608:
618 case MIPS_XLS616:
619 rmixl_irtnames = rmixl_irtnames_xls4xx;
620 rmixl_nirts = __arraycount(rmixl_irtnames_xls4xx);
621 break;
622 default:
623 rmixl_irtnames = rmixl_vecnames_common;
624 rmixl_nirts = __arraycount(rmixl_vecnames_common);
625 break;
626 }
627 }
628
629 #ifdef DIAGNOSTIC
630 if (rmixl_pic_init_done != 0)
631 panic("%s: rmixl_pic_init_done %d",
632 __func__, rmixl_pic_init_done);
633 #endif
634
635 rmixl_ipi_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_HIGH);
636 rmixl_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_HIGH);
637
638 mutex_enter(rmixl_intr_lock);
639
	/*
	 * Insert all vectors from RMIXL_INTRVEC_IPI up on the IPL_NONE
	 * (free) lru queue; the IPI vectors are claimed from it later
	 * (under MULTIPROCESSOR) by rmixl_intr_init_ipi().
	 */
643 for (size_t i = RMIXL_INTRVEC_IPI; i < NINTRVECS; i++) {
644 TAILQ_INSERT_TAIL(&rmixl_intrvec_lruq[IPL_NONE],
645 &rmixl_intrvec[i], iv_lruq_link);
646 }
647
648 /*
649 * initialize (zero) all IRT Entries in the PIC
650 */
651 for (size_t i = 0; i < rmixl_nirts; i++) {
652 rmixl_irt_init(i);
653 }
654
655 /*
656 * disable watchdog NMI, timers
657 */
658 if (is_xlp_p) {
659 /*
660 * Reset the interrupt thread enables to disable all CPUs.
661 */
662 for (size_t i = 0; i < 8; i++) {
663 RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_THREAD_ENABLE01(i), 0);
664 RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_THREAD_ENABLE23(i), 0);
665 }
666
667 /*
668 * Enable interrupts for node 0 core 0 thread 0.
669 */
670 RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_THREAD_ENABLE01(0), 1);
671
672 /*
673 * Disable watchdogs and system timers.
674 */
675 uint64_t r = RMIXLP_PICREG_READ(RMIXLP_PIC_CTRL);
676 r &= ~(RMIXLP_PIC_CTRL_WTE|RMIXLP_PIC_CTRL_STE);
677 RMIXLP_PICREG_WRITE(RMIXLP_PIC_CTRL, r);
678 } else {
679 /*
680 * XXX
681 * WATCHDOG_ENB is preserved because clearing it causes
682 * hang on the XLS616 (but not on the XLS408)
683 */
684 uint32_t r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
685 r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
686 RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);
687 }
688
689 #ifdef DIAGNOSTIC
690 rmixl_pic_init_done = 1;
691 #endif
692 mutex_exit(rmixl_intr_lock);
693 }
694
/*
 * establish the vector for the mips3 count/compare clock interrupt;
 * this ensures the vector is enabled in the EIRR even though
 * cpu_intr() handles the interrupt itself.
 * note the 'mpsafe' arg here is a placeholder only
 */
701 void
702 rmixl_intr_init_clk(void)
703 {
704 const size_t vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;
705
706 mutex_enter(rmixl_intr_lock);
707
708 void *ih = rmixl_vec_establish(vec, NULL, IPL_SCHED, NULL, NULL, false);
709 if (ih == NULL)
710 panic("%s: establish vec %zu failed", __func__, vec);
711
712 mutex_exit(rmixl_intr_lock);
713 }
714
715 #ifdef MULTIPROCESSOR
716 /*
717 * establish IPI interrupt and send function
718 */
719 void
720 rmixl_intr_init_ipi(void)
721 {
722 mutex_enter(rmixl_intr_lock);
723
724 for (size_t ipi = 0; ipi < NIPIS; ipi++) {
725 const size_t vec = RMIXL_INTRVEC_IPI + ipi;
726 void * const ih = rmixl_vec_establish(vec, NULL, IPL_SCHED,
727 rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
728 if (ih == NULL)
729 panic("%s: establish ipi %zu at vec %zu failed",
730 __func__, ipi, vec);
731 }
732
733 mips_locoresw.lsw_send_ipi = rmixl_send_ipi;
734
735 mutex_exit(rmixl_intr_lock);
736 }
737 #endif /* MULTIPROCESSOR */
738
739 /*
740 * initialize per-cpu interrupt stuff in softc
741 * accumulate per-cpu bits in 'cpu_present_mask'
742 */
743 void
744 rmixl_intr_init_cpu(struct cpu_info *ci)
745 {
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;

	KASSERT(sc != NULL);
	KASSERT(NINTRVECS <= __arraycount(sc->sc_vec_evcnts));
	KASSERT(rmixl_nirts <= __arraycount(sc->sc_irt_evcnts));

	const char * const xname = device_xname(sc->sc_dev);
752
753 for (size_t vec = 0; vec < NINTRVECS; vec++) {
754 evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
755 EVCNT_TYPE_INTR, NULL, xname, rmixl_intr_string(vec));
756 }
757
758 for (size_t irt = 0; irt < rmixl_nirts; irt++) {
759 evcnt_attach_dynamic(&sc->sc_irt_evcnts[irt],
760 EVCNT_TYPE_INTR, NULL, xname, rmixl_irtnames[irt]);
761 }
762
763 KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
764 atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
765 }
766
767 const char *
768 rmixl_irt_string(size_t irt)
769 {
770 KASSERT(irt < rmixl_nirts);
771
772 return rmixl_irtnames[irt];
773 }
774
775 /*
776 * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
777 */
778 const char *
779 rmixl_intr_string(size_t vec)
780 {
781
782 if (vec >= NINTRVECS)
783 panic("%s: vec index %zu out of range, max %d",
784 __func__, vec, NINTRVECS - 1);
785
786 return rmixl_vecnames_common[vec];
787 }
788
789 size_t
790 rmixl_intr_get_vec(int ipl)
791 {
792 KASSERT(mutex_owned(rmixl_intr_lock));
793 KASSERT(IPL_VM <= ipl && ipl <= IPL_HIGH);
794
795 /*
	 * In reality higher ipls should have higher vec numbers,
797 * but for now don't worry about it.
798 */
799 struct rmixl_intrvecq * freeq = &rmixl_intrvec_lruq[IPL_NONE];
800 struct rmixl_intrvecq * iplq = &rmixl_intrvec_lruq[ipl];
801 rmixl_intrvec_t *iv;
802
803 /*
804 * If there's a free vector, grab it otherwise choose the least
805 * recently assigned vector sharing this IPL.
806 */
807 if ((iv = TAILQ_FIRST(freeq)) == NULL) {
808 iv = TAILQ_FIRST(iplq);
809 KASSERT(iv != NULL);
810 }
811
812 return iv - rmixl_intrvec;
813 }
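
/*
 * When no free vector remains, the vector chosen above is one already
 * in use at the same IPL, so several IRT sources may end up sharing a
 * single EIRR vector; evbmips_iointr() copes with that by walking the
 * iv_hands list of the vector it services.
 */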
814
815 /*
816 * rmixl_irt_thread_mask
817 *
 * given a bitmask of cpus, return an IRT thread mask
819 */
820 static uint32_t
821 rmixl_irt_thread_mask(__cpuset_t cpumask)
822 {
823 uint32_t irtc0;
824
825 #if defined(MULTIPROCESSOR)
826 #ifndef NOTYET
827 if (cpumask == -1)
828 return 1; /* XXX TMP FIXME */
829 #endif
830
831 /*
832 * discount cpus not present
833 */
834 cpumask &= cpu_present_mask;
835
836 switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
837 case MIPS_XLS104:
838 case MIPS_XLS204:
839 case MIPS_XLS404:
840 case MIPS_XLS404LITE:
841 irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
842 irtc0 &= (__BITS(5,4) | __BITS(1,0));
843 break;
844 case MIPS_XLS108:
845 case MIPS_XLS208:
846 case MIPS_XLS408:
847 case MIPS_XLS408LITE:
848 case MIPS_XLS608:
849 irtc0 = cpumask & __BITS(7,0);
850 break;
851 case MIPS_XLS416:
852 case MIPS_XLS616:
853 irtc0 = cpumask & __BITS(15,0);
854 break;
855 default:
856 panic("%s: unknown cpu ID %#x\n", __func__,
857 mips_options.mips_cpu_id);
858 }
859 #else
860 irtc0 = 1;
861 #endif /* MULTIPROCESSOR */
862
863 return irtc0;
864 }
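
/*
 * Worked example (illustrative only) of the XLS x04 remapping above:
 * a cpumask of 0x6 (cpu1 and cpu2) yields
 *
 *	irtc0 = ((0x6 >> 2) << 4) | (0x6 & __BITS(1,0)) = 0x12
 *
 * i.e. cpumask bits 3:2 are moved up to bits 5:4, presumably because
 * the IRT thread mask reserves a 4-bit field per core.
 */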
865
866 /*
867 * rmixl_irt_init
868 * - initialize IRT Entry for given index
 * - the entry is zeroed (masked/invalid) until rmixl_irt_establish()
 *   fills it in
870 */
871 static void
872 rmixl_irt_init(size_t irt)
873 {
874 KASSERT(irt < rmixl_nirts);
875 if (cpu_rmixlp(mips_options.mips_cpu)) {
876 RMIXLP_PICREG_WRITE(RMIXLP_PIC_IRTENTRY(irt), 0);
877 } else {
878 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0); /* high word */
879 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0); /* low word */
880 }
881 }
882
883 /*
884 * rmixl_irt_disestablish
885 * - invalidate IRT Entry for given index
886 */
887 static void
888 rmixl_irt_disestablish(size_t irt)
889 {
890 KASSERT(mutex_owned(rmixl_intr_lock));
891 DPRINTF(("%s: irt %zu, irtc1 %#x\n", __func__, irt, 0));
892 rmixl_irt_init(irt);
893 }
894
895 /*
896 * rmixl_irt_establish
897 * - construct an IRT Entry for irt and write to PIC
898 */
899 static void
900 rmixl_irt_establish(size_t irt, size_t vec, rmixl_intr_trigger_t trigger,
901 rmixl_intr_polarity_t polarity)
902 {
903 const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
904
905 KASSERT(mutex_owned(rmixl_intr_lock));
906
907 if (irt >= rmixl_nirts)
908 panic("%s: bad irt %zu\n", __func__, irt);
909
910 /*
	 * All XLP interrupts are level triggered.
912 */
913 if (trigger != RMIXL_TRIG_LEVEL
914 && (is_xlp_p || trigger != RMIXL_TRIG_EDGE)) {
915 panic("%s: bad trigger %d\n", __func__, trigger);
916 }
917
918 /*
	 * All XLP interrupts have high (positive) polarity.
920 */
921 if (polarity != RMIXL_POLR_HIGH
922 && (is_xlp_p
923 || (polarity != RMIXL_POLR_RISING
924 && polarity != RMIXL_POLR_FALLING
925 && polarity != RMIXL_POLR_LOW))) {
926 panic("%s: bad polarity %d\n", __func__, polarity);
927 }
928
929 /*
930 * XXX IRT entries are not shared
931 */
932 if (is_xlp_p) {
933 KASSERT(RMIXLP_PICREG_READ(RMIXLP_PIC_IRTENTRY(irt)) == 0);
934 uint64_t irtc0 = RMIXLP_PIC_IRTENTRY_EN
935 | RMIXLP_PIC_IRTENTRY_LOCAL
936 | RMIXLP_PIC_IRTENTRY_DT_ITE
937 | RMIXLP_PIC_IRTENTRY_ITE(0)
		    | __SHIFTIN(vec, RMIXLP_PIC_IRTENTRY_INTVEC);
939
940 /*
941 * write IRT Entry to PIC
942 */
943 DPRINTF(("%s: vec %zu (%#x), irt %zu (%s), irtc0 %#"PRIx64"\n",
944 __func__, vec, vec, irt, rmixl_irtnames[irt], irtc0));
945
946 RMIXLP_PICREG_WRITE(RMIXLP_PIC_IRTENTRY(irt), irtc0);
947 } else {
948 KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
949 KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);
950
951 __cpuset_t cpumask = 1; /* XXX */
952 uint32_t irtc0 = rmixl_irt_thread_mask(cpumask);
953
954 uint32_t irtc1 = RMIXL_PIC_IRTENTRYC1_VALID;
955 irtc1 |= RMIXL_PIC_IRTENTRYC1_GL; /* local */
956 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
957
958 if (trigger == RMIXL_TRIG_LEVEL)
959 irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
960 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
961
962 if (polarity == RMIXL_POLR_FALLING
963 || polarity == RMIXL_POLR_LOW)
964 irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
965 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
966
967 irtc1 |= vec; /* vector in EIRR */
968 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
969
970 /*
971 * write IRT Entry to PIC
972 */
973 DPRINTF(("%s: vec %zu (%#x), irt %zu, irtc0 %#x, irtc1 %#x\n",
974 __func__, vec, vec, irt, irtc0, irtc1));
975 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0); /* low word */
976 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1); /* high word */
977 }
978 }
979
980 void *
981 rmixl_vec_establish(size_t vec, rmixl_intrhand_t *ih, int ipl,
982 int (*func)(void *), void *arg, bool mpsafe)
983 {
984
985 KASSERT(mutex_owned(rmixl_intr_lock));
986
987 DPRINTF(("%s: vec %zu ih %p ipl %d func %p arg %p mpsafe %d\n",
988 __func__, vec, ih, ipl, func, arg, mpsafe));
989
990 #ifdef DIAGNOSTIC
991 if (rmixl_pic_init_done == 0)
992 panic("%s: called before evbmips_intr_init", __func__);
993 #endif
994
995 /*
996 * check args
997 */
998 if (vec >= NINTRVECS)
999 panic("%s: vec %zu out of range, max %d",
1000 __func__, vec, NINTRVECS - 1);
1001 if (ipl < IPL_VM || ipl > IPL_HIGH)
1002 panic("%s: ipl %d out of range, min %d, max %d",
1003 __func__, ipl, IPL_VM, IPL_HIGH);
1004
1005 const int s = splhigh();
1006
1007 rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1008 if (ih == NULL) {
1009 ih = &iv->iv_intrhand;
1010 }
1011
1012 if (vec >= 8) {
1013 TAILQ_REMOVE(&rmixl_intrvec_lruq[iv->iv_ipl], iv, iv_lruq_link);
1014 }
1015
1016 if (LIST_EMPTY(&iv->iv_hands)) {
1017 KASSERT(iv->iv_ipl == IPL_NONE);
1018 iv->iv_ipl = ipl;
1019 } else {
1020 KASSERT(iv->iv_ipl == ipl);
1021 }
1022
1023 if (vec >= 8) {
1024 TAILQ_INSERT_TAIL(&rmixl_intrvec_lruq[iv->iv_ipl],
1025 iv, iv_lruq_link);
1026 }
1027
1028 if (ih->ih_func != NULL) {
1029 #ifdef DIAGNOSTIC
1030 printf("%s: intrhand[%zu] busy\n", __func__, vec);
1031 #endif
1032 splx(s);
1033 return NULL;
1034 }
1035
1036 ih->ih_arg = arg;
1037 ih->ih_mpsafe = mpsafe;
1038 ih->ih_vec = vec;
1039
1040 LIST_INSERT_HEAD(&iv->iv_hands, ih, ih_link);
1041
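	/*
	 * ipl_eimr_map[i] is (as used by evbmips_iointr) the set of
	 * vectors enabled while running at IPL i; setting this vector's
	 * bit at every level below 'ipl' keeps it unmasked until the
	 * CPU is raised to the handler's own IPL.
	 */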
1042 const uint64_t eimr_bit = (uint64_t)1 << vec;
1043 for (int i = ipl; --i >= 0; ) {
1044 KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
1045 ipl_eimr_map[i] |= eimr_bit;
1046 }
1047
1048 ih->ih_func = func; /* do this last */
1049
1050 splx(s);
1051
1052 return ih;
1053 }
1054
1055 /*
1056 * rmixl_intr_establish
1057 * - used to establish an IRT-based interrupt only
1058 */
1059 void *
1060 rmixl_intr_establish(size_t irt, int ipl,
1061 rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
1062 int (*func)(void *), void *arg, bool mpsafe)
1063 {
1064 #ifdef DIAGNOSTIC
1065 if (rmixl_pic_init_done == 0)
1066 panic("%s: called before rmixl_pic_init_done", __func__);
1067 #endif
1068
1069 /*
1070 * check args
1071 */
1072 if (irt >= rmixl_nirts)
1073 panic("%s: irt %zu out of range, max %d",
1074 __func__, irt, rmixl_nirts - 1);
1075 if (ipl < IPL_VM || ipl > IPL_HIGH)
1076 panic("%s: ipl %d out of range, min %d, max %d",
1077 __func__, ipl, IPL_VM, IPL_HIGH);
1078
1079 mutex_enter(rmixl_intr_lock);
1080
1081 rmixl_intrhand_t *ih = &rmixl_irt_intrhands[irt];
1082
1083 KASSERT(ih->ih_func == NULL);
1084
1085 const size_t vec = rmixl_intr_get_vec(ipl);
1086
1087 DPRINTF(("%s: irt %zu, ih %p vec %zu, ipl %d\n",
1088 __func__, irt, ih, vec, ipl));
1089
1090 /*
1091 * establish vector
1092 */
1093 ih = rmixl_vec_establish(vec, ih, ipl, func, arg, mpsafe);
1094
1095 /*
1096 * establish IRT Entry
1097 */
1098 rmixl_irt_establish(irt, vec, trigger, polarity);
1099
1100 mutex_exit(rmixl_intr_lock);
1101
1102 return ih;
1103 }
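
/*
 * Typical use (illustrative only; "sc", "mydev_intr" and "MYDEV_IRT"
 * are hypothetical):
 *
 *	void *ih = rmixl_intr_establish(MYDEV_IRT, IPL_VM,
 *	    RMIXL_TRIG_LEVEL, RMIXL_POLR_HIGH, mydev_intr, sc, false);
 *	if (ih == NULL)
 *		panic("mydev: interrupt establish failed");
 *	...
 *	rmixl_intr_disestablish(ih);
 */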
1104
1105 void
1106 rmixl_vec_disestablish(void *cookie)
1107 {
1108 rmixl_intrhand_t * const ih = cookie;
1109 const size_t vec = ih->ih_vec;
1110 rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1111
1112 KASSERT(mutex_owned(rmixl_intr_lock));
1113 KASSERT(vec < NINTRVECS);
1114 KASSERT(ih->ih_func != NULL);
1115 KASSERT(IPL_VM <= iv->iv_ipl && iv->iv_ipl <= IPL_HIGH);
1116
1117 LIST_REMOVE(ih, ih_link);
1118
1119 ih->ih_func = NULL; /* do this first */
1120
1121 const uint64_t eimr_bit = __BIT(ih->ih_vec);
1122 for (int i = iv->iv_ipl; --i >= 0; ) {
1123 KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
1124 ipl_eimr_map[i] ^= eimr_bit;
1125 }
1126
1127 ih->ih_vec = 0;
1128 ih->ih_mpsafe = false;
1129 ih->ih_arg = NULL;
1130
1131 /*
1132 * If this vector isn't servicing any interrupts, then check to
1133 * see if this IPL has other vectors using it. If it does, then
	 * return this vector to the freeq (lruq for IPL_NONE).  This
	 * ensures there will always be at least one vector per IPL.
1136 */
1137 if (vec > 8 && LIST_EMPTY(&iv->iv_hands)) {
1138 rmixl_intrvecq_t * const freeq = &rmixl_intrvec_lruq[IPL_NONE];
1139 rmixl_intrvecq_t * const iplq = &rmixl_intrvec_lruq[iv->iv_ipl];
1140
1141 if (TAILQ_NEXT(iv, iv_lruq_link) != NULL
1142 || TAILQ_FIRST(iplq) != iv) {
1143 TAILQ_REMOVE(iplq, iv, iv_lruq_link);
1144 iv->iv_ipl = IPL_NONE;
1145 TAILQ_INSERT_TAIL(freeq, iv, iv_lruq_link);
1146 }
1147 }
1148 }
1149
1150 void
1151 rmixl_intr_disestablish(void *cookie)
1152 {
1153 rmixl_intrhand_t * const ih = cookie;
1154 const size_t vec = ih->ih_vec;
1155 rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1156
1157 KASSERT(vec < NINTRVECS);
1158
1159 mutex_enter(rmixl_intr_lock);
1160
1161 /*
1162 * disable/invalidate the IRT Entry if needed
1163 */
1164 if (ih != &iv->iv_intrhand) {
1165 size_t irt = ih - rmixl_irt_intrhands;
1166 KASSERT(irt < rmixl_nirts);
1167 rmixl_irt_disestablish(irt);
1168 }
1169
1170 /*
	 * disassociate from the vector and free the handle
1172 */
1173 rmixl_vec_disestablish(cookie);
1174
1175 mutex_exit(rmixl_intr_lock);
1176 }
1177
1178 void
1179 evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending)
1180 {
1181 struct rmixl_cpu_softc * const sc = (void *)curcpu()->ci_softc;
1182 const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
1183
1184 DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
1185 __func__, cpu_number(), ipl, pc, pending));
1186
1187 /*
	 * the 'pending' arg only indicates that there is something to do;
	 * the real pending status is obtained from the EIRR
1190 */
1191 KASSERT(pending == MIPS_INT_MASK_1);
1192
1193 for (;;) {
1194 rmixl_intrhand_t *ih;
1195 uint64_t eirr;
1196 uint64_t eimr;
1197 uint64_t vecbit;
1198 int vec;
1199
1200 __asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
1201 __asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));
1202
1203 #ifdef IOINTR_DEBUG
1204 printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
1205 __func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
1206 #endif /* IOINTR_DEBUG */
1207
1208 /*
1209 * reduce eirr to
1210 * - ints that are enabled at or below this ipl
1211 * - exclude count/compare clock and soft ints
1212 * they are handled elsewhere
1213 */
1214 eirr &= ipl_eimr_map[ipl-1];
1215 eirr &= ~ipl_eimr_map[ipl];
1216 eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
1217 if (eirr == 0)
1218 break;
1219
1220 vec = 63 - dclz(eirr);
1221 rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1222 vecbit = 1ULL << vec;
1223 KASSERT (iv->iv_ipl == ipl);
1224 LIST_FOREACH(ih, &iv->iv_hands, ih_link) {
1225 KASSERT ((vecbit & eimr) == 0);
1226 KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);
1227
1228 /*
1229 * ack in EIRR, and in PIC if needed,
1230 * the irq we are about to handle
1231 */
1232 rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
1233 if (ih != &iv->iv_intrhand) {
1234 size_t irt = ih - rmixl_irt_intrhands;
1235 KASSERT(irt < rmixl_nirts);
1236 if (is_xlp_p) {
1237 RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_ACK,
1238 irt);
1239 } else {
1240 RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
1241 1 << irt);
1242 }
1243 sc->sc_irt_evcnts[irt].ev_count++;
1244 }
1245
1246 if (ih->ih_func != NULL) {
1247 #ifdef MULTIPROCESSOR
1248 if (ih->ih_mpsafe) {
1249 (void)(*ih->ih_func)(ih->ih_arg);
1250 } else {
1251 KASSERTMSG(ipl == IPL_VM,
1252 ("%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
1253 __func__, sc->sc_vec_evcnts[vec].ev_name,
1254 ipl));
1255 KERNEL_LOCK(1, NULL);
1256 (void)(*ih->ih_func)(ih->ih_arg);
1257 KERNEL_UNLOCK_ONE(NULL);
1258 }
1259 #else
1260 (void)(*ih->ih_func)(ih->ih_arg);
1261 #endif /* MULTIPROCESSOR */
1262 }
1263 KASSERT(ipl == iv->iv_ipl);
1264 KASSERTMSG(curcpu()->ci_cpl >= ipl,
1265 ("%s: after %s: cpl (%d) < ipl %d",
1266 __func__, sc->sc_vec_evcnts[vec].ev_name,
1267 ipl, curcpu()->ci_cpl));
1268 sc->sc_vec_evcnts[vec].ev_count++;
1269 }
1270 }
1271 }
1272
1273 #ifdef MULTIPROCESSOR
1274 static int
1275 rmixl_send_ipi(struct cpu_info *ci, int tag)
1276 {
1277 const cpuid_t cpuid = ci->ci_cpuid;
1278 const uint64_t req = 1 << tag;
1279 const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
1280 uint32_t r;
1281
1282 if (! CPUSET_HAS_P(cpus_running, cpu_index(ci)))
1283 return -1;
1284
1285 KASSERT(tag >= 0 && tag < NIPIS);
1286
1287 if (is_xlp_p) {
1288 r = RMXLP_PIC_IPI_CTRL_MAKE(0, __BIT(cpuid & 15),
		    RMIXL_INTRVEC_IPI + tag);
1290 } else {
1291 const uint32_t core = (uint32_t)(cpuid >> 2);
1292 const uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
1293 r = RMXLP_PIC_IPI_CTRL_MAKE(0, core, thread,
		    RMIXL_INTRVEC_IPI + tag);
1295 }
1296
1297 mutex_enter(rmixl_ipi_lock);
1298 atomic_or_64(&ci->ci_request_ipis, req);
1299 __asm __volatile("sync");
1300 if (is_xlp_p) {
1301 RMIXLP_PICREG_WRITE(RMIXLP_PIC_IPI_CTRL, r);
1302 } else {
1303 RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
1304 }
1305 mutex_exit(rmixl_ipi_lock);
1306
1307 return 0;
1308 }
1309
1310 static int
1311 rmixl_ipi_intr(void *arg)
1312 {
1313 struct cpu_info * const ci = curcpu();
1314 const uint64_t ipi_mask = 1 << (uintptr_t)arg;
1315
1316 KASSERT(ci->ci_cpl >= IPL_SCHED);
1317 KASSERT((uintptr_t)arg < NIPIS);
1318
1319 /* if the request is clear, it was previously processed */
1320 if ((ci->ci_request_ipis & ipi_mask) == 0)
1321 return 0;
1322
1323 atomic_or_64(&ci->ci_active_ipis, ipi_mask);
1324 atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
1325
1326 ipi_process(ci, ipi_mask);
1327
1328 atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
1329
1330 return 1;
1331 }
1332 #endif /* MULTIPROCESSOR */
1333
1334 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
1335 int
1336 rmixl_intrvec_print_subr(size_t vec)
1337 {
1338 rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1339 rmixl_intrhand_t *ih;
1340
1341 printf("vec %zu: ipl %u\n", vec, iv->iv_ipl);
1342
1343 LIST_FOREACH(ih, &iv->iv_hands, ih_link) {
1344 if (ih == &iv->iv_intrhand) {
1345 printf(" [%s]: func %p, arg %p\n",
1346 rmixl_vecnames_common[vec],
1347 ih->ih_func, ih->ih_arg);
1348 } else {
1349 const size_t irt = ih - rmixl_irt_intrhands;
1350 printf(" irt %zu [%s]: func %p, arg %p\n",
1351 irt, rmixl_irtnames[irt],
1352 ih->ih_func, ih->ih_arg);
1353 }
1354 }
1355 return 0;
1356 }
1357 int
1358 rmixl_intrhand_print(void)
1359 {
1360 for (size_t vec = 0; vec < NINTRVECS; vec++)
1361 rmixl_intrvec_print_subr(vec);
1362 return 0;
1363 }
1364
1365 static inline void
1366 rmixl_irt_entry_print(size_t irt)
1367 {
1368 if (irt >= rmixl_nirts)
1369 return;
1370 if (cpu_rmixlp(mips_options.mips_cpu)) {
1371 uint64_t c = RMIXLP_PICREG_READ(RMIXLP_PIC_IRTENTRY(irt));
1372 printf("irt[%zu]: %#"PRIx64"\n", irt, c);
1373 } else {
1374 uint32_t c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
1375 uint32_t c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
1376 printf("irt[%zu]: %#x, %#x\n", irt, c0, c1);
1377 }
1378 }
1379
1380 int
1381 rmixl_irt_print(void)
1382 {
1383 printf("%s:\n", __func__);
1384 for (size_t irt = 0; irt < rmixl_nirts ; irt++)
1385 rmixl_irt_entry_print(irt);
1386 return 0;
1387 }
1388
1389 void
1390 rmixl_ipl_eimr_map_print(void)
1391 {
1392 printf("IPL_NONE=%d, mask %#"PRIx64"\n",
1393 IPL_NONE, ipl_eimr_map[IPL_NONE]);
1394 printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
1395 IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
1396 printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
1397 IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
1398 printf("IPL_VM=%d, mask %#"PRIx64"\n",
1399 IPL_VM, ipl_eimr_map[IPL_VM]);
1400 printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
1401 IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
1402 printf("IPL_DDB=%d, mask %#"PRIx64"\n",
1403 IPL_DDB, ipl_eimr_map[IPL_DDB]);
1404 printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
1405 IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
1406 }
1407
1408 #endif
1409