1 1.1.2.33 matt /* $NetBSD: rmixl_intr.c,v 1.1.2.33 2011/12/31 08:20:43 matt Exp $ */
2 1.1.2.1 cliff
3 1.1.2.1 cliff /*-
4 1.1.2.1 cliff * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
5 1.1.2.1 cliff * All rights reserved.
6 1.1.2.1 cliff *
7 1.1.2.1 cliff * Redistribution and use in source and binary forms, with or
8 1.1.2.1 cliff * without modification, are permitted provided that the following
9 1.1.2.1 cliff * conditions are met:
10 1.1.2.1 cliff * 1. Redistributions of source code must retain the above copyright
11 1.1.2.1 cliff * notice, this list of conditions and the following disclaimer.
12 1.1.2.1 cliff * 2. Redistributions in binary form must reproduce the above
13 1.1.2.1 cliff * copyright notice, this list of conditions and the following
14 1.1.2.1 cliff * disclaimer in the documentation and/or other materials provided
15 1.1.2.1 cliff * with the distribution.
16 1.1.2.1 cliff * 3. The names of the authors may not be used to endorse or promote
17 1.1.2.1 cliff * products derived from this software without specific prior
18 1.1.2.1 cliff * written permission.
19 1.1.2.1 cliff *
20 1.1.2.1 cliff * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
21 1.1.2.1 cliff * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 1.1.2.1 cliff * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23 1.1.2.1 cliff * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
24 1.1.2.1 cliff * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
25 1.1.2.1 cliff * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 1.1.2.1 cliff * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27 1.1.2.1 cliff * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 1.1.2.1 cliff * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29 1.1.2.1 cliff * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 1.1.2.1 cliff * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
31 1.1.2.1 cliff * OF SUCH DAMAGE.
32 1.1.2.1 cliff */
33 1.1.2.1 cliff /*-
34 1.1.2.1 cliff * Copyright (c) 2001 The NetBSD Foundation, Inc.
35 1.1.2.1 cliff * All rights reserved.
36 1.1.2.1 cliff *
37 1.1.2.1 cliff * This code is derived from software contributed to The NetBSD Foundation
38 1.1.2.1 cliff * by Jason R. Thorpe.
39 1.1.2.1 cliff *
40 1.1.2.1 cliff * Redistribution and use in source and binary forms, with or without
41 1.1.2.1 cliff * modification, are permitted provided that the following conditions
42 1.1.2.1 cliff * are met:
43 1.1.2.1 cliff * 1. Redistributions of source code must retain the above copyright
44 1.1.2.1 cliff * notice, this list of conditions and the following disclaimer.
45 1.1.2.1 cliff * 2. Redistributions in binary form must reproduce the above copyright
46 1.1.2.1 cliff * notice, this list of conditions and the following disclaimer in the
47 1.1.2.1 cliff * documentation and/or other materials provided with the distribution.
48 1.1.2.1 cliff *
49 1.1.2.1 cliff * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 1.1.2.1 cliff * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 1.1.2.1 cliff * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 1.1.2.1 cliff * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 1.1.2.1 cliff * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 1.1.2.1 cliff * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 1.1.2.1 cliff * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 1.1.2.1 cliff * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 1.1.2.1 cliff * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 1.1.2.1 cliff * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 1.1.2.1 cliff * POSSIBILITY OF SUCH DAMAGE.
60 1.1.2.1 cliff */
61 1.1.2.1 cliff
62 1.1.2.1 cliff /*
63 1.1.2.1 cliff * Platform-specific interrupt support for the RMI XLP, XLR, XLS
64 1.1.2.1 cliff */
65 1.1.2.1 cliff
66 1.1.2.1 cliff #include <sys/cdefs.h>
67 1.1.2.33 matt __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.33 2011/12/31 08:20:43 matt Exp $");
68 1.1.2.1 cliff
69 1.1.2.1 cliff #include "opt_ddb.h"
70 1.1.2.30 matt #include "opt_multiprocessor.h"
71 1.1.2.14 matt #define __INTR_PRIVATE
72 1.1.2.1 cliff
73 1.1.2.1 cliff #include <sys/param.h>
74 1.1.2.1 cliff #include <sys/queue.h>
75 1.1.2.1 cliff #include <sys/malloc.h>
76 1.1.2.1 cliff #include <sys/systm.h>
77 1.1.2.1 cliff #include <sys/device.h>
78 1.1.2.1 cliff #include <sys/kernel.h>
79 1.1.2.15 cliff #include <sys/atomic.h>
80 1.1.2.25 cliff #include <sys/mutex.h>
81 1.1.2.15 cliff #include <sys/cpu.h>
82 1.1.2.1 cliff
83 1.1.2.1 cliff #include <machine/bus.h>
84 1.1.2.1 cliff #include <machine/intr.h>
85 1.1.2.1 cliff
86 1.1.2.5 cliff #include <mips/cpu.h>
87 1.1.2.30 matt #include <mips/cpuset.h>
88 1.1.2.1 cliff #include <mips/locore.h>
89 1.1.2.5 cliff
90 1.1.2.1 cliff #include <mips/rmi/rmixlreg.h>
91 1.1.2.1 cliff #include <mips/rmi/rmixlvar.h>
92 1.1.2.1 cliff
93 1.1.2.15 cliff #include <mips/rmi/rmixl_cpuvar.h>
94 1.1.2.15 cliff #include <mips/rmi/rmixl_intr.h>
95 1.1.2.15 cliff
96 1.1.2.1 cliff #include <dev/pci/pcireg.h>
97 1.1.2.1 cliff #include <dev/pci/pcivar.h>
98 1.1.2.1 cliff
99 1.1.2.30 matt //#define IOINTR_DEBUG 1
100 1.1.2.4 cliff #ifdef IOINTR_DEBUG
101 1.1.2.4 cliff int iointr_debug = IOINTR_DEBUG;
102 1.1.2.4 cliff # define DPRINTF(x) do { if (iointr_debug) printf x ; } while(0)
103 1.1.2.4 cliff #else
104 1.1.2.4 cliff # define DPRINTF(x)
105 1.1.2.4 cliff #endif
106 1.1.2.4 cliff
107 1.1.2.4 cliff #define RMIXL_PICREG_READ(off) \
108 1.1.2.4 cliff RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
109 1.1.2.4 cliff #define RMIXL_PICREG_WRITE(off, val) \
110 1.1.2.4 cliff RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))
111 1.1.2.15 cliff
112 1.1.2.31 matt /* XXX this will need to deal with node */
113 1.1.2.31 matt #define RMIXLP_PICREG_READ(off) \
114 1.1.2.32 matt rmixlp_read_8(RMIXLP_PIC_PCITAG, (off))
115 1.1.2.31 matt #define RMIXLP_PICREG_WRITE(off, val) \
116 1.1.2.32 matt rmixlp_write_8(RMIXLP_PIC_PCITAG, (off), (val));
117 1.1.2.31 matt
118 1.1.2.1 cliff /*
119 1.1.2.15 cliff * do not clear these when acking EIRR
120 1.1.2.15 cliff * (otherwise they get lost)
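 * (these are the Cause-register IP bits, Cause bits 8..15, shifted down
 * by 8 to their EIRR positions: bits 0..1 = soft ints, bit 7 = count/compare)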
121 1.1.2.15 cliff */
122 1.1.2.15 cliff #define RMIXL_EIRR_PRESERVE_MASK \
123 1.1.2.15 cliff ((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
124 1.1.2.1 cliff
125 1.1.2.2 cliff /*
126 1.1.2.15 cliff * IRT assignments depend on the RMI chip family
127 1.1.2.15 cliff * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx)
128 1.1.2.20 cliff * use the right display string table for the CPU that's running.
129 1.1.2.4 cliff */
130 1.1.2.4 cliff
131 1.1.2.32 matt #ifdef MIPS64_XLR
132 1.1.2.4 cliff /*
133 1.1.2.16 cliff * rmixl_irtnames_xlrxxx
134 1.1.2.16 cliff * - use for XLRxxx
135 1.1.2.16 cliff */
136 1.1.2.31 matt static const char * const rmixl_irtnames_xlrxxx[RMIXLR_NIRTS] = {
137 1.1.2.20 cliff "pic int 0 (watchdog)", /* 0 */
138 1.1.2.20 cliff "pic int 1 (timer0)", /* 1 */
139 1.1.2.20 cliff "pic int 2 (timer1)", /* 2 */
140 1.1.2.20 cliff "pic int 3 (timer2)", /* 3 */
141 1.1.2.20 cliff "pic int 4 (timer3)", /* 4 */
142 1.1.2.20 cliff "pic int 5 (timer4)", /* 5 */
143 1.1.2.20 cliff "pic int 6 (timer5)", /* 6 */
144 1.1.2.20 cliff "pic int 7 (timer6)", /* 7 */
145 1.1.2.20 cliff "pic int 8 (timer7)", /* 8 */
146 1.1.2.20 cliff "pic int 9 (uart0)", /* 9 */
147 1.1.2.20 cliff "pic int 10 (uart1)", /* 10 */
148 1.1.2.20 cliff "pic int 11 (i2c0)", /* 11 */
149 1.1.2.20 cliff "pic int 12 (i2c1)", /* 12 */
150 1.1.2.20 cliff "pic int 13 (pcmcia)", /* 13 */
151 1.1.2.20 cliff "pic int 14 (gpio)", /* 14 */
152 1.1.2.20 cliff "pic int 15 (hyper)", /* 15 */
153 1.1.2.20 cliff "pic int 16 (pcix)", /* 16 */
154 1.1.2.20 cliff "pic int 17 (gmac0)", /* 17 */
155 1.1.2.20 cliff "pic int 18 (gmac1)", /* 18 */
156 1.1.2.20 cliff "pic int 19 (gmac2)", /* 19 */
157 1.1.2.20 cliff "pic int 20 (gmac3)", /* 20 */
158 1.1.2.20 cliff "pic int 21 (xgs0)", /* 21 */
159 1.1.2.20 cliff "pic int 22 (xgs1)", /* 22 */
160 1.1.2.32 matt "pic int 23 (?)", /* 23 */
161 1.1.2.20 cliff "pic int 24 (hyper_fatal)", /* 24 */
162 1.1.2.20 cliff "pic int 25 (bridge_aerr)", /* 25 */
163 1.1.2.20 cliff "pic int 26 (bridge_berr)", /* 26 */
164 1.1.2.20 cliff "pic int 27 (bridge_tb)", /* 27 */
165 1.1.2.20 cliff "pic int 28 (bridge_nmi)", /* 28 */
166 1.1.2.20 cliff "pic int 29 (bridge_sram_derr)",/* 29 */
167 1.1.2.20 cliff "pic int 30 (gpio_fatal)", /* 30 */
168 1.1.2.20 cliff "pic int 31 (reserved)", /* 31 */
169 1.1.2.16 cliff };
170 1.1.2.32 matt #endif /* MIPS64_XLR */
171 1.1.2.16 cliff
172 1.1.2.32 matt #ifdef MIPS64_XLS
173 1.1.2.16 cliff /*
174 1.1.2.19 cliff * rmixl_irtnames_xls2xx
175 1.1.2.19 cliff * - use for XLS2xx
176 1.1.2.19 cliff */
177 1.1.2.31 matt static const char * const rmixl_irtnames_xls2xx[RMIXLS_NIRTS] = {
178 1.1.2.20 cliff "pic int 0 (watchdog)", /* 0 */
179 1.1.2.20 cliff "pic int 1 (timer0)", /* 1 */
180 1.1.2.20 cliff "pic int 2 (timer1)", /* 2 */
181 1.1.2.20 cliff "pic int 3 (timer2)", /* 3 */
182 1.1.2.20 cliff "pic int 4 (timer3)", /* 4 */
183 1.1.2.20 cliff "pic int 5 (timer4)", /* 5 */
184 1.1.2.20 cliff "pic int 6 (timer5)", /* 6 */
185 1.1.2.20 cliff "pic int 7 (timer6)", /* 7 */
186 1.1.2.20 cliff "pic int 8 (timer7)", /* 8 */
187 1.1.2.20 cliff "pic int 9 (uart0)", /* 9 */
188 1.1.2.20 cliff "pic int 10 (uart1)", /* 10 */
189 1.1.2.20 cliff "pic int 11 (i2c0)", /* 11 */
190 1.1.2.20 cliff "pic int 12 (i2c1)", /* 12 */
191 1.1.2.20 cliff "pic int 13 (pcmcia)", /* 13 */
192 1.1.2.20 cliff "pic int 14 (gpio_a)", /* 14 */
193 1.1.2.32 matt "pic int 15 (?)", /* 15 */
194 1.1.2.20 cliff "pic int 16 (bridge_tb)", /* 16 */
195 1.1.2.20 cliff "pic int 17 (gmac0)", /* 17 */
196 1.1.2.20 cliff "pic int 18 (gmac1)", /* 18 */
197 1.1.2.20 cliff "pic int 19 (gmac2)", /* 19 */
198 1.1.2.20 cliff "pic int 20 (gmac3)", /* 20 */
199 1.1.2.32 matt "pic int 21 (?)", /* 21 */
200 1.1.2.32 matt "pic int 22 (?)", /* 22 */
201 1.1.2.20 cliff "pic int 23 (pcie_link2)", /* 23 */
202 1.1.2.20 cliff "pic int 24 (pcie_link3)", /* 24 */
203 1.1.2.20 cliff "pic int 25 (bridge_err)", /* 25 */
204 1.1.2.20 cliff "pic int 26 (pcie_link0)", /* 26 */
205 1.1.2.20 cliff "pic int 27 (pcie_link1)", /* 27 */
206 1.1.2.32 matt "pic int 28 (?)", /* 28 */
207 1.1.2.20 cliff "pic int 29 (pcie_err)", /* 29 */
208 1.1.2.20 cliff "pic int 30 (gpio_b)", /* 30 */
209 1.1.2.20 cliff "pic int 31 (usb)", /* 31 */
210 1.1.2.19 cliff };
211 1.1.2.19 cliff
212 1.1.2.19 cliff /*
213 1.1.2.15 cliff * rmixl_irtnames_xls1xx
214 1.1.2.19 cliff * - use for XLS1xx, XLS4xx-Lite
215 1.1.2.2 cliff */
216 1.1.2.31 matt static const char * const rmixl_irtnames_xls1xx[RMIXLS_NIRTS] = {
217 1.1.2.20 cliff "pic int 0 (watchdog)", /* 0 */
218 1.1.2.20 cliff "pic int 1 (timer0)", /* 1 */
219 1.1.2.20 cliff "pic int 2 (timer1)", /* 2 */
220 1.1.2.20 cliff "pic int 3 (timer2)", /* 3 */
221 1.1.2.20 cliff "pic int 4 (timer3)", /* 4 */
222 1.1.2.20 cliff "pic int 5 (timer4)", /* 5 */
223 1.1.2.20 cliff "pic int 6 (timer5)", /* 6 */
224 1.1.2.20 cliff "pic int 7 (timer6)", /* 7 */
225 1.1.2.20 cliff "pic int 8 (timer7)", /* 8 */
226 1.1.2.20 cliff "pic int 9 (uart0)", /* 9 */
227 1.1.2.20 cliff "pic int 10 (uart1)", /* 10 */
228 1.1.2.20 cliff "pic int 11 (i2c0)", /* 11 */
229 1.1.2.20 cliff "pic int 12 (i2c1)", /* 12 */
230 1.1.2.20 cliff "pic int 13 (pcmcia)", /* 13 */
231 1.1.2.20 cliff "pic int 14 (gpio_a)", /* 14 */
232 1.1.2.32 matt "pic int 15 (?)", /* 15 */
233 1.1.2.20 cliff "pic int 16 (bridge_tb)", /* 16 */
234 1.1.2.20 cliff "pic int 17 (gmac0)", /* 17 */
235 1.1.2.20 cliff "pic int 18 (gmac1)", /* 18 */
236 1.1.2.20 cliff "pic int 19 (gmac2)", /* 19 */
237 1.1.2.20 cliff "pic int 20 (gmac3)", /* 20 */
238 1.1.2.32 matt "pic int 21 (?)", /* 21 */
239 1.1.2.32 matt "pic int 22 (?)", /* 22 */
240 1.1.2.32 matt "pic int 23 (?)", /* 23 */
241 1.1.2.32 matt "pic int 24 (?)", /* 24 */
242 1.1.2.20 cliff "pic int 25 (bridge_err)", /* 25 */
243 1.1.2.20 cliff "pic int 26 (pcie_link0)", /* 26 */
244 1.1.2.20 cliff "pic int 27 (pcie_link1)", /* 27 */
245 1.1.2.32 matt "pic int 28 (?)", /* 28 */
246 1.1.2.20 cliff "pic int 29 (pcie_err)", /* 29 */
247 1.1.2.20 cliff "pic int 30 (gpio_b)", /* 30 */
248 1.1.2.20 cliff "pic int 31 (usb)", /* 31 */
249 1.1.2.1 cliff };
250 1.1.2.1 cliff
251 1.1.2.2 cliff /*
252 1.1.2.15 cliff * rmixl_irtnames_xls4xx:
253 1.1.2.4 cliff * - use for XLS4xx, XLS6xx
254 1.1.2.4 cliff */
255 1.1.2.31 matt static const char * const rmixl_irtnames_xls4xx[RMIXLS_NIRTS] = {
256 1.1.2.20 cliff "pic int 0 (watchdog)", /* 0 */
257 1.1.2.20 cliff "pic int 1 (timer0)", /* 1 */
258 1.1.2.20 cliff "pic int 2 (timer1)", /* 2 */
259 1.1.2.20 cliff "pic int 3 (timer2)", /* 3 */
260 1.1.2.20 cliff "pic int 4 (timer3)", /* 4 */
261 1.1.2.20 cliff "pic int 5 (timer4)", /* 5 */
262 1.1.2.20 cliff "pic int 6 (timer5)", /* 6 */
263 1.1.2.20 cliff "pic int 7 (timer6)", /* 7 */
264 1.1.2.20 cliff "pic int 8 (timer7)", /* 8 */
265 1.1.2.20 cliff "pic int 9 (uart0)", /* 9 */
266 1.1.2.20 cliff "pic int 10 (uart1)", /* 10 */
267 1.1.2.20 cliff "pic int 11 (i2c0)", /* 11 */
268 1.1.2.20 cliff "pic int 12 (i2c1)", /* 12 */
269 1.1.2.20 cliff "pic int 13 (pcmcia)", /* 13 */
270 1.1.2.20 cliff "pic int 14 (gpio_a)", /* 14 */
271 1.1.2.32 matt "pic int 15 (?)", /* 15 */
272 1.1.2.20 cliff "pic int 16 (bridge_tb)", /* 16 */
273 1.1.2.20 cliff "pic int 17 (gmac0)", /* 17 */
274 1.1.2.20 cliff "pic int 18 (gmac1)", /* 18 */
275 1.1.2.20 cliff "pic int 19 (gmac2)", /* 19 */
276 1.1.2.20 cliff "pic int 20 (gmac3)", /* 20 */
277 1.1.2.32 matt "pic int 21 (?)", /* 21 */
278 1.1.2.32 matt "pic int 22 (?)", /* 22 */
279 1.1.2.32 matt "pic int 23 (?)", /* 23 */
280 1.1.2.32 matt "pic int 24 (?)", /* 24 */
281 1.1.2.20 cliff "pic int 25 (bridge_err)", /* 25 */
282 1.1.2.20 cliff "pic int 26 (pcie_link0)", /* 26 */
283 1.1.2.20 cliff "pic int 27 (pcie_link1)", /* 27 */
284 1.1.2.20 cliff "pic int 28 (pcie_link2)", /* 28 */
285 1.1.2.20 cliff "pic int 29 (pcie_link3)", /* 29 */
286 1.1.2.20 cliff "pic int 30 (gpio_b)", /* 30 */
287 1.1.2.20 cliff "pic int 31 (usb)", /* 31 */
288 1.1.2.4 cliff };
289 1.1.2.32 matt #endif /* MIPS64_XLS */
290 1.1.2.4 cliff
291 1.1.2.32 matt #ifdef MIPS64_XLP
292 1.1.2.4 cliff /*
293 1.1.2.31 matt * rmixl_irtnames_xlp8xx:
294 1.1.2.31 matt * - use for XLP8xx
295 1.1.2.31 matt */
296 1.1.2.32 matt static const char * const rmixl_irtnames_xlp8xx[RMIXLP_NIRTS] = {
297 1.1.2.31 matt [ 0] = "pic int 0 (watchdog0)",
298 1.1.2.31 matt [ 1] = "pic int 1 (watchdog1)",
299 1.1.2.31 matt [ 2] = "pic int 2 (watchdogNMI0)",
300 1.1.2.31 matt [ 3] = "pic int 3 (watchdogNMI1)",
301 1.1.2.31 matt [ 4] = "pic int 4 (timer0)",
302 1.1.2.31 matt [ 5] = "pic int 5 (timer1)",
303 1.1.2.31 matt [ 6] = "pic int 6 (timer2)",
304 1.1.2.31 matt [ 7] = "pic int 7 (timer3)",
305 1.1.2.31 matt [ 8] = "pic int 8 (timer4)",
306 1.1.2.31 matt [ 9] = "pic int 9 (timer5)",
307 1.1.2.31 matt [ 10] = "pic int 10 (timer6)",
308 1.1.2.31 matt [ 11] = "pic int 11 (timer7)",
309 1.1.2.31 matt [ 12] = "pic int 12 (fmn0)",
310 1.1.2.31 matt [ 13] = "pic int 13 (fmn1)",
311 1.1.2.31 matt [ 14] = "pic int 14 (fmn2)",
312 1.1.2.31 matt [ 15] = "pic int 15 (fmn3)",
313 1.1.2.31 matt [ 16] = "pic int 16 (fmn4)",
314 1.1.2.31 matt [ 17] = "pic int 17 (fmn5)",
315 1.1.2.31 matt [ 18] = "pic int 18 (fmn6)",
316 1.1.2.31 matt [ 19] = "pic int 19 (fmn7)",
317 1.1.2.31 matt [ 20] = "pic int 20 (fmn8)",
318 1.1.2.31 matt [ 21] = "pic int 21 (fmn9)",
319 1.1.2.31 matt [ 22] = "pic int 22 (fmn10)",
320 1.1.2.31 matt [ 23] = "pic int 23 (fmn11)",
321 1.1.2.31 matt [ 24] = "pic int 24 (fmn12)",
322 1.1.2.31 matt [ 25] = "pic int 25 (fmn13)",
323 1.1.2.31 matt [ 26] = "pic int 26 (fmn14)",
324 1.1.2.31 matt [ 27] = "pic int 27 (fmn15)",
325 1.1.2.31 matt [ 28] = "pic int 28 (fmn16)",
326 1.1.2.31 matt [ 29] = "pic int 29 (fmn17)",
327 1.1.2.31 matt [ 30] = "pic int 30 (fmn18)",
328 1.1.2.31 matt [ 31] = "pic int 31 (fmn19)",
329 1.1.2.31 matt [ 32] = "pic int 32 (fmn20)",
330 1.1.2.31 matt [ 33] = "pic int 33 (fmn21)",
331 1.1.2.31 matt [ 34] = "pic int 34 (fmn22)",
332 1.1.2.31 matt [ 35] = "pic int 35 (fmn23)",
333 1.1.2.31 matt [ 36] = "pic int 36 (fmn24)",
334 1.1.2.31 matt [ 37] = "pic int 37 (fmn25)",
335 1.1.2.31 matt [ 38] = "pic int 38 (fmn26)",
336 1.1.2.31 matt [ 39] = "pic int 39 (fmn27)",
337 1.1.2.31 matt [ 40] = "pic int 40 (fmn28)",
338 1.1.2.31 matt [ 41] = "pic int 41 (fmn29)",
339 1.1.2.31 matt [ 42] = "pic int 42 (fmn30)",
340 1.1.2.31 matt [ 43] = "pic int 43 (fmn31)",
341 1.1.2.32 matt [ 44] = "pic int 44 (fmnerr0)",
342 1.1.2.32 matt [ 45] = "pic int 45 (fmnerr1)",
343 1.1.2.31 matt [ 46] = "pic int 46 (pcie_msix0)",
344 1.1.2.31 matt [ 47] = "pic int 47 (pcie_msix1)",
345 1.1.2.31 matt [ 48] = "pic int 48 (pcie_msix2)",
346 1.1.2.31 matt [ 49] = "pic int 49 (pcie_msix3)",
347 1.1.2.31 matt [ 50] = "pic int 50 (pcie_msix4)",
348 1.1.2.31 matt [ 51] = "pic int 51 (pcie_msix5)",
349 1.1.2.31 matt [ 52] = "pic int 52 (pcie_msix6)",
350 1.1.2.31 matt [ 53] = "pic int 53 (pcie_msix7)",
351 1.1.2.31 matt [ 54] = "pic int 54 (pcie_msix8)",
352 1.1.2.31 matt [ 55] = "pic int 55 (pcie_msix9)",
353 1.1.2.31 matt [ 56] = "pic int 56 (pcie_msix10)",
354 1.1.2.31 matt [ 57] = "pic int 57 (pcie_msix11)",
355 1.1.2.31 matt [ 58] = "pic int 58 (pcie_msix12)",
356 1.1.2.31 matt [ 59] = "pic int 59 (pcie_msix13)",
357 1.1.2.31 matt [ 60] = "pic int 60 (pcie_msix14)",
358 1.1.2.31 matt [ 61] = "pic int 61 (pcie_msix15)",
359 1.1.2.31 matt [ 62] = "pic int 62 (pcie_msix16)",
360 1.1.2.31 matt [ 63] = "pic int 63 (pcie_msix17)",
361 1.1.2.31 matt [ 64] = "pic int 64 (pcie_msix18)",
362 1.1.2.31 matt [ 65] = "pic int 65 (pcie_msix19)",
363 1.1.2.31 matt [ 66] = "pic int 66 (pcie_msix20)",
364 1.1.2.31 matt [ 67] = "pic int 67 (pcie_msix21)",
365 1.1.2.31 matt [ 68] = "pic int 68 (pcie_msix22)",
366 1.1.2.31 matt [ 69] = "pic int 69 (pcie_msix23)",
367 1.1.2.31 matt [ 70] = "pic int 70 (pcie_msix24)",
368 1.1.2.31 matt [ 71] = "pic int 71 (pcie_msix25)",
369 1.1.2.31 matt [ 72] = "pic int 72 (pcie_msix26)",
370 1.1.2.31 matt [ 73] = "pic int 73 (pcie_msix27)",
371 1.1.2.31 matt [ 74] = "pic int 74 (pcie_msix28)",
372 1.1.2.31 matt [ 75] = "pic int 75 (pcie_msix29)",
373 1.1.2.31 matt [ 76] = "pic int 76 (pcie_msix30)",
374 1.1.2.31 matt [ 77] = "pic int 77 (pcie_msix31)",
375 1.1.2.31 matt [ 78] = "pic int 78 (pcie_link0)",
376 1.1.2.31 matt [ 79] = "pic int 79 (pcie_link1)",
377 1.1.2.31 matt [ 80] = "pic int 80 (pcie_link2)",
378 1.1.2.31 matt [ 81] = "pic int 81 (pcie_link3)",
379 1.1.2.32 matt [ 82] = "pic int 82 (nae0)",
380 1.1.2.32 matt [ 83] = "pic int 83 (nae1)",
381 1.1.2.32 matt [ 84] = "pic int 84 (nae2)",
382 1.1.2.32 matt [ 85] = "pic int 85 (nae3)",
383 1.1.2.32 matt [ 86] = "pic int 86 (nae4)",
384 1.1.2.32 matt [ 87] = "pic int 87 (nae5)",
385 1.1.2.32 matt [ 88] = "pic int 88 (nae6)",
386 1.1.2.32 matt [ 89] = "pic int 89 (nae7)",
387 1.1.2.32 matt [ 90] = "pic int 90 (nae8)",
388 1.1.2.32 matt [ 91] = "pic int 91 (nae9)",
389 1.1.2.32 matt [ 92] = "pic int 92 (nae10)",
390 1.1.2.32 matt [ 93] = "pic int 93 (nae11)",
391 1.1.2.32 matt [ 94] = "pic int 94 (nae12)",
392 1.1.2.32 matt [ 95] = "pic int 95 (nae13)",
393 1.1.2.32 matt [ 96] = "pic int 96 (nae14)",
394 1.1.2.32 matt [ 97] = "pic int 97 (nae15)",
395 1.1.2.32 matt [ 98] = "pic int 98 (nae16)",
396 1.1.2.32 matt [ 99] = "pic int 99 (nae17)",
397 1.1.2.32 matt [100] = "pic int 100 (nae18)",
398 1.1.2.32 matt [101] = "pic int 101 (?)",
399 1.1.2.32 matt [102] = "pic int 102 (naecom0)",
400 1.1.2.32 matt [103] = "pic int 103 (naecom1)",
401 1.1.2.32 matt [104] = "pic int 104 (?)",
402 1.1.2.32 matt [105] = "pic int 105 (?)",
403 1.1.2.32 matt [106] = "pic int 106 (?)",
404 1.1.2.32 matt [107] = "pic int 107 (?)",
405 1.1.2.32 matt [108] = "pic int 108 (?)",
406 1.1.2.32 matt [109] = "pic int 109 (?)",
407 1.1.2.32 matt [110] = "pic int 110 (?)",
408 1.1.2.32 matt [111] = "pic int 111 (?)",
409 1.1.2.32 matt [112] = "pic int 112 (?)",
410 1.1.2.32 matt [113] = "pic int 113 (?)",
411 1.1.2.31 matt [114] = "pic int 114 (poe)",
412 1.1.2.31 matt [115] = "pic int 115 (ehci0)",
413 1.1.2.31 matt [116] = "pic int 116 (ohci0)",
414 1.1.2.31 matt [117] = "pic int 117 (ohci1)",
415 1.1.2.31 matt [118] = "pic int 118 (ehci1)",
416 1.1.2.31 matt [119] = "pic int 119 (ohci2)",
417 1.1.2.31 matt [120] = "pic int 120 (ohci3)",
418 1.1.2.32 matt [121] = "pic int 121 (dma)",
419 1.1.2.32 matt [122] = "pic int 122 (sae)",
420 1.1.2.32 matt [123] = "pic int 123 (pke)",
421 1.1.2.32 matt [124] = "pic int 124 (cde0)",
422 1.1.2.32 matt [125] = "pic int 125 (cde1)",
423 1.1.2.32 matt [126] = "pic int 126 (cde2)",
424 1.1.2.32 matt [127] = "pic int 127 (cde3)",
425 1.1.2.32 matt [128] = "pic int 128 (?)",
426 1.1.2.32 matt [129] = "pic int 129 (ici0)",
427 1.1.2.32 matt [130] = "pic int 130 (ici1)",
428 1.1.2.32 matt [131] = "pic int 131 (ici2)",
429 1.1.2.31 matt [132] = "pic int 132 (kbp)",
430 1.1.2.31 matt [133] = "pic int 133 (uart0)",
431 1.1.2.31 matt [134] = "pic int 134 (uart1)",
432 1.1.2.31 matt [135] = "pic int 135 (i2c0)",
433 1.1.2.31 matt [136] = "pic int 136 (i2c1)",
434 1.1.2.31 matt [137] = "pic int 137 (sysmgt0)",
435 1.1.2.31 matt [138] = "pic int 138 (sysmgt1)",
436 1.1.2.31 matt [139] = "pic int 139 (jtag)",
437 1.1.2.31 matt [140] = "pic int 140 (pic)",
438 1.1.2.32 matt [141] = "pic int 141 (?)",
439 1.1.2.32 matt [142] = "pic int 142 (?)",
440 1.1.2.32 matt [143] = "pic int 143 (?)",
441 1.1.2.32 matt [144] = "pic int 144 (?)",
442 1.1.2.32 matt [145] = "pic int 145 (?)",
443 1.1.2.31 matt [146] = "pic int 146 (gpio0)",
444 1.1.2.31 matt [147] = "pic int 147 (gpio1)",
445 1.1.2.31 matt [148] = "pic int 148 (gpio2)",
446 1.1.2.31 matt [149] = "pic int 149 (gpio3)",
447 1.1.2.31 matt [150] = "pic int 150 (norflash)",
448 1.1.2.31 matt [151] = "pic int 151 (nandflash)",
449 1.1.2.31 matt [152] = "pic int 152 (spi)",
450 1.1.2.31 matt [153] = "pic int 153 (mmc/sd)",
451 1.1.2.31 matt [154] = "pic int 154 (mem-io-bridge)",
452 1.1.2.31 matt [155] = "pic int 155 (l3)",
453 1.1.2.31 matt [156] = "pic int 156 (gcu)",
454 1.1.2.31 matt [157] = "pic int 157 (dram3_0)",
455 1.1.2.31 matt [158] = "pic int 158 (dram3_1)",
456 1.1.2.31 matt [159] = "pic int 159 (tracebuf)",
457 1.1.2.31 matt };
458 1.1.2.32 matt
459 1.1.2.32 matt /*
460 1.1.2.32 matt * rmixl_irtnames_xlp3xx:
461 1.1.2.32 matt * - use for XLP3xx
462 1.1.2.32 matt */
463 1.1.2.32 matt static const char * const rmixl_irtnames_xlp3xx[RMIXLP_NIRTS] = {
464 1.1.2.32 matt [ 0] = "pic int 0 (watchdog0)",
465 1.1.2.32 matt [ 1] = "pic int 1 (watchdog1)",
466 1.1.2.32 matt [ 2] = "pic int 2 (watchdogNMI0)",
467 1.1.2.32 matt [ 3] = "pic int 3 (watchdogNMI1)",
468 1.1.2.32 matt [ 4] = "pic int 4 (timer0)",
469 1.1.2.32 matt [ 5] = "pic int 5 (timer1)",
470 1.1.2.32 matt [ 6] = "pic int 6 (timer2)",
471 1.1.2.32 matt [ 7] = "pic int 7 (timer3)",
472 1.1.2.32 matt [ 8] = "pic int 8 (timer4)",
473 1.1.2.32 matt [ 9] = "pic int 9 (timer5)",
474 1.1.2.32 matt [ 10] = "pic int 10 (timer6)",
475 1.1.2.32 matt [ 11] = "pic int 11 (timer7)",
476 1.1.2.32 matt [ 12] = "pic int 12 (gpio0)",
477 1.1.2.32 matt [ 13] = "pic int 13 (gpio1)",
478 1.1.2.32 matt [ 14] = "pic int 14 (gpio2)",
479 1.1.2.32 matt [ 15] = "pic int 15 (gpio3)",
480 1.1.2.32 matt [ 16] = "pic int 16 (gpio4)",
481 1.1.2.32 matt [ 17] = "pic int 17 (gpio5)",
482 1.1.2.32 matt [ 18] = "pic int 18 (gpio6)",
483 1.1.2.32 matt [ 19] = "pic int 19 (gpio7)",
484 1.1.2.32 matt [ 20] = "pic int 20 (gpio8)",
485 1.1.2.32 matt [ 21] = "pic int 21 (gpio9)",
486 1.1.2.32 matt [ 22] = "pic int 22 (gpio10)",
487 1.1.2.32 matt [ 23] = "pic int 23 (gpio11)",
488 1.1.2.32 matt [ 24] = "pic int 24 (?)",
489 1.1.2.32 matt [ 25] = "pic int 25 (?)",
490 1.1.2.32 matt [ 26] = "pic int 26 (?)",
491 1.1.2.32 matt [ 27] = "pic int 27 (?)",
492 1.1.2.32 matt [ 28] = "pic int 28 (fmn0)",
493 1.1.2.32 matt [ 29] = "pic int 29 (fmn1)",
494 1.1.2.32 matt [ 30] = "pic int 30 (fmn2)",
495 1.1.2.32 matt [ 31] = "pic int 31 (fmn3)",
496 1.1.2.32 matt [ 32] = "pic int 32 (fmn4)",
497 1.1.2.32 matt [ 33] = "pic int 33 (fmn5)",
498 1.1.2.32 matt [ 34] = "pic int 34 (fmn6)",
499 1.1.2.32 matt [ 35] = "pic int 35 (fmn7)",
500 1.1.2.32 matt [ 36] = "pic int 36 (fmn8)",
501 1.1.2.32 matt [ 37] = "pic int 37 (fmn9)",
502 1.1.2.32 matt [ 38] = "pic int 38 (fmn10)",
503 1.1.2.32 matt [ 39] = "pic int 39 (fmn11)",
504 1.1.2.32 matt [ 40] = "pic int 40 (fmn12)",
505 1.1.2.32 matt [ 41] = "pic int 41 (fmn13)",
506 1.1.2.32 matt [ 42] = "pic int 42 (fmn14)",
507 1.1.2.32 matt [ 43] = "pic int 43 (fmn15)",
508 1.1.2.32 matt [ 44] = "pic int 44 (fmnerr0)",
509 1.1.2.32 matt [ 45] = "pic int 45 (fmnerr1)",
510 1.1.2.32 matt [ 46] = "pic int 46 (pcie_msix0)",
511 1.1.2.32 matt [ 47] = "pic int 47 (pcie_msix1)",
512 1.1.2.32 matt [ 48] = "pic int 48 (pcie_msix2)",
513 1.1.2.32 matt [ 49] = "pic int 49 (pcie_msix3)",
514 1.1.2.32 matt [ 50] = "pic int 50 (pcie_msix4)",
515 1.1.2.32 matt [ 51] = "pic int 51 (pcie_msix5)",
516 1.1.2.32 matt [ 52] = "pic int 52 (pcie_msix6)",
517 1.1.2.32 matt [ 53] = "pic int 53 (pcie_msix7)",
518 1.1.2.32 matt [ 54] = "pic int 54 (pcie_msix8)",
519 1.1.2.32 matt [ 55] = "pic int 55 (pcie_msix9)",
520 1.1.2.32 matt [ 56] = "pic int 56 (pcie_msix10)",
521 1.1.2.32 matt [ 57] = "pic int 57 (pcie_msix11)",
522 1.1.2.32 matt [ 58] = "pic int 58 (pcie_msix12)",
523 1.1.2.32 matt [ 59] = "pic int 59 (pcie_msix13)",
524 1.1.2.32 matt [ 60] = "pic int 60 (pcie_msix14)",
525 1.1.2.32 matt [ 61] = "pic int 61 (pcie_msix15)",
526 1.1.2.32 matt [ 62] = "pic int 62 (pcie_msix16)",
527 1.1.2.32 matt [ 63] = "pic int 63 (pcie_msix17)",
528 1.1.2.32 matt [ 64] = "pic int 64 (pcie_msix18)",
529 1.1.2.32 matt [ 65] = "pic int 65 (pcie_msix19)",
530 1.1.2.32 matt [ 66] = "pic int 66 (pcie_msix20)",
531 1.1.2.32 matt [ 67] = "pic int 67 (pcie_msix21)",
532 1.1.2.32 matt [ 68] = "pic int 68 (pcie_msix22)",
533 1.1.2.32 matt [ 69] = "pic int 69 (pcie_msix23)",
534 1.1.2.32 matt [ 70] = "pic int 70 (pcie_msix24)",
535 1.1.2.32 matt [ 71] = "pic int 71 (pcie_msix25)",
536 1.1.2.32 matt [ 72] = "pic int 72 (pcie_msix26)",
537 1.1.2.32 matt [ 73] = "pic int 73 (pcie_msix27)",
538 1.1.2.32 matt [ 74] = "pic int 74 (pcie_msix28)",
539 1.1.2.32 matt [ 75] = "pic int 75 (pcie_msix29)",
540 1.1.2.32 matt [ 76] = "pic int 76 (pcie_msix30)",
541 1.1.2.32 matt [ 77] = "pic int 77 (pcie_msix31)",
542 1.1.2.32 matt [ 78] = "pic int 78 (pcie_link0)",
543 1.1.2.32 matt [ 79] = "pic int 79 (pcie_link1)",
544 1.1.2.32 matt [ 80] = "pic int 80 (pcie_link2)",
545 1.1.2.32 matt [ 81] = "pic int 81 (pcie_link3)",
546 1.1.2.32 matt [ 82] = "pic int 82 (?)",
547 1.1.2.32 matt [ 83] = "pic int 83 (?)",
548 1.1.2.32 matt [ 84] = "pic int 84 (?)",
549 1.1.2.32 matt [ 85] = "pic int 85 (?)",
550 1.1.2.32 matt [ 86] = "pic int 86 (?)",
551 1.1.2.32 matt [ 87] = "pic int 87 (?)",
552 1.1.2.32 matt [ 88] = "pic int 88 (?)",
553 1.1.2.32 matt [ 89] = "pic int 89 (?)",
554 1.1.2.32 matt [ 90] = "pic int 90 (?)",
555 1.1.2.32 matt [ 91] = "pic int 91 (?)",
556 1.1.2.32 matt [ 92] = "pic int 92 (?)",
557 1.1.2.32 matt [ 93] = "pic int 93 (?)",
558 1.1.2.32 matt [ 94] = "pic int 94 (?)",
559 1.1.2.32 matt [ 95] = "pic int 95 (?)",
560 1.1.2.32 matt [ 96] = "pic int 96 (?)",
561 1.1.2.32 matt [ 97] = "pic int 97 (?)",
562 1.1.2.32 matt [ 98] = "pic int 98 (nae0)",
563 1.1.2.32 matt [ 99] = "pic int 99 (nae1)",
564 1.1.2.32 matt [100] = "pic int 100 (nae2)",
565 1.1.2.32 matt [101] = "pic int 101 (nae3)",
566 1.1.2.32 matt [102] = "pic int 102 (nae4)",
567 1.1.2.32 matt [103] = "pic int 103 (nae5)",
568 1.1.2.32 matt [104] = "pic int 104 (nae6)",
569 1.1.2.32 matt [105] = "pic int 105 (nae7)",
570 1.1.2.32 matt [106] = "pic int 106 (nae8)",
571 1.1.2.32 matt [107] = "pic int 107 (?)",
572 1.1.2.32 matt [108] = "pic int 108 (?)",
573 1.1.2.32 matt [109] = "pic int 109 (?)",
574 1.1.2.32 matt [110] = "pic int 110 (naecom0)",
575 1.1.2.32 matt [111] = "pic int 111 (naecom1)",
576 1.1.2.32 matt [112] = "pic int 112 (?)",
577 1.1.2.32 matt [113] = "pic int 113 (?)",
578 1.1.2.32 matt [114] = "pic int 114 (poe)",
579 1.1.2.32 matt [115] = "pic int 115 (ehci0)",
580 1.1.2.32 matt [116] = "pic int 116 (ohci0)",
581 1.1.2.32 matt [117] = "pic int 117 (ohci1)",
582 1.1.2.32 matt [118] = "pic int 118 (ehci1)",
583 1.1.2.32 matt [119] = "pic int 119 (ohci2)",
584 1.1.2.32 matt [120] = "pic int 120 (ohci3)",
585 1.1.2.32 matt [121] = "pic int 121 (dma)",
586 1.1.2.32 matt [122] = "pic int 122 (sae)",
587 1.1.2.32 matt [123] = "pic int 123 (pke)",
588 1.1.2.32 matt [124] = "pic int 124 (?)",
589 1.1.2.32 matt [125] = "pic int 125 (?)",
590 1.1.2.32 matt [126] = "pic int 126 (?)",
591 1.1.2.32 matt [127] = "pic int 127 (?)",
592 1.1.2.32 matt [128] = "pic int 128 (?)",
593 1.1.2.32 matt [129] = "pic int 129 (?)",
594 1.1.2.32 matt [130] = "pic int 130 (?)",
595 1.1.2.32 matt [131] = "pic int 131 (?)",
596 1.1.2.32 matt [132] = "pic int 132 (?)",
597 1.1.2.32 matt [133] = "pic int 133 (uart0)",
598 1.1.2.32 matt [134] = "pic int 134 (uart1)",
599 1.1.2.32 matt [135] = "pic int 135 (i2c0)",
600 1.1.2.32 matt [136] = "pic int 136 (i2c1)",
601 1.1.2.32 matt [137] = "pic int 137 (sysmgt0)",
602 1.1.2.32 matt [138] = "pic int 138 (sysmgt1)",
603 1.1.2.32 matt [139] = "pic int 139 (jtag)",
604 1.1.2.32 matt [140] = "pic int 140 (pic)",
605 1.1.2.32 matt [141] = "pic int 141 (rxe0)",
606 1.1.2.32 matt [142] = "pic int 142 (rxe1)",
607 1.1.2.32 matt [143] = "pic int 143 (sata)",
608 1.1.2.32 matt [144] = "pic int 144 (srio0)",
609 1.1.2.32 matt [145] = "pic int 145 (srio1)",
610 1.1.2.32 matt [146] = "pic int 146 (srio2)",
611 1.1.2.32 matt [147] = "pic int 147 (srio3)",
612 1.1.2.32 matt [148] = "pic int 148 (srio4)",
613 1.1.2.32 matt [149] = "pic int 149 (?)",
614 1.1.2.32 matt [150] = "pic int 150 (norflash)",
615 1.1.2.32 matt [151] = "pic int 151 (nandflash)",
616 1.1.2.32 matt [152] = "pic int 152 (spi)",
617 1.1.2.32 matt [153] = "pic int 153 (mmc/sd)",
618 1.1.2.32 matt [154] = "pic int 154 (mem-io-bridge)",
619 1.1.2.32 matt [155] = "pic int 155 (l3)",
620 1.1.2.32 matt [156] = "pic int 156 (?)",
621 1.1.2.32 matt [157] = "pic int 157 (dram3_0)",
622 1.1.2.32 matt [158] = "pic int 158 (dram3_1)",
623 1.1.2.32 matt [159] = "pic int 159 (tracebuf)",
624 1.1.2.32 matt };
625 1.1.2.32 matt
626 1.1.2.32 matt #endif /* MIPS64_XLP */
627 1.1.2.31 matt /*
628 1.1.2.15 cliff * rmixl_vecnames_common:
629 1.1.2.4 cliff * - use for unknown cpu implementation
630 1.1.2.15 cliff * - covers all vectors, not just IRT intrs
631 1.1.2.4 cliff */
632 1.1.2.15 cliff static const char * const rmixl_vecnames_common[NINTRVECS] = {
633 1.1.2.31 matt "vec 0 (sw0)", /* 0 */
634 1.1.2.31 matt "vec 1 (sw1)", /* 1 */
635 1.1.2.31 matt "vec 2 (hw2)", /* 2 */
636 1.1.2.31 matt "vec 3 (hw3)", /* 3 */
637 1.1.2.31 matt "vec 4 (hw4)", /* 4 */
638 1.1.2.31 matt "vec 5 (hw5)", /* 5 */
639 1.1.2.31 matt "vec 6 (hw6)", /* 6 */
640 1.1.2.31 matt "vec 7 (hw7)", /* 7 */
641 1.1.2.31 matt "vec 8", /* 8 */
642 1.1.2.31 matt "vec 9", /* 9 */
643 1.1.2.31 matt "vec 10", /* 10 */
644 1.1.2.31 matt "vec 11", /* 11 */
645 1.1.2.31 matt "vec 12", /* 12 */
646 1.1.2.31 matt "vec 13", /* 13 */
647 1.1.2.31 matt "vec 14", /* 14 */
648 1.1.2.31 matt "vec 15", /* 15 */
649 1.1.2.30 matt "vec 16", /* 16 */
650 1.1.2.20 cliff "vec 17", /* 17 */
651 1.1.2.20 cliff "vec 18", /* 18 */
652 1.1.2.20 cliff "vec 19", /* 19 */
653 1.1.2.20 cliff "vec 20", /* 20 */
654 1.1.2.20 cliff "vec 21", /* 21 */
655 1.1.2.20 cliff "vec 22", /* 22 */
656 1.1.2.20 cliff "vec 23", /* 23 */
657 1.1.2.20 cliff "vec 24", /* 24 */
658 1.1.2.20 cliff "vec 25", /* 25 */
659 1.1.2.20 cliff "vec 26", /* 26 */
660 1.1.2.20 cliff "vec 27", /* 27 */
661 1.1.2.20 cliff "vec 28", /* 28 */
662 1.1.2.20 cliff "vec 29", /* 29 */
663 1.1.2.20 cliff "vec 30", /* 30 */
664 1.1.2.20 cliff "vec 31", /* 31 */
665 1.1.2.20 cliff "vec 32", /* 32 */
666 1.1.2.20 cliff "vec 33", /* 33 */
667 1.1.2.20 cliff "vec 34", /* 34 */
668 1.1.2.20 cliff "vec 35", /* 35 */
669 1.1.2.20 cliff "vec 36", /* 36 */
670 1.1.2.20 cliff "vec 37", /* 37 */
671 1.1.2.20 cliff "vec 38", /* 38 */
672 1.1.2.20 cliff "vec 39", /* 39 */
673 1.1.2.20 cliff "vec 40", /* 40 */
674 1.1.2.20 cliff "vec 41", /* 41 */
675 1.1.2.20 cliff "vec 42", /* 42 */
676 1.1.2.20 cliff "vec 43", /* 43 */
677 1.1.2.20 cliff "vec 44", /* 44 */
678 1.1.2.20 cliff "vec 45", /* 45 */
679 1.1.2.20 cliff "vec 46", /* 46 */
680 1.1.2.20 cliff "vec 47", /* 47 */
681 1.1.2.20 cliff "vec 48", /* 48 */
682 1.1.2.20 cliff "vec 49", /* 49 */
683 1.1.2.20 cliff "vec 50", /* 50 */
684 1.1.2.20 cliff "vec 51", /* 51 */
685 1.1.2.20 cliff "vec 52", /* 52 */
686 1.1.2.20 cliff "vec 53", /* 53 */
687 1.1.2.20 cliff "vec 54", /* 54 */
688 1.1.2.20 cliff "vec 55", /* 55 */
689 1.1.2.20 cliff "vec 56", /* 56 */
690 1.1.2.20 cliff "vec 57", /* 57 */
691 1.1.2.20 cliff "vec 58", /* 58 */
692 1.1.2.20 cliff "vec 59", /* 59 */
693 1.1.2.20 cliff "vec 60", /* 60 */
694 1.1.2.20 cliff "vec 61", /* 61 */
695 1.1.2.20 cliff "vec 62", /* 62 */
696 1.1.2.20 cliff "vec 63", /* 63 */
697 1.1.2.4 cliff };
698 1.1.2.4 cliff
699 1.1.2.4 cliff /*
700 1.1.2.15 cliff * mask of CPUs attached
701 1.1.2.30 matt * once all CPUs are attached, this var is read-only and hence MP-safe
702 1.1.2.2 cliff */
703 1.1.2.31 matt static __cpuset_t cpu_present_mask;
704 1.1.2.1 cliff
705 1.1.2.30 matt kmutex_t *rmixl_ipi_lock; /* covers RMIXL_PIC_IPIBASE */
706 1.1.2.30 matt kmutex_t *rmixl_intr_lock; /* covers rest of PIC, and rmixl_intrhand[] */
707 1.1.2.31 matt rmixl_intrvecq_t rmixl_intrvec_lruq[_IPL_N] = {
708 1.1.2.31 matt [IPL_NONE] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_NONE]),
709 1.1.2.31 matt [IPL_SOFTCLOCK] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_SOFTCLOCK]),
710 1.1.2.31 matt [IPL_SOFTNET] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_SOFTNET]),
711 1.1.2.31 matt [IPL_VM] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_VM]),
712 1.1.2.31 matt [IPL_SCHED] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_SCHED]),
713 1.1.2.31 matt [IPL_DDB] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_DDB]),
714 1.1.2.31 matt [IPL_HIGH] = TAILQ_HEAD_INITIALIZER(rmixl_intrvec_lruq[IPL_HIGH]),
715 1.1.2.31 matt };
716 1.1.2.31 matt rmixl_intrvec_t rmixl_intrvec[NINTRVECS];
717 1.1.2.31 matt rmixl_intrhand_t rmixl_irt_intrhands[MAX(MAX(RMIXLR_NIRTS,RMIXLS_NIRTS), RMIXLP_NIRTS)];
718 1.1.2.31 matt static u_int rmixl_nirts;
719 1.1.2.31 matt const char * const *rmixl_irtnames;
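
/*
 * Vector allocation scheme (summarizing rmixl_intr_get_vec() and
 * rmixl_vec_establish() below): vectors above the fixed MIPS sw/hw
 * vectors start on the IPL_NONE (free) lru queue; establishing a handler
 * moves a vector to the tail of its IPL's queue, and when no free vector
 * is left the head of the requested IPL's queue (the least recently
 * assigned vector) is shared.
 */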
720 1.1.2.1 cliff
721 1.1.2.15 cliff #ifdef DIAGNOSTIC
722 1.1.2.15 cliff static int rmixl_pic_init_done;
723 1.1.2.15 cliff #endif
724 1.1.2.2 cliff
725 1.1.2.1 cliff
726 1.1.2.31 matt static uint32_t rmixl_irt_thread_mask(__cpuset_t);
727 1.1.2.31 matt static void rmixl_irt_init(size_t);
728 1.1.2.31 matt static void rmixl_irt_disestablish(size_t);
729 1.1.2.33 matt static void rmixl_irt_establish(size_t, size_t, int);
730 1.1.2.31 matt static size_t rmixl_intr_get_vec(int);
731 1.1.2.2 cliff
732 1.1.2.15 cliff #ifdef MULTIPROCESSOR
733 1.1.2.15 cliff static int rmixl_send_ipi(struct cpu_info *, int);
734 1.1.2.15 cliff static int rmixl_ipi_intr(void *);
735 1.1.2.15 cliff #endif
736 1.1.2.15 cliff
737 1.1.2.23 rmind #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
738 1.1.2.31 matt int rmixl_intrvec_print_subr(size_t);
739 1.1.2.20 cliff int rmixl_intrhand_print(void);
740 1.1.2.20 cliff int rmixl_irt_print(void);
741 1.1.2.20 cliff void rmixl_ipl_eimr_map_print(void);
742 1.1.2.4 cliff #endif
743 1.1.2.2 cliff
744 1.1.2.6 cliff
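/*
 * dclz:
 * - count the leading zero bits of a 64-bit value using the MIPS64
 *   dclz instruction; for non-zero val, 63 - dclz(val) is the index of
 *   the most significant set bit (e.g. the highest pending EIRR bit)
 */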
745 1.1.2.15 cliff static inline u_int
746 1.1.2.15 cliff dclz(uint64_t val)
747 1.1.2.15 cliff {
748 1.1.2.31 matt u_int nlz;
749 1.1.2.6 cliff
750 1.1.2.31 matt __asm volatile("dclz %0, %1" : "=r"(nlz) : "r"(val));
751 1.1.2.15 cliff
752 1.1.2.15 cliff return nlz;
753 1.1.2.15 cliff }
754 1.1.2.6 cliff
755 1.1.2.1 cliff void
756 1.1.2.1 cliff evbmips_intr_init(void)
757 1.1.2.1 cliff {
758 1.1.2.31 matt const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
759 1.1.2.31 matt const bool is_xlr_p = cpu_rmixlr(mips_options.mips_cpu);
760 1.1.2.31 matt const bool is_xls_p = cpu_rmixls(mips_options.mips_cpu);
761 1.1.2.1 cliff
762 1.1.2.31 matt KASSERT(is_xlp_p || is_xlr_p || is_xls_p);
763 1.1.2.5 cliff
764 1.1.2.31 matt /*
765 1.1.2.31 matt * The number of IRT entries differs between XLP and XLR/XLS.
766 1.1.2.31 matt */
767 1.1.2.31 matt if (is_xlp_p) {
768 1.1.2.32 matt #ifdef MIPS64_XLP
769 1.1.2.32 matt if (rmixl_xlp_variant >= RMIXLP_3XX) {
770 1.1.2.32 matt rmixl_irtnames = rmixl_irtnames_xlp3xx;
771 1.1.2.32 matt rmixl_nirts = __arraycount(rmixl_irtnames_xlp3xx);
772 1.1.2.32 matt } else {
773 1.1.2.32 matt rmixl_irtnames = rmixl_irtnames_xlp8xx;
774 1.1.2.32 matt rmixl_nirts = __arraycount(rmixl_irtnames_xlp8xx);
775 1.1.2.32 matt }
776 1.1.2.32 matt #endif
777 1.1.2.31 matt } else if (is_xlr_p) {
778 1.1.2.32 matt #ifdef MIPS64_XLR
779 1.1.2.31 matt rmixl_irtnames = rmixl_irtnames_xlrxxx;
780 1.1.2.31 matt rmixl_nirts = __arraycount(rmixl_irtnames_xlrxxx);
781 1.1.2.32 matt #endif
782 1.1.2.31 matt } else if (is_xls_p) {
783 1.1.2.32 matt #ifdef MIPS64_XLS
784 1.1.2.31 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
785 1.1.2.31 matt case MIPS_XLS104:
786 1.1.2.31 matt case MIPS_XLS108:
787 1.1.2.31 matt case MIPS_XLS404LITE:
788 1.1.2.31 matt case MIPS_XLS408LITE:
789 1.1.2.31 matt rmixl_irtnames = rmixl_irtnames_xls1xx;
790 1.1.2.31 matt rmixl_nirts = __arraycount(rmixl_irtnames_xls1xx);
791 1.1.2.31 matt break;
792 1.1.2.31 matt case MIPS_XLS204:
793 1.1.2.31 matt case MIPS_XLS208:
794 1.1.2.31 matt rmixl_irtnames = rmixl_irtnames_xls2xx;
795 1.1.2.31 matt rmixl_nirts = __arraycount(rmixl_irtnames_xls2xx);
796 1.1.2.31 matt break;
797 1.1.2.31 matt case MIPS_XLS404:
798 1.1.2.31 matt case MIPS_XLS408:
799 1.1.2.31 matt case MIPS_XLS416:
800 1.1.2.31 matt case MIPS_XLS608:
801 1.1.2.31 matt case MIPS_XLS616:
802 1.1.2.31 matt rmixl_irtnames = rmixl_irtnames_xls4xx;
803 1.1.2.31 matt rmixl_nirts = __arraycount(rmixl_irtnames_xls4xx);
804 1.1.2.31 matt break;
805 1.1.2.31 matt default:
806 1.1.2.31 matt rmixl_irtnames = rmixl_vecnames_common;
807 1.1.2.31 matt rmixl_nirts = __arraycount(rmixl_vecnames_common);
808 1.1.2.31 matt break;
809 1.1.2.31 matt }
810 1.1.2.32 matt #endif /* MIPS64_XLS */
811 1.1.2.31 matt }
812 1.1.2.4 cliff
813 1.1.2.15 cliff #ifdef DIAGNOSTIC
814 1.1.2.15 cliff if (rmixl_pic_init_done != 0)
815 1.1.2.15 cliff panic("%s: rmixl_pic_init_done %d",
816 1.1.2.15 cliff __func__, rmixl_pic_init_done);
817 1.1.2.15 cliff #endif
818 1.1.2.1 cliff
819 1.1.2.28 cliff rmixl_ipi_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_HIGH);
820 1.1.2.28 cliff rmixl_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_HIGH);
821 1.1.2.25 cliff
822 1.1.2.28 cliff mutex_enter(rmixl_intr_lock);
823 1.1.2.25 cliff
824 1.1.2.15 cliff /*
825 1.1.2.31 matt * Insert all vectors beyond the normal MIPS sw/hw vectors (IPIs included) on the lru queue.
826 1.1.2.31 matt */
827 1.1.2.31 matt for (size_t i = RMIXL_INTRVEC_IPI; i < NINTRVECS; i++) {
828 1.1.2.31 matt TAILQ_INSERT_TAIL(&rmixl_intrvec_lruq[IPL_NONE],
829 1.1.2.31 matt &rmixl_intrvec[i], iv_lruq_link);
830 1.1.2.31 matt }
831 1.1.2.31 matt
832 1.1.2.31 matt /*
833 1.1.2.15 cliff * initialize (zero) all IRT Entries in the PIC
834 1.1.2.15 cliff */
835 1.1.2.31 matt for (size_t i = 0; i < rmixl_nirts; i++) {
836 1.1.2.15 cliff rmixl_irt_init(i);
837 1.1.2.30 matt }
838 1.1.2.1 cliff
839 1.1.2.2 cliff /*
840 1.1.2.4 cliff * disable watchdog NMI, timers
841 1.1.2.4 cliff */
842 1.1.2.31 matt if (is_xlp_p) {
843 1.1.2.31 matt /*
844 1.1.2.31 matt * Reset the interrupt thread enables to disable all CPUs.
845 1.1.2.31 matt */
846 1.1.2.31 matt for (size_t i = 0; i < 8; i++) {
847 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_THREAD_ENABLE01(i), 0);
848 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_THREAD_ENABLE23(i), 0);
849 1.1.2.31 matt }
850 1.1.2.31 matt
851 1.1.2.31 matt /*
852 1.1.2.31 matt * Enable interrupts for node 0 core 0 thread 0.
853 1.1.2.31 matt */
854 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_THREAD_ENABLE01(0), 1);
855 1.1.2.31 matt
856 1.1.2.31 matt /*
857 1.1.2.31 matt * Disable watchdogs and system timers.
858 1.1.2.31 matt */
859 1.1.2.31 matt uint64_t r = RMIXLP_PICREG_READ(RMIXLP_PIC_CTRL);
860 1.1.2.31 matt r &= ~(RMIXLP_PIC_CTRL_WTE|RMIXLP_PIC_CTRL_STE);
861 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_CTRL, r);
862 1.1.2.31 matt } else {
863 1.1.2.31 matt /*
864 1.1.2.31 matt * XXX
865 1.1.2.31 matt * WATCHDOG_ENB is preserved because clearing it causes
866 1.1.2.31 matt * hang on the XLS616 (but not on the XLS408)
867 1.1.2.31 matt */
868 1.1.2.31 matt uint32_t r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
869 1.1.2.31 matt r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
870 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);
871 1.1.2.31 matt }
872 1.1.2.2 cliff
873 1.1.2.4 cliff #ifdef DIAGNOSTIC
874 1.1.2.15 cliff rmixl_pic_init_done = 1;
875 1.1.2.4 cliff #endif
876 1.1.2.28 cliff mutex_exit(rmixl_intr_lock);
877 1.1.2.4 cliff }
878 1.1.2.4 cliff
879 1.1.2.15 cliff /*
880 1.1.2.15 cliff * establish the vector for the mips3 count/compare clock interrupt;
881 1.1.2.15 cliff * this ensures it gets enabled in the EIRR,
882 1.1.2.15 cliff * even though cpu_intr() handles the interrupt itself.
883 1.1.2.17 cliff * note: the 'mpsafe' arg passed to rmixl_vec_establish is a placeholder only
884 1.1.2.15 cliff */
885 1.1.2.27 cliff void
886 1.1.2.15 cliff rmixl_intr_init_clk(void)
887 1.1.2.15 cliff {
888 1.1.2.31 matt const size_t vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;
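	/* with the usual Cause IP layout this is vector 7 (IP7, count/compare) */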
889 1.1.2.25 cliff
890 1.1.2.28 cliff mutex_enter(rmixl_intr_lock);
891 1.1.2.25 cliff
892 1.1.2.31 matt void *ih = rmixl_vec_establish(vec, NULL, IPL_SCHED, NULL, NULL, false);
893 1.1.2.15 cliff if (ih == NULL)
894 1.1.2.31 matt panic("%s: establish vec %zu failed", __func__, vec);
895 1.1.2.25 cliff
896 1.1.2.28 cliff mutex_exit(rmixl_intr_lock);
897 1.1.2.15 cliff }
898 1.1.2.15 cliff
899 1.1.2.15 cliff #ifdef MULTIPROCESSOR
900 1.1.2.15 cliff /*
901 1.1.2.15 cliff * establish IPI interrupt and send function
902 1.1.2.15 cliff */
903 1.1.2.27 cliff void
904 1.1.2.15 cliff rmixl_intr_init_ipi(void)
905 1.1.2.15 cliff {
906 1.1.2.28 cliff mutex_enter(rmixl_intr_lock);
907 1.1.2.25 cliff
908 1.1.2.31 matt for (size_t ipi = 0; ipi < NIPIS; ipi++) {
909 1.1.2.31 matt const size_t vec = RMIXL_INTRVEC_IPI + ipi;
910 1.1.2.31 matt void * const ih = rmixl_vec_establish(vec, NULL, IPL_SCHED,
911 1.1.2.25 cliff rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
912 1.1.2.25 cliff if (ih == NULL)
913 1.1.2.31 matt panic("%s: establish ipi %zu at vec %zu failed",
914 1.1.2.25 cliff __func__, ipi, vec);
915 1.1.2.25 cliff }
916 1.1.2.15 cliff
917 1.1.2.15 cliff mips_locoresw.lsw_send_ipi = rmixl_send_ipi;
918 1.1.2.15 cliff
919 1.1.2.28 cliff mutex_exit(rmixl_intr_lock);
920 1.1.2.15 cliff }
921 1.1.2.15 cliff #endif /* MULTIPROCESSOR */
922 1.1.2.15 cliff
923 1.1.2.15 cliff /*
924 1.1.2.15 cliff * initialize per-cpu interrupt stuff in softc
925 1.1.2.15 cliff * accumulate per-cpu bits in 'cpu_present_mask'
926 1.1.2.15 cliff */
927 1.1.2.15 cliff void
928 1.1.2.15 cliff rmixl_intr_init_cpu(struct cpu_info *ci)
929 1.1.2.15 cliff {
930 1.1.2.31 matt struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
931 1.1.2.31 matt const char * xname = device_xname(sc->sc_dev);
932 1.1.2.21 cliff
933 1.1.2.15 cliff KASSERT(sc != NULL);
934 1.1.2.31 matt KASSERT(NINTRVECS <= __arraycount(sc->sc_vec_evcnts));
935 1.1.2.31 matt KASSERT(rmixl_nirts <= __arraycount(sc->sc_irt_evcnts));
936 1.1.2.15 cliff
937 1.1.2.31 matt for (size_t vec = 0; vec < NINTRVECS; vec++) {
938 1.1.2.15 cliff evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
939 1.1.2.31 matt EVCNT_TYPE_INTR, NULL, xname, rmixl_intr_string(vec));
940 1.1.2.31 matt }
941 1.1.2.31 matt
942 1.1.2.31 matt for (size_t irt = 0; irt < rmixl_nirts; irt++) {
943 1.1.2.31 matt evcnt_attach_dynamic(&sc->sc_irt_evcnts[irt],
944 1.1.2.31 matt EVCNT_TYPE_INTR, NULL, xname, rmixl_irtnames[irt]);
945 1.1.2.31 matt }
946 1.1.2.15 cliff
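	/* record this CPU in cpu_present_mask for rmixl_irt_thread_mask() */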
947 1.1.2.26 cliff KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
948 1.1.2.26 cliff atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
949 1.1.2.15 cliff }
950 1.1.2.15 cliff
951 1.1.2.31 matt const char *
952 1.1.2.31 matt rmixl_irt_string(size_t irt)
953 1.1.2.31 matt {
954 1.1.2.31 matt KASSERT(irt < rmixl_nirts);
955 1.1.2.31 matt
956 1.1.2.31 matt return rmixl_irtnames[irt];
957 1.1.2.31 matt }
958 1.1.2.31 matt
959 1.1.2.15 cliff /*
960 1.1.2.15 cliff * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
961 1.1.2.15 cliff */
962 1.1.2.4 cliff const char *
963 1.1.2.31 matt rmixl_intr_string(size_t vec)
964 1.1.2.4 cliff {
965 1.1.2.20 cliff
966 1.1.2.31 matt if (vec >= NINTRVECS)
967 1.1.2.31 matt panic("%s: vec index %zu out of range, max %d",
968 1.1.2.20 cliff __func__, vec, NINTRVECS - 1);
969 1.1.2.15 cliff
970 1.1.2.31 matt return rmixl_vecnames_common[vec];
971 1.1.2.16 cliff }
972 1.1.2.16 cliff
973 1.1.2.31 matt size_t
974 1.1.2.31 matt rmixl_intr_get_vec(int ipl)
975 1.1.2.16 cliff {
976 1.1.2.31 matt KASSERT(mutex_owned(rmixl_intr_lock));
977 1.1.2.31 matt KASSERT(IPL_VM <= ipl && ipl <= IPL_HIGH);
978 1.1.2.16 cliff
979 1.1.2.31 matt /*
980 1.1.2.31 matt * In reality, higher IPLs should get higher vec numbers,
981 1.1.2.31 matt * but for now don't worry about it.
982 1.1.2.31 matt */
983 1.1.2.31 matt struct rmixl_intrvecq * freeq = &rmixl_intrvec_lruq[IPL_NONE];
984 1.1.2.31 matt struct rmixl_intrvecq * iplq = &rmixl_intrvec_lruq[ipl];
985 1.1.2.31 matt rmixl_intrvec_t *iv;
986 1.1.2.16 cliff
987 1.1.2.31 matt /*
988 1.1.2.31 matt * If there's a free vector, grab it otherwise choose the least
989 1.1.2.31 matt * recently assigned vector sharing this IPL.
990 1.1.2.31 matt */
991 1.1.2.31 matt if ((iv = TAILQ_FIRST(freeq)) == NULL) {
992 1.1.2.31 matt iv = TAILQ_FIRST(iplq);
993 1.1.2.31 matt KASSERT(iv != NULL);
994 1.1.2.4 cliff }
995 1.1.2.31 matt
996 1.1.2.31 matt return iv - rmixl_intrvec;
997 1.1.2.1 cliff }
998 1.1.2.1 cliff
999 1.1.2.6 cliff /*
1000 1.1.2.15 cliff * rmixl_irt_thread_mask
1001 1.1.2.15 cliff *
1002 1.1.2.15 cliff * given a bitmask of cpus, return an IRT thread mask
1003 1.1.2.6 cliff */
1004 1.1.2.15 cliff static uint32_t
1005 1.1.2.31 matt rmixl_irt_thread_mask(__cpuset_t cpumask)
1006 1.1.2.6 cliff {
1007 1.1.2.15 cliff uint32_t irtc0;
1008 1.1.2.15 cliff
1009 1.1.2.15 cliff #if defined(MULTIPROCESSOR)
1010 1.1.2.15 cliff #ifndef NOTYET
1011 1.1.2.15 cliff if (cpumask == -1)
1012 1.1.2.15 cliff return 1; /* XXX TMP FIXME */
1013 1.1.2.15 cliff #endif
1014 1.1.2.8 cliff
1015 1.1.2.8 cliff /*
1016 1.1.2.15 cliff * discount cpus not present
1017 1.1.2.8 cliff */
1018 1.1.2.15 cliff cpumask &= cpu_present_mask;
1019 1.1.2.15 cliff
1020 1.1.2.8 cliff switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
1021 1.1.2.8 cliff case MIPS_XLS104:
1022 1.1.2.8 cliff case MIPS_XLS204:
1023 1.1.2.8 cliff case MIPS_XLS404:
1024 1.1.2.8 cliff case MIPS_XLS404LITE:
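		/*
		 * These 4-cpu parts appear to implement 2 cores with 2
		 * threads each; the IRT thread mask allots 4 bits per core,
		 * so cpus 2..3 land in bits 5..4 and cpus 0..1 in bits 1..0.
		 */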
1025 1.1.2.15 cliff irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
1026 1.1.2.15 cliff irtc0 &= (__BITS(5,4) | __BITS(1,0));
1027 1.1.2.8 cliff break;
1028 1.1.2.8 cliff case MIPS_XLS108:
1029 1.1.2.8 cliff case MIPS_XLS208:
1030 1.1.2.8 cliff case MIPS_XLS408:
1031 1.1.2.8 cliff case MIPS_XLS408LITE:
1032 1.1.2.8 cliff case MIPS_XLS608:
1033 1.1.2.15 cliff irtc0 = cpumask & __BITS(7,0);
1034 1.1.2.8 cliff break;
1035 1.1.2.8 cliff case MIPS_XLS416:
1036 1.1.2.8 cliff case MIPS_XLS616:
1037 1.1.2.15 cliff irtc0 = cpumask & __BITS(15,0);
1038 1.1.2.8 cliff break;
1039 1.1.2.8 cliff default:
1040 1.1.2.8 cliff panic("%s: unknown cpu ID %#x\n", __func__,
1041 1.1.2.8 cliff mips_options.mips_cpu_id);
1042 1.1.2.8 cliff }
1043 1.1.2.8 cliff #else
1044 1.1.2.15 cliff irtc0 = 1;
1045 1.1.2.15 cliff #endif /* MULTIPROCESSOR */
1046 1.1.2.15 cliff
1047 1.1.2.15 cliff return irtc0;
1048 1.1.2.15 cliff }
1049 1.1.2.15 cliff
1050 1.1.2.15 cliff /*
1051 1.1.2.15 cliff * rmixl_irt_init
1052 1.1.2.20 cliff * - initialize (zero) the IRT Entry for the given index,
1053 1.1.2.15 cliff * leaving it invalid/masked until rmixl_irt_establish() fills it in
1054 1.1.2.15 cliff */
1055 1.1.2.15 cliff static void
1056 1.1.2.31 matt rmixl_irt_init(size_t irt)
1057 1.1.2.15 cliff {
1058 1.1.2.31 matt KASSERT(irt < rmixl_nirts);
1059 1.1.2.31 matt if (cpu_rmixlp(mips_options.mips_cpu)) {
1060 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_IRTENTRY(irt), 0);
1061 1.1.2.31 matt } else {
1062 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0); /* high word */
1063 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0); /* low word */
1064 1.1.2.31 matt }
1065 1.1.2.6 cliff }
1066 1.1.2.6 cliff
1067 1.1.2.6 cliff /*
1068 1.1.2.15 cliff * rmixl_irt_disestablish
1069 1.1.2.20 cliff * - invalidate IRT Entry for given index
1070 1.1.2.6 cliff */
1071 1.1.2.6 cliff static void
1072 1.1.2.31 matt rmixl_irt_disestablish(size_t irt)
1073 1.1.2.6 cliff {
1074 1.1.2.28 cliff KASSERT(mutex_owned(rmixl_intr_lock));
1075 1.1.2.31 matt DPRINTF(("%s: irt %zu, irtc1 %#x\n", __func__, irt, 0));
1076 1.1.2.20 cliff rmixl_irt_init(irt);
1077 1.1.2.6 cliff }
1078 1.1.2.6 cliff
1079 1.1.2.6 cliff /*
1080 1.1.2.15 cliff * rmixl_irt_establish
1081 1.1.2.20 cliff * - construct an IRT Entry for irt and write to PIC
1082 1.1.2.6 cliff */
1083 1.1.2.6 cliff static void
1084 1.1.2.33 matt rmixl_irt_establish(size_t irt, size_t vec, int ist)
1085 1.1.2.6 cliff {
1086 1.1.2.31 matt const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
1087 1.1.2.15 cliff
1088 1.1.2.28 cliff KASSERT(mutex_owned(rmixl_intr_lock));
1089 1.1.2.25 cliff
1090 1.1.2.31 matt if (irt >= rmixl_nirts)
1091 1.1.2.31 matt panic("%s: bad irt %zu\n", __func__, irt);
1092 1.1.2.20 cliff
1093 1.1.2.31 matt /*
1094 1.1.2.33 matt * All XLP interrupts are level triggered (active high).
1095 1.1.2.31 matt */
1096 1.1.2.33 matt if (ist != IST_LEVEL && ist != IST_LEVEL_HIGH
1097 1.1.2.31 matt && (is_xlp_p
1098 1.1.2.33 matt || (ist != IST_LEVEL_LOW
1099 1.1.2.33 matt && ist != IST_EDGE && ist != IST_EDGE_FALLING
1100 1.1.2.33 matt && ist != IST_EDGE_RISING))) {
1101 1.1.2.33 matt panic("%s: bad ist %d\n", __func__, ist);
1102 1.1.2.15 cliff }
1103 1.1.2.15 cliff
1104 1.1.2.15 cliff /*
1105 1.1.2.15 cliff * XXX IRT entries are not shared
1106 1.1.2.15 cliff */
1107 1.1.2.31 matt if (is_xlp_p) {
1108 1.1.2.31 matt KASSERT(RMIXLP_PICREG_READ(RMIXLP_PIC_IRTENTRY(irt)) == 0);
1109 1.1.2.31 matt uint64_t irtc0 = RMIXLP_PIC_IRTENTRY_EN
1110 1.1.2.31 matt | RMIXLP_PIC_IRTENTRY_LOCAL
1111 1.1.2.31 matt | RMIXLP_PIC_IRTENTRY_DT_ITE
1112 1.1.2.31 matt | RMIXLP_PIC_IRTENTRY_ITE(0)
1113 1.1.2.31 matt | __SHIFTIN(vec, RMIXLP_PIC_IRTENTRY_INTVEC);
1114 1.1.2.6 cliff
1115 1.1.2.31 matt /*
1116 1.1.2.31 matt * write IRT Entry to PIC
1117 1.1.2.31 matt */
1118 1.1.2.31 matt DPRINTF(("%s: vec %zu (%#x), irt %zu (%s), irtc0 %#"PRIx64"\n",
1119 1.1.2.31 matt __func__, vec, vec, irt, rmixl_irtnames[irt], irtc0));
1120 1.1.2.6 cliff
1121 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_IRTENTRY(irt), irtc0);
1122 1.1.2.31 matt } else {
1123 1.1.2.31 matt KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
1124 1.1.2.31 matt KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);
1125 1.1.2.31 matt
1126 1.1.2.31 matt __cpuset_t cpumask = 1; /* XXX */
1127 1.1.2.31 matt uint32_t irtc0 = rmixl_irt_thread_mask(cpumask);
1128 1.1.2.31 matt
1129 1.1.2.31 matt uint32_t irtc1 = RMIXL_PIC_IRTENTRYC1_VALID;
1130 1.1.2.31 matt irtc1 |= RMIXL_PIC_IRTENTRYC1_GL; /* local */
1131 1.1.2.31 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
1132 1.1.2.31 matt
1133 1.1.2.33 matt if (ist == IST_LEVEL
1134 1.1.2.33 matt || ist == IST_LEVEL_LOW
1135 1.1.2.33 matt || ist == IST_LEVEL_HIGH)
1136 1.1.2.31 matt irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
1137 1.1.2.31 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
1138 1.1.2.31 matt
1139 1.1.2.33 matt if (ist == IST_LEVEL_LOW || ist == IST_EDGE_FALLING)
1140 1.1.2.31 matt irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
1141 1.1.2.31 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
1142 1.1.2.6 cliff
1143 1.1.2.31 matt irtc1 |= vec; /* vector in EIRR */
1144 1.1.2.31 matt KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
1145 1.1.2.6 cliff
1146 1.1.2.31 matt /*
1147 1.1.2.31 matt * write IRT Entry to PIC
1148 1.1.2.31 matt */
1149 1.1.2.31 matt DPRINTF(("%s: vec %zu (%#x), irt %zu, irtc0 %#x, irtc1 %#x\n",
1150 1.1.2.31 matt __func__, vec, vec, irt, irtc0, irtc1));
1151 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0); /* low word */
1152 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1); /* high word */
1153 1.1.2.31 matt }
1154 1.1.2.6 cliff }
1155 1.1.2.6 cliff
1156 1.1.2.1 cliff void *
1157 1.1.2.31 matt rmixl_vec_establish(size_t vec, rmixl_intrhand_t *ih, int ipl,
1158 1.1.2.17 cliff int (*func)(void *), void *arg, bool mpsafe)
1159 1.1.2.1 cliff {
1160 1.1.2.1 cliff
1161 1.1.2.28 cliff KASSERT(mutex_owned(rmixl_intr_lock));
1162 1.1.2.25 cliff
1163 1.1.2.31 matt DPRINTF(("%s: vec %zu ih %p ipl %d func %p arg %p mpsafe %d\n",
1164 1.1.2.31 matt __func__, vec, ih, ipl, func, arg, mpsafe));
1165 1.1.2.31 matt
1166 1.1.2.4 cliff #ifdef DIAGNOSTIC
1167 1.1.2.15 cliff if (rmixl_pic_init_done == 0)
1168 1.1.2.4 cliff panic("%s: called before evbmips_intr_init", __func__);
1169 1.1.2.4 cliff #endif
1170 1.1.2.4 cliff
1171 1.1.2.2 cliff /*
1172 1.1.2.15 cliff * check args
1173 1.1.2.2 cliff */
1174 1.1.2.31 matt if (vec >= NINTRVECS)
1175 1.1.2.31 matt panic("%s: vec %zu out of range, max %d",
1176 1.1.2.31 matt __func__, vec, NINTRVECS - 1);
1177 1.1.2.31 matt if (ipl < IPL_VM || ipl > IPL_HIGH)
1178 1.1.2.4 cliff panic("%s: ipl %d out of range, min %d, max %d",
1179 1.1.2.31 matt __func__, ipl, IPL_VM, IPL_HIGH);
1180 1.1.2.31 matt
1181 1.1.2.31 matt const int s = splhigh();
1182 1.1.2.2 cliff
1183 1.1.2.31 matt rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1184 1.1.2.31 matt if (ih == NULL) {
1185 1.1.2.31 matt ih = &iv->iv_intrhand;
1186 1.1.2.31 matt }
1187 1.1.2.31 matt
1188 1.1.2.31 matt if (vec >= 8) {
1189 1.1.2.31 matt TAILQ_REMOVE(&rmixl_intrvec_lruq[iv->iv_ipl], iv, iv_lruq_link);
1190 1.1.2.31 matt }
1191 1.1.2.31 matt
1192 1.1.2.31 matt if (LIST_EMPTY(&iv->iv_hands)) {
1193 1.1.2.31 matt KASSERT(iv->iv_ipl == IPL_NONE);
1194 1.1.2.31 matt iv->iv_ipl = ipl;
1195 1.1.2.31 matt } else {
1196 1.1.2.31 matt KASSERT(iv->iv_ipl == ipl);
1197 1.1.2.31 matt }
1198 1.1.2.31 matt
1199 1.1.2.31 matt if (vec >= 8) {
1200 1.1.2.31 matt TAILQ_INSERT_TAIL(&rmixl_intrvec_lruq[iv->iv_ipl],
1201 1.1.2.31 matt iv, iv_lruq_link);
1202 1.1.2.31 matt }
1203 1.1.2.1 cliff
1204 1.1.2.20 cliff if (ih->ih_func != NULL) {
1205 1.1.2.20 cliff #ifdef DIAGNOSTIC
1206 1.1.2.31 matt printf("%s: intrhand[%zu] busy\n", __func__, vec);
1207 1.1.2.20 cliff #endif
1208 1.1.2.20 cliff splx(s);
1209 1.1.2.20 cliff return NULL;
1210 1.1.2.20 cliff }
1211 1.1.2.2 cliff
1212 1.1.2.15 cliff ih->ih_arg = arg;
1213 1.1.2.17 cliff ih->ih_mpsafe = mpsafe;
1214 1.1.2.20 cliff ih->ih_vec = vec;
1215 1.1.2.2 cliff
1216 1.1.2.31 matt LIST_INSERT_HEAD(&iv->iv_hands, ih, ih_link);
1217 1.1.2.31 matt
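	/*
	 * enable this vector in the cached EIMR value for every IPL below
	 * the handler's IPL (ipl_eimr_map[i] is presumably the EIMR image
	 * used while running at IPL i), so the interrupt is unmasked only
	 * when the CPU is running below that IPL
	 */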
1218 1.1.2.31 matt const uint64_t eimr_bit = (uint64_t)1 << vec;
1219 1.1.2.31 matt for (int i = ipl; --i >= 0; ) {
1220 1.1.2.20 cliff KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
1221 1.1.2.20 cliff ipl_eimr_map[i] |= eimr_bit;
1222 1.1.2.20 cliff }
1223 1.1.2.20 cliff
1224 1.1.2.24 cliff ih->ih_func = func; /* do this last */
1225 1.1.2.24 cliff
1226 1.1.2.15 cliff splx(s);
1227 1.1.2.15 cliff
1228 1.1.2.15 cliff return ih;
1229 1.1.2.15 cliff }
1230 1.1.2.15 cliff
1231 1.1.2.20 cliff /*
1232 1.1.2.20 cliff * rmixl_intr_establish
1233 1.1.2.20 cliff * - used to establish an IRT-based interrupt only
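 *
 * Hypothetical driver usage (handler/softc names are illustrative only):
 *
 *	void *ih = rmixl_intr_establish(irt, IPL_VM, IST_LEVEL_HIGH,
 *	    mydrv_intr, sc, false);
 *	if (ih == NULL)
 *		panic("could not establish irt %zu", irt);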
1234 1.1.2.20 cliff */
1235 1.1.2.15 cliff void *
1236 1.1.2.33 matt rmixl_intr_establish(size_t irt, int ipl, int ist,
1237 1.1.2.17 cliff int (*func)(void *), void *arg, bool mpsafe)
1238 1.1.2.15 cliff {
1239 1.1.2.4 cliff #ifdef DIAGNOSTIC
1240 1.1.2.15 cliff if (rmixl_pic_init_done == 0)
1241 1.1.2.15 cliff panic("%s: called before rmixl_pic_init_done", __func__);
1242 1.1.2.15 cliff #endif
1243 1.1.2.4 cliff
1244 1.1.2.2 cliff /*
1245 1.1.2.15 cliff * check args
1246 1.1.2.2 cliff */
1247 1.1.2.31 matt if (irt >= rmixl_nirts)
1248 1.1.2.31 matt panic("%s: irt %zu out of range, max %d",
1249 1.1.2.31 matt __func__, irt, rmixl_nirts - 1);
1250 1.1.2.31 matt if (ipl < IPL_VM || ipl > IPL_HIGH)
1251 1.1.2.15 cliff panic("%s: ipl %d out of range, min %d, max %d",
1252 1.1.2.31 matt __func__, ipl, IPL_VM, IPL_HIGH);
1253 1.1.2.1 cliff
1254 1.1.2.31 matt mutex_enter(rmixl_intr_lock);
1255 1.1.2.20 cliff
1256 1.1.2.31 matt rmixl_intrhand_t *ih = &rmixl_irt_intrhands[irt];
1257 1.1.2.1 cliff
1258 1.1.2.31 matt KASSERT(ih->ih_func == NULL);
1259 1.1.2.31 matt
1260 1.1.2.31 matt const size_t vec = rmixl_intr_get_vec(ipl);
1261 1.1.2.31 matt
1262 1.1.2.31 matt DPRINTF(("%s: irt %zu, ih %p vec %zu, ipl %d\n",
1263 1.1.2.31 matt __func__, irt, ih, vec, ipl));
1264 1.1.2.1 cliff
1265 1.1.2.2 cliff /*
1266 1.1.2.15 cliff * establish vector
1267 1.1.2.2 cliff */
1268 1.1.2.31 matt ih = rmixl_vec_establish(vec, ih, ipl, func, arg, mpsafe);
1269 1.1.2.1 cliff
1270 1.1.2.1 cliff /*
1271 1.1.2.6 cliff * establish IRT Entry
1272 1.1.2.1 cliff */
1273 1.1.2.33 matt rmixl_irt_establish(irt, vec, ist);
1274 1.1.2.1 cliff
1275 1.1.2.28 cliff mutex_exit(rmixl_intr_lock);
1276 1.1.2.1 cliff
1277 1.1.2.1 cliff return ih;
1278 1.1.2.1 cliff }
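
#if 0	/* illustrative sketch only -- never compiled */
/*
 * Hedged example (not part of this driver) of how a device driver might
 * hook and later tear down an IRT-based interrupt with
 * rmixl_intr_establish()/rmixl_intr_disestablish().  XXX_IRT, xxx_intr(),
 * struct xxx_softc and its members are hypothetical names, and IST_LEVEL
 * is assumed to be an acceptable 'ist' value for the device in question.
 */
#define	XXX_IRT	4		/* hypothetical IRT index */

struct xxx_softc {
	void *sc_ih;		/* handle returned by rmixl_intr_establish */
};

static int
xxx_intr(void *arg)
{
	struct xxx_softc * const sc = arg;

	/* service the device here, then report the interrupt as handled */
	(void)sc;
	return 1;
}

static void
xxx_attach_intr(struct xxx_softc *sc)
{
	/* !mpsafe: the handler is called with KERNEL_LOCK held at IPL_VM */
	sc->sc_ih = rmixl_intr_establish(XXX_IRT, IPL_VM, IST_LEVEL,
	    xxx_intr, sc, false);
	if (sc->sc_ih == NULL)
		panic("%s: couldn't establish irt %d", __func__, XXX_IRT);
}

static void
xxx_detach_intr(struct xxx_softc *sc)
{
	rmixl_intr_disestablish(sc->sc_ih);
	sc->sc_ih = NULL;
}
#endif	/* illustrative sketch */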
1279 1.1.2.1 cliff
1280 1.1.2.1 cliff void
1281 1.1.2.15 cliff rmixl_vec_disestablish(void *cookie)
1282 1.1.2.15 cliff {
1283 1.1.2.31 matt rmixl_intrhand_t * const ih = cookie;
1284 1.1.2.31 matt const size_t vec = ih->ih_vec;
1285 1.1.2.31 matt rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1286 1.1.2.15 cliff
1287 1.1.2.28 cliff KASSERT(mutex_owned(rmixl_intr_lock));
1288 1.1.2.31 matt KASSERT(vec < NINTRVECS);
1289 1.1.2.31 matt KASSERT(ih->ih_func != NULL);
1290 1.1.2.31 matt KASSERT(IPL_VM <= iv->iv_ipl && iv->iv_ipl <= IPL_HIGH);
1291 1.1.2.31 matt
1292 1.1.2.31 matt LIST_REMOVE(ih, ih_link);
1293 1.1.2.15 cliff
1294 1.1.2.24 cliff ih->ih_func = NULL; /* do this first */
1295 1.1.2.20 cliff
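	/* drop this vector's bit from every ipl mask it was enabled in */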
1296 1.1.2.31 matt const uint64_t eimr_bit = __BIT(ih->ih_vec);
1297 1.1.2.31 matt for (int i = iv->iv_ipl; --i >= 0; ) {
1298 1.1.2.20 cliff KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
1299 1.1.2.20 cliff ipl_eimr_map[i] ^= eimr_bit;
1300 1.1.2.20 cliff }
1301 1.1.2.31 matt
1302 1.1.2.31 matt ih->ih_vec = 0;
1303 1.1.2.31 matt ih->ih_mpsafe = false;
1304 1.1.2.31 matt ih->ih_arg = NULL;
1305 1.1.2.31 matt
1306 1.1.2.31 matt /*
1307 1.1.2.31 matt * If this vector isn't servicing any interrupts, then check to
1308 1.1.2.31 matt * see if this IPL has other vectors using it. If it does, then
1309 1.1.2.31 matt 	 * return this vector to the freeq (lruq for IPL_NONE).  This ensures
1310 1.1.2.31 matt * there will always be at least one vector per IPL.
1311 1.1.2.31 matt */
1312 1.1.2.31 matt 	if (vec >= 8 && LIST_EMPTY(&iv->iv_hands)) {
1313 1.1.2.31 matt rmixl_intrvecq_t * const freeq = &rmixl_intrvec_lruq[IPL_NONE];
1314 1.1.2.31 matt rmixl_intrvecq_t * const iplq = &rmixl_intrvec_lruq[iv->iv_ipl];
1315 1.1.2.31 matt
1316 1.1.2.31 matt if (TAILQ_NEXT(iv, iv_lruq_link) != NULL
1317 1.1.2.31 matt || TAILQ_FIRST(iplq) != iv) {
1318 1.1.2.31 matt TAILQ_REMOVE(iplq, iv, iv_lruq_link);
1319 1.1.2.31 matt iv->iv_ipl = IPL_NONE;
1320 1.1.2.31 matt TAILQ_INSERT_TAIL(freeq, iv, iv_lruq_link);
1321 1.1.2.31 matt }
1322 1.1.2.31 matt }
1323 1.1.2.15 cliff }
1324 1.1.2.15 cliff
1325 1.1.2.15 cliff void
1326 1.1.2.1 cliff rmixl_intr_disestablish(void *cookie)
1327 1.1.2.1 cliff {
1328 1.1.2.31 matt rmixl_intrhand_t * const ih = cookie;
1329 1.1.2.31 matt const size_t vec = ih->ih_vec;
1330 1.1.2.31 matt rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1331 1.1.2.15 cliff
1332 1.1.2.20 cliff KASSERT(vec < NINTRVECS);
1333 1.1.2.1 cliff
1334 1.1.2.28 cliff mutex_enter(rmixl_intr_lock);
1335 1.1.2.1 cliff
1336 1.1.2.1 cliff /*
1337 1.1.2.15 cliff * disable/invalidate the IRT Entry if needed
1338 1.1.2.1 cliff */
1339 1.1.2.31 matt if (ih != &iv->iv_intrhand) {
1340 1.1.2.31 matt size_t irt = ih - rmixl_irt_intrhands;
1341 1.1.2.31 matt KASSERT(irt < rmixl_nirts);
1342 1.1.2.31 matt rmixl_irt_disestablish(irt);
1343 1.1.2.31 matt }
1344 1.1.2.1 cliff
1345 1.1.2.1 cliff /*
1346 1.1.2.15 cliff 	 * disassociate from the vector and free the handle
1347 1.1.2.1 cliff */
1348 1.1.2.15 cliff rmixl_vec_disestablish(cookie);
1349 1.1.2.1 cliff
1350 1.1.2.28 cliff mutex_exit(rmixl_intr_lock);
1351 1.1.2.1 cliff }
1352 1.1.2.1 cliff
1353 1.1.2.1 cliff void
1354 1.1.2.15 cliff evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending)
1355 1.1.2.1 cliff {
1356 1.1.2.31 matt struct rmixl_cpu_softc * const sc = (void *)curcpu()->ci_softc;
1357 1.1.2.31 matt const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
1358 1.1.2.4 cliff
1359 1.1.2.30 matt DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
1360 1.1.2.15 cliff __func__, cpu_number(), ipl, pc, pending));
1361 1.1.2.2 cliff
1362 1.1.2.15 cliff /*
1363 1.1.2.15 cliff 	 * the 'pending' arg is just a summary indicating there is
1364 1.1.2.15 cliff 	 * something to do; the real pending status is obtained from the EIRR
1365 1.1.2.15 cliff */
1366 1.1.2.15 cliff KASSERT(pending == MIPS_INT_MASK_1);
1367 1.1.2.4 cliff
1368 1.1.2.15 cliff for (;;) {
1369 1.1.2.15 cliff rmixl_intrhand_t *ih;
1370 1.1.2.15 cliff uint64_t eirr;
1371 1.1.2.18 cliff uint64_t eimr;
1372 1.1.2.15 cliff uint64_t vecbit;
1373 1.1.2.15 cliff int vec;
1374 1.1.2.1 cliff
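		/* read the EIRR (pending) and EIMR (mask) from CP0 $9, sel 6/7 */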
1375 1.1.2.31 matt __asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
1376 1.1.2.31 matt __asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));
1377 1.1.2.4 cliff
1378 1.1.2.15 cliff #ifdef IOINTR_DEBUG
1379 1.1.2.30 matt printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
1380 1.1.2.30 matt __func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
1381 1.1.2.15 cliff #endif /* IOINTR_DEBUG */
1382 1.1.2.15 cliff
1383 1.1.2.22 cliff /*
1384 1.1.2.22 cliff * reduce eirr to
1385 1.1.2.22 cliff * - ints that are enabled at or below this ipl
1386 1.1.2.22 cliff 		 *   - excluding the count/compare clock and soft ints,
1387 1.1.2.22 cliff 		 *     which are handled elsewhere
1388 1.1.2.22 cliff */
1389 1.1.2.15 cliff eirr &= ipl_eimr_map[ipl-1];
1390 1.1.2.22 cliff eirr &= ~ipl_eimr_map[ipl];
1391 1.1.2.22 cliff eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
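		/*
		 * eirr now holds only vectors whose handlers were
		 * established at exactly this ipl
		 */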
1392 1.1.2.15 cliff if (eirr == 0)
1393 1.1.2.15 cliff break;
1394 1.1.2.15 cliff
1395 1.1.2.15 cliff vec = 63 - dclz(eirr);
1396 1.1.2.31 matt rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1397 1.1.2.15 cliff vecbit = 1ULL << vec;
1398 1.1.2.31 matt 		KASSERT(iv->iv_ipl == ipl);
1399 1.1.2.31 matt LIST_FOREACH(ih, &iv->iv_hands, ih_link) {
1400 1.1.2.31 matt 			KASSERT((vecbit & eimr) == 0);
1401 1.1.2.31 matt 			KASSERT((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);
1402 1.1.2.31 matt
1403 1.1.2.31 matt /*
1404 1.1.2.31 matt 			 * ack the irq we are about to handle in the EIRR,
1405 1.1.2.31 matt 			 * and in the PIC if needed
1406 1.1.2.31 matt */
1407 1.1.2.31 matt rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
1408 1.1.2.31 matt if (ih != &iv->iv_intrhand) {
1409 1.1.2.31 matt size_t irt = ih - rmixl_irt_intrhands;
1410 1.1.2.31 matt KASSERT(irt < rmixl_nirts);
1411 1.1.2.31 matt if (is_xlp_p) {
1412 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_INT_ACK,
1413 1.1.2.31 matt irt);
1414 1.1.2.31 matt } else {
1415 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
1416 1.1.2.31 matt 1 << irt);
1417 1.1.2.31 matt }
1418 1.1.2.31 matt sc->sc_irt_evcnts[irt].ev_count++;
1419 1.1.2.31 matt }
1420 1.1.2.15 cliff
1421 1.1.2.31 matt if (ih->ih_func != NULL) {
1422 1.1.2.17 cliff #ifdef MULTIPROCESSOR
1423 1.1.2.31 matt if (ih->ih_mpsafe) {
1424 1.1.2.31 matt (void)(*ih->ih_func)(ih->ih_arg);
1425 1.1.2.31 matt } else {
1426 1.1.2.31 matt KASSERTMSG(ipl == IPL_VM,
1427 1.1.2.31 matt ("%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
1428 1.1.2.31 matt __func__, sc->sc_vec_evcnts[vec].ev_name,
1429 1.1.2.31 matt ipl));
1430 1.1.2.31 matt KERNEL_LOCK(1, NULL);
1431 1.1.2.31 matt (void)(*ih->ih_func)(ih->ih_arg);
1432 1.1.2.31 matt KERNEL_UNLOCK_ONE(NULL);
1433 1.1.2.31 matt }
1434 1.1.2.17 cliff #else
1435 1.1.2.31 matt (void)(*ih->ih_func)(ih->ih_arg);
1436 1.1.2.17 cliff #endif /* MULTIPROCESSOR */
1437 1.1.2.31 matt }
1438 1.1.2.31 matt KASSERT(ipl == iv->iv_ipl);
1439 1.1.2.31 matt KASSERTMSG(curcpu()->ci_cpl >= ipl,
1440 1.1.2.31 matt ("%s: after %s: cpl (%d) < ipl %d",
1441 1.1.2.31 matt __func__, sc->sc_vec_evcnts[vec].ev_name,
1442 1.1.2.31 matt 			    curcpu()->ci_cpl, ipl));
1443 1.1.2.31 matt sc->sc_vec_evcnts[vec].ev_count++;
1444 1.1.2.17 cliff }
1445 1.1.2.1 cliff }
1446 1.1.2.1 cliff }
1447 1.1.2.4 cliff
1448 1.1.2.15 cliff #ifdef MULTIPROCESSOR
1449 1.1.2.15 cliff static int
1450 1.1.2.15 cliff rmixl_send_ipi(struct cpu_info *ci, int tag)
1451 1.1.2.4 cliff {
1452 1.1.2.26 cliff const cpuid_t cpuid = ci->ci_cpuid;
1453 1.1.2.31 matt const uint64_t req = 1 << tag;
1454 1.1.2.31 matt const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
1455 1.1.2.15 cliff uint32_t r;
1456 1.1.2.4 cliff
1457 1.1.2.30 matt if (! CPUSET_HAS_P(cpus_running, cpu_index(ci)))
1458 1.1.2.15 cliff return -1;
1459 1.1.2.15 cliff
1460 1.1.2.31 matt KASSERT(tag >= 0 && tag < NIPIS);
1461 1.1.2.15 cliff
1462 1.1.2.31 matt if (is_xlp_p) {
1463 1.1.2.31 matt r = RMXLP_PIC_IPI_CTRL_MAKE(0, __BIT(cpuid & 15),
1464 1.1.2.31 matt RMIXL_INTERVEC_IPI + tag);
1465 1.1.2.31 matt } else {
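		/*
		 * XLR/XLS cpuids pack four threads per core,
		 * e.g. cpuid 6 -> core 1, thread 2
		 */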
1466 1.1.2.31 matt const uint32_t core = (uint32_t)(cpuid >> 2);
1467 1.1.2.31 matt const uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
1468 1.1.2.31 matt r = RMXLP_PIC_IPI_CTRL_MAKE(0, core, thread,
1469 1.1.2.31 matt RMIXL_INTERVEC_IPI + tag);
1470 1.1.2.31 matt }
1471 1.1.2.15 cliff
1472 1.1.2.28 cliff mutex_enter(rmixl_ipi_lock);
1473 1.1.2.15 cliff atomic_or_64(&ci->ci_request_ipis, req);
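	/* make the request visible to the target cpu before raising the IPI */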
1474 1.1.2.31 matt __asm __volatile("sync");
1475 1.1.2.31 matt if (is_xlp_p) {
1476 1.1.2.31 matt RMIXLP_PICREG_WRITE(RMIXLP_PIC_IPI_CTRL, r);
1477 1.1.2.31 matt } else {
1478 1.1.2.31 matt RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
1479 1.1.2.31 matt }
1480 1.1.2.28 cliff mutex_exit(rmixl_ipi_lock);
1481 1.1.2.15 cliff
1482 1.1.2.15 cliff return 0;
1483 1.1.2.15 cliff }
1484 1.1.2.15 cliff
1485 1.1.2.15 cliff static int
1486 1.1.2.15 cliff rmixl_ipi_intr(void *arg)
1487 1.1.2.15 cliff {
1488 1.1.2.15 cliff struct cpu_info * const ci = curcpu();
1489 1.1.2.30 matt const uint64_t ipi_mask = 1 << (uintptr_t)arg;
1490 1.1.2.15 cliff
1491 1.1.2.30 matt KASSERT(ci->ci_cpl >= IPL_SCHED);
1492 1.1.2.25 cliff KASSERT((uintptr_t)arg < NIPIS);
1493 1.1.2.30 matt
1494 1.1.2.30 matt /* if the request is clear, it was previously processed */
1495 1.1.2.30 matt if ((ci->ci_request_ipis & ipi_mask) == 0)
1496 1.1.2.30 matt return 0;
1497 1.1.2.25 cliff
1498 1.1.2.25 cliff atomic_or_64(&ci->ci_active_ipis, ipi_mask);
1499 1.1.2.25 cliff atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
1500 1.1.2.15 cliff
1501 1.1.2.15 cliff ipi_process(ci, ipi_mask);
1502 1.1.2.15 cliff
1503 1.1.2.25 cliff atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
1504 1.1.2.25 cliff
1505 1.1.2.15 cliff return 1;
1506 1.1.2.15 cliff }
1507 1.1.2.15 cliff #endif /* MULTIPROCESSOR */
1508 1.1.2.15 cliff
1509 1.1.2.20 cliff #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
1510 1.1.2.15 cliff int
1511 1.1.2.31 matt rmixl_intrvec_print_subr(size_t vec)
1512 1.1.2.15 cliff {
1513 1.1.2.31 matt rmixl_intrvec_t * const iv = &rmixl_intrvec[vec];
1514 1.1.2.31 matt rmixl_intrhand_t *ih;
1515 1.1.2.31 matt
1516 1.1.2.31 matt printf("vec %zu: ipl %u\n", vec, iv->iv_ipl);
1517 1.1.2.31 matt
1518 1.1.2.31 matt LIST_FOREACH(ih, &iv->iv_hands, ih_link) {
1519 1.1.2.31 matt if (ih == &iv->iv_intrhand) {
1520 1.1.2.31 matt printf(" [%s]: func %p, arg %p\n",
1521 1.1.2.31 matt rmixl_vecnames_common[vec],
1522 1.1.2.31 matt ih->ih_func, ih->ih_arg);
1523 1.1.2.31 matt } else {
1524 1.1.2.31 matt const size_t irt = ih - rmixl_irt_intrhands;
1525 1.1.2.31 matt printf(" irt %zu [%s]: func %p, arg %p\n",
1526 1.1.2.31 matt irt, rmixl_irtnames[irt],
1527 1.1.2.31 matt ih->ih_func, ih->ih_arg);
1528 1.1.2.31 matt }
1529 1.1.2.31 matt }
1530 1.1.2.15 cliff return 0;
1531 1.1.2.15 cliff }
1532 1.1.2.15 cliff int
1533 1.1.2.15 cliff rmixl_intrhand_print(void)
1534 1.1.2.15 cliff {
1535 1.1.2.31 matt for (size_t vec = 0; vec < NINTRVECS; vec++)
1536 1.1.2.31 matt rmixl_intrvec_print_subr(vec);
1537 1.1.2.15 cliff return 0;
1538 1.1.2.15 cliff }
1539 1.1.2.20 cliff
1540 1.1.2.20 cliff static inline void
1541 1.1.2.31 matt rmixl_irt_entry_print(size_t irt)
1542 1.1.2.20 cliff {
1543 1.1.2.31 matt if (irt >= rmixl_nirts)
1544 1.1.2.20 cliff return;
1545 1.1.2.31 matt if (cpu_rmixlp(mips_options.mips_cpu)) {
1546 1.1.2.31 matt uint64_t c = RMIXLP_PICREG_READ(RMIXLP_PIC_IRTENTRY(irt));
1547 1.1.2.31 matt printf("irt[%zu]: %#"PRIx64"\n", irt, c);
1548 1.1.2.31 matt } else {
1549 1.1.2.31 matt uint32_t c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
1550 1.1.2.31 matt uint32_t c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
1551 1.1.2.31 matt printf("irt[%zu]: %#x, %#x\n", irt, c0, c1);
1552 1.1.2.31 matt }
1553 1.1.2.20 cliff }
1554 1.1.2.20 cliff
1555 1.1.2.15 cliff int
1556 1.1.2.15 cliff rmixl_irt_print(void)
1557 1.1.2.15 cliff {
1558 1.1.2.15 cliff printf("%s:\n", __func__);
1559 1.1.2.31 matt for (size_t irt = 0; irt < rmixl_nirts ; irt++)
1560 1.1.2.15 cliff rmixl_irt_entry_print(irt);
1561 1.1.2.4 cliff return 0;
1562 1.1.2.4 cliff }
1563 1.1.2.20 cliff
1564 1.1.2.20 cliff void
1565 1.1.2.20 cliff rmixl_ipl_eimr_map_print(void)
1566 1.1.2.20 cliff {
1567 1.1.2.20 cliff printf("IPL_NONE=%d, mask %#"PRIx64"\n",
1568 1.1.2.20 cliff IPL_NONE, ipl_eimr_map[IPL_NONE]);
1569 1.1.2.20 cliff printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
1570 1.1.2.20 cliff IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
1571 1.1.2.20 cliff printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
1572 1.1.2.20 cliff IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
1573 1.1.2.20 cliff printf("IPL_VM=%d, mask %#"PRIx64"\n",
1574 1.1.2.20 cliff IPL_VM, ipl_eimr_map[IPL_VM]);
1575 1.1.2.20 cliff printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
1576 1.1.2.20 cliff IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
1577 1.1.2.20 cliff printf("IPL_DDB=%d, mask %#"PRIx64"\n",
1578 1.1.2.20 cliff IPL_DDB, ipl_eimr_map[IPL_DDB]);
1579 1.1.2.20 cliff printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
1580 1.1.2.20 cliff IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
1581 1.1.2.20 cliff }
1582 1.1.2.20 cliff
1583 1.1.2.4 cliff #endif
1584