/*	$NetBSD: rmixl_intr.c,v 1.2.8.1 2011/06/06 09:06:09 jruoho Exp $	*/

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the RMI XLP, XLR, XLS
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.2.8.1 2011/06/06 09:06:09 jruoho Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#define	__INTR_PRIVATE

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <mips/cpu.h>
#include <mips/cpuset.h>
#include <mips/locore.h>

#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>

#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

//#define IOINTR_DEBUG	1
#ifdef IOINTR_DEBUG
int iointr_debug = IOINTR_DEBUG;
# define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
#else
# define DPRINTF(x)
#endif

#define RMIXL_PICREG_READ(off) \
	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
#define RMIXL_PICREG_WRITE(off, val) \
	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))

/*
 * do not clear these when acking EIRR
 * (otherwise they get lost)
 */
#define RMIXL_EIRR_PRESERVE_MASK	\
	((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
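/*
 * the CP0 Cause/Status IP bits start at bit 8, so the shift by 8 lines
 * them up with the low EIRR vectors: the count/compare clock lands on
 * vector 7 (see rmixl_intr_init_clk()) and the soft interrupts on
 * vectors 0-1; those are handled elsewhere and must be preserved here
 */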

/*
 * IRT assignments depend on the RMI chip family
 * (XLRxxx vs. XLS1xx vs. XLS2xx vs. XLS4xx/XLS6xx);
 * use the right display string table for the CPU that's running.
 */

/*
 * rmixl_irtnames_xlrxxx
 * - use for XLRxxx
 */
static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio)",		/* 14 */
	"pic int 15 (hyper)",		/* 15 */
	"pic int 16 (pcix)",		/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (xgs0)",		/* 21 */
	"pic int 22 (xgs1)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (hyper_fatal)",	/* 24 */
	"pic int 25 (bridge_aerr)",	/* 25 */
	"pic int 26 (bridge_berr)",	/* 26 */
	"pic int 27 (bridge_tb)",	/* 27 */
	"pic int 28 (bridge_nmi)",	/* 28 */
	"pic int 29 (bridge_sram_derr)",/* 29 */
	"pic int 30 (gpio_fatal)",	/* 30 */
	"pic int 31 (reserved)",	/* 31 */
};

/*
 * rmixl_irtnames_xls2xx
 * - use for XLS2xx
 */
static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (pcie_link2)",	/* 23 */
	"pic int 24 (pcie_link3)",	/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls1xx
 * - use for XLS1xx, XLS4xx-Lite
 */
static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls4xx:
 * - use for XLS4xx, XLS6xx
 */
static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (pcie_link2)",	/* 28 */
	"pic int 29 (pcie_link3)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_vecnames_common:
 * - use for unknown cpu implementation
 * - covers all vectors, not just IRT intrs
 */
static const char * const rmixl_vecnames_common[NINTRVECS] = {
	"vec 0",		/* 0 */
	"vec 1",		/* 1 */
	"vec 2",		/* 2 */
	"vec 3",		/* 3 */
	"vec 4",		/* 4 */
	"vec 5",		/* 5 */
	"vec 6",		/* 6 */
	"vec 7",		/* 7 */
	"vec 8 (ipi 0)",	/* 8 */
	"vec 9 (ipi 1)",	/* 9 */
	"vec 10 (ipi 2)",	/* 10 */
	"vec 11 (ipi 3)",	/* 11 */
	"vec 12 (ipi 4)",	/* 12 */
	"vec 13 (ipi 5)",	/* 13 */
	"vec 14 (ipi 6)",	/* 14 */
	"vec 15 (fmn)",		/* 15 */
	"vec 16",		/* 16 */
	"vec 17",		/* 17 */
	"vec 18",		/* 18 */
	"vec 19",		/* 19 */
	"vec 20",		/* 20 */
	"vec 21",		/* 21 */
	"vec 22",		/* 22 */
	"vec 23",		/* 23 */
	"vec 24",		/* 24 */
	"vec 25",		/* 25 */
	"vec 26",		/* 26 */
	"vec 27",		/* 27 */
	"vec 28",		/* 28 */
	"vec 29",		/* 29 */
	"vec 30",		/* 30 */
	"vec 31",		/* 31 */
	"vec 32",		/* 32 */
	"vec 33",		/* 33 */
	"vec 34",		/* 34 */
	"vec 35",		/* 35 */
	"vec 36",		/* 36 */
	"vec 37",		/* 37 */
	"vec 38",		/* 38 */
	"vec 39",		/* 39 */
	"vec 40",		/* 40 */
	"vec 41",		/* 41 */
	"vec 42",		/* 42 */
	"vec 43",		/* 43 */
	"vec 44",		/* 44 */
	"vec 45",		/* 45 */
	"vec 46",		/* 46 */
	"vec 47",		/* 47 */
	"vec 48",		/* 48 */
	"vec 49",		/* 49 */
	"vec 50",		/* 50 */
	"vec 51",		/* 51 */
	"vec 52",		/* 52 */
	"vec 53",		/* 53 */
	"vec 54",		/* 54 */
	"vec 55",		/* 55 */
	"vec 56",		/* 56 */
	"vec 57",		/* 57 */
	"vec 58",		/* 58 */
	"vec 59",		/* 59 */
	"vec 60",		/* 60 */
	"vec 61",		/* 61 */
	"vec 62",		/* 62 */
	"vec 63",		/* 63 */
};

/*
 * mask of CPUs attached
 * once they are attached, this variable is read-only and thus MP-safe
 */
static uint32_t cpu_present_mask;

kmutex_t rmixl_ipi_lock __cacheline_aligned;
				/* covers RMIXL_PIC_IPIBASE */
kmutex_t rmixl_intr_lock __cacheline_aligned;
				/* covers rest of PIC, and rmixl_intrhand[] */
rmixl_intrhand_t rmixl_intrhand[NINTRVECS];

#ifdef DIAGNOSTIC
static int rmixl_pic_init_done;
#endif


static const char *rmixl_intr_string_xlr(int);
static const char *rmixl_intr_string_xls(int);
static uint32_t rmixl_irt_thread_mask(int);
static void rmixl_irt_init(int);
static void rmixl_irt_disestablish(int);
static void rmixl_irt_establish(int, int, int,
		rmixl_intr_trigger_t, rmixl_intr_polarity_t);

#ifdef MULTIPROCESSOR
static int rmixl_send_ipi(struct cpu_info *, int);
static int rmixl_ipi_intr(void *);
#endif

#if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
int rmixl_intrhand_print_subr(int);
int rmixl_intrhand_print(void);
int rmixl_irt_print(void);
void rmixl_ipl_eimr_map_print(void);
#endif

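/*
 * dclz(val) returns the number of leading zero bits in a 64-bit value,
 * so 63 - dclz(x) is the bit number of the most significant set bit;
 * evbmips_iointr() uses this to find the highest pending vector in the EIRR
 */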
static inline u_int
dclz(uint64_t val)
{
	int nlz;

	asm volatile("dclz %0, %1;"
		: "=r"(nlz) : "r"(val));

	return nlz;
}

void
evbmips_intr_init(void)
{
	uint32_t r;

	KASSERT(cpu_rmixlr(mips_options.mips_cpu)
	    || cpu_rmixls(mips_options.mips_cpu));


#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done != 0)
		panic("%s: rmixl_pic_init_done %d",
			__func__, rmixl_pic_init_done);
#endif

	mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH);

	mutex_enter(&rmixl_intr_lock);

	/*
	 * initialize (zero) all IRT Entries in the PIC
	 */
	for (u_int i = 0; i < NIRTS; i++) {
		rmixl_irt_init(i);
	}

	/*
	 * disable watchdog NMI, timers
	 *
	 * XXX
	 * WATCHDOG_ENB is preserved because clearing it causes
	 * hang on the XLS616 (but not on the XLS408)
	 */
	r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
	r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
	RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);

#ifdef DIAGNOSTIC
	rmixl_pic_init_done = 1;
#endif
	mutex_exit(&rmixl_intr_lock);

}

/*
 * establish vector for mips3 count/compare clock interrupt
 * this ensures we enable in EIRR,
 * even though cpu_intr() handles the interrupt
 * note the 'mpsafe' arg here is a placeholder only
 */
void
rmixl_intr_init_clk(void)
{
	const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;

	mutex_enter(&rmixl_intr_lock);

	void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
	if (ih == NULL)
		panic("%s: establish vec %d failed", __func__, vec);

	mutex_exit(&rmixl_intr_lock);
}

#ifdef MULTIPROCESSOR
/*
 * establish IPI interrupt and send function
 */
void
rmixl_intr_init_ipi(void)
{
	mutex_enter(&rmixl_intr_lock);

	for (u_int ipi = 0; ipi < NIPIS; ipi++) {
		const u_int vec = RMIXL_INTRVEC_IPI + ipi;
		void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
			rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
		if (ih == NULL)
			panic("%s: establish ipi %d at vec %d failed",
				__func__, ipi, vec);
	}

	mips_locoresw.lsw_send_ipi = rmixl_send_ipi;

	mutex_exit(&rmixl_intr_lock);
}
#endif	/* MULTIPROCESSOR */

/*
 * initialize per-cpu interrupt stuff in softc
 * accumulate per-cpu bits in 'cpu_present_mask'
 */
void
rmixl_intr_init_cpu(struct cpu_info *ci)
{
	struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;

	KASSERT(sc != NULL);

	for (int vec=0; vec < NINTRVECS; vec++)
		evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
			EVCNT_TYPE_INTR, NULL,
			device_xname(sc->sc_dev),
			rmixl_intr_string(vec));

	KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
	atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
}

/*
 * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
 */
const char *
rmixl_intr_string(int vec)
{
	int irt;

	if (vec < 0 || vec >= NINTRVECS)
		panic("%s: vec index %d out of range, max %d",
			__func__, vec, NINTRVECS - 1);

	if (! RMIXL_VECTOR_IS_IRT(vec))
		return rmixl_vecnames_common[vec];

	irt = RMIXL_VECTOR_IRT(vec);
	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
	case CIDFL_RMI_TYPE_XLR:
		return rmixl_intr_string_xlr(irt);
	case CIDFL_RMI_TYPE_XLS:
		return rmixl_intr_string_xls(irt);
	case CIDFL_RMI_TYPE_XLP:
		panic("%s: RMI XLP not yet supported", __func__);
	}

	return "undefined";	/* appease gcc */
}

static const char *
rmixl_intr_string_xlr(int irt)
{
	return rmixl_irtnames_xlrxxx[irt];
}

static const char *
rmixl_intr_string_xls(int irt)
{
	const char *name;

	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
	case MIPS_XLS104:
	case MIPS_XLS108:
	case MIPS_XLS404LITE:
	case MIPS_XLS408LITE:
		name = rmixl_irtnames_xls1xx[irt];
		break;
	case MIPS_XLS204:
	case MIPS_XLS208:
		name = rmixl_irtnames_xls2xx[irt];
		break;
	case MIPS_XLS404:
	case MIPS_XLS408:
	case MIPS_XLS416:
	case MIPS_XLS608:
	case MIPS_XLS616:
		name = rmixl_irtnames_xls4xx[irt];
		break;
	default:
		name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
		break;
	}

	return name;
}

/*
 * rmixl_irt_thread_mask
 *
 * given a bitmask of cpus, return an IRT thread mask
 */
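/*
 * for example (derived from the cases below), on the 4-cpu XLS parts a
 * cpumask of 0x0f becomes an IRT thread mask of 0x33, while on the 8-
 * and 16-cpu parts the cpumask is used as the thread mask directly
 */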
static uint32_t
rmixl_irt_thread_mask(int cpumask)
{
	uint32_t irtc0;

#if defined(MULTIPROCESSOR)
#ifndef NOTYET
	if (cpumask == -1)
		return 1;	/* XXX TMP FIXME */
#endif

	/*
	 * discount cpus not present
	 */
	cpumask &= cpu_present_mask;

	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
	case MIPS_XLS104:
	case MIPS_XLS204:
	case MIPS_XLS404:
	case MIPS_XLS404LITE:
		irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
		irtc0 &= (__BITS(5,4) | __BITS(1,0));
		break;
	case MIPS_XLS108:
	case MIPS_XLS208:
	case MIPS_XLS408:
	case MIPS_XLS408LITE:
	case MIPS_XLS608:
		irtc0 = cpumask & __BITS(7,0);
		break;
	case MIPS_XLS416:
	case MIPS_XLS616:
		irtc0 = cpumask & __BITS(15,0);
		break;
	default:
		panic("%s: unknown cpu ID %#x\n", __func__,
			mips_options.mips_cpu_id);
	}
#else
	irtc0 = 1;
#endif	/* MULTIPROCESSOR */

	return irtc0;
}

/*
 * rmixl_irt_init
 * - initialize the IRT Entry for the given index
 * - zero both words, masking all threads and invalidating the entry
 */
static void
rmixl_irt_init(int irt)
{
	KASSERT(irt < NIRTS);
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0);	/* high word */
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0);	/* low word */
}

/*
 * rmixl_irt_disestablish
 * - invalidate IRT Entry for given index
 */
static void
rmixl_irt_disestablish(int irt)
{
	KASSERT(mutex_owned(&rmixl_intr_lock));
	DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
	rmixl_irt_init(irt);
}

/*
 * rmixl_irt_establish
 * - construct an IRT Entry for irt and write to PIC
 */
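/*
 * the low word (C0) carries the destination thread mask; the high word
 * (C1) carries VALID|GL, the trigger/polarity bits and the EIRR vector
 */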
static void
rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
	rmixl_intr_polarity_t polarity)
{
	uint32_t irtc1;
	uint32_t irtc0;

	KASSERT(mutex_owned(&rmixl_intr_lock));

	if (irt >= NIRTS)
		panic("%s: bad irt %d\n", __func__, irt);

	if (! RMIXL_VECTOR_IS_IRT(vec))
		panic("%s: bad vec %d\n", __func__, vec);

	switch (trigger) {
	case RMIXL_TRIG_EDGE:
	case RMIXL_TRIG_LEVEL:
		break;
	default:
		panic("%s: bad trigger %d\n", __func__, trigger);
	}

	switch (polarity) {
	case RMIXL_POLR_RISING:
	case RMIXL_POLR_HIGH:
	case RMIXL_POLR_FALLING:
	case RMIXL_POLR_LOW:
		break;
	default:
		panic("%s: bad polarity %d\n", __func__, polarity);
	}

	/*
	 * XXX IRT entries are not shared
	 */
	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);

	irtc0 = rmixl_irt_thread_mask(cpumask);

	irtc1 = RMIXL_PIC_IRTENTRYC1_VALID;
	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	if (trigger == RMIXL_TRIG_LEVEL)
		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	irtc1 |= vec;	/* vector in EIRR */
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	/*
	 * write IRT Entry to PIC
	 */
	DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n",
		__func__, vec, vec, irt, irtc0, irtc1));
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0);	/* low word */
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1);	/* high word */
}

void *
rmixl_vec_establish(int vec, int cpumask, int ipl,
	int (*func)(void *), void *arg, bool mpsafe)
{
	rmixl_intrhand_t *ih;
	uint64_t eimr_bit;
	int s;

	KASSERT(mutex_owned(&rmixl_intr_lock));

	DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n",
		__func__, vec, cpumask, ipl, func, arg, mpsafe));
#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done == 0)
		panic("%s: called before evbmips_intr_init", __func__);
#endif

	/*
	 * check args
	 */
	if (vec < 0 || vec >= NINTRVECS)
		panic("%s: vec %d out of range, max %d",
			__func__, vec, NINTRVECS - 1);
	if (ipl <= 0 || ipl >= _IPL_N)
		panic("%s: ipl %d out of range, min %d, max %d",
			__func__, ipl, 1, _IPL_N - 1);

	s = splhigh();

	ih = &rmixl_intrhand[vec];
	if (ih->ih_func != NULL) {
#ifdef DIAGNOSTIC
		printf("%s: intrhand[%d] busy\n", __func__, vec);
#endif
		splx(s);
		return NULL;
	}

	ih->ih_arg = arg;
	ih->ih_mpsafe = mpsafe;
	ih->ih_vec = vec;
	ih->ih_ipl = ipl;
	ih->ih_cpumask = cpumask;

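	/*
	 * record this vector in ipl_eimr_map[] for every IPL below ih_ipl:
	 * evbmips_iointr() masks the pending EIRR with ipl_eimr_map[ipl-1],
	 * so the handler only runs while the CPU is below the handler's IPL
	 */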
	eimr_bit = (uint64_t)1 << vec;
	for (int i=ih->ih_ipl; --i >= 0; ) {
		KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
		ipl_eimr_map[i] |= eimr_bit;
	}

	ih->ih_func = func;	/* do this last */

	splx(s);

	return ih;
}

/*
 * rmixl_intr_establish
 * - used to establish an IRT-based interrupt only
 */
void *
rmixl_intr_establish(int irt, int cpumask, int ipl,
	rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
	int (*func)(void *), void *arg, bool mpsafe)
{
	rmixl_intrhand_t *ih;
	int vec;

#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done == 0)
		panic("%s: called before rmixl_pic_init_done", __func__);
#endif

	/*
	 * check args
	 */
	if (irt < 0 || irt >= NIRTS)
		panic("%s: irt %d out of range, max %d",
			__func__, irt, NIRTS - 1);
	if (ipl <= 0 || ipl >= _IPL_N)
		panic("%s: ipl %d out of range, min %d, max %d",
			__func__, ipl, 1, _IPL_N - 1);

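	/* each IRT index maps to a fixed vector in the EIRR */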
	vec = RMIXL_IRT_VECTOR(irt);

	DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));

	mutex_enter(&rmixl_intr_lock);

	/*
	 * establish vector
	 */
	ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);

	/*
	 * establish IRT Entry
	 */
	rmixl_irt_establish(irt, vec, cpumask, trigger, polarity);

	mutex_exit(&rmixl_intr_lock);

	return ih;
}

void
rmixl_vec_disestablish(void *cookie)
{
	rmixl_intrhand_t *ih = cookie;
	uint64_t eimr_bit;

	KASSERT(mutex_owned(&rmixl_intr_lock));
	KASSERT(ih->ih_vec < NINTRVECS);
	KASSERT(ih == &rmixl_intrhand[ih->ih_vec]);

	ih->ih_func = NULL;	/* do this first */

	eimr_bit = (uint64_t)1 << ih->ih_vec;
	for (int i=ih->ih_ipl; --i >= 0; ) {
		KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
		ipl_eimr_map[i] ^= eimr_bit;
	}
}

void
rmixl_intr_disestablish(void *cookie)
{
	rmixl_intrhand_t *ih = cookie;
	const int vec = ih->ih_vec;

	KASSERT(vec < NINTRVECS);
	KASSERT(ih == &rmixl_intrhand[vec]);

	mutex_enter(&rmixl_intr_lock);

	/*
	 * disable/invalidate the IRT Entry if needed
	 */
	if (RMIXL_VECTOR_IS_IRT(vec))
		rmixl_irt_disestablish(vec);

	/*
	 * disassociate from the vector and free the handle
	 */
	rmixl_vec_disestablish(cookie);

	mutex_exit(&rmixl_intr_lock);
}

void
evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending)
{
	struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc;

	DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
		__func__, cpu_number(), ipl, pc, pending));

	/*
	 * the 'pending' arg is only a summary that there is something to do;
	 * the real pending status is obtained from the EIRR
	 */
	KASSERT(pending == MIPS_INT_MASK_1);

	for (;;) {
		rmixl_intrhand_t *ih;
		uint64_t eirr;
		uint64_t eimr;
		uint64_t vecbit;
		int vec;

		asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
		asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));

#ifdef IOINTR_DEBUG
		printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
			__func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
#endif	/* IOINTR_DEBUG */

		/*
		 * reduce eirr to
		 * - ints that are enabled at or below this ipl
		 * - exclude count/compare clock and soft ints
		 *   they are handled elsewhere
		 */
		eirr &= ipl_eimr_map[ipl-1];
		eirr &= ~ipl_eimr_map[ipl];
		eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
		if (eirr == 0)
			break;

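		/* service the highest-numbered pending vector first */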
		vec = 63 - dclz(eirr);
		ih = &rmixl_intrhand[vec];
		vecbit = 1ULL << vec;
		KASSERT(ih->ih_ipl == ipl);
		KASSERT((vecbit & eimr) == 0);
		KASSERT((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);

		/*
		 * ack in EIRR, and in PIC if needed,
		 * the irq we are about to handle
		 */
		rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
		if (RMIXL_VECTOR_IS_IRT(vec))
			RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
				1 << RMIXL_VECTOR_IRT(vec));

		if (ih->ih_func != NULL) {
#ifdef MULTIPROCESSOR
			if (ih->ih_mpsafe) {
				(void)(*ih->ih_func)(ih->ih_arg);
			} else {
				KASSERTMSG(ipl == IPL_VM,
					("%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
					__func__, sc->sc_vec_evcnts[vec].ev_name,
					ipl));
				KERNEL_LOCK(1, NULL);
				(void)(*ih->ih_func)(ih->ih_arg);
				KERNEL_UNLOCK_ONE(NULL);
			}
#else
			(void)(*ih->ih_func)(ih->ih_arg);
#endif /* MULTIPROCESSOR */
		}
		KASSERT(ipl == ih->ih_ipl);
		KASSERTMSG(curcpu()->ci_cpl >= ipl,
			("%s: after %s: cpl (%d) < ipl %d",
			__func__, sc->sc_vec_evcnts[vec].ev_name,
			ipl, curcpu()->ci_cpl));
		sc->sc_vec_evcnts[vec].ev_count++;
	}
}

#ifdef MULTIPROCESSOR
static int
rmixl_send_ipi(struct cpu_info *ci, int tag)
{
	const cpuid_t cpuid = ci->ci_cpuid;
	uint32_t core = (uint32_t)(cpuid >> 2);
	uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
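	/* ci_cpuid encodes the thread in its low two bits, the core above them */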
	uint64_t req = 1 << tag;
	uint32_t r;

	if (! CPUSET_HAS_P(cpus_running, cpu_index(ci)))
		return -1;

	KASSERT((tag >= 0) && (tag < NIPIS));

	r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT)
	  | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT)
	  | (RMIXL_INTRVEC_IPI + tag);

	mutex_enter(&rmixl_ipi_lock);
	atomic_or_64(&ci->ci_request_ipis, req);
	RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
	mutex_exit(&rmixl_ipi_lock);

	return 0;
}

static int
rmixl_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	const uint64_t ipi_mask = 1 << (uintptr_t)arg;

	KASSERT(ci->ci_cpl >= IPL_SCHED);
	KASSERT((uintptr_t)arg < NIPIS);

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
#endif	/* MULTIPROCESSOR */

#if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
int
rmixl_intrhand_print_subr(int vec)
{
	rmixl_intrhand_t *ih = &rmixl_intrhand[vec];
	printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n",
		vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl,
		ih->ih_cpumask);
	return 0;
}
int
rmixl_intrhand_print(void)
{
	for (int vec=0; vec < NINTRVECS ; vec++)
		rmixl_intrhand_print_subr(vec);
	return 0;
}

static inline void
rmixl_irt_entry_print(u_int irt)
{
	uint32_t c0, c1;

	if (irt >= NIRTS)
		return;
	c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
	c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
	printf("irt[%d]: %#x, %#x\n", irt, c0, c1);
}

int
rmixl_irt_print(void)
{
	printf("%s:\n", __func__);
	for (int irt=0; irt < NIRTS ; irt++)
		rmixl_irt_entry_print(irt);
	return 0;
}

void
rmixl_ipl_eimr_map_print(void)
{
	printf("IPL_NONE=%d, mask %#"PRIx64"\n",
		IPL_NONE, ipl_eimr_map[IPL_NONE]);
	printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
		IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
	printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
		IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
	printf("IPL_VM=%d, mask %#"PRIx64"\n",
		IPL_VM, ipl_eimr_map[IPL_VM]);
	printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
		IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
	printf("IPL_DDB=%d, mask %#"PRIx64"\n",
		IPL_DDB, ipl_eimr_map[IPL_DDB]);
	printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
		IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
}

#endif