/*	$NetBSD: rmixl_intr.c,v 1.1.2.28 2011/02/08 21:27:15 cliff Exp $	*/

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the RMI XLP, XLR, XLS
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.28 2011/02/08 21:27:15 cliff Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"
#define	__INTR_PRIVATE

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <mips/cpu.h>
#include <mips/locore.h>

#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>

#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef IOINTR_DEBUG
int iointr_debug = IOINTR_DEBUG;
# define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
#else
# define DPRINTF(x)
#endif

#define	RMIXL_PICREG_READ(off) \
	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
#define	RMIXL_PICREG_WRITE(off, val) \
	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))

/*
 * do not clear these when acking EIRR
 * (otherwise they get lost)
 */
#define	RMIXL_EIRR_PRESERVE_MASK	\
	((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
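
/*
 * Illustrative note (not authoritative; the macro above is): with the
 * usual mips CAUSE layout (MIPS_INT_MASK_5 = bit 15, MIPS_SOFT_INT_MASK =
 * bits 9:8), the shift works out to EIRR bits 7, 1 and 0, i.e. the
 * count/compare interrupt and the two soft interrupts.
 */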

/*
 * IRT assignments depend on the RMI chip family
 * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx)
 * use the right display string table for the CPU that's running.
 */

/*
 * rmixl_irtnames_xlrxxx
 * - use for XLRxxx
 */
static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio)",		/* 14 */
	"pic int 15 (hyper)",		/* 15 */
	"pic int 16 (pcix)",		/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (xgs0)",		/* 21 */
	"pic int 22 (xgs1)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (hyper_fatal)",	/* 24 */
	"pic int 25 (bridge_aerr)",	/* 25 */
	"pic int 26 (bridge_berr)",	/* 26 */
	"pic int 27 (bridge_tb)",	/* 27 */
	"pic int 28 (bridge_nmi)",	/* 28 */
	"pic int 29 (bridge_sram_derr)",/* 29 */
	"pic int 30 (gpio_fatal)",	/* 30 */
	"pic int 31 (reserved)",	/* 31 */
};

/*
 * rmixl_irtnames_xls2xx
 * - use for XLS2xx
 */
static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (pcie_link2)",	/* 23 */
	"pic int 24 (pcie_link3)",	/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls1xx
 * - use for XLS1xx, XLS4xx-Lite
 */
static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls4xx:
 * - use for XLS4xx, XLS6xx
 */
static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (pcie_link2)",	/* 28 */
	"pic int 29 (pcie_link3)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_vecnames_common:
 * - use for unknown cpu implementation
 * - covers all vectors, not just IRT intrs
 */
static const char * const rmixl_vecnames_common[NINTRVECS] = {
	"vec 0",		/*  0 */
	"vec 1",		/*  1 */
	"vec 2",		/*  2 */
	"vec 3",		/*  3 */
	"vec 4",		/*  4 */
	"vec 5",		/*  5 */
	"vec 6",		/*  6 */
	"vec 7",		/*  7 */
	"vec 8 (ipi)",		/*  8 */
	"vec 9 (ipi)",		/*  9 */
	"vec 10 (ipi)",		/* 10 */
	"vec 11 (ipi)",		/* 11 */
	"vec 12 (ipi)",		/* 12 */
	"vec 13 (ipi)",		/* 13 */
	"vec 14 (ipi)",		/* 14 */
	"vec 15 (ipi)",		/* 15 */
	"vec 16 (fmn)",		/* 16 */
	"vec 17",		/* 17 */
	"vec 18",		/* 18 */
	"vec 19",		/* 19 */
	"vec 20",		/* 20 */
	"vec 21",		/* 21 */
	"vec 22",		/* 22 */
	"vec 23",		/* 23 */
	"vec 24",		/* 24 */
	"vec 25",		/* 25 */
	"vec 26",		/* 26 */
	"vec 27",		/* 27 */
	"vec 28",		/* 28 */
	"vec 29",		/* 29 */
	"vec 30",		/* 30 */
	"vec 31",		/* 31 */
	"vec 32",		/* 32 */
	"vec 33",		/* 33 */
	"vec 34",		/* 34 */
	"vec 35",		/* 35 */
	"vec 36",		/* 36 */
	"vec 37",		/* 37 */
	"vec 38",		/* 38 */
	"vec 39",		/* 39 */
	"vec 40",		/* 40 */
	"vec 41",		/* 41 */
	"vec 42",		/* 42 */
	"vec 43",		/* 43 */
	"vec 44",		/* 44 */
	"vec 45",		/* 45 */
	"vec 46",		/* 46 */
	"vec 47",		/* 47 */
	"vec 48",		/* 48 */
	"vec 49",		/* 49 */
	"vec 50",		/* 50 */
	"vec 51",		/* 51 */
	"vec 52",		/* 52 */
	"vec 53",		/* 53 */
	"vec 54",		/* 54 */
	"vec 55",		/* 55 */
	"vec 56",		/* 56 */
	"vec 57",		/* 57 */
	"vec 58",		/* 58 */
	"vec 59",		/* 59 */
	"vec 60",		/* 60 */
	"vec 61",		/* 61 */
	"vec 62",		/* 62 */
	"vec 63",		/* 63 */
};

/*
 * mask of CPUs attached
 * while CPUs are attaching, we cast to volatile;
 * once they are attached, it's read-only so mp safe
 */
static uint32_t cpu_present_mask;

static kmutex_t *rmixl_ipi_lock;	/* covers RMIXL_PIC_IPIBASE */
static kmutex_t *rmixl_intr_lock;	/* covers rest of PIC, and rmixl_intrhand[] */
static rmixl_intrhand_t rmixl_intrhand[NINTRVECS];

#ifdef DIAGNOSTIC
static int rmixl_pic_init_done;
#endif


static const char *rmixl_intr_string_xlr(int);
static const char *rmixl_intr_string_xls(int);
static uint32_t rmixl_irt_thread_mask(int);
static void rmixl_irt_init(int);
static void rmixl_irt_disestablish(int);
static void rmixl_irt_establish(int, int, int,
		rmixl_intr_trigger_t, rmixl_intr_polarity_t);

#ifdef MULTIPROCESSOR
static int rmixl_send_ipi(struct cpu_info *, int);
static int rmixl_ipi_intr(void *);
#endif

#if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
int rmixl_intrhand_print_subr(int);
int rmixl_intrhand_print(void);
int rmixl_irt_print(void);
void rmixl_ipl_eimr_map_print(void);
#endif


static inline u_int
dclz(uint64_t val)
{
	int nlz;

	asm volatile("dclz %0, %1;"
		: "=r"(nlz) : "r"(val));

	return nlz;
}
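
/*
 * Illustrative sketch (not compiled, hypothetical values): how dclz()
 * is used by evbmips_iointr() below to turn the pending-interrupt image
 * read from the EIRR into the highest-numbered (highest-priority) vector.
 */
#if 0
	uint64_t eirr = __BIT(17) | __BIT(9);	/* vectors 17 and 9 pending */
	int vec = 63 - dclz(eirr);		/* dclz(eirr) == 46, so vec == 17 */
#endif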

void
evbmips_intr_init(void)
{
	uint32_t r;

	KASSERT(cpu_rmixlr(mips_options.mips_cpu)
	     || cpu_rmixls(mips_options.mips_cpu));


#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done != 0)
		panic("%s: rmixl_pic_init_done %d",
			__func__, rmixl_pic_init_done);
#endif

	rmixl_ipi_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_HIGH);
	rmixl_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_HIGH);

	mutex_enter(rmixl_intr_lock);

	/*
	 * initialize (zero) all IRT Entries in the PIC
	 */
	for (int i=0; i < NIRTS; i++)
		rmixl_irt_init(i);

	/*
	 * disable watchdog NMI, timers
	 *
	 * XXX
	 * WATCHDOG_ENB is preserved because clearing it causes a
	 * hang on the XLS616 (but not on the XLS408)
	 */
	r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
	r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
	RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);

#ifdef DIAGNOSTIC
	rmixl_pic_init_done = 1;
#endif
	mutex_exit(rmixl_intr_lock);

}

/*
 * establish vector for the mips3 count/compare clock interrupt
 * this ensures we enable it in the EIRR,
 * even though cpu_intr() handles the interrupt
 * note the 'mpsafe' arg here is a placeholder only
 */
void
rmixl_intr_init_clk(void)
{
	int vec = ffs(MIPS_INT_MASK_5 >> 8) - 1;

	mutex_enter(rmixl_intr_lock);

	void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
	if (ih == NULL)
		panic("%s: establish vec %d failed", __func__, vec);

	mutex_exit(rmixl_intr_lock);

}

#ifdef MULTIPROCESSOR
/*
 * establish IPI interrupt and send function
 */
void
rmixl_intr_init_ipi(void)
{
	u_int ipi, vec;
	void *ih;

	mutex_enter(rmixl_intr_lock);

	for (ipi=0; ipi < NIPIS; ipi++) {
		vec = RMIXL_INTRVEC_IPI + ipi;
		ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
			rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
		if (ih == NULL)
			panic("%s: establish ipi %d at vec %d failed",
				__func__, ipi, vec);
	}

	mips_locoresw.lsw_send_ipi = rmixl_send_ipi;

	mutex_exit(rmixl_intr_lock);

}
#endif	/* MULTIPROCESSOR */

/*
 * initialize per-cpu interrupt state in the softc
 * accumulate per-cpu bits in 'cpu_present_mask'
 */
void
rmixl_intr_init_cpu(struct cpu_info *ci)
{
	struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;

	KASSERT(sc != NULL);

	for (int vec=0; vec < NINTRVECS; vec++)
		evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
			EVCNT_TYPE_INTR, NULL,
			device_xname(sc->sc_dev),
			rmixl_intr_string(vec));

	KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
	atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
}

/*
 * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
 */
const char *
rmixl_intr_string(int vec)
{
	int irt;

	if (vec < 0 || vec >= NINTRVECS)
		panic("%s: vec index %d out of range, max %d",
			__func__, vec, NINTRVECS - 1);

	if (! RMIXL_VECTOR_IS_IRT(vec))
		return rmixl_vecnames_common[vec];

	irt = RMIXL_VECTOR_IRT(vec);
	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
	case CIDFL_RMI_TYPE_XLR:
		return rmixl_intr_string_xlr(irt);
	case CIDFL_RMI_TYPE_XLS:
		return rmixl_intr_string_xls(irt);
	case CIDFL_RMI_TYPE_XLP:
		panic("%s: RMI XLP not yet supported", __func__);
	}

	return "undefined";	/* appease gcc */
}

static const char *
rmixl_intr_string_xlr(int irt)
{
	return rmixl_irtnames_xlrxxx[irt];
}

static const char *
rmixl_intr_string_xls(int irt)
{
	const char *name;

	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
	case MIPS_XLS104:
	case MIPS_XLS108:
	case MIPS_XLS404LITE:
	case MIPS_XLS408LITE:
		name = rmixl_irtnames_xls1xx[irt];
		break;
	case MIPS_XLS204:
	case MIPS_XLS208:
		name = rmixl_irtnames_xls2xx[irt];
		break;
	case MIPS_XLS404:
	case MIPS_XLS408:
	case MIPS_XLS416:
	case MIPS_XLS608:
	case MIPS_XLS616:
		name = rmixl_irtnames_xls4xx[irt];
		break;
	default:
		name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
		break;
	}

	return name;
}

/*
 * rmixl_irt_thread_mask
 *
 *	given a bitmask of cpus, return an IRT thread mask
 */
static uint32_t
rmixl_irt_thread_mask(int cpumask)
{
	uint32_t irtc0;

#if defined(MULTIPROCESSOR)
#ifndef NOTYET
	if (cpumask == -1)
		return 1;	/* XXX TMP FIXME */
#endif

	/*
	 * discount cpus not present
	 */
	cpumask &= cpu_present_mask;

	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
	case MIPS_XLS104:
	case MIPS_XLS204:
	case MIPS_XLS404:
	case MIPS_XLS404LITE:
		irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
		irtc0 &= (__BITS(5,4) | __BITS(1,0));
		break;
	case MIPS_XLS108:
	case MIPS_XLS208:
	case MIPS_XLS408:
	case MIPS_XLS408LITE:
	case MIPS_XLS608:
		irtc0 = cpumask & __BITS(7,0);
		break;
	case MIPS_XLS416:
	case MIPS_XLS616:
		irtc0 = cpumask & __BITS(15,0);
		break;
	default:
		panic("%s: unknown cpu ID %#x\n", __func__,
			mips_options.mips_cpu_id);
	}
#else
	irtc0 = 1;
#endif	/* MULTIPROCESSOR */

	return irtc0;
}
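
/*
 * Worked example (hypothetical values) for the MIPS_XLS104/XLS204/
 * XLS404/XLS404LITE remap above, on the assumption that IRTC0 bits 1:0
 * and 5:4 are the thread-enable bits of the first two cores:
 *
 *	cpumask 0x5 (cpu indices 0 and 2)
 *	  -> ((0x5 >> 2) << 4) | (0x5 & __BITS(1,0)) == 0x11
 *	  -> thread 0 of each of the two cores is unmasked in IRTC0
 */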

/*
 * rmixl_irt_init
 * - initialize (invalidate) the IRT Entry for the given index
 *   by zeroing both the high and low words
 */
static void
rmixl_irt_init(int irt)
{
	KASSERT(irt < NIRTS);
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0);	/* high word */
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0);	/* low word */
}

/*
 * rmixl_irt_disestablish
 * - invalidate IRT Entry for given index
 */
static void
rmixl_irt_disestablish(int irt)
{
	KASSERT(mutex_owned(rmixl_intr_lock));
	DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
	rmixl_irt_init(irt);
}

/*
 * rmixl_irt_establish
 * - construct an IRT Entry for irt and write to PIC
 */
static void
rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
	rmixl_intr_polarity_t polarity)
{
	uint32_t irtc1;
	uint32_t irtc0;

	KASSERT(mutex_owned(rmixl_intr_lock));

	if (irt >= NIRTS)
		panic("%s: bad irt %d\n", __func__, irt);

	if (! RMIXL_VECTOR_IS_IRT(vec))
		panic("%s: bad vec %d\n", __func__, vec);

	switch (trigger) {
	case RMIXL_TRIG_EDGE:
	case RMIXL_TRIG_LEVEL:
		break;
	default:
		panic("%s: bad trigger %d\n", __func__, trigger);
	}

	switch (polarity) {
	case RMIXL_POLR_RISING:
	case RMIXL_POLR_HIGH:
	case RMIXL_POLR_FALLING:
	case RMIXL_POLR_LOW:
		break;
	default:
		panic("%s: bad polarity %d\n", __func__, polarity);
	}

	/*
	 * XXX IRT entries are not shared
	 */
	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);

	irtc0 = rmixl_irt_thread_mask(cpumask);

	irtc1 = RMIXL_PIC_IRTENTRYC1_VALID;
	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */

	if (trigger == RMIXL_TRIG_LEVEL)
		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;

	if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;

	irtc1 |= vec;	/* vector in EIRR */

	/*
	 * write IRT Entry to PIC
	 */
	DPRINTF(("%s: irt %d, irtc0 %#x, irtc1 %#x\n",
		__func__, irt, irtc0, irtc1));
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0);	/* low word */
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1);	/* high word */
}

void *
rmixl_vec_establish(int vec, int cpumask, int ipl,
	int (*func)(void *), void *arg, bool mpsafe)
{
	rmixl_intrhand_t *ih;
	uint64_t eimr_bit;
	int s;

	KASSERT(mutex_owned(rmixl_intr_lock));

	DPRINTF(("%s: vec %d, cpumask %#x, ipl %d, func %p, arg %p\n",
		__func__, vec, cpumask, ipl, func, arg));
#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done == 0)
		panic("%s: called before evbmips_intr_init", __func__);
#endif

	/*
	 * check args
	 */
	if (vec < 0 || vec >= NINTRVECS)
		panic("%s: vec %d out of range, max %d",
			__func__, vec, NINTRVECS - 1);
	if (ipl <= 0 || ipl >= _IPL_N)
		panic("%s: ipl %d out of range, min %d, max %d",
			__func__, ipl, 1, _IPL_N - 1);

	s = splhigh();

	ih = &rmixl_intrhand[vec];
	if (ih->ih_func != NULL) {
#ifdef DIAGNOSTIC
		printf("%s: intrhand[%d] busy\n", __func__, vec);
#endif
		splx(s);
		return NULL;
	}

	ih->ih_arg = arg;
	ih->ih_mpsafe = mpsafe;
	ih->ih_vec = vec;
	ih->ih_ipl = ipl;
	ih->ih_cpumask = cpumask;

	eimr_bit = (uint64_t)1 << vec;
	for (int i=ih->ih_ipl; --i >= 0; ) {
		KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
		ipl_eimr_map[i] |= eimr_bit;
	}

	ih->ih_func = func;	/* do this last */

	splx(s);

	return ih;
}

/*
 * rmixl_intr_establish
 * - used to establish an IRT-based interrupt only
 */
void *
rmixl_intr_establish(int irt, int cpumask, int ipl,
	rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
	int (*func)(void *), void *arg, bool mpsafe)
{
	rmixl_intrhand_t *ih;
	int vec;

#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done == 0)
		panic("%s: called before rmixl_pic_init_done", __func__);
#endif

	/*
	 * check args
	 */
	if (irt < 0 || irt >= NIRTS)
		panic("%s: irt %d out of range, max %d",
			__func__, irt, NIRTS - 1);
	if (ipl <= 0 || ipl >= _IPL_N)
		panic("%s: ipl %d out of range, min %d, max %d",
			__func__, ipl, 1, _IPL_N - 1);

	vec = RMIXL_IRT_VECTOR(irt);

	DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));

	mutex_enter(rmixl_intr_lock);

	/*
	 * establish vector
	 */
	ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);

	/*
	 * establish IRT Entry
	 */
	rmixl_irt_establish(irt, vec, cpumask, trigger, polarity);

	mutex_exit(rmixl_intr_lock);

	return ih;
}
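
/*
 * Hypothetical usage sketch (not part of this driver): a device attach
 * routine would hook an IRT-based interrupt roughly as below.  The IRT
 * number, IPL, handler and softc names are made-up placeholders.
 */
#if 0
	void *ih = rmixl_intr_establish(17 /* e.g. gmac0 IRT */, -1, IPL_VM,
	    RMIXL_TRIG_LEVEL, RMIXL_POLR_HIGH, mydrv_intr, sc, false);
	if (ih == NULL)
		panic("mydrv: cannot establish interrupt");
#endif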

void
rmixl_vec_disestablish(void *cookie)
{
	rmixl_intrhand_t *ih = cookie;
	uint64_t eimr_bit;

	KASSERT(mutex_owned(rmixl_intr_lock));
	KASSERT(ih->ih_vec < NINTRVECS);
	KASSERT(ih == &rmixl_intrhand[ih->ih_vec]);

	ih->ih_func = NULL;	/* do this first */

	eimr_bit = (uint64_t)1 << ih->ih_vec;
	for (int i=ih->ih_ipl; --i >= 0; ) {
		KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
		ipl_eimr_map[i] ^= eimr_bit;
	}
}

void
rmixl_intr_disestablish(void *cookie)
{
	rmixl_intrhand_t *ih = cookie;
	int vec;

	vec = ih->ih_vec;

	KASSERT(vec < NINTRVECS);
	KASSERT(ih == &rmixl_intrhand[vec]);

	mutex_enter(rmixl_intr_lock);

	/*
	 * disable/invalidate the IRT Entry if needed
	 */
	if (RMIXL_VECTOR_IS_IRT(vec))
		rmixl_irt_disestablish(vec);

	/*
	 * disassociate from the vector and free the handle
	 */
	rmixl_vec_disestablish(cookie);

	mutex_exit(rmixl_intr_lock);
}

void
evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending)
{
	struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc;

	DPRINTF(("%s: cpu%ld: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
		__func__, cpu_number(), ipl, pc, pending));

	/*
	 * the 'pending' arg only summarizes that there is something to do;
	 * the real pending status is obtained from the EIRR
	 */
	KASSERT(pending == MIPS_INT_MASK_1);

	for (;;) {
		rmixl_intrhand_t *ih;
		uint64_t eirr;
		uint64_t eimr;
		uint64_t vecbit;
		int vec;

		asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
		asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));

#ifdef IOINTR_DEBUG
		printf("%s: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
			__func__, eirr, eimr, ipl_eimr_map[ipl-1]);
#endif	/* IOINTR_DEBUG */

		/*
		 * reduce eirr to
		 * - ints that are enabled at or below this ipl
		 * - exclude count/compare clock and soft ints
		 *   they are handled elsewhere
		 */
		eirr &= ipl_eimr_map[ipl-1];
		eirr &= ~ipl_eimr_map[ipl];
		eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
		if (eirr == 0)
			break;

		vec = 63 - dclz(eirr);
		ih = &rmixl_intrhand[vec];
		vecbit = 1ULL << vec;
		KASSERT (ih->ih_ipl == ipl);
		KASSERT ((vecbit & eimr) == 0);
		KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);

		/*
		 * ack in the EIRR the irq we are about to handle
		 * disable all interrupts to prevent a race that would allow
		 * e.g. softints set from a higher interrupt to get
		 * clobbered by the EIRR read-modify-write
		 */
		asm volatile("dmtc0 $0, $9, 7;");
		asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
		eirr &= RMIXL_EIRR_PRESERVE_MASK;
		eirr |= vecbit;
		asm volatile("dmtc0 %0, $9, 6;" :: "r"(eirr));
		asm volatile("dmtc0 %0, $9, 7;" :: "r"(eimr));

		if (RMIXL_VECTOR_IS_IRT(vec))
			RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
				1 << RMIXL_VECTOR_IRT(vec));

		if (ih->ih_func != NULL) {
#ifdef MULTIPROCESSOR
			if (ih->ih_mpsafe) {
				(void)(*ih->ih_func)(ih->ih_arg);
			} else {
				KERNEL_LOCK(1, NULL);
				(void)(*ih->ih_func)(ih->ih_arg);
				KERNEL_UNLOCK_ONE(NULL);
			}
#else
			(void)(*ih->ih_func)(ih->ih_arg);
#endif /* MULTIPROCESSOR */
		}
		sc->sc_vec_evcnts[vec].ev_count++;
	}
}

#ifdef MULTIPROCESSOR
static int
rmixl_send_ipi(struct cpu_info *ci, int tag)
{
	const cpuid_t cpuid = ci->ci_cpuid;
	uint32_t core = (uint32_t)(cpuid >> 2);
	uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
	uint64_t req = 1 << tag;
	uint32_t r;
	extern volatile mips_cpuset_t cpus_running;

	if (! CPUSET_HAS(cpus_running, cpu_index(ci)))
		return -1;

	KASSERT((tag >= 0) && (tag < NIPIS));

	r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT)
	  | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT)
	  | (RMIXL_INTRVEC_IPI + tag);

	mutex_enter(rmixl_ipi_lock);
	atomic_or_64(&ci->ci_request_ipis, req);
	RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
	mutex_exit(rmixl_ipi_lock);

	return 0;
}

static int
rmixl_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	uint64_t ipi_mask;

	KASSERT((uintptr_t)arg < NIPIS);
	ipi_mask = 1 << (uintptr_t)arg;
	KASSERT((ci->ci_request_ipis & ipi_mask) != 0);

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
#endif	/* MULTIPROCESSOR */

#if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
int
rmixl_intrhand_print_subr(int vec)
{
	rmixl_intrhand_t *ih = &rmixl_intrhand[vec];
	printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n",
		vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl,
		ih->ih_cpumask);
	return 0;
}
int
rmixl_intrhand_print(void)
{
	for (int vec=0; vec < NINTRVECS ; vec++)
		rmixl_intrhand_print_subr(vec);
	return 0;
}

static inline void
rmixl_irt_entry_print(u_int irt)
{
	uint32_t c0, c1;

	if (irt >= NIRTS)
		return;
	c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
	c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
	printf("irt[%d]: %#x, %#x\n", irt, c0, c1);
}

int
rmixl_irt_print(void)
{
	printf("%s:\n", __func__);
	for (int irt=0; irt < NIRTS ; irt++)
		rmixl_irt_entry_print(irt);
	return 0;
}

void
rmixl_ipl_eimr_map_print(void)
{
	printf("IPL_NONE=%d, mask %#"PRIx64"\n",
		IPL_NONE, ipl_eimr_map[IPL_NONE]);
	printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
		IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
	printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
		IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
	printf("IPL_VM=%d, mask %#"PRIx64"\n",
		IPL_VM, ipl_eimr_map[IPL_VM]);
	printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
		IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
	printf("IPL_DDB=%d, mask %#"PRIx64"\n",
		IPL_DDB, ipl_eimr_map[IPL_DDB]);
	printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
		IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
}

#endif

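
/*
 * Illustrative note: these debug helpers are intended to be invoked by
 * hand, e.g. from ddb:
 *
 *	db> call rmixl_irt_print
 *	db> call rmixl_ipl_eimr_map_print
 *
 * (exact output depends on the PIC and ipl_eimr_map state at the time)
 */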