1 /* $NetBSD: e500_intr.c,v 1.41 2020/07/04 17:20:45 rin Exp $ */
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_mpc85xx.h"
38 #include "opt_multiprocessor.h"
39 #include "opt_ddb.h"
40
41 #define __INTR_PRIVATE
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: e500_intr.c,v 1.41 2020/07/04 17:20:45 rin Exp $");
45
46 #include <sys/param.h>
47 #include <sys/proc.h>
48 #include <sys/intr.h>
49 #include <sys/cpu.h>
50 #include <sys/kmem.h>
51 #include <sys/atomic.h>
52 #include <sys/bus.h>
53 #include <sys/xcall.h>
54 #include <sys/ipi.h>
55 #include <sys/bitops.h>
56 #include <sys/interrupt.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #ifdef __HAVE_FAST_SOFTINTS
61 #include <powerpc/softint.h>
62 #endif
63
64 #include <powerpc/spr.h>
65 #include <powerpc/booke/spr.h>
66
67 #include <powerpc/booke/cpuvar.h>
68 #include <powerpc/booke/e500reg.h>
69 #include <powerpc/booke/e500var.h>
70 #include <powerpc/booke/openpicreg.h>
71
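/*
 * The openpic Current Task Priority Register (CTPR) has 16 priority
 * levels (0..15).  Map IPLs so that IPL_HIGH corresponds to CTPR 15,
 * with lower IPLs mapping to correspondingly lower CTPR values.
 */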
72 #define IPL2CTPR(ipl) ((ipl) + 15 - IPL_HIGH)
73 #define CTPR2IPL(ctpr) ((ctpr) - (15 - IPL_HIGH))
74
75 #define IST_PERCPU_P(ist) ((ist) >= IST_TIMER)
76
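/*
 * Result of looking up an interrupt source: the openpic vector/priority
 * (VPR) and destination (DR) register offsets for the source, plus its
 * global vector number (the index into e500_intr_sources and the per-cpu
 * event counter array).
 */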
77 struct e500_intr_irq_info {
78 bus_addr_t irq_vpr;
79 bus_addr_t irq_dr;
80 u_int irq_vector;
81 };
82
83 struct intr_source {
84 int (*is_func)(void *);
85 void *is_arg;
86 int8_t is_ipl;
87 uint8_t is_ist;
88 uint8_t is_irq;
89 uint8_t is_refcnt;
90 bus_size_t is_vpr;
91 bus_size_t is_dr;
92 char is_source[INTRIDBUF];
93 char is_xname[INTRDEVNAMEBUF];
94 };
95
96 #define INTR_SOURCE_INITIALIZER \
97 { .is_func = e500_intr_spurious, .is_arg = NULL, \
98 .is_irq = -1, .is_ipl = IPL_NONE, .is_ist = IST_NONE, \
99 .is_source = "", .is_xname = "", }
100
101 struct e500_intr_name {
102 uint8_t in_irq;
103 	const char in_name[16];
104 };
105
106 static const struct e500_intr_name e500_onchip_intr_names[] = {
107 { ISOURCE_L2, "l2" },
108 { ISOURCE_ECM, "ecm" },
109 { ISOURCE_DDR, "ddr" },
110 { ISOURCE_LBC, "lbc" },
111 { ISOURCE_DMA_CHAN1, "dma-chan1" },
112 { ISOURCE_DMA_CHAN2, "dma-chan2" },
113 { ISOURCE_DMA_CHAN3, "dma-chan3" },
114 { ISOURCE_DMA_CHAN4, "dma-chan4" },
115 { ISOURCE_PCI1, "pci1" },
116 { ISOURCE_PCIEX2, "pcie2" },
117 { ISOURCE_PCIEX , "pcie1" },
118 { ISOURCE_PCIEX3, "pcie3" },
119 { ISOURCE_USB1, "usb1" },
120 { ISOURCE_ETSEC1_TX, "etsec1-tx" },
121 { ISOURCE_ETSEC1_RX, "etsec1-rx" },
122 { ISOURCE_ETSEC3_TX, "etsec3-tx" },
123 { ISOURCE_ETSEC3_RX, "etsec3-rx" },
124 { ISOURCE_ETSEC3_ERR, "etsec3-err" },
125 { ISOURCE_ETSEC1_ERR, "etsec1-err" },
126 { ISOURCE_ETSEC2_TX, "etsec2-tx" },
127 { ISOURCE_ETSEC2_RX, "etsec2-rx" },
128 { ISOURCE_ETSEC4_TX, "etsec4-tx" },
129 { ISOURCE_ETSEC4_RX, "etsec4-rx" },
130 { ISOURCE_ETSEC4_ERR, "etsec4-err" },
131 { ISOURCE_ETSEC2_ERR, "etsec2-err" },
132 { ISOURCE_DUART, "duart" },
133 { ISOURCE_I2C, "i2c" },
134 { ISOURCE_PERFMON, "perfmon" },
135 { ISOURCE_SECURITY1, "sec1" },
136 { ISOURCE_GPIO, "gpio" },
137 { ISOURCE_SRIO_EWPU, "srio-ewpu" },
138 { ISOURCE_SRIO_ODBELL, "srio-odbell" },
139 { ISOURCE_SRIO_IDBELL, "srio-idbell" },
140 { ISOURCE_SRIO_OMU1, "srio-omu1" },
141 { ISOURCE_SRIO_IMU1, "srio-imu1" },
142 { ISOURCE_SRIO_OMU2, "srio-omu2" },
143 { ISOURCE_SRIO_IMU2, "srio-imu2" },
144 { ISOURCE_SECURITY2, "sec2" },
145 { ISOURCE_SPI, "spi" },
146 { ISOURCE_ETSEC1_PTP, "etsec1-ptp" },
147 { ISOURCE_ETSEC2_PTP, "etsec2-ptp" },
148 { ISOURCE_ETSEC3_PTP, "etsec3-ptp" },
149 { ISOURCE_ETSEC4_PTP, "etsec4-ptp" },
150 { ISOURCE_ESDHC, "esdhc" },
151 { 0, "" },
152 };
153
154 const struct e500_intr_name default_external_intr_names[] = {
155 { 0, "" },
156 };
157
158 static const struct e500_intr_name e500_msigroup_intr_names[] = {
159 { 0, "msigroup0" },
160 { 1, "msigroup1" },
161 { 2, "msigroup2" },
162 { 3, "msigroup3" },
163 { 4, "msigroup4" },
164 { 5, "msigroup5" },
165 { 6, "msigroup6" },
166 { 7, "msigroup7" },
167 { 0, "" },
168 };
169
170 static const struct e500_intr_name e500_timer_intr_names[] = {
171 { 0, "timer0" },
172 { 1, "timer1" },
173 { 2, "timer2" },
174 { 3, "timer3" },
175 { 0, "" },
176 };
177
178 static const struct e500_intr_name e500_ipi_intr_names[] = {
179 { 0, "ipi0" },
180 { 1, "ipi1" },
181 { 2, "ipi2" },
182 { 3, "ipi3" },
183 { 0, "" },
184 };
185
186 static const struct e500_intr_name e500_mi_intr_names[] = {
187 { 0, "mi0" },
188 { 1, "mi1" },
189 { 2, "mi2" },
190 { 3, "mi3" },
191 { 0, "" },
192 };
193
194 struct e500_intr_info {
195 u_int ii_external_sources;
196 uint32_t ii_onchip_bitmap[2];
197 u_int ii_onchip_sources;
198 u_int ii_msigroup_sources;
199 u_int ii_ipi_sources; /* per-cpu */
200 u_int ii_timer_sources; /* per-cpu */
201 u_int ii_mi_sources; /* per-cpu */
202 u_int ii_percpu_sources;
203 const struct e500_intr_name *ii_external_intr_names;
204 const struct e500_intr_name *ii_onchip_intr_names;
205 u_int8_t ii_ist_vectors[IST_MAX+1];
206 };
207
208 static kmutex_t e500_intr_lock __cacheline_aligned;
209 static struct e500_intr_info e500_intr_info;
210
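/*
 * The global vector space is laid out, in order, as: external sources,
 * on-chip sources, MSI groups, and then the per-cpu timer, IPI and
 * message sources.  ii_ist_vectors[] records the base vector of each
 * interrupt source type.
 */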
211 #define INTR_INFO_DECL(lc_chip, UC_CHIP) \
212 static const struct e500_intr_info lc_chip##_intr_info = { \
213 .ii_external_sources = UC_CHIP ## _EXTERNALSOURCES, \
214 .ii_onchip_bitmap = UC_CHIP ## _ONCHIPBITMAP, \
215 .ii_onchip_sources = UC_CHIP ## _ONCHIPSOURCES, \
216 .ii_msigroup_sources = UC_CHIP ## _MSIGROUPSOURCES, \
217 .ii_timer_sources = UC_CHIP ## _TIMERSOURCES, \
218 .ii_ipi_sources = UC_CHIP ## _IPISOURCES, \
219 .ii_mi_sources = UC_CHIP ## _MISOURCES, \
220 .ii_percpu_sources = UC_CHIP ## _TIMERSOURCES \
221 + UC_CHIP ## _IPISOURCES + UC_CHIP ## _MISOURCES, \
222 .ii_external_intr_names = lc_chip ## _external_intr_names, \
223 .ii_onchip_intr_names = lc_chip ## _onchip_intr_names, \
224 .ii_ist_vectors = { \
225 [IST_NONE] = ~0, \
226 [IST_EDGE] = 0, \
227 [IST_LEVEL_LOW] = 0, \
228 [IST_LEVEL_HIGH] = 0, \
229 [IST_PULSE] = 0, \
230 [IST_ONCHIP] = UC_CHIP ## _EXTERNALSOURCES, \
231 [IST_MSIGROUP] = UC_CHIP ## _EXTERNALSOURCES \
232 + UC_CHIP ## _ONCHIPSOURCES, \
233 [IST_TIMER] = UC_CHIP ## _EXTERNALSOURCES \
234 + UC_CHIP ## _ONCHIPSOURCES \
235 + UC_CHIP ## _MSIGROUPSOURCES, \
236 [IST_IPI] = UC_CHIP ## _EXTERNALSOURCES \
237 + UC_CHIP ## _ONCHIPSOURCES \
238 + UC_CHIP ## _MSIGROUPSOURCES \
239 + UC_CHIP ## _TIMERSOURCES, \
240 [IST_MI] = UC_CHIP ## _EXTERNALSOURCES \
241 + UC_CHIP ## _ONCHIPSOURCES \
242 + UC_CHIP ## _MSIGROUPSOURCES \
243 + UC_CHIP ## _TIMERSOURCES \
244 + UC_CHIP ## _IPISOURCES, \
245 [IST_MAX] = UC_CHIP ## _EXTERNALSOURCES \
246 + UC_CHIP ## _ONCHIPSOURCES \
247 + UC_CHIP ## _MSIGROUPSOURCES \
248 + UC_CHIP ## _TIMERSOURCES \
249 + UC_CHIP ## _IPISOURCES \
250 + UC_CHIP ## _MISOURCES, \
251 }, \
252 }
253
254 #ifdef MPC8536
255 #define mpc8536_external_intr_names default_external_intr_names
256 const struct e500_intr_name mpc8536_onchip_intr_names[] = {
257 { ISOURCE_SATA2, "sata2" },
258 { ISOURCE_USB2, "usb2" },
259 { ISOURCE_USB3, "usb3" },
260 { ISOURCE_SATA1, "sata1" },
261 { 0, "" },
262 };
263
264 INTR_INFO_DECL(mpc8536, MPC8536);
265 #endif
266
267 #ifdef MPC8544
268 #define mpc8544_external_intr_names default_external_intr_names
269 const struct e500_intr_name mpc8544_onchip_intr_names[] = {
270 { 0, "" },
271 };
272
273 INTR_INFO_DECL(mpc8544, MPC8544);
274 #endif
275 #ifdef MPC8548
276 #define mpc8548_external_intr_names default_external_intr_names
277 const struct e500_intr_name mpc8548_onchip_intr_names[] = {
278 { ISOURCE_PCI1, "pci1" },
279 { ISOURCE_PCI2, "pci2" },
280 { 0, "" },
281 };
282
283 INTR_INFO_DECL(mpc8548, MPC8548);
284 #endif
285 #ifdef MPC8555
286 #define mpc8555_external_intr_names default_external_intr_names
287 const struct e500_intr_name mpc8555_onchip_intr_names[] = {
288 { ISOURCE_PCI2, "pci2" },
289 { ISOURCE_CPM, "CPM" },
290 { 0, "" },
291 };
292
293 INTR_INFO_DECL(mpc8555, MPC8555);
294 #endif
295 #ifdef MPC8568
296 #define mpc8568_external_intr_names default_external_intr_names
297 const struct e500_intr_name mpc8568_onchip_intr_names[] = {
298 { ISOURCE_QEB_LOW, "QEB low" },
299 { ISOURCE_QEB_PORT, "QEB port" },
300 { ISOURCE_QEB_IECC, "QEB iram ecc" },
301 { ISOURCE_QEB_MUECC, "QEB ram ecc" },
302 { ISOURCE_TLU1, "tlu1" },
303 { ISOURCE_QEB_HIGH, "QEB high" },
304 { 0, "" },
305 };
306
307 INTR_INFO_DECL(mpc8568, MPC8568);
308 #endif
309 #ifdef MPC8572
310 #define mpc8572_external_intr_names default_external_intr_names
311 const struct e500_intr_name mpc8572_onchip_intr_names[] = {
312 { ISOURCE_PCIEX3_MPC8572, "pcie3" },
313 { ISOURCE_FEC, "fec" },
314 { ISOURCE_PME_GENERAL, "pme" },
315 { ISOURCE_TLU1, "tlu1" },
316 { ISOURCE_TLU2, "tlu2" },
317 { ISOURCE_PME_CHAN1, "pme-chan1" },
318 { ISOURCE_PME_CHAN2, "pme-chan2" },
319 { ISOURCE_PME_CHAN3, "pme-chan3" },
320 { ISOURCE_PME_CHAN4, "pme-chan4" },
321 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
322 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
323 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
324 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
325 { 0, "" },
326 };
327
328 INTR_INFO_DECL(mpc8572, MPC8572);
329 #endif
330
331 #ifdef P1025
332 #define p1025_external_intr_names default_external_intr_names
333 const struct e500_intr_name p1025_onchip_intr_names[] = {
334 { ISOURCE_PCIEX3_MPC8572, "pcie3" },
335 { ISOURCE_ETSEC1_G1_TX, "etsec1-g1-tx" },
336 { ISOURCE_ETSEC1_G1_RX, "etsec1-g1-rx" },
337 { ISOURCE_ETSEC1_G1_ERR, "etsec1-g1-error" },
338 { ISOURCE_ETSEC2_G1_TX, "etsec2-g1-tx" },
339 { ISOURCE_ETSEC2_G1_RX, "etsec2-g1-rx" },
340 { ISOURCE_ETSEC2_G1_ERR, "etsec2-g1-error" },
341 { ISOURCE_ETSEC3_G1_TX, "etsec3-g1-tx" },
342 { ISOURCE_ETSEC3_G1_RX, "etsec3-g1-rx" },
343 { ISOURCE_ETSEC3_G1_ERR, "etsec3-g1-error" },
344 { ISOURCE_QEB_MUECC, "qeb-low" },
345 { ISOURCE_QEB_HIGH, "qeb-crit" },
346 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
347 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
348 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
349 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
350 { 0, "" },
351 };
352
353 INTR_INFO_DECL(p1025, P1025);
354 #endif
355
356 #ifdef P2020
357 #define p20x0_external_intr_names default_external_intr_names
358 const struct e500_intr_name p20x0_onchip_intr_names[] = {
359 { ISOURCE_PCIEX3_MPC8572, "pcie3" },
360 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
361 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
362 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
363 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
364 { 0, "" },
365 };
366
367 INTR_INFO_DECL(p20x0, P20x0);
368 #endif
369
370 #ifdef P1023
371 #define p1023_external_intr_names default_external_intr_names
372 const struct e500_intr_name p1023_onchip_intr_names[] = {
373 { ISOURCE_FMAN, "fman" },
374 { ISOURCE_MDIO, "mdio" },
375 { ISOURCE_QMAN0, "qman0" },
376 { ISOURCE_BMAN0, "bman0" },
377 { ISOURCE_QMAN1, "qman1" },
378 { ISOURCE_BMAN1, "bman1" },
379 { ISOURCE_QMAN2, "qman2" },
380 { ISOURCE_BMAN2, "bman2" },
381 { ISOURCE_SECURITY2_P1023, "sec2" },
382 { ISOURCE_SEC_GENERAL, "sec-general" },
383 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
384 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
385 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
386 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
387 { 0, "" },
388 };
389
390 INTR_INFO_DECL(p1023, P1023);
391 #endif
392
393 static const char ist_names[][12] = {
394 [IST_NONE] = "none",
395 [IST_EDGE] = "edge",
396 [IST_LEVEL_LOW] = "level-",
397 [IST_LEVEL_HIGH] = "level+",
398 [IST_PULSE] = "pulse",
399 [IST_MSI] = "msi",
400 [IST_ONCHIP] = "onchip",
401 [IST_MSIGROUP] = "msigroup",
402 [IST_TIMER] = "timer",
403 [IST_IPI] = "ipi",
404 [IST_MI] = "msgint",
405 };
406
407 static struct intr_source *e500_intr_sources;
408 static const struct intr_source *e500_intr_last_source;
409
410 static void *e500_intr_establish(int, int, int, int (*)(void *), void *,
411 const char *);
412 static void e500_intr_disestablish(void *);
413 static void e500_intr_cpu_attach(struct cpu_info *ci);
414 static void e500_intr_cpu_hatch(struct cpu_info *ci);
415 static void e500_intr_cpu_send_ipi(cpuid_t, uintptr_t);
416 static void e500_intr_init(void);
417 static void e500_intr_init_precpu(void);
418 static const char *e500_intr_string(int, int, char *, size_t);
419 static const char *e500_intr_typename(int);
420 static void e500_critintr(struct trapframe *tf);
421 static void e500_decrintr(struct trapframe *tf);
422 static void e500_extintr(struct trapframe *tf);
423 static void e500_fitintr(struct trapframe *tf);
424 static void e500_wdogintr(struct trapframe *tf);
425 static void e500_spl0(void);
426 static int e500_splraise(int);
427 static void e500_splx(int);
428 static const char *e500_intr_all_name_lookup(int, int);
429
430 const struct intrsw e500_intrsw = {
431 .intrsw_establish = e500_intr_establish,
432 .intrsw_disestablish = e500_intr_disestablish,
433 .intrsw_init = e500_intr_init,
434 .intrsw_cpu_attach = e500_intr_cpu_attach,
435 .intrsw_cpu_hatch = e500_intr_cpu_hatch,
436 .intrsw_cpu_send_ipi = e500_intr_cpu_send_ipi,
437 .intrsw_string = e500_intr_string,
438 .intrsw_typename = e500_intr_typename,
439
440 .intrsw_critintr = e500_critintr,
441 .intrsw_decrintr = e500_decrintr,
442 .intrsw_extintr = e500_extintr,
443 .intrsw_fitintr = e500_fitintr,
444 .intrsw_wdogintr = e500_wdogintr,
445
446 .intrsw_splraise = e500_splraise,
447 .intrsw_splx = e500_splx,
448 .intrsw_spl0 = e500_spl0,
449
450 #ifdef __HAVE_FAST_SOFTINTS
451 .intrsw_softint_init_md = powerpc_softint_init_md,
452 .intrsw_softint_trigger = powerpc_softint_trigger,
453 #endif
454 };
455
456 static bool wdog_barked;
457
458 static inline uint32_t
459 openpic_read(struct cpu_softc *cpu, bus_size_t offset)
460 {
461
462 return bus_space_read_4(cpu->cpu_bst, cpu->cpu_bsh,
463 OPENPIC_BASE + offset);
464 }
465
466 static inline void
467 openpic_write(struct cpu_softc *cpu, bus_size_t offset, uint32_t val)
468 {
469
470 	bus_space_write_4(cpu->cpu_bst, cpu->cpu_bsh,
471 	    OPENPIC_BASE + offset, val);
472 }
473
474 static const char *
475 e500_intr_external_name_lookup(int irq)
476 {
477 prop_array_t extirqs = board_info_get_object("external-irqs");
478 prop_string_t irqname = prop_array_get(extirqs, irq);
479 KASSERT(irqname != NULL);
480 KASSERT(prop_object_type(irqname) == PROP_TYPE_STRING);
481
482 return prop_string_cstring_nocopy(irqname);
483 }
484
485 static const char *
486 e500_intr_name_lookup(const struct e500_intr_name *names, int irq)
487 {
488 for (; names->in_name[0] != '\0'; names++) {
489 if (names->in_irq == irq)
490 return names->in_name;
491 }
492
493 return NULL;
494 }
495
496 static const char *
497 e500_intr_onchip_name_lookup(int irq)
498 {
499 const char *name;
500
501 name = e500_intr_name_lookup(e500_intr_info.ii_onchip_intr_names, irq);
502 if (name == NULL)
503 name = e500_intr_name_lookup(e500_onchip_intr_names, irq);
504
505 return name;
506 }
507
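/*
 * Change this cpu's IPL by reprogramming the openpic CTPR; interrupt
 * sources whose IPL is at or below the new level remain masked by the
 * pic.
 */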
508 static inline void
509 e500_splset(struct cpu_info *ci, int ipl)
510 {
511 struct cpu_softc * const cpu = ci->ci_softc;
512
513 KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE);
514 const u_int ctpr = IPL2CTPR(ipl);
515 KASSERT(openpic_read(cpu, OPENPIC_CTPR) == IPL2CTPR(ci->ci_cpl));
516 openpic_write(cpu, OPENPIC_CTPR, ctpr);
517 KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ctpr);
518 #ifdef DIAGNOSTIC
519 cpu->cpu_spl_tb[ipl][ci->ci_cpl] = mftb();
520 #endif
521 ci->ci_cpl = ipl;
522 }
523
524 static void
525 e500_spl0(void)
526 {
527 wrtee(0);
528
529 struct cpu_info * const ci = curcpu();
530
531 #ifdef __HAVE_FAST_SOFTINTS
532 if (__predict_false(ci->ci_data.cpu_softints != 0)) {
533 e500_splset(ci, IPL_HIGH);
534 wrtee(PSL_EE);
535 powerpc_softint(ci, IPL_NONE,
536 (vaddr_t)__builtin_return_address(0));
537 wrtee(0);
538 }
539 #endif /* __HAVE_FAST_SOFTINTS */
540 e500_splset(ci, IPL_NONE);
541
542 wrtee(PSL_EE);
543 }
544
545 static void
546 e500_splx(int ipl)
547 {
548 struct cpu_info * const ci = curcpu();
549 const int old_ipl = ci->ci_cpl;
550
551 /* if we paniced because of watchdog, PSL_CE will be clear. */
552 KASSERT(wdog_barked || (mfmsr() & PSL_CE));
553
554 if (ipl == old_ipl)
555 return;
556
557 if (__predict_false(ipl > old_ipl)) {
558 printf("%s: %p: cpl=%u: ignoring splx(%u) to raise ipl\n",
559 __func__, __builtin_return_address(0), old_ipl, ipl);
560 if (old_ipl == IPL_NONE)
561 Debugger();
562 }
563
564 // const
565 register_t msr = wrtee(0);
566 #ifdef __HAVE_FAST_SOFTINTS
567 const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << ipl);
568 if (__predict_false(softints != 0)) {
569 e500_splset(ci, IPL_HIGH);
570 wrtee(msr);
571 powerpc_softint(ci, ipl,
572 (vaddr_t)__builtin_return_address(0));
573 wrtee(0);
574 }
575 #endif /* __HAVE_FAST_SOFTINTS */
576 e500_splset(ci, ipl);
577 #if 1
578 if (ipl < IPL_VM && old_ipl >= IPL_VM)
579 msr = PSL_EE;
580 #endif
581 wrtee(msr);
582 }
583
584 static int
585 e500_splraise(int ipl)
586 {
587 struct cpu_info * const ci = curcpu();
588 const int old_ipl = ci->ci_cpl;
589
590 /* if we paniced because of watchdog, PSL_CE will be clear. */
591 KASSERT(wdog_barked || (mfmsr() & PSL_CE));
592
593 if (old_ipl < ipl) {
594 //const
595 register_t msr = wrtee(0);
596 e500_splset(ci, ipl);
597 #if 0
598 if (old_ipl < IPL_VM && ipl >= IPL_VM)
599 msr = 0;
600 #endif
601 wrtee(msr);
602 }
603 #if 0
604 else if (ipl == IPL_NONE) {
605 panic("%s: %p: cpl=%u: attempt to splraise(IPL_NONE)",
606 __func__, __builtin_return_address(0), old_ipl);
607 } else if (old_ipl > ipl) {
608 printf("%s: %p: cpl=%u: ignoring splraise(%u) to lower ipl\n",
609 __func__, __builtin_return_address(0), old_ipl, ipl);
610 }
611 #endif
612
613 return old_ipl;
614 }
615
616 static int
617 e500_intr_spurious(void *arg)
618 {
619 return 0;
620 }
621
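/*
 * Validate an (irq, ipl, ist) triple and compute the openpic VPR/DR
 * register offsets and global vector number for that source.  Returns
 * false if the combination is not valid for this chip.
 */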
622 static bool
623 e500_intr_irq_info_get(struct cpu_info *ci, u_int irq, int ipl, int ist,
624 struct e500_intr_irq_info *ii)
625 {
626 const struct e500_intr_info * const info = &e500_intr_info;
627 bool ok;
628
629 #if DEBUG > 2
630 printf("%s(%p,irq=%u,ipl=%u,ist=%u,%p)\n", __func__, ci, irq, ipl, ist, ii);
631 #endif
632
633 if (ipl < IPL_VM || ipl > IPL_HIGH) {
634 #if DEBUG > 2
635 printf("%s:%d ipl=%u\n", __func__, __LINE__, ipl);
636 #endif
637 return false;
638 }
639
640 if (ist <= IST_NONE || ist >= IST_MAX) {
641 #if DEBUG > 2
642 printf("%s:%d ist=%u\n", __func__, __LINE__, ist);
643 #endif
644 return false;
645 }
646
647 ii->irq_vector = irq + info->ii_ist_vectors[ist];
648 if (IST_PERCPU_P(ist) && ist != IST_IPI)
649 ii->irq_vector += ci->ci_cpuid * info->ii_percpu_sources;
650
651 switch (ist) {
652 default:
653 ii->irq_vpr = OPENPIC_EIVPR(irq);
654 ii->irq_dr = OPENPIC_EIDR(irq);
655 ok = irq < info->ii_external_sources
656 && (ist == IST_EDGE
657 || ist == IST_LEVEL_LOW
658 || ist == IST_LEVEL_HIGH);
659 break;
660 case IST_PULSE:
661 ok = false;
662 break;
663 case IST_ONCHIP:
664 ii->irq_vpr = OPENPIC_IIVPR(irq);
665 ii->irq_dr = OPENPIC_IIDR(irq);
666 ok = irq < 32 * __arraycount(info->ii_onchip_bitmap);
667 #if DEBUG > 2
668 printf("%s: irq=%u: ok=%u\n", __func__, irq, ok);
669 #endif
670 ok = ok && (info->ii_onchip_bitmap[irq/32] & (1 << (irq & 31)));
671 #if DEBUG > 2
672 printf("%s: %08x%08x -> %08x%08x: ok=%u\n", __func__,
673 irq < 32 ? 0 : (1 << irq), irq < 32 ? (1 << irq) : 0,
674 info->ii_onchip_bitmap[1], info->ii_onchip_bitmap[0],
675 ok);
676 #endif
677 break;
678 case IST_MSIGROUP:
679 ii->irq_vpr = OPENPIC_MSIVPR(irq);
680 ii->irq_dr = OPENPIC_MSIDR(irq);
681 ok = irq < info->ii_msigroup_sources
682 && ipl == IPL_VM;
683 break;
684 case IST_TIMER:
685 ii->irq_vpr = OPENPIC_GTVPR(ci->ci_cpuid, irq);
686 ii->irq_dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
687 ok = irq < info->ii_timer_sources;
688 #if DEBUG > 2
689 printf("%s: IST_TIMER irq=%u: ok=%u\n", __func__, irq, ok);
690 #endif
691 break;
692 case IST_IPI:
693 ii->irq_vpr = OPENPIC_IPIVPR(irq);
694 ii->irq_dr = OPENPIC_IPIDR(irq);
695 ok = irq < info->ii_ipi_sources;
696 break;
697 case IST_MI:
698 ii->irq_vpr = OPENPIC_MIVPR(irq);
699 ii->irq_dr = OPENPIC_MIDR(irq);
700 ok = irq < info->ii_mi_sources;
701 break;
702 }
703
704 return ok;
705 }
706
707 static const char *
708 e500_intr_string(int irq, int ist, char *buf, size_t len)
709 {
710 struct cpu_info * const ci = curcpu();
711 struct cpu_softc * const cpu = ci->ci_softc;
712 struct e500_intr_irq_info ii;
713
714 if (!e500_intr_irq_info_get(ci, irq, IPL_VM, ist, &ii))
715 return NULL;
716
717 strlcpy(buf, cpu->cpu_evcnt_intrs[ii.irq_vector].ev_name, len);
718 return buf;
719 }
720
721 __CTASSERT(__arraycount(ist_names) == IST_MAX);
722
723 static const char *
724 e500_intr_typename(int ist)
725 {
726 if (IST_NONE <= ist && ist < IST_MAX)
727 return ist_names[ist];
728
729 return NULL;
730 }
731
732 static void *
733 e500_intr_cpu_establish(struct cpu_info *ci, int irq, int ipl, int ist,
734 int (*handler)(void *), void *arg, const char *xname)
735 {
736 struct cpu_softc * const cpu = ci->ci_softc;
737 struct e500_intr_irq_info ii;
738
739 KASSERT(ipl >= IPL_VM && ipl <= IPL_HIGH);
740 KASSERT(ist > IST_NONE && ist < IST_MAX && ist != IST_MSI);
741
742 if (!e500_intr_irq_info_get(ci, irq, ipl, ist, &ii)) {
743 printf("%s: e500_intr_irq_info_get(%p,%u,%u,%u,%p) failed\n",
744 __func__, ci, irq, ipl, ist, &ii);
745 return NULL;
746 }
747
748 if (xname == NULL) {
749 xname = e500_intr_all_name_lookup(irq, ist);
750 if (xname == NULL)
751 xname = "unknown";
752 }
753
754 struct intr_source * const is = &e500_intr_sources[ii.irq_vector];
755 mutex_enter(&e500_intr_lock);
756 if (is->is_ipl != IPL_NONE) {
757 		/* XXX IPI0 is shared by all CPUs. */
758 if (is->is_ist != IST_IPI ||
759 is->is_irq != irq ||
760 is->is_ipl != ipl ||
761 is->is_ist != ist ||
762 is->is_func != handler ||
763 is->is_arg != arg) {
764 mutex_exit(&e500_intr_lock);
765 return NULL;
766 }
767 }
768
769 is->is_func = handler;
770 is->is_arg = arg;
771 is->is_ipl = ipl;
772 is->is_ist = ist;
773 is->is_irq = irq;
774 is->is_refcnt++;
775 is->is_vpr = ii.irq_vpr;
776 is->is_dr = ii.irq_dr;
777 switch (ist) {
778 case IST_EDGE:
779 case IST_LEVEL_LOW:
780 case IST_LEVEL_HIGH:
781 snprintf(is->is_source, sizeof(is->is_source), "extirq %d",
782 irq);
783 break;
784 case IST_ONCHIP:
785 snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);
786 break;
787 case IST_MSIGROUP:
788 snprintf(is->is_source, sizeof(is->is_source), "msigroup %d",
789 irq);
790 break;
791 case IST_TIMER:
792 snprintf(is->is_source, sizeof(is->is_source), "timer %d", irq);
793 break;
794 case IST_IPI:
795 snprintf(is->is_source, sizeof(is->is_source), "ipi %d", irq);
796 break;
797 case IST_MI:
798 snprintf(is->is_source, sizeof(is->is_source), "mi %d", irq);
799 break;
800 case IST_PULSE:
801 default:
802 panic("%s: invalid ist (%d)\n", __func__, ist);
803 }
804 strlcpy(is->is_xname, xname, sizeof(is->is_xname));
805
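	/*
	 * Encode the priority and vector.  The vector field carries
	 * ((global vector + 1) << 4) | ipl, which e500_extintr() decodes
	 * from the IACK register when the interrupt is taken.
	 */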
806 uint32_t vpr = VPR_PRIORITY_MAKE(IPL2CTPR(ipl))
807 | VPR_VECTOR_MAKE(((ii.irq_vector + 1) << 4) | ipl)
808 | (ist == IST_LEVEL_LOW
809 ? VPR_LEVEL_LOW
810 : (ist == IST_LEVEL_HIGH
811 ? VPR_LEVEL_HIGH
812 : (ist == IST_ONCHIP
813 ? VPR_P_HIGH
814 : 0)));
815
816 /*
817 * All interrupts go to the primary except per-cpu interrupts which get
818 * routed to the appropriate cpu.
819 */
820 uint32_t dr = openpic_read(cpu, ii.irq_dr);
821
822 dr |= 1 << (IST_PERCPU_P(ist) ? ci->ci_cpuid : 0);
823
824 /*
825 * Update the vector/priority and destination registers keeping the
826 * interrupt masked.
827 */
828 const register_t msr = wrtee(0); /* disable interrupts */
829 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
830 openpic_write(cpu, ii.irq_dr, dr);
831
832 /*
833 * Now unmask the interrupt.
834 */
835 openpic_write(cpu, ii.irq_vpr, vpr);
836
837 wrtee(msr); /* re-enable interrupts */
838
839 mutex_exit(&e500_intr_lock);
840
841 return is;
842 }
843
844 static void *
845 e500_intr_establish(int irq, int ipl, int ist, int (*handler)(void *),
846 void *arg, const char *xname)
847 {
848 return e500_intr_cpu_establish(curcpu(), irq, ipl, ist, handler, arg,
849 xname);
850 }
851
852 static void
853 e500_intr_disestablish(void *vis)
854 {
855 struct cpu_softc * const cpu = curcpu()->ci_softc;
856 struct intr_source * const is = vis;
857 struct e500_intr_irq_info ii;
858
859 KASSERT(e500_intr_sources <= is);
860 KASSERT(is < e500_intr_last_source);
861 KASSERT(!cpu_intr_p());
862
863 bool ok = e500_intr_irq_info_get(curcpu(), is->is_irq, is->is_ipl,
864 is->is_ist, &ii);
865 (void)ok; /* appease gcc */
866 KASSERT(ok);
867 KASSERT(is - e500_intr_sources == ii.irq_vector);
868
869 mutex_enter(&e500_intr_lock);
870
871 if (is->is_refcnt-- > 1) {
872 mutex_exit(&e500_intr_lock);
873 return;
874 }
875
876 /*
877 * Mask the source using the mask (MSK) bit in the vector/priority reg.
878 */
879 uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
880 openpic_write(cpu, ii.irq_vpr, VPR_MSK | vpr);
881
882 /*
883 * Wait for the Activity (A) bit for the source to be cleared.
884 */
885 while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
886 ;
887
888 /*
889 * Now the source can be modified.
890 */
891 openpic_write(cpu, ii.irq_dr, 0); /* stop delivery */
892 openpic_write(cpu, ii.irq_vpr, VPR_MSK); /* mask/reset it */
893
894 *is = (struct intr_source)INTR_SOURCE_INITIALIZER;
895
896 mutex_exit(&e500_intr_lock);
897 }
898
899 static void
900 e500_critintr(struct trapframe *tf)
901 {
902 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
903 }
904
905 static void
906 e500_decrintr(struct trapframe *tf)
907 {
908 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
909 }
910
911 static void
912 e500_fitintr(struct trapframe *tf)
913 {
914 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
915 }
916
917 static void
918 e500_wdogintr(struct trapframe *tf)
919 {
920 struct cpu_info * const ci = curcpu();
921 mtspr(SPR_TSR, TSR_ENW|TSR_WIS);
922 wdog_barked = true;
923 dump_splhist(ci, NULL);
924 dump_trapframe(tf, NULL);
925 panic("%s: tf=%p tb=%"PRId64" srr0/srr1=%#lx/%#lx"
926 " cpl=%d idepth=%d, mtxcount=%d",
927 __func__, tf, mftb(), tf->tf_srr0, tf->tf_srr1,
928 ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count);
929 }
930
931 static void
932 e500_extintr(struct trapframe *tf)
933 {
934 struct cpu_info * const ci = curcpu();
935 struct cpu_softc * const cpu = ci->ci_softc;
936 const int old_ipl = ci->ci_cpl;
937
938 /* if we paniced because of watchdog, PSL_CE will be clear. */
939 KASSERT(wdog_barked || (mfmsr() & PSL_CE));
940
941 #if 0
942 // printf("%s(%p): idepth=%d enter\n", __func__, tf, ci->ci_idepth);
943 if ((register_t)tf >= (register_t)curlwp->l_addr + USPACE
944 || (register_t)tf < (register_t)curlwp->l_addr + NBPG) {
945 printf("%s(entry): pid %d.%d (%s): srr0/srr1=%#lx/%#lx: invalid tf addr %p\n",
946 __func__, curlwp->l_proc->p_pid, curlwp->l_lid,
947 curlwp->l_proc->p_comm, tf->tf_srr0, tf->tf_srr1, tf);
948 }
949 #endif
950
951
952 ci->ci_data.cpu_nintr++;
953 tf->tf_cf.cf_idepth = ci->ci_idepth++;
954 cpu->cpu_pcpls[ci->ci_idepth] = old_ipl;
955 #if 1
956 if (mfmsr() & PSL_EE)
957 panic("%s(%p): MSR[EE] is on (%#lx)!", __func__, tf, mfmsr());
958 if (old_ipl == IPL_HIGH
959 || IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
960 panic("%s(%p): old_ipl(%u) == IPL_HIGH(%u) "
961 "|| old_ipl + %u != OPENPIC_CTPR (%u)",
962 __func__, tf, old_ipl, IPL_HIGH,
963 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
964 #else
965 if (old_ipl >= IPL_VM)
966 panic("%s(%p): old_ipl(%u) >= IPL_VM(%u) CTPR=%u",
967 __func__, tf, old_ipl, IPL_VM, openpic_read(cpu, OPENPIC_CTPR));
968 #endif
969
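	/*
	 * Acknowledge and dispatch pending interrupts until the openpic
	 * returns the spurious vector (or until we unwind a nested
	 * interrupt, see below).
	 */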
970 for (;;) {
971 /*
972 * Find out the pending interrupt.
973 */
974 KASSERTMSG((mfmsr() & PSL_EE) == 0,
975 "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
976 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
977 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
978 __func__, tf, __LINE__, old_ipl,
979 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
980 const uint32_t iack = openpic_read(cpu, OPENPIC_IACK);
981 #ifdef DIAGNOSTIC
982 const int ipl = iack & 0xf;
983 #endif
984 const int irq = (iack >> 4) - 1;
985 #if 0
986 printf("%s: iack=%d ipl=%d irq=%d <%s>\n",
987 __func__, iack, ipl, irq,
988 (iack != IRQ_SPURIOUS ?
989 cpu->cpu_evcnt_intrs[irq].ev_name : "spurious"));
990 #endif
991 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
992 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
993 __func__, tf, __LINE__, old_ipl,
994 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
995 if (iack == IRQ_SPURIOUS)
996 break;
997
998 struct intr_source * const is = &e500_intr_sources[irq];
999 if (__predict_true(is < e500_intr_last_source)) {
1000 /*
1001 			 * Timer interrupts get their argument overridden with
1002 * the pointer to the trapframe.
1003 */
1004 KASSERTMSG(is->is_ipl == ipl,
1005 "iack %#x: is %p: irq %d ipl %d != iack ipl %d",
1006 iack, is, irq, is->is_ipl, ipl);
1007 void *arg = (is->is_ist == IST_TIMER ? tf : is->is_arg);
1008 if (is->is_ipl <= old_ipl)
1009 panic("%s(%p): %s (%u): is->is_ipl (%u) <= old_ipl (%u)\n",
1010 __func__, tf,
1011 cpu->cpu_evcnt_intrs[irq].ev_name, irq,
1012 is->is_ipl, old_ipl);
1013 KASSERT(is->is_ipl > old_ipl);
1014 e500_splset(ci, is->is_ipl); /* change IPL */
1015 if (__predict_false(is->is_func == NULL)) {
1016 aprint_error_dev(ci->ci_dev,
1017 "interrupt from unestablished irq %d\n",
1018 irq);
1019 } else {
1020 int (*func)(void *) = is->is_func;
1021 wrtee(PSL_EE);
1022 int rv = (*func)(arg);
1023 wrtee(0);
1024 #if DEBUG > 2
1025 printf("%s: %s handler %p(%p) returned %d\n",
1026 __func__,
1027 cpu->cpu_evcnt_intrs[irq].ev_name,
1028 func, arg, rv);
1029 #endif
1030 if (rv == 0)
1031 cpu->cpu_evcnt_spurious_intr.ev_count++;
1032 }
1033 e500_splset(ci, old_ipl); /* restore IPL */
1034 cpu->cpu_evcnt_intrs[irq].ev_count++;
1035 } else {
1036 aprint_error_dev(ci->ci_dev,
1037 "interrupt from illegal irq %d\n", irq);
1038 cpu->cpu_evcnt_spurious_intr.ev_count++;
1039 }
1040 /*
1041 * If this is a nested interrupt, simply ack it and exit
1042 * because the loop we interrupted will complete looking
1043 * for interrupts.
1044 */
1045 KASSERTMSG((mfmsr() & PSL_EE) == 0,
1046 "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
1047 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1048 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1049 __func__, tf, __LINE__, old_ipl,
1050 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1051
1052 openpic_write(cpu, OPENPIC_EOI, 0);
1053 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1054 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1055 __func__, tf, __LINE__, old_ipl,
1056 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1057 if (ci->ci_idepth > 0)
1058 break;
1059 }
1060
1061 ci->ci_idepth--;
1062
1063 #ifdef __HAVE_FAST_SOFTINTS
1064 /*
1065 	 * Before exiting, process any softints pending above the IPL we return to.
1066 */
1067 const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << old_ipl);
1068 if (__predict_false(softints != 0)) {
1069 KASSERT(old_ipl < IPL_VM);
1070 e500_splset(ci, IPL_HIGH); /* pop to high */
1071 wrtee(PSL_EE); /* reenable interrupts */
1072 powerpc_softint(ci, old_ipl, /* deal with them */
1073 tf->tf_srr0);
1074 wrtee(0); /* disable interrupts */
1075 e500_splset(ci, old_ipl); /* and drop back */
1076 }
1077 #endif /* __HAVE_FAST_SOFTINTS */
1078 KASSERT(ci->ci_cpl == old_ipl);
1079
1080 /*
1081 * If we interrupted while power-saving and we need to exit idle,
1082 * we need to clear PSL_POW so we won't go back into power-saving.
1083 */
1084 if (__predict_false(tf->tf_srr1 & PSL_POW) && ci->ci_want_resched)
1085 tf->tf_srr1 &= ~PSL_POW;
1086
1087 // printf("%s(%p): idepth=%d exit\n", __func__, tf, ci->ci_idepth);
1088 }
1089
1090 static void
1091 e500_intr_init(void)
1092 {
1093 struct cpu_info * const ci = curcpu();
1094 struct cpu_softc * const cpu = ci->ci_softc;
1095 const uint32_t frr = openpic_read(cpu, OPENPIC_FRR);
1096 const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1097 // const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1098 struct intr_source *is;
1099 struct e500_intr_info * const ii = &e500_intr_info;
1100
1101 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
1102 switch (svr) {
1103 #ifdef MPC8536
1104 case SVR_MPC8536v1 >> 16:
1105 *ii = mpc8536_intr_info;
1106 break;
1107 #endif
1108 #ifdef MPC8544
1109 case SVR_MPC8544v1 >> 16:
1110 *ii = mpc8544_intr_info;
1111 break;
1112 #endif
1113 #ifdef MPC8548
1114 case SVR_MPC8543v1 >> 16:
1115 case SVR_MPC8548v1 >> 16:
1116 *ii = mpc8548_intr_info;
1117 break;
1118 #endif
1119 #ifdef MPC8555
1120 case SVR_MPC8541v1 >> 16:
1121 case SVR_MPC8555v1 >> 16:
1122 *ii = mpc8555_intr_info;
1123 break;
1124 #endif
1125 #ifdef MPC8568
1126 case SVR_MPC8568v1 >> 16:
1127 *ii = mpc8568_intr_info;
1128 break;
1129 #endif
1130 #ifdef MPC8572
1131 case SVR_MPC8572v1 >> 16:
1132 *ii = mpc8572_intr_info;
1133 break;
1134 #endif
1135 #ifdef P1023
1136 case SVR_P1017v1 >> 16:
1137 case SVR_P1023v1 >> 16:
1138 *ii = p1023_intr_info;
1139 break;
1140 #endif
1141 #ifdef P1025
1142 case SVR_P1016v1 >> 16:
1143 case SVR_P1025v1 >> 16:
1144 *ii = p1025_intr_info;
1145 break;
1146 #endif
1147 #ifdef P2020
1148 case SVR_P2010v2 >> 16:
1149 case SVR_P2020v2 >> 16:
1150 *ii = p20x0_intr_info;
1151 break;
1152 #endif
1153 default:
1154 panic("%s: don't know how to deal with SVR %#jx",
1155 __func__, (uintmax_t)mfspr(SPR_SVR));
1156 }
1157
1158 /*
1159 * Initialize interrupt handler lock
1160 */
1161 mutex_init(&e500_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
1162
1163 /*
1164 * We need to be in mixed mode.
1165 */
1166 openpic_write(cpu, OPENPIC_GCR, GCR_M);
1167
1168 /*
1169 	 * Make sure we and the openpic agree about the current SPL level.
1170 */
1171 e500_splset(ci, ci->ci_cpl);
1172
1173 /*
1174 	 * Allocate the required number of interrupt sources.
1175 */
1176 is = kmem_zalloc(nirq * sizeof(*is), KM_SLEEP);
1177 e500_intr_sources = is;
1178 e500_intr_last_source = is + nirq;
1179
1180 /*
1181 * Initialize all the external interrupts as active low.
1182 */
1183 for (u_int irq = 0; irq < e500_intr_info.ii_external_sources; irq++) {
1184 openpic_write(cpu, OPENPIC_EIVPR(irq),
1185 VPR_VECTOR_MAKE(irq) | VPR_LEVEL_LOW);
1186 }
1187 }
1188
1189 static void
1190 e500_intr_init_precpu(void)
1191 {
1192 struct cpu_info const *ci = curcpu();
1193 struct cpu_softc * const cpu = ci->ci_softc;
1194 bus_addr_t dr;
1195
1196 /*
1197 	 * Each timer's DR initially delivers to cpu0; clear it to stop delivery.
1198 */
1199 for (u_int irq = 0; irq < e500_intr_info.ii_timer_sources; irq++) {
1200 dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
1201 openpic_write(cpu, dr, 0); /* stop delivery */
1202 }
1203 }
1204
1205 static void
1206 e500_idlespin(void)
1207 {
1208 KASSERTMSG(curcpu()->ci_cpl == IPL_NONE,
1209 "%s: cpu%u: ci_cpl (%d) != 0", __func__, cpu_number(),
1210 curcpu()->ci_cpl);
1211 KASSERTMSG(CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)) == IPL_NONE,
1212 "%s: cpu%u: CTPR (%d) != IPL_NONE", __func__, cpu_number(),
1213 CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)));
1214 KASSERT(mfmsr() & PSL_EE);
1215
1216 if (powersave > 0)
1217 mtmsr(mfmsr() | PSL_POW);
1218 }
1219
1220 static void
1221 e500_intr_cpu_attach(struct cpu_info *ci)
1222 {
1223 struct cpu_softc * const cpu = ci->ci_softc;
1224 const char * const xname = device_xname(ci->ci_dev);
1225
1226 const u_int32_t frr = openpic_read(cpu, OPENPIC_FRR);
1227 const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1228 // const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1229
1230 const struct e500_intr_info * const info = &e500_intr_info;
1231
1232 cpu->cpu_clock_gtbcr = OPENPIC_GTBCR(ci->ci_cpuid, E500_CLOCK_TIMER);
1233
1234 cpu->cpu_evcnt_intrs =
1235 kmem_zalloc(nirq * sizeof(cpu->cpu_evcnt_intrs[0]), KM_SLEEP);
1236
1237 struct evcnt *evcnt = cpu->cpu_evcnt_intrs;
1238 for (size_t j = 0; j < info->ii_external_sources; j++, evcnt++) {
1239 const char *name = e500_intr_external_name_lookup(j);
1240 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, xname, name);
1241 }
1242 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_ONCHIP]);
1243 for (size_t j = 0; j < info->ii_onchip_sources; j++, evcnt++) {
1244 if (info->ii_onchip_bitmap[j / 32] & __BIT(j & 31)) {
1245 const char *name = e500_intr_onchip_name_lookup(j);
1246 if (name != NULL) {
1247 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1248 NULL, xname, name);
1249 #ifdef DIAGNOSTIC
1250 } else {
1251 printf("%s: missing evcnt for onchip irq %zu\n",
1252 __func__, j);
1253 #endif
1254 }
1255 }
1256 }
1257
1258 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_MSIGROUP]);
1259 for (size_t j = 0; j < info->ii_msigroup_sources; j++, evcnt++) {
1260 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1261 NULL, xname, e500_msigroup_intr_names[j].in_name);
1262 }
1263
1264 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_TIMER]);
1265 evcnt += ci->ci_cpuid * info->ii_percpu_sources;
1266 for (size_t j = 0; j < info->ii_timer_sources; j++, evcnt++) {
1267 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1268 NULL, xname, e500_timer_intr_names[j].in_name);
1269 }
1270
1271 for (size_t j = 0; j < info->ii_ipi_sources; j++, evcnt++) {
1272 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1273 NULL, xname, e500_ipi_intr_names[j].in_name);
1274 }
1275
1276 for (size_t j = 0; j < info->ii_mi_sources; j++, evcnt++) {
1277 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1278 NULL, xname, e500_mi_intr_names[j].in_name);
1279 }
1280
1281 ci->ci_idlespin = e500_idlespin;
1282 }
1283
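/*
 * Send an IPI: record the message bits in each target's ci_pending_ipis
 * and write the destination cpu mask to IPI dispatch register 0, which
 * raises IPI 0 on every cpu in the mask.
 */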
1284 static void
1285 e500_intr_cpu_send_ipi(cpuid_t target, uint32_t ipimsg)
1286 {
1287 struct cpu_info * const ci = curcpu();
1288 struct cpu_softc * const cpu = ci->ci_softc;
1289 uint32_t dstmask;
1290
1291 if (target >= CPU_MAXNUM) {
1292 CPU_INFO_ITERATOR cii;
1293 struct cpu_info *dst_ci;
1294
1295 KASSERT(target == IPI_DST_NOTME || target == IPI_DST_ALL);
1296
1297 dstmask = 0;
1298 for (CPU_INFO_FOREACH(cii, dst_ci)) {
1299 if (target == IPI_DST_ALL || ci != dst_ci) {
1300 				dstmask |= 1 << cpu_index(dst_ci);
1301 if (ipimsg)
1302 atomic_or_32(&dst_ci->ci_pending_ipis,
1303 ipimsg);
1304 }
1305 }
1306 } else {
1307 struct cpu_info * const dst_ci = cpu_lookup(target);
1308 KASSERT(dst_ci != NULL);
1309 KASSERTMSG(target == cpu_index(dst_ci),
1310 "%s: target (%lu) != cpu_index(cpu%u)",
1311 __func__, target, cpu_index(dst_ci));
1312 dstmask = (1 << target);
1313 if (ipimsg)
1314 atomic_or_32(&dst_ci->ci_pending_ipis, ipimsg);
1315 }
1316
1317 openpic_write(cpu, OPENPIC_IPIDR(0), dstmask);
1318 }
1319
1320 typedef void (*ipifunc_t)(void);
1321
1322 #ifdef __HAVE_PREEMPTION
1323 static void
1324 e500_ipi_kpreempt(void)
1325 {
1326 	powerpc_softint_trigger(1 << IPL_NONE);
1327 }
1328 #endif
1329
1330 static void
1331 e500_ipi_suspend(void)
1332 {
1333
1334 #ifdef MULTIPROCESSOR
1335 cpu_pause(NULL);
1336 #endif /* MULTIPROCESSOR */
1337 }
1338
1339 static void
1340 e500_ipi_ast(void)
1341 {
1342 curcpu()->ci_onproc->l_md.md_astpending = 1;
1343 }
1344
1345 static const ipifunc_t e500_ipifuncs[] = {
1346 [ilog2(IPI_XCALL)] = xc_ipi_handler,
1347 [ilog2(IPI_GENERIC)] = ipi_cpu_handler,
1348 [ilog2(IPI_HALT)] = e500_ipi_halt,
1349 #ifdef __HAVE_PREEMPTION
1350 [ilog2(IPI_KPREEMPT)] = e500_ipi_kpreempt,
1351 #endif
1352 [ilog2(IPI_TLB1SYNC)] = e500_tlb1_sync,
1353 [ilog2(IPI_SUSPEND)] = e500_ipi_suspend,
1354 [ilog2(IPI_AST)] = e500_ipi_ast,
1355 };
1356
1357 static int
1358 e500_ipi_intr(void *v)
1359 {
1360 struct cpu_info * const ci = curcpu();
1361
1362 ci->ci_ev_ipi.ev_count++;
1363
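	/*
	 * Atomically collect the pending IPI bits, then dispatch each set
	 * bit from most- to least-significant using a clz-based scan.
	 */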
1364 uint32_t pending_ipis = atomic_swap_32(&ci->ci_pending_ipis, 0);
1365 for (u_int ipi = 31; pending_ipis != 0; ipi--, pending_ipis <<= 1) {
1366 const u_int bits = __builtin_clz(pending_ipis);
1367 ipi -= bits;
1368 pending_ipis <<= bits;
1369 KASSERT(e500_ipifuncs[ipi] != NULL);
1370 (*e500_ipifuncs[ipi])();
1371 }
1372
1373 return 1;
1374 }
1375
1376 static void
1377 e500_intr_cpu_hatch(struct cpu_info *ci)
1378 {
1379 char iname[INTRIDBUF];
1380
1381 /* Initialize percpu interrupts. */
1382 e500_intr_init_precpu();
1383
1384 /*
1385 * Establish clock interrupt for this CPU.
1386 */
1387 snprintf(iname, sizeof(iname), "%s clock", device_xname(ci->ci_dev));
1388 if (e500_intr_cpu_establish(ci, E500_CLOCK_TIMER, IPL_CLOCK, IST_TIMER,
1389 e500_clock_intr, NULL, iname) == NULL)
1390 panic("%s: failed to establish clock interrupt!", __func__);
1391
1392 /*
1393 * Establish the IPI interrupts for this CPU.
1394 */
1395 if (e500_intr_cpu_establish(ci, 0, IPL_VM, IST_IPI, e500_ipi_intr,
1396 NULL, "ipi") == NULL)
1397 panic("%s: failed to establish ipi interrupt!", __func__);
1398
1399 /*
1400 * Enable watchdog interrupts.
1401 */
1402 uint32_t tcr = mfspr(SPR_TCR);
1403 tcr |= TCR_WIE;
1404 mtspr(SPR_TCR, tcr);
1405 }
1406
1407 static const char *
1408 e500_intr_all_name_lookup(int irq, int ist)
1409 {
1410 const struct e500_intr_info * const info = &e500_intr_info;
1411
1412 switch (ist) {
1413 default:
1414 if (irq < info->ii_external_sources &&
1415 (ist == IST_EDGE ||
1416 ist == IST_LEVEL_LOW ||
1417 ist == IST_LEVEL_HIGH))
1418 return e500_intr_name_lookup(
1419 info->ii_external_intr_names, irq);
1420 break;
1421
1422 case IST_PULSE:
1423 break;
1424
1425 case IST_ONCHIP:
1426 if (irq < info->ii_onchip_sources)
1427 return e500_intr_onchip_name_lookup(irq);
1428 break;
1429
1430 case IST_MSIGROUP:
1431 if (irq < info->ii_msigroup_sources)
1432 return e500_intr_name_lookup(e500_msigroup_intr_names,
1433 irq);
1434 break;
1435
1436 case IST_TIMER:
1437 if (irq < info->ii_timer_sources)
1438 return e500_intr_name_lookup(e500_timer_intr_names,
1439 irq);
1440 break;
1441
1442 case IST_IPI:
1443 if (irq < info->ii_ipi_sources)
1444 return e500_intr_name_lookup(e500_ipi_intr_names, irq);
1445 break;
1446
1447 case IST_MI:
1448 if (irq < info->ii_mi_sources)
1449 return e500_intr_name_lookup(e500_mi_intr_names, irq);
1450 break;
1451 }
1452
1453 return NULL;
1454 }
1455
1456 static void
1457 e500_intr_get_affinity(struct intr_source *is, kcpuset_t *cpuset)
1458 {
1459 struct cpu_info * const ci = curcpu();
1460 struct cpu_softc * const cpu = ci->ci_softc;
1461 struct e500_intr_irq_info ii;
1462
1463 kcpuset_zero(cpuset);
1464
1465 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) {
1466 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl,
1467 is->is_ist, &ii)) {
1468 uint32_t dr = openpic_read(cpu, ii.irq_dr);
1469 while (dr != 0) {
1470 u_int n = ffs(dr);
1471 if (n-- == 0)
1472 break;
1473 dr &= ~(1 << n);
1474 kcpuset_set(cpuset, n);
1475 }
1476 }
1477 }
1478 }
1479
1480 static int
1481 e500_intr_set_affinity(struct intr_source *is, const kcpuset_t *cpuset)
1482 {
1483 struct cpu_info * const ci = curcpu();
1484 struct cpu_softc * const cpu = ci->ci_softc;
1485 struct e500_intr_irq_info ii;
1486 uint32_t ecpuset, tcpuset;
1487
1488 KASSERT(mutex_owned(&cpu_lock));
1489 KASSERT(mutex_owned(&e500_intr_lock));
1490 KASSERT(!kcpuset_iszero(cpuset));
1491
1492 kcpuset_export_u32(cpuset, &ecpuset, sizeof(ecpuset));
1493 tcpuset = ecpuset;
1494 while (tcpuset != 0) {
1495 u_int cpu_idx = ffs(tcpuset);
1496 if (cpu_idx-- == 0)
1497 break;
1498
1499 tcpuset &= ~(1 << cpu_idx);
1500 struct cpu_info * const newci = cpu_lookup(cpu_idx);
1501 if (newci == NULL)
1502 return EINVAL;
1503 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
1504 return EINVAL;
1505 }
1506
1507 if (!e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist,
1508 &ii))
1509 return ENXIO;
1510
1511 /*
1512 * Update the vector/priority and destination registers keeping the
1513 * interrupt masked.
1514 */
1515 const register_t msr = wrtee(0); /* disable interrupts */
1516
1517 uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
1518 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
1519
1520 /*
1521 * Wait for the Activity (A) bit for the source to be cleared.
1522 */
1523 while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
1524 continue;
1525
1526 /*
1527 * Update destination register
1528 */
1529 openpic_write(cpu, ii.irq_dr, ecpuset);
1530
1531 /*
1532 * Now unmask the interrupt.
1533 */
1534 openpic_write(cpu, ii.irq_vpr, vpr);
1535
1536 wrtee(msr); /* re-enable interrupts */
1537
1538 return 0;
1539 }
1540
1541 static bool
1542 e500_intr_is_affinity_intrsource(struct intr_source *is,
1543 const kcpuset_t *cpuset)
1544 {
1545 struct cpu_info * const ci = curcpu();
1546 struct cpu_softc * const cpu = ci->ci_softc;
1547 struct e500_intr_irq_info ii;
1548 bool result = false;
1549
1550 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) {
1551 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl,
1552 is->is_ist, &ii)) {
1553 uint32_t dr = openpic_read(cpu, ii.irq_dr);
1554 while (dr != 0 && !result) {
1555 u_int n = ffs(dr);
1556 if (n-- == 0)
1557 break;
1558 dr &= ~(1 << n);
1559 result = kcpuset_isset(cpuset, n);
1560 }
1561 }
1562 }
1563 return result;
1564 }
1565
1566 static struct intr_source *
1567 e500_intr_get_source(const char *intrid)
1568 {
1569 struct intr_source *is;
1570
1571 mutex_enter(&e500_intr_lock);
1572 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) {
1573 if (is->is_source[0] == '\0')
1574 continue;
1575
1576 if (!strncmp(intrid, is->is_source, sizeof(is->is_source) - 1))
1577 break;
1578 }
1579 if (is == e500_intr_last_source)
1580 is = NULL;
1581 mutex_exit(&e500_intr_lock);
1582 return is;
1583 }
1584
1585 uint64_t
1586 interrupt_get_count(const char *intrid, u_int cpu_idx)
1587 {
1588 struct cpu_info * const ci = cpu_lookup(cpu_idx);
1589 struct cpu_softc * const cpu = ci->ci_softc;
1590 struct intr_source *is;
1591 struct e500_intr_irq_info ii;
1592
1593 is = e500_intr_get_source(intrid);
1594 if (is == NULL)
1595 return 0;
1596
1597 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, &ii))
1598 return cpu->cpu_evcnt_intrs[ii.irq_vector].ev_count;
1599 return 0;
1600 }
1601
1602 void
1603 interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
1604 {
1605 struct intr_source *is;
1606
1607 kcpuset_zero(cpuset);
1608
1609 is = e500_intr_get_source(intrid);
1610 if (is == NULL)
1611 return;
1612
1613 mutex_enter(&e500_intr_lock);
1614 e500_intr_get_affinity(is, cpuset);
1615 mutex_exit(&e500_intr_lock);
1616 }
1617
1618 void
1619 interrupt_get_available(kcpuset_t *cpuset)
1620 {
1621 CPU_INFO_ITERATOR cii;
1622 struct cpu_info *ci;
1623
1624 kcpuset_zero(cpuset);
1625
1626 mutex_enter(&cpu_lock);
1627 for (CPU_INFO_FOREACH(cii, ci)) {
1628 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
1629 kcpuset_set(cpuset, cpu_index(ci));
1630 }
1631 mutex_exit(&cpu_lock);
1632 }
1633
1634 void
1635 interrupt_get_devname(const char *intrid, char *buf, size_t len)
1636 {
1637 struct intr_source *is;
1638
1639 if (len == 0)
1640 return;
1641
1642 buf[0] = '\0';
1643
1644 is = e500_intr_get_source(intrid);
1645 if (is != NULL)
1646 strlcpy(buf, is->is_xname, len);
1647 }
1648
1649 struct intrids_handler *
1650 interrupt_construct_intrids(const kcpuset_t *cpuset)
1651 {
1652 struct intr_source *is;
1653 struct intrids_handler *ii_handler;
1654 intrid_t *ids;
1655 int i, n;
1656
1657 if (kcpuset_iszero(cpuset))
1658 return NULL;
1659
1660 n = 0;
1661 mutex_enter(&e500_intr_lock);
1662 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) {
1663 if (e500_intr_is_affinity_intrsource(is, cpuset))
1664 ++n;
1665 }
1666 mutex_exit(&e500_intr_lock);
1667
1668 const size_t alloc_size = sizeof(int) + sizeof(intrid_t) * n;
1669 ii_handler = kmem_zalloc(alloc_size, KM_SLEEP);
1670 ii_handler->iih_nids = n;
1671 if (n == 0)
1672 return ii_handler;
1673
1674 ids = ii_handler->iih_intrids;
1675 mutex_enter(&e500_intr_lock);
1676 for (i = 0, is = e500_intr_sources;
1677 i < n && is < e500_intr_last_source;
1678 ++is) {
1679 if (!e500_intr_is_affinity_intrsource(is, cpuset))
1680 continue;
1681
1682 if (is->is_source[0] != '\0') {
1683 strlcpy(ids[i], is->is_source, sizeof(ids[0]));
1684 ++i;
1685 }
1686 }
1687 mutex_exit(&e500_intr_lock);
1688
1689 return ii_handler;
1690 }
1691
1692 void
1693 interrupt_destruct_intrids(struct intrids_handler *ii_handler)
1694 {
1695 size_t iih_size;
1696
1697 if (ii_handler == NULL)
1698 return;
1699
1700 iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
1701 kmem_free(ii_handler, iih_size);
1702 }
1703
1704 static int
1705 interrupt_distribute_locked(struct intr_source *is, const kcpuset_t *newset,
1706 kcpuset_t *oldset)
1707 {
1708 int error;
1709
1710 KASSERT(mutex_owned(&cpu_lock));
1711
1712 if (is->is_ipl == IPL_NONE || IST_PERCPU_P(is->is_ist))
1713 return EINVAL;
1714
1715 mutex_enter(&e500_intr_lock);
1716 if (oldset != NULL)
1717 e500_intr_get_affinity(is, oldset);
1718 error = e500_intr_set_affinity(is, newset);
1719 mutex_exit(&e500_intr_lock);
1720
1721 return error;
1722 }
1723
1724 int
1725 interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
1726 {
1727 int error;
1728
1729 mutex_enter(&cpu_lock);
1730 error = interrupt_distribute_locked(ich, newset, oldset);
1731 mutex_exit(&cpu_lock);
1732
1733 return error;
1734 }
1735
1736 int
1737 interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
1738 kcpuset_t *oldset)
1739 {
1740 struct intr_source *is;
1741 int error;
1742
1743 is = e500_intr_get_source(intrid);
1744 if (is != NULL) {
1745 mutex_enter(&cpu_lock);
1746 error = interrupt_distribute_locked(is, newset, oldset);
1747 mutex_exit(&cpu_lock);
1748 } else
1749 error = ENOENT;
1750
1751 return error;
1752 }
1753