1 /* $NetBSD: e500_intr.c,v 1.39 2019/11/23 19:40:36 ad Exp $ */
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_mpc85xx.h"
38 #include "opt_multiprocessor.h"
39 #include "opt_ddb.h"
40
41 #define __INTR_PRIVATE
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: e500_intr.c,v 1.39 2019/11/23 19:40:36 ad Exp $");
45
46 #include <sys/param.h>
47 #include <sys/proc.h>
48 #include <sys/intr.h>
49 #include <sys/cpu.h>
50 #include <sys/kmem.h>
51 #include <sys/atomic.h>
52 #include <sys/bus.h>
53 #include <sys/xcall.h>
54 #include <sys/ipi.h>
55 #include <sys/bitops.h>
56 #include <sys/interrupt.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #ifdef __HAVE_FAST_SOFTINTS
61 #include <powerpc/softint.h>
62 #endif
63
64 #include <powerpc/spr.h>
65 #include <powerpc/booke/spr.h>
66
67 #include <powerpc/booke/cpuvar.h>
68 #include <powerpc/booke/e500reg.h>
69 #include <powerpc/booke/e500var.h>
70 #include <powerpc/booke/openpicreg.h>
71
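/*
 * The openpic's current task priority register (CTPR) holds a 4-bit
 * priority (0..15).  IPL_HIGH is mapped to priority 15, and lower IPLs
 * map to correspondingly lower priorities.
 */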
72 #define IPL2CTPR(ipl) ((ipl) + 15 - IPL_HIGH)
73 #define CTPR2IPL(ctpr) ((ctpr) - (15 - IPL_HIGH))
74
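/*
 * Timer, IPI, and message (MI) interrupts are per-cpu sources; every
 * IST at or above IST_TIMER is treated as per-cpu.
 */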
75 #define IST_PERCPU_P(ist) ((ist) >= IST_TIMER)
76
77 struct e500_intr_irq_info {
78 bus_addr_t irq_vpr;
79 bus_addr_t irq_dr;
80 u_int irq_vector;
81 };
82
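/*
 * There is one intr_source per openpic vector; e500_intr_sources[] is
 * indexed by the vector number computed by e500_intr_irq_info_get().
 */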
83 struct intr_source {
84 int (*is_func)(void *);
85 void *is_arg;
86 int8_t is_ipl;
87 uint8_t is_ist;
88 uint8_t is_irq;
89 uint8_t is_refcnt;
90 bus_size_t is_vpr;
91 bus_size_t is_dr;
92 char is_source[INTRIDBUF];
93 char is_xname[INTRDEVNAMEBUF];
94 };
95
96 #define INTR_SOURCE_INITIALIZER \
97 { .is_func = e500_intr_spurious, .is_arg = NULL, \
98 .is_irq = -1, .is_ipl = IPL_NONE, .is_ist = IST_NONE, \
99 .is_source = "", .is_xname = "", }
100
101 struct e500_intr_name {
102 uint8_t in_irq;
103 const char in_name[15];
104 };
105
106 static const struct e500_intr_name e500_onchip_intr_names[] = {
107 { ISOURCE_L2, "l2" },
108 { ISOURCE_ECM, "ecm" },
109 { ISOURCE_DDR, "ddr" },
110 { ISOURCE_LBC, "lbc" },
111 { ISOURCE_DMA_CHAN1, "dma-chan1" },
112 { ISOURCE_DMA_CHAN2, "dma-chan2" },
113 { ISOURCE_DMA_CHAN3, "dma-chan3" },
114 { ISOURCE_DMA_CHAN4, "dma-chan4" },
115 { ISOURCE_PCI1, "pci1" },
116 { ISOURCE_PCIEX2, "pcie2" },
117 { ISOURCE_PCIEX , "pcie1" },
118 { ISOURCE_PCIEX3, "pcie3" },
119 { ISOURCE_USB1, "usb1" },
120 { ISOURCE_ETSEC1_TX, "etsec1-tx" },
121 { ISOURCE_ETSEC1_RX, "etsec1-rx" },
122 { ISOURCE_ETSEC3_TX, "etsec3-tx" },
123 { ISOURCE_ETSEC3_RX, "etsec3-rx" },
124 { ISOURCE_ETSEC3_ERR, "etsec3-err" },
125 { ISOURCE_ETSEC1_ERR, "etsec1-err" },
126 { ISOURCE_ETSEC2_TX, "etsec2-tx" },
127 { ISOURCE_ETSEC2_RX, "etsec2-rx" },
128 { ISOURCE_ETSEC4_TX, "etsec4-tx" },
129 { ISOURCE_ETSEC4_RX, "etsec4-rx" },
130 { ISOURCE_ETSEC4_ERR, "etsec4-err" },
131 { ISOURCE_ETSEC2_ERR, "etsec2-err" },
132 { ISOURCE_DUART, "duart" },
133 { ISOURCE_I2C, "i2c" },
134 { ISOURCE_PERFMON, "perfmon" },
135 { ISOURCE_SECURITY1, "sec1" },
136 { ISOURCE_GPIO, "gpio" },
137 { ISOURCE_SRIO_EWPU, "srio-ewpu" },
138 { ISOURCE_SRIO_ODBELL, "srio-odbell" },
139 { ISOURCE_SRIO_IDBELL, "srio-idbell" },
140 { ISOURCE_SRIO_OMU1, "srio-omu1" },
141 { ISOURCE_SRIO_IMU1, "srio-imu1" },
142 { ISOURCE_SRIO_OMU2, "srio-omu2" },
143 { ISOURCE_SRIO_IMU2, "srio-imu2" },
144 { ISOURCE_SECURITY2, "sec2" },
145 { ISOURCE_SPI, "spi" },
146 { ISOURCE_ETSEC1_PTP, "etsec1-ptp" },
147 { ISOURCE_ETSEC2_PTP, "etsec2-ptp" },
148 { ISOURCE_ETSEC3_PTP, "etsec3-ptp" },
149 { ISOURCE_ETSEC4_PTP, "etsec4-ptp" },
150 { ISOURCE_ESDHC, "esdhc" },
151 { 0, "" },
152 };
153
154 const struct e500_intr_name default_external_intr_names[] = {
155 { 0, "" },
156 };
157
158 static const struct e500_intr_name e500_msigroup_intr_names[] = {
159 { 0, "msigroup0" },
160 { 1, "msigroup1" },
161 { 2, "msigroup2" },
162 { 3, "msigroup3" },
163 { 4, "msigroup4" },
164 { 5, "msigroup5" },
165 { 6, "msigroup6" },
166 { 7, "msigroup7" },
167 { 0, "" },
168 };
169
170 static const struct e500_intr_name e500_timer_intr_names[] = {
171 { 0, "timer0" },
172 { 1, "timer1" },
173 { 2, "timer2" },
174 { 3, "timer3" },
175 { 0, "" },
176 };
177
178 static const struct e500_intr_name e500_ipi_intr_names[] = {
179 { 0, "ipi0" },
180 { 1, "ipi1" },
181 { 2, "ipi2" },
182 { 3, "ipi3" },
183 { 0, "" },
184 };
185
186 static const struct e500_intr_name e500_mi_intr_names[] = {
187 { 0, "mi0" },
188 { 1, "mi1" },
189 { 2, "mi2" },
190 { 3, "mi3" },
191 { 0, "" },
192 };
193
194 struct e500_intr_info {
195 u_int ii_external_sources;
196 uint32_t ii_onchip_bitmap[2];
197 u_int ii_onchip_sources;
198 u_int ii_msigroup_sources;
199 u_int ii_ipi_sources; /* per-cpu */
200 u_int ii_timer_sources; /* per-cpu */
201 u_int ii_mi_sources; /* per-cpu */
202 u_int ii_percpu_sources;
203 const struct e500_intr_name *ii_external_intr_names;
204 const struct e500_intr_name *ii_onchip_intr_names;
205 u_int8_t ii_ist_vectors[IST_MAX+1];
206 };
207
208 static kmutex_t e500_intr_lock __cacheline_aligned;
209 static struct e500_intr_info e500_intr_info;
210
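/*
 * Build a per-chip e500_intr_info.  ii_ist_vectors[] records the base
 * vector for each IST; vectors are laid out as external, on-chip,
 * MSI-group, timer, IPI, and message sources, in that order.
 */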
211 #define INTR_INFO_DECL(lc_chip, UC_CHIP) \
212 static const struct e500_intr_info lc_chip##_intr_info = { \
213 .ii_external_sources = UC_CHIP ## _EXTERNALSOURCES, \
214 .ii_onchip_bitmap = UC_CHIP ## _ONCHIPBITMAP, \
215 .ii_onchip_sources = UC_CHIP ## _ONCHIPSOURCES, \
216 .ii_msigroup_sources = UC_CHIP ## _MSIGROUPSOURCES, \
217 .ii_timer_sources = UC_CHIP ## _TIMERSOURCES, \
218 .ii_ipi_sources = UC_CHIP ## _IPISOURCES, \
219 .ii_mi_sources = UC_CHIP ## _MISOURCES, \
220 .ii_percpu_sources = UC_CHIP ## _TIMERSOURCES \
221 + UC_CHIP ## _IPISOURCES + UC_CHIP ## _MISOURCES, \
222 .ii_external_intr_names = lc_chip ## _external_intr_names, \
223 .ii_onchip_intr_names = lc_chip ## _onchip_intr_names, \
224 .ii_ist_vectors = { \
225 [IST_NONE] = ~0, \
226 [IST_EDGE] = 0, \
227 [IST_LEVEL_LOW] = 0, \
228 [IST_LEVEL_HIGH] = 0, \
229 [IST_PULSE] = 0, \
230 [IST_ONCHIP] = UC_CHIP ## _EXTERNALSOURCES, \
231 [IST_MSIGROUP] = UC_CHIP ## _EXTERNALSOURCES \
232 + UC_CHIP ## _ONCHIPSOURCES, \
233 [IST_TIMER] = UC_CHIP ## _EXTERNALSOURCES \
234 + UC_CHIP ## _ONCHIPSOURCES \
235 + UC_CHIP ## _MSIGROUPSOURCES, \
236 [IST_IPI] = UC_CHIP ## _EXTERNALSOURCES \
237 + UC_CHIP ## _ONCHIPSOURCES \
238 + UC_CHIP ## _MSIGROUPSOURCES \
239 + UC_CHIP ## _TIMERSOURCES, \
240 [IST_MI] = UC_CHIP ## _EXTERNALSOURCES \
241 + UC_CHIP ## _ONCHIPSOURCES \
242 + UC_CHIP ## _MSIGROUPSOURCES \
243 + UC_CHIP ## _TIMERSOURCES \
244 + UC_CHIP ## _IPISOURCES, \
245 [IST_MAX] = UC_CHIP ## _EXTERNALSOURCES \
246 + UC_CHIP ## _ONCHIPSOURCES \
247 + UC_CHIP ## _MSIGROUPSOURCES \
248 + UC_CHIP ## _TIMERSOURCES \
249 + UC_CHIP ## _IPISOURCES \
250 + UC_CHIP ## _MISOURCES, \
251 }, \
252 }
253
254 #ifdef MPC8536
255 #define mpc8536_external_intr_names default_external_intr_names
256 const struct e500_intr_name mpc8536_onchip_intr_names[] = {
257 { ISOURCE_SATA2, "sata2" },
258 { ISOURCE_USB2, "usb2" },
259 { ISOURCE_USB3, "usb3" },
260 { ISOURCE_SATA1, "sata1" },
261 { 0, "" },
262 };
263
264 INTR_INFO_DECL(mpc8536, MPC8536);
265 #endif
266
267 #ifdef MPC8544
268 #define mpc8544_external_intr_names default_external_intr_names
269 const struct e500_intr_name mpc8544_onchip_intr_names[] = {
270 { 0, "" },
271 };
272
273 INTR_INFO_DECL(mpc8544, MPC8544);
274 #endif
275 #ifdef MPC8548
276 #define mpc8548_external_intr_names default_external_intr_names
277 const struct e500_intr_name mpc8548_onchip_intr_names[] = {
278 { ISOURCE_PCI1, "pci1" },
279 { ISOURCE_PCI2, "pci2" },
280 { 0, "" },
281 };
282
283 INTR_INFO_DECL(mpc8548, MPC8548);
284 #endif
285 #ifdef MPC8555
286 #define mpc8555_external_intr_names default_external_intr_names
287 const struct e500_intr_name mpc8555_onchip_intr_names[] = {
288 { ISOURCE_PCI2, "pci2" },
289 { ISOURCE_CPM, "CPM" },
290 { 0, "" },
291 };
292
293 INTR_INFO_DECL(mpc8555, MPC8555);
294 #endif
295 #ifdef MPC8568
296 #define mpc8568_external_intr_names default_external_intr_names
297 const struct e500_intr_name mpc8568_onchip_intr_names[] = {
298 { ISOURCE_QEB_LOW, "QEB low" },
299 { ISOURCE_QEB_PORT, "QEB port" },
300 { ISOURCE_QEB_IECC, "QEB iram ecc" },
301 { ISOURCE_QEB_MUECC, "QEB ram ecc" },
302 { ISOURCE_TLU1, "tlu1" },
303 { ISOURCE_QEB_HIGH, "QEB high" },
304 { 0, "" },
305 };
306
307 INTR_INFO_DECL(mpc8568, MPC8568);
308 #endif
309 #ifdef MPC8572
310 #define mpc8572_external_intr_names default_external_intr_names
311 const struct e500_intr_name mpc8572_onchip_intr_names[] = {
312 { ISOURCE_PCIEX3_MPC8572, "pcie3" },
313 { ISOURCE_FEC, "fec" },
314 { ISOURCE_PME_GENERAL, "pme" },
315 { ISOURCE_TLU1, "tlu1" },
316 { ISOURCE_TLU2, "tlu2" },
317 { ISOURCE_PME_CHAN1, "pme-chan1" },
318 { ISOURCE_PME_CHAN2, "pme-chan2" },
319 { ISOURCE_PME_CHAN3, "pme-chan3" },
320 { ISOURCE_PME_CHAN4, "pme-chan4" },
321 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
322 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
323 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
324 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
325 { 0, "" },
326 };
327
328 INTR_INFO_DECL(mpc8572, MPC8572);
329 #endif
330
331 #ifdef P1025
332 #define p1025_external_intr_names default_external_intr_names
333 const struct e500_intr_name p1025_onchip_intr_names[] = {
334 { ISOURCE_PCIEX3_MPC8572, "pcie3" },
335 { ISOURCE_ETSEC1_G1_TX, "etsec1-g1-tx" },
336 { ISOURCE_ETSEC1_G1_RX, "etsec1-g1-rx" },
337 { ISOURCE_ETSEC1_G1_ERR, "etsec1-g1-error" },
338 { ISOURCE_ETSEC2_G1_TX, "etsec2-g1-tx" },
339 { ISOURCE_ETSEC2_G1_RX, "etsec2-g1-rx" },
340 { ISOURCE_ETSEC2_G1_ERR, "etsec2-g1-error" },
341 { ISOURCE_ETSEC3_G1_TX, "etsec3-g1-tx" },
342 { ISOURCE_ETSEC3_G1_RX, "etsec3-g1-rx" },
343 { ISOURCE_ETSEC3_G1_ERR, "etsec3-g1-error" },
344 { ISOURCE_QEB_MUECC, "qeb-low" },
345 { ISOURCE_QEB_HIGH, "qeb-crit" },
346 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
347 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
348 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
349 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
350 { 0, "" },
351 };
352
353 INTR_INFO_DECL(p1025, P1025);
354 #endif
355
356 #ifdef P2020
357 #define p20x0_external_intr_names default_external_intr_names
358 const struct e500_intr_name p20x0_onchip_intr_names[] = {
359 { ISOURCE_PCIEX3_MPC8572, "pcie3" },
360 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
361 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
362 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
363 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
364 { 0, "" },
365 };
366
367 INTR_INFO_DECL(p20x0, P20x0);
368 #endif
369
370 #ifdef P1023
371 #define p1023_external_intr_names default_external_intr_names
372 const struct e500_intr_name p1023_onchip_intr_names[] = {
373 { ISOURCE_FMAN, "fman" },
374 { ISOURCE_MDIO, "mdio" },
375 { ISOURCE_QMAN0, "qman0" },
376 { ISOURCE_BMAN0, "bman0" },
377 { ISOURCE_QMAN1, "qman1" },
378 { ISOURCE_BMAN1, "bman1" },
379 { ISOURCE_QMAN2, "qman2" },
380 { ISOURCE_BMAN2, "bman2" },
381 { ISOURCE_SECURITY2_P1023, "sec2" },
382 { ISOURCE_SEC_GENERAL, "sec-general" },
383 { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
384 { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
385 { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
386 { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
387 { 0, "" },
388 };
389
390 INTR_INFO_DECL(p1023, P1023);
391 #endif
392
393 static const char ist_names[][12] = {
394 [IST_NONE] = "none",
395 [IST_EDGE] = "edge",
396 [IST_LEVEL_LOW] = "level-",
397 [IST_LEVEL_HIGH] = "level+",
398 [IST_PULSE] = "pulse",
399 [IST_MSI] = "msi",
400 [IST_ONCHIP] = "onchip",
401 [IST_MSIGROUP] = "msigroup",
402 [IST_TIMER] = "timer",
403 [IST_IPI] = "ipi",
404 [IST_MI] = "msgint",
405 };
406
407 static struct intr_source *e500_intr_sources;
408 static const struct intr_source *e500_intr_last_source;
409
410 static void *e500_intr_establish(int, int, int, int (*)(void *), void *,
411 const char *);
412 static void e500_intr_disestablish(void *);
413 static void e500_intr_cpu_attach(struct cpu_info *ci);
414 static void e500_intr_cpu_hatch(struct cpu_info *ci);
415 static void e500_intr_cpu_send_ipi(cpuid_t, uintptr_t);
416 static void e500_intr_init(void);
417 static void e500_intr_init_precpu(void);
418 static const char *e500_intr_string(int, int, char *, size_t);
419 static const char *e500_intr_typename(int);
420 static void e500_critintr(struct trapframe *tf);
421 static void e500_decrintr(struct trapframe *tf);
422 static void e500_extintr(struct trapframe *tf);
423 static void e500_fitintr(struct trapframe *tf);
424 static void e500_wdogintr(struct trapframe *tf);
425 static void e500_spl0(void);
426 static int e500_splraise(int);
427 static void e500_splx(int);
428 static const char *e500_intr_all_name_lookup(int, int);
429
430 const struct intrsw e500_intrsw = {
431 .intrsw_establish = e500_intr_establish,
432 .intrsw_disestablish = e500_intr_disestablish,
433 .intrsw_init = e500_intr_init,
434 .intrsw_cpu_attach = e500_intr_cpu_attach,
435 .intrsw_cpu_hatch = e500_intr_cpu_hatch,
436 .intrsw_cpu_send_ipi = e500_intr_cpu_send_ipi,
437 .intrsw_string = e500_intr_string,
438 .intrsw_typename = e500_intr_typename,
439
440 .intrsw_critintr = e500_critintr,
441 .intrsw_decrintr = e500_decrintr,
442 .intrsw_extintr = e500_extintr,
443 .intrsw_fitintr = e500_fitintr,
444 .intrsw_wdogintr = e500_wdogintr,
445
446 .intrsw_splraise = e500_splraise,
447 .intrsw_splx = e500_splx,
448 .intrsw_spl0 = e500_spl0,
449
450 #ifdef __HAVE_FAST_SOFTINTS
451 .intrsw_softint_init_md = powerpc_softint_init_md,
452 .intrsw_softint_trigger = powerpc_softint_trigger,
453 #endif
454 };
455
456 static bool wdog_barked;
457
458 static inline uint32_t
459 openpic_read(struct cpu_softc *cpu, bus_size_t offset)
460 {
461
462 return bus_space_read_4(cpu->cpu_bst, cpu->cpu_bsh,
463 OPENPIC_BASE + offset);
464 }
465
466 static inline void
467 openpic_write(struct cpu_softc *cpu, bus_size_t offset, uint32_t val)
468 {
469
470 return bus_space_write_4(cpu->cpu_bst, cpu->cpu_bsh,
471 OPENPIC_BASE + offset, val);
472 }
473
474 static const char *
475 e500_intr_external_name_lookup(int irq)
476 {
477 prop_array_t extirqs = board_info_get_object("external-irqs");
478 prop_string_t irqname = prop_array_get(extirqs, irq);
479 KASSERT(irqname != NULL);
480 KASSERT(prop_object_type(irqname) == PROP_TYPE_STRING);
481
482 return prop_string_cstring_nocopy(irqname);
483 }
484
485 static const char *
486 e500_intr_name_lookup(const struct e500_intr_name *names, int irq)
487 {
488 for (; names->in_name[0] != '\0'; names++) {
489 if (names->in_irq == irq)
490 return names->in_name;
491 }
492
493 return NULL;
494 }
495
496 static const char *
497 e500_intr_onchip_name_lookup(int irq)
498 {
499 const char *name;
500
501 name = e500_intr_name_lookup(e500_intr_info.ii_onchip_intr_names, irq);
502 if (name == NULL)
503 name = e500_intr_name_lookup(e500_onchip_intr_names, irq);
504
505 return name;
506 }
507
508 static inline void
509 e500_splset(struct cpu_info *ci, int ipl)
510 {
511 struct cpu_softc * const cpu = ci->ci_softc;
512
513 KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE);
514 const u_int ctpr = IPL2CTPR(ipl);
515 KASSERT(openpic_read(cpu, OPENPIC_CTPR) == IPL2CTPR(ci->ci_cpl));
516 openpic_write(cpu, OPENPIC_CTPR, ctpr);
517 KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ctpr);
518 #ifdef DIAGNOSTIC
519 cpu->cpu_spl_tb[ipl][ci->ci_cpl] = mftb();
520 #endif
521 ci->ci_cpl = ipl;
522 }
523
524 static void
525 e500_spl0(void)
526 {
527 wrtee(0);
528
529 struct cpu_info * const ci = curcpu();
530
531 #ifdef __HAVE_FAST_SOFTINTS
532 if (__predict_false(ci->ci_data.cpu_softints != 0)) {
533 e500_splset(ci, IPL_HIGH);
534 wrtee(PSL_EE);
535 powerpc_softint(ci, IPL_NONE,
536 (vaddr_t)__builtin_return_address(0));
537 wrtee(0);
538 }
539 #endif /* __HAVE_FAST_SOFTINTS */
540 e500_splset(ci, IPL_NONE);
541
542 wrtee(PSL_EE);
543 }
544
545 static void
546 e500_splx(int ipl)
547 {
548 struct cpu_info * const ci = curcpu();
549 const int old_ipl = ci->ci_cpl;
550
 551 	/* if we panicked because of the watchdog, PSL_CE will be clear. */
552 KASSERT(wdog_barked || (mfmsr() & PSL_CE));
553
554 if (ipl == old_ipl)
555 return;
556
557 if (__predict_false(ipl > old_ipl)) {
558 printf("%s: %p: cpl=%u: ignoring splx(%u) to raise ipl\n",
559 __func__, __builtin_return_address(0), old_ipl, ipl);
560 if (old_ipl == IPL_NONE)
561 Debugger();
562 }
563
564 // const
565 register_t msr = wrtee(0);
566 #ifdef __HAVE_FAST_SOFTINTS
567 const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << ipl);
568 if (__predict_false(softints != 0)) {
569 e500_splset(ci, IPL_HIGH);
570 wrtee(msr);
571 powerpc_softint(ci, ipl,
572 (vaddr_t)__builtin_return_address(0));
573 wrtee(0);
574 }
575 #endif /* __HAVE_FAST_SOFTINTS */
576 e500_splset(ci, ipl);
577 #if 1
578 if (ipl < IPL_VM && old_ipl >= IPL_VM)
579 msr = PSL_EE;
580 #endif
581 wrtee(msr);
582 }
583
584 static int
585 e500_splraise(int ipl)
586 {
587 struct cpu_info * const ci = curcpu();
588 const int old_ipl = ci->ci_cpl;
589
 590 	/* if we panicked because of the watchdog, PSL_CE will be clear. */
591 KASSERT(wdog_barked || (mfmsr() & PSL_CE));
592
593 if (old_ipl < ipl) {
594 //const
595 register_t msr = wrtee(0);
596 e500_splset(ci, ipl);
597 #if 0
598 if (old_ipl < IPL_VM && ipl >= IPL_VM)
599 msr = 0;
600 #endif
601 wrtee(msr);
602 } else if (ipl == IPL_NONE) {
603 panic("%s: %p: cpl=%u: attempt to splraise(IPL_NONE)",
604 __func__, __builtin_return_address(0), old_ipl);
605 #if 0
606 } else if (old_ipl > ipl) {
607 printf("%s: %p: cpl=%u: ignoring splraise(%u) to lower ipl\n",
608 __func__, __builtin_return_address(0), old_ipl, ipl);
609 #endif
610 }
611
612 return old_ipl;
613 }
614
615 static int
616 e500_intr_spurious(void *arg)
617 {
618 return 0;
619 }
620
621 static bool
622 e500_intr_irq_info_get(struct cpu_info *ci, u_int irq, int ipl, int ist,
623 struct e500_intr_irq_info *ii)
624 {
625 const struct e500_intr_info * const info = &e500_intr_info;
626 bool ok;
627
628 #if DEBUG > 2
629 printf("%s(%p,irq=%u,ipl=%u,ist=%u,%p)\n", __func__, ci, irq, ipl, ist, ii);
630 #endif
631
632 if (ipl < IPL_VM || ipl > IPL_HIGH) {
633 #if DEBUG > 2
634 printf("%s:%d ipl=%u\n", __func__, __LINE__, ipl);
635 #endif
636 return false;
637 }
638
639 if (ist <= IST_NONE || ist >= IST_MAX) {
640 #if DEBUG > 2
641 printf("%s:%d ist=%u\n", __func__, __LINE__, ist);
642 #endif
643 return false;
644 }
645
646 ii->irq_vector = irq + info->ii_ist_vectors[ist];
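	/*
	 * Per-cpu sources other than IPIs get a separate block of
	 * vectors for each cpu; IPI vectors are shared by all cpus.
	 */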
647 if (IST_PERCPU_P(ist) && ist != IST_IPI)
648 ii->irq_vector += ci->ci_cpuid * info->ii_percpu_sources;
649
650 switch (ist) {
651 default:
652 ii->irq_vpr = OPENPIC_EIVPR(irq);
653 ii->irq_dr = OPENPIC_EIDR(irq);
654 ok = irq < info->ii_external_sources
655 && (ist == IST_EDGE
656 || ist == IST_LEVEL_LOW
657 || ist == IST_LEVEL_HIGH);
658 break;
659 case IST_PULSE:
660 ok = false;
661 break;
662 case IST_ONCHIP:
663 ii->irq_vpr = OPENPIC_IIVPR(irq);
664 ii->irq_dr = OPENPIC_IIDR(irq);
665 ok = irq < 32 * __arraycount(info->ii_onchip_bitmap);
666 #if DEBUG > 2
667 printf("%s: irq=%u: ok=%u\n", __func__, irq, ok);
668 #endif
669 ok = ok && (info->ii_onchip_bitmap[irq/32] & (1 << (irq & 31)));
670 #if DEBUG > 2
671 printf("%s: %08x%08x -> %08x%08x: ok=%u\n", __func__,
672 irq < 32 ? 0 : (1 << irq), irq < 32 ? (1 << irq) : 0,
673 info->ii_onchip_bitmap[1], info->ii_onchip_bitmap[0],
674 ok);
675 #endif
676 break;
677 case IST_MSIGROUP:
678 ii->irq_vpr = OPENPIC_MSIVPR(irq);
679 ii->irq_dr = OPENPIC_MSIDR(irq);
680 ok = irq < info->ii_msigroup_sources
681 && ipl == IPL_VM;
682 break;
683 case IST_TIMER:
684 ii->irq_vpr = OPENPIC_GTVPR(ci->ci_cpuid, irq);
685 ii->irq_dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
686 ok = irq < info->ii_timer_sources;
687 #if DEBUG > 2
688 printf("%s: IST_TIMER irq=%u: ok=%u\n", __func__, irq, ok);
689 #endif
690 break;
691 case IST_IPI:
692 ii->irq_vpr = OPENPIC_IPIVPR(irq);
693 ii->irq_dr = OPENPIC_IPIDR(irq);
694 ok = irq < info->ii_ipi_sources;
695 break;
696 case IST_MI:
697 ii->irq_vpr = OPENPIC_MIVPR(irq);
698 ii->irq_dr = OPENPIC_MIDR(irq);
699 ok = irq < info->ii_mi_sources;
700 break;
701 }
702
703 return ok;
704 }
705
706 static const char *
707 e500_intr_string(int irq, int ist, char *buf, size_t len)
708 {
709 struct cpu_info * const ci = curcpu();
710 struct cpu_softc * const cpu = ci->ci_softc;
711 struct e500_intr_irq_info ii;
712
713 if (!e500_intr_irq_info_get(ci, irq, IPL_VM, ist, &ii))
714 return NULL;
715
716 strlcpy(buf, cpu->cpu_evcnt_intrs[ii.irq_vector].ev_name, len);
717 return buf;
718 }
719
720 __CTASSERT(__arraycount(ist_names) == IST_MAX);
721
722 static const char *
723 e500_intr_typename(int ist)
724 {
725 if (IST_NONE <= ist && ist < IST_MAX)
726 return ist_names[ist];
727
728 return NULL;
729 }
730
731 static void *
732 e500_intr_cpu_establish(struct cpu_info *ci, int irq, int ipl, int ist,
733 int (*handler)(void *), void *arg, const char *xname)
734 {
735 struct cpu_softc * const cpu = ci->ci_softc;
736 struct e500_intr_irq_info ii;
737
738 KASSERT(ipl >= IPL_VM && ipl <= IPL_HIGH);
739 KASSERT(ist > IST_NONE && ist < IST_MAX && ist != IST_MSI);
740
741 if (!e500_intr_irq_info_get(ci, irq, ipl, ist, &ii)) {
742 printf("%s: e500_intr_irq_info_get(%p,%u,%u,%u,%p) failed\n",
743 __func__, ci, irq, ipl, ist, &ii);
744 return NULL;
745 }
746
747 if (xname == NULL) {
748 xname = e500_intr_all_name_lookup(irq, ist);
749 if (xname == NULL)
750 xname = "unknown";
751 }
752
753 struct intr_source * const is = &e500_intr_sources[ii.irq_vector];
754 mutex_enter(&e500_intr_lock);
755 if (is->is_ipl != IPL_NONE) {
 756 		/* XXX IPI0 is shared by all CPUs. */
757 if (is->is_ist != IST_IPI ||
758 is->is_irq != irq ||
759 is->is_ipl != ipl ||
760 is->is_ist != ist ||
761 is->is_func != handler ||
762 is->is_arg != arg) {
763 mutex_exit(&e500_intr_lock);
764 return NULL;
765 }
766 }
767
768 is->is_func = handler;
769 is->is_arg = arg;
770 is->is_ipl = ipl;
771 is->is_ist = ist;
772 is->is_irq = irq;
773 is->is_refcnt++;
774 is->is_vpr = ii.irq_vpr;
775 is->is_dr = ii.irq_dr;
776 switch (ist) {
777 case IST_EDGE:
778 case IST_LEVEL_LOW:
779 case IST_LEVEL_HIGH:
780 snprintf(is->is_source, sizeof(is->is_source), "extirq %d",
781 irq);
782 break;
783 case IST_ONCHIP:
784 snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);
785 break;
786 case IST_MSIGROUP:
787 snprintf(is->is_source, sizeof(is->is_source), "msigroup %d",
788 irq);
789 break;
790 case IST_TIMER:
791 snprintf(is->is_source, sizeof(is->is_source), "timer %d", irq);
792 break;
793 case IST_IPI:
794 snprintf(is->is_source, sizeof(is->is_source), "ipi %d", irq);
795 break;
796 case IST_MI:
797 snprintf(is->is_source, sizeof(is->is_source), "mi %d", irq);
798 break;
799 case IST_PULSE:
800 default:
801 panic("%s: invalid ist (%d)\n", __func__, ist);
802 }
803 strlcpy(is->is_xname, xname, sizeof(is->is_xname));
804
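	/*
	 * Encode the vector as ((vector + 1) << 4) | ipl so that
	 * e500_extintr() can recover both the source and its IPL from
	 * the IACK value, then add the level/polarity (or priority
	 * high) bits appropriate for the IST.
	 */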
805 uint32_t vpr = VPR_PRIORITY_MAKE(IPL2CTPR(ipl))
806 | VPR_VECTOR_MAKE(((ii.irq_vector + 1) << 4) | ipl)
807 | (ist == IST_LEVEL_LOW
808 ? VPR_LEVEL_LOW
809 : (ist == IST_LEVEL_HIGH
810 ? VPR_LEVEL_HIGH
811 : (ist == IST_ONCHIP
812 ? VPR_P_HIGH
813 : 0)));
814
815 /*
 816 	 * All interrupts go to the primary cpu, except per-cpu interrupts,
 817 	 * which get routed to the appropriate cpu.
818 */
819 uint32_t dr = openpic_read(cpu, ii.irq_dr);
820
821 dr |= 1 << (IST_PERCPU_P(ist) ? ci->ci_cpuid : 0);
822
823 /*
824 * Update the vector/priority and destination registers keeping the
825 * interrupt masked.
826 */
827 const register_t msr = wrtee(0); /* disable interrupts */
828 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
829 openpic_write(cpu, ii.irq_dr, dr);
830
831 /*
832 * Now unmask the interrupt.
833 */
834 openpic_write(cpu, ii.irq_vpr, vpr);
835
836 wrtee(msr); /* re-enable interrupts */
837
838 mutex_exit(&e500_intr_lock);
839
840 return is;
841 }
842
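/*
 * Illustrative sketch only (the wrapper and handler names here are
 * assumptions, not taken from this file): a driver normally reaches
 * this routine through the MI interrupt establishment wrappers, e.g.
 *
 *	sc->sc_ih = intr_establish_xname(ISOURCE_DUART, IPL_VM,
 *	    IST_ONCHIP, xxx_intr, sc, device_xname(self));
 */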
843 static void *
844 e500_intr_establish(int irq, int ipl, int ist, int (*handler)(void *),
845 void *arg, const char *xname)
846 {
847 return e500_intr_cpu_establish(curcpu(), irq, ipl, ist, handler, arg,
848 xname);
849 }
850
851 static void
852 e500_intr_disestablish(void *vis)
853 {
854 struct cpu_softc * const cpu = curcpu()->ci_softc;
855 struct intr_source * const is = vis;
856 struct e500_intr_irq_info ii;
857
858 KASSERT(e500_intr_sources <= is);
859 KASSERT(is < e500_intr_last_source);
860 KASSERT(!cpu_intr_p());
861
862 bool ok = e500_intr_irq_info_get(curcpu(), is->is_irq, is->is_ipl,
863 is->is_ist, &ii);
864 (void)ok; /* appease gcc */
865 KASSERT(ok);
866 KASSERT(is - e500_intr_sources == ii.irq_vector);
867
868 mutex_enter(&e500_intr_lock);
869
870 if (is->is_refcnt-- > 1) {
871 mutex_exit(&e500_intr_lock);
872 return;
873 }
874
875 /*
876 * Mask the source using the mask (MSK) bit in the vector/priority reg.
877 */
878 uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
879 openpic_write(cpu, ii.irq_vpr, VPR_MSK | vpr);
880
881 /*
882 * Wait for the Activity (A) bit for the source to be cleared.
883 */
884 while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
885 ;
886
887 /*
888 * Now the source can be modified.
889 */
890 openpic_write(cpu, ii.irq_dr, 0); /* stop delivery */
891 openpic_write(cpu, ii.irq_vpr, VPR_MSK); /* mask/reset it */
892
893 *is = (struct intr_source)INTR_SOURCE_INITIALIZER;
894
895 mutex_exit(&e500_intr_lock);
896 }
897
898 static void
899 e500_critintr(struct trapframe *tf)
900 {
901 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
902 }
903
904 static void
905 e500_decrintr(struct trapframe *tf)
906 {
907 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
908 }
909
910 static void
911 e500_fitintr(struct trapframe *tf)
912 {
913 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
914 }
915
916 static void
917 e500_wdogintr(struct trapframe *tf)
918 {
919 struct cpu_info * const ci = curcpu();
920 mtspr(SPR_TSR, TSR_ENW|TSR_WIS);
921 wdog_barked = true;
922 dump_splhist(ci, NULL);
923 dump_trapframe(tf, NULL);
924 panic("%s: tf=%p tb=%"PRId64" srr0/srr1=%#lx/%#lx"
925 " cpl=%d idepth=%d, mtxcount=%d",
926 __func__, tf, mftb(), tf->tf_srr0, tf->tf_srr1,
927 ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count);
928 }
929
930 static void
931 e500_extintr(struct trapframe *tf)
932 {
933 struct cpu_info * const ci = curcpu();
934 struct cpu_softc * const cpu = ci->ci_softc;
935 const int old_ipl = ci->ci_cpl;
936
 937 	/* if we panicked because of the watchdog, PSL_CE will be clear. */
938 KASSERT(wdog_barked || (mfmsr() & PSL_CE));
939
940 #if 0
941 // printf("%s(%p): idepth=%d enter\n", __func__, tf, ci->ci_idepth);
942 if ((register_t)tf >= (register_t)curlwp->l_addr + USPACE
943 || (register_t)tf < (register_t)curlwp->l_addr + NBPG) {
944 printf("%s(entry): pid %d.%d (%s): srr0/srr1=%#lx/%#lx: invalid tf addr %p\n",
945 __func__, curlwp->l_proc->p_pid, curlwp->l_lid,
946 curlwp->l_proc->p_comm, tf->tf_srr0, tf->tf_srr1, tf);
947 }
948 #endif
949
950
951 ci->ci_data.cpu_nintr++;
952 tf->tf_cf.cf_idepth = ci->ci_idepth++;
953 cpu->cpu_pcpls[ci->ci_idepth] = old_ipl;
954 #if 1
955 if (mfmsr() & PSL_EE)
956 panic("%s(%p): MSR[EE] is on (%#lx)!", __func__, tf, mfmsr());
957 if (old_ipl == IPL_HIGH
958 || IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
959 panic("%s(%p): old_ipl(%u) == IPL_HIGH(%u) "
960 "|| old_ipl + %u != OPENPIC_CTPR (%u)",
961 __func__, tf, old_ipl, IPL_HIGH,
962 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
963 #else
964 if (old_ipl >= IPL_VM)
965 panic("%s(%p): old_ipl(%u) >= IPL_VM(%u) CTPR=%u",
966 __func__, tf, old_ipl, IPL_VM, openpic_read(cpu, OPENPIC_CTPR));
967 #endif
968
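	/*
	 * Dispatch loop: read the IACK register to get the highest
	 * priority pending vector, raise the IPL to the source's level,
	 * run its handler with MSR[EE] enabled, then EOI and look for
	 * more work.  Stop on a spurious vector, or after one pass if
	 * this is a nested interrupt.
	 */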
969 for (;;) {
970 /*
971 * Find out the pending interrupt.
972 */
973 KASSERTMSG((mfmsr() & PSL_EE) == 0,
974 "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
975 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
976 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
977 __func__, tf, __LINE__, old_ipl,
978 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
979 const uint32_t iack = openpic_read(cpu, OPENPIC_IACK);
980 #ifdef DIAGNOSTIC
981 const int ipl = iack & 0xf;
982 #endif
983 const int irq = (iack >> 4) - 1;
984 #if 0
985 printf("%s: iack=%d ipl=%d irq=%d <%s>\n",
986 __func__, iack, ipl, irq,
987 (iack != IRQ_SPURIOUS ?
988 cpu->cpu_evcnt_intrs[irq].ev_name : "spurious"));
989 #endif
990 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
991 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
992 __func__, tf, __LINE__, old_ipl,
993 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
994 if (iack == IRQ_SPURIOUS)
995 break;
996
997 struct intr_source * const is = &e500_intr_sources[irq];
998 if (__predict_true(is < e500_intr_last_source)) {
999 /*
1000 			 * Timer interrupts get their argument overridden with
1001 * the pointer to the trapframe.
1002 */
1003 KASSERTMSG(is->is_ipl == ipl,
1004 "iack %#x: is %p: irq %d ipl %d != iack ipl %d",
1005 iack, is, irq, is->is_ipl, ipl);
1006 void *arg = (is->is_ist == IST_TIMER ? tf : is->is_arg);
1007 if (is->is_ipl <= old_ipl)
1008 panic("%s(%p): %s (%u): is->is_ipl (%u) <= old_ipl (%u)\n",
1009 __func__, tf,
1010 cpu->cpu_evcnt_intrs[irq].ev_name, irq,
1011 is->is_ipl, old_ipl);
1012 KASSERT(is->is_ipl > old_ipl);
1013 e500_splset(ci, is->is_ipl); /* change IPL */
1014 if (__predict_false(is->is_func == NULL)) {
1015 aprint_error_dev(ci->ci_dev,
1016 "interrupt from unestablished irq %d\n",
1017 irq);
1018 } else {
1019 int (*func)(void *) = is->is_func;
1020 wrtee(PSL_EE);
1021 int rv = (*func)(arg);
1022 wrtee(0);
1023 #if DEBUG > 2
1024 printf("%s: %s handler %p(%p) returned %d\n",
1025 __func__,
1026 cpu->cpu_evcnt_intrs[irq].ev_name,
1027 func, arg, rv);
1028 #endif
1029 if (rv == 0)
1030 cpu->cpu_evcnt_spurious_intr.ev_count++;
1031 }
1032 e500_splset(ci, old_ipl); /* restore IPL */
1033 cpu->cpu_evcnt_intrs[irq].ev_count++;
1034 } else {
1035 aprint_error_dev(ci->ci_dev,
1036 "interrupt from illegal irq %d\n", irq);
1037 cpu->cpu_evcnt_spurious_intr.ev_count++;
1038 }
1039 /*
1040 * If this is a nested interrupt, simply ack it and exit
1041 * because the loop we interrupted will complete looking
1042 * for interrupts.
1043 */
1044 KASSERTMSG((mfmsr() & PSL_EE) == 0,
1045 "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
1046 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1047 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1048 __func__, tf, __LINE__, old_ipl,
1049 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1050
1051 openpic_write(cpu, OPENPIC_EOI, 0);
1052 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1053 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1054 __func__, tf, __LINE__, old_ipl,
1055 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1056 if (ci->ci_idepth > 0)
1057 break;
1058 }
1059
1060 ci->ci_idepth--;
1061
1062 #ifdef __HAVE_FAST_SOFTINTS
1063 /*
1064 	 * Before exiting, run any pending softints that the old IPL unmasks.
1065 */
1066 const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << old_ipl);
1067 if (__predict_false(softints != 0)) {
1068 KASSERT(old_ipl < IPL_VM);
1069 e500_splset(ci, IPL_HIGH); /* pop to high */
1070 wrtee(PSL_EE); /* reenable interrupts */
1071 powerpc_softint(ci, old_ipl, /* deal with them */
1072 tf->tf_srr0);
1073 wrtee(0); /* disable interrupts */
1074 e500_splset(ci, old_ipl); /* and drop back */
1075 }
1076 #endif /* __HAVE_FAST_SOFTINTS */
1077 KASSERT(ci->ci_cpl == old_ipl);
1078
1079 /*
1080 * If we interrupted while power-saving and we need to exit idle,
1081 * we need to clear PSL_POW so we won't go back into power-saving.
1082 */
1083 if (__predict_false(tf->tf_srr1 & PSL_POW) && ci->ci_want_resched)
1084 tf->tf_srr1 &= ~PSL_POW;
1085
1086 // printf("%s(%p): idepth=%d exit\n", __func__, tf, ci->ci_idepth);
1087 }
1088
1089 static void
1090 e500_intr_init(void)
1091 {
1092 struct cpu_info * const ci = curcpu();
1093 struct cpu_softc * const cpu = ci->ci_softc;
1094 const uint32_t frr = openpic_read(cpu, OPENPIC_FRR);
1095 const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1096 // const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1097 struct intr_source *is;
1098 struct e500_intr_info * const ii = &e500_intr_info;
1099
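	/*
	 * Identify the SoC from the upper half of the SVR; the 0x80000
	 * bit (which distinguishes the security-engine "E" variants) is
	 * masked off so both variants match.
	 */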
1100 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
1101 switch (svr) {
1102 #ifdef MPC8536
1103 case SVR_MPC8536v1 >> 16:
1104 *ii = mpc8536_intr_info;
1105 break;
1106 #endif
1107 #ifdef MPC8544
1108 case SVR_MPC8544v1 >> 16:
1109 *ii = mpc8544_intr_info;
1110 break;
1111 #endif
1112 #ifdef MPC8548
1113 case SVR_MPC8543v1 >> 16:
1114 case SVR_MPC8548v1 >> 16:
1115 *ii = mpc8548_intr_info;
1116 break;
1117 #endif
1118 #ifdef MPC8555
1119 case SVR_MPC8541v1 >> 16:
1120 case SVR_MPC8555v1 >> 16:
1121 *ii = mpc8555_intr_info;
1122 break;
1123 #endif
1124 #ifdef MPC8568
1125 case SVR_MPC8568v1 >> 16:
1126 *ii = mpc8568_intr_info;
1127 break;
1128 #endif
1129 #ifdef MPC8572
1130 case SVR_MPC8572v1 >> 16:
1131 *ii = mpc8572_intr_info;
1132 break;
1133 #endif
1134 #ifdef P1023
1135 case SVR_P1017v1 >> 16:
1136 case SVR_P1023v1 >> 16:
1137 *ii = p1023_intr_info;
1138 break;
1139 #endif
1140 #ifdef P1025
1141 case SVR_P1016v1 >> 16:
1142 case SVR_P1025v1 >> 16:
1143 *ii = p1025_intr_info;
1144 break;
1145 #endif
1146 #ifdef P2020
1147 case SVR_P2010v2 >> 16:
1148 case SVR_P2020v2 >> 16:
1149 *ii = p20x0_intr_info;
1150 break;
1151 #endif
1152 default:
1153 panic("%s: don't know how to deal with SVR %#jx",
1154 __func__, (uintmax_t)mfspr(SPR_SVR));
1155 }
1156
1157 /*
1158 * Initialize interrupt handler lock
1159 */
1160 mutex_init(&e500_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
1161
1162 /*
1163 * We need to be in mixed mode.
1164 */
1165 openpic_write(cpu, OPENPIC_GCR, GCR_M);
1166
1167 /*
1168 	 * Make sure we and the openpic both agree about the current SPL level.
1169 */
1170 e500_splset(ci, ci->ci_cpl);
1171
1172 /*
1173 	 * Allocate the required number of interrupt sources.
1174 */
1175 is = kmem_zalloc(nirq * sizeof(*is), KM_SLEEP);
1176 e500_intr_sources = is;
1177 e500_intr_last_source = is + nirq;
1178
1179 /*
1180 * Initialize all the external interrupts as active low.
1181 */
1182 for (u_int irq = 0; irq < e500_intr_info.ii_external_sources; irq++) {
1183 openpic_write(cpu, OPENPIC_EIVPR(irq),
1184 VPR_VECTOR_MAKE(irq) | VPR_LEVEL_LOW);
1185 }
1186 }
1187
1188 static void
1189 e500_intr_init_precpu(void)
1190 {
1191 	struct cpu_info * const ci = curcpu();
1192 struct cpu_softc * const cpu = ci->ci_softc;
1193 bus_addr_t dr;
1194
1195 /*
1196 	 * Each timer's DR initially delivers to cpu0; stop delivery here.
1197 */
1198 for (u_int irq = 0; irq < e500_intr_info.ii_timer_sources; irq++) {
1199 dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
1200 openpic_write(cpu, dr, 0); /* stop delivery */
1201 }
1202 }
1203
1204 static void
1205 e500_idlespin(void)
1206 {
1207 KASSERTMSG(curcpu()->ci_cpl == IPL_NONE,
1208 "%s: cpu%u: ci_cpl (%d) != 0", __func__, cpu_number(),
1209 curcpu()->ci_cpl);
1210 KASSERTMSG(CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)) == IPL_NONE,
1211 "%s: cpu%u: CTPR (%d) != IPL_NONE", __func__, cpu_number(),
1212 CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)));
1213 KASSERT(mfmsr() & PSL_EE);
1214
1215 if (powersave > 0)
1216 mtmsr(mfmsr() | PSL_POW);
1217 }
1218
1219 static void
1220 e500_intr_cpu_attach(struct cpu_info *ci)
1221 {
1222 struct cpu_softc * const cpu = ci->ci_softc;
1223 const char * const xname = device_xname(ci->ci_dev);
1224
1225 const u_int32_t frr = openpic_read(cpu, OPENPIC_FRR);
1226 const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1227 // const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1228
1229 const struct e500_intr_info * const info = &e500_intr_info;
1230
1231 cpu->cpu_clock_gtbcr = OPENPIC_GTBCR(ci->ci_cpuid, E500_CLOCK_TIMER);
1232
1233 cpu->cpu_evcnt_intrs =
1234 kmem_zalloc(nirq * sizeof(cpu->cpu_evcnt_intrs[0]), KM_SLEEP);
1235
1236 struct evcnt *evcnt = cpu->cpu_evcnt_intrs;
1237 for (size_t j = 0; j < info->ii_external_sources; j++, evcnt++) {
1238 const char *name = e500_intr_external_name_lookup(j);
1239 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, xname, name);
1240 }
1241 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_ONCHIP]);
1242 for (size_t j = 0; j < info->ii_onchip_sources; j++, evcnt++) {
1243 if (info->ii_onchip_bitmap[j / 32] & __BIT(j & 31)) {
1244 const char *name = e500_intr_onchip_name_lookup(j);
1245 if (name != NULL) {
1246 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1247 NULL, xname, name);
1248 #ifdef DIAGNOSTIC
1249 } else {
1250 printf("%s: missing evcnt for onchip irq %zu\n",
1251 __func__, j);
1252 #endif
1253 }
1254 }
1255 }
1256
1257 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_MSIGROUP]);
1258 for (size_t j = 0; j < info->ii_msigroup_sources; j++, evcnt++) {
1259 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1260 NULL, xname, e500_msigroup_intr_names[j].in_name);
1261 }
1262
1263 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_TIMER]);
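	/*
	 * Timers, IPIs, and message interrupts get one evcnt block per
	 * cpu; skip ahead to this cpu's block.
	 */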
1264 evcnt += ci->ci_cpuid * info->ii_percpu_sources;
1265 for (size_t j = 0; j < info->ii_timer_sources; j++, evcnt++) {
1266 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1267 NULL, xname, e500_timer_intr_names[j].in_name);
1268 }
1269
1270 for (size_t j = 0; j < info->ii_ipi_sources; j++, evcnt++) {
1271 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1272 NULL, xname, e500_ipi_intr_names[j].in_name);
1273 }
1274
1275 for (size_t j = 0; j < info->ii_mi_sources; j++, evcnt++) {
1276 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1277 NULL, xname, e500_mi_intr_names[j].in_name);
1278 }
1279
1280 ci->ci_idlespin = e500_idlespin;
1281 }
1282
1283 static void
1284 e500_intr_cpu_send_ipi(cpuid_t target, uintptr_t ipimsg)
1285 {
1286 struct cpu_info * const ci = curcpu();
1287 struct cpu_softc * const cpu = ci->ci_softc;
1288 uint32_t dstmask;
1289
1290 if (target >= CPU_MAXNUM) {
1291 CPU_INFO_ITERATOR cii;
1292 struct cpu_info *dst_ci;
1293
1294 KASSERT(target == IPI_DST_NOTME || target == IPI_DST_ALL);
1295
1296 dstmask = 0;
1297 for (CPU_INFO_FOREACH(cii, dst_ci)) {
1298 if (target == IPI_DST_ALL || ci != dst_ci) {
1299 				dstmask |= 1 << cpu_index(dst_ci);
1300 if (ipimsg)
1301 atomic_or_32(&dst_ci->ci_pending_ipis,
1302 ipimsg);
1303 }
1304 }
1305 } else {
1306 struct cpu_info * const dst_ci = cpu_lookup(target);
1307 KASSERT(dst_ci != NULL);
1308 KASSERTMSG(target == cpu_index(dst_ci),
1309 "%s: target (%lu) != cpu_index(cpu%u)",
1310 __func__, target, cpu_index(dst_ci));
1311 dstmask = (1 << target);
1312 if (ipimsg)
1313 atomic_or_32(&dst_ci->ci_pending_ipis, ipimsg);
1314 }
1315
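	/*
	 * Writing the destination mask to IPI dispatch register 0 sends
	 * IPI 0 to every cpu in the mask; the message itself was posted
	 * to each target's ci_pending_ipis above.
	 */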
1316 openpic_write(cpu, OPENPIC_IPIDR(0), dstmask);
1317 }
1318
1319 typedef void (*ipifunc_t)(void);
1320
1321 #ifdef __HAVE_PREEMPTION
1322 static void
1323 e500_ipi_kpreempt(void)
1324 {
1325 	powerpc_softint_trigger(1 << IPL_NONE);
1326 }
1327 #endif
1328
1329 static void
1330 e500_ipi_suspend(void)
1331 {
1332
1333 #ifdef MULTIPROCESSOR
1334 cpu_pause(NULL);
1335 #endif /* MULTIPROCESSOR */
1336 }
1337
1338 static void
1339 e500_ipi_ast(void)
1340 {
1341 curcpu()->ci_data.cpu_onproc->l_md.md_astpending = 1;
1342 }
1343
1344 static const ipifunc_t e500_ipifuncs[] = {
1345 [ilog2(IPI_XCALL)] = xc_ipi_handler,
1346 [ilog2(IPI_GENERIC)] = ipi_cpu_handler,
1347 [ilog2(IPI_HALT)] = e500_ipi_halt,
1348 #ifdef __HAVE_PREEMPTION
1349 [ilog2(IPI_KPREEMPT)] = e500_ipi_kpreempt,
1350 #endif
1351 [ilog2(IPI_TLB1SYNC)] = e500_tlb1_sync,
1352 [ilog2(IPI_SUSPEND)] = e500_ipi_suspend,
1353 [ilog2(IPI_AST)] = e500_ipi_ast,
1354 };
1355
1356 static int
1357 e500_ipi_intr(void *v)
1358 {
1359 struct cpu_info * const ci = curcpu();
1360
1361 ci->ci_ev_ipi.ev_count++;
1362
1363 uint32_t pending_ipis = atomic_swap_32(&ci->ci_pending_ipis, 0);
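	/*
	 * Dispatch pending IPI messages from the most significant bit
	 * down, using count-leading-zeros to skip over clear bits.
	 */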
1364 for (u_int ipi = 31; pending_ipis != 0; ipi--, pending_ipis <<= 1) {
1365 const u_int bits = __builtin_clz(pending_ipis);
1366 ipi -= bits;
1367 pending_ipis <<= bits;
1368 KASSERT(e500_ipifuncs[ipi] != NULL);
1369 (*e500_ipifuncs[ipi])();
1370 }
1371
1372 return 1;
1373 }
1374
1375 static void
1376 e500_intr_cpu_hatch(struct cpu_info *ci)
1377 {
1378 char iname[INTRIDBUF];
1379
1380 /* Initialize percpu interrupts. */
1381 e500_intr_init_precpu();
1382
1383 /*
1384 * Establish clock interrupt for this CPU.
1385 */
1386 snprintf(iname, sizeof(iname), "%s clock", device_xname(ci->ci_dev));
1387 if (e500_intr_cpu_establish(ci, E500_CLOCK_TIMER, IPL_CLOCK, IST_TIMER,
1388 e500_clock_intr, NULL, iname) == NULL)
1389 panic("%s: failed to establish clock interrupt!", __func__);
1390
1391 /*
1392 * Establish the IPI interrupts for this CPU.
1393 */
1394 if (e500_intr_cpu_establish(ci, 0, IPL_VM, IST_IPI, e500_ipi_intr,
1395 NULL, "ipi") == NULL)
1396 panic("%s: failed to establish ipi interrupt!", __func__);
1397
1398 /*
1399 * Enable watchdog interrupts.
1400 */
1401 uint32_t tcr = mfspr(SPR_TCR);
1402 tcr |= TCR_WIE;
1403 mtspr(SPR_TCR, tcr);
1404 }
1405
1406 static const char *
1407 e500_intr_all_name_lookup(int irq, int ist)
1408 {
1409 const struct e500_intr_info * const info = &e500_intr_info;
1410
1411 switch (ist) {
1412 default:
1413 if (irq < info->ii_external_sources &&
1414 (ist == IST_EDGE ||
1415 ist == IST_LEVEL_LOW ||
1416 ist == IST_LEVEL_HIGH))
1417 return e500_intr_name_lookup(
1418 info->ii_external_intr_names, irq);
1419 break;
1420
1421 case IST_PULSE:
1422 break;
1423
1424 case IST_ONCHIP:
1425 if (irq < info->ii_onchip_sources)
1426 return e500_intr_onchip_name_lookup(irq);
1427 break;
1428
1429 case IST_MSIGROUP:
1430 if (irq < info->ii_msigroup_sources)
1431 return e500_intr_name_lookup(e500_msigroup_intr_names,
1432 irq);
1433 break;
1434
1435 case IST_TIMER:
1436 if (irq < info->ii_timer_sources)
1437 return e500_intr_name_lookup(e500_timer_intr_names,
1438 irq);
1439 break;
1440
1441 case IST_IPI:
1442 if (irq < info->ii_ipi_sources)
1443 return e500_intr_name_lookup(e500_ipi_intr_names, irq);
1444 break;
1445
1446 case IST_MI:
1447 if (irq < info->ii_mi_sources)
1448 return e500_intr_name_lookup(e500_mi_intr_names, irq);
1449 break;
1450 }
1451
1452 return NULL;
1453 }
1454
1455 static void
1456 e500_intr_get_affinity(struct intr_source *is, kcpuset_t *cpuset)
1457 {
1458 struct cpu_info * const ci = curcpu();
1459 struct cpu_softc * const cpu = ci->ci_softc;
1460 struct e500_intr_irq_info ii;
1461
1462 kcpuset_zero(cpuset);
1463
1464 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) {
1465 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl,
1466 is->is_ist, &ii)) {
1467 uint32_t dr = openpic_read(cpu, ii.irq_dr);
1468 while (dr != 0) {
1469 u_int n = ffs(dr);
1470 if (n-- == 0)
1471 break;
1472 dr &= ~(1 << n);
1473 kcpuset_set(cpuset, n);
1474 }
1475 }
1476 }
1477 }
1478
1479 static int
1480 e500_intr_set_affinity(struct intr_source *is, const kcpuset_t *cpuset)
1481 {
1482 struct cpu_info * const ci = curcpu();
1483 struct cpu_softc * const cpu = ci->ci_softc;
1484 struct e500_intr_irq_info ii;
1485 uint32_t ecpuset, tcpuset;
1486
1487 KASSERT(mutex_owned(&cpu_lock));
1488 KASSERT(mutex_owned(&e500_intr_lock));
1489 KASSERT(!kcpuset_iszero(cpuset));
1490
1491 kcpuset_export_u32(cpuset, &ecpuset, sizeof(ecpuset));
1492 tcpuset = ecpuset;
1493 while (tcpuset != 0) {
1494 u_int cpu_idx = ffs(tcpuset);
1495 if (cpu_idx-- == 0)
1496 break;
1497
1498 tcpuset &= ~(1 << cpu_idx);
1499 struct cpu_info * const newci = cpu_lookup(cpu_idx);
1500 if (newci == NULL)
1501 return EINVAL;
1502 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
1503 return EINVAL;
1504 }
1505
1506 if (!e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist,
1507 &ii))
1508 return ENXIO;
1509
1510 /*
1511 * Update the vector/priority and destination registers keeping the
1512 * interrupt masked.
1513 */
1514 const register_t msr = wrtee(0); /* disable interrupts */
1515
1516 uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
1517 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
1518
1519 /*
1520 * Wait for the Activity (A) bit for the source to be cleared.
1521 */
1522 while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
1523 continue;
1524
1525 /*
1526 * Update destination register
1527 */
1528 openpic_write(cpu, ii.irq_dr, ecpuset);
1529
1530 /*
1531 * Now unmask the interrupt.
1532 */
1533 openpic_write(cpu, ii.irq_vpr, vpr);
1534
1535 wrtee(msr); /* re-enable interrupts */
1536
1537 return 0;
1538 }
1539
1540 static bool
1541 e500_intr_is_affinity_intrsource(struct intr_source *is,
1542 const kcpuset_t *cpuset)
1543 {
1544 struct cpu_info * const ci = curcpu();
1545 struct cpu_softc * const cpu = ci->ci_softc;
1546 struct e500_intr_irq_info ii;
1547 bool result = false;
1548
1549 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) {
1550 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl,
1551 is->is_ist, &ii)) {
1552 uint32_t dr = openpic_read(cpu, ii.irq_dr);
1553 while (dr != 0 && !result) {
1554 u_int n = ffs(dr);
1555 if (n-- == 0)
1556 break;
1557 dr &= ~(1 << n);
1558 result = kcpuset_isset(cpuset, n);
1559 }
1560 }
1561 }
1562 return result;
1563 }
1564
1565 static struct intr_source *
1566 e500_intr_get_source(const char *intrid)
1567 {
1568 struct intr_source *is;
1569
1570 mutex_enter(&e500_intr_lock);
1571 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) {
1572 if (is->is_source[0] == '\0')
1573 continue;
1574
1575 if (!strncmp(intrid, is->is_source, sizeof(is->is_source) - 1))
1576 break;
1577 }
1578 if (is == e500_intr_last_source)
1579 is = NULL;
1580 mutex_exit(&e500_intr_lock);
1581 return is;
1582 }
1583
1584 uint64_t
1585 interrupt_get_count(const char *intrid, u_int cpu_idx)
1586 {
1587 struct cpu_info * const ci = cpu_lookup(cpu_idx);
1588 struct cpu_softc * const cpu = ci->ci_softc;
1589 struct intr_source *is;
1590 struct e500_intr_irq_info ii;
1591
1592 is = e500_intr_get_source(intrid);
1593 if (is == NULL)
1594 return 0;
1595
1596 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, &ii))
1597 return cpu->cpu_evcnt_intrs[ii.irq_vector].ev_count;
1598 return 0;
1599 }
1600
1601 void
1602 interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
1603 {
1604 struct intr_source *is;
1605
1606 kcpuset_zero(cpuset);
1607
1608 is = e500_intr_get_source(intrid);
1609 if (is == NULL)
1610 return;
1611
1612 mutex_enter(&e500_intr_lock);
1613 e500_intr_get_affinity(is, cpuset);
1614 mutex_exit(&e500_intr_lock);
1615 }
1616
1617 void
1618 interrupt_get_available(kcpuset_t *cpuset)
1619 {
1620 CPU_INFO_ITERATOR cii;
1621 struct cpu_info *ci;
1622
1623 kcpuset_zero(cpuset);
1624
1625 mutex_enter(&cpu_lock);
1626 for (CPU_INFO_FOREACH(cii, ci)) {
1627 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
1628 kcpuset_set(cpuset, cpu_index(ci));
1629 }
1630 mutex_exit(&cpu_lock);
1631 }
1632
1633 void
1634 interrupt_get_devname(const char *intrid, char *buf, size_t len)
1635 {
1636 struct intr_source *is;
1637
1638 if (len == 0)
1639 return;
1640
1641 buf[0] = '\0';
1642
1643 is = e500_intr_get_source(intrid);
1644 if (is != NULL)
1645 strlcpy(buf, is->is_xname, len);
1646 }
1647
1648 struct intrids_handler *
1649 interrupt_construct_intrids(const kcpuset_t *cpuset)
1650 {
1651 struct intr_source *is;
1652 struct intrids_handler *ii_handler;
1653 intrid_t *ids;
1654 int i, n;
1655
1656 if (kcpuset_iszero(cpuset))
1657 return NULL;
1658
1659 n = 0;
1660 mutex_enter(&e500_intr_lock);
1661 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) {
1662 if (e500_intr_is_affinity_intrsource(is, cpuset))
1663 ++n;
1664 }
1665 mutex_exit(&e500_intr_lock);
1666
1667 const size_t alloc_size = sizeof(int) + sizeof(intrid_t) * n;
1668 ii_handler = kmem_zalloc(alloc_size, KM_SLEEP);
1669 ii_handler->iih_nids = n;
1670 if (n == 0)
1671 return ii_handler;
1672
1673 ids = ii_handler->iih_intrids;
1674 mutex_enter(&e500_intr_lock);
1675 for (i = 0, is = e500_intr_sources;
1676 i < n && is < e500_intr_last_source;
1677 ++is) {
1678 if (!e500_intr_is_affinity_intrsource(is, cpuset))
1679 continue;
1680
1681 if (is->is_source[0] != '\0') {
1682 strlcpy(ids[i], is->is_source, sizeof(ids[0]));
1683 ++i;
1684 }
1685 }
1686 mutex_exit(&e500_intr_lock);
1687
1688 return ii_handler;
1689 }
1690
1691 void
1692 interrupt_destruct_intrids(struct intrids_handler *ii_handler)
1693 {
1694 size_t iih_size;
1695
1696 if (ii_handler == NULL)
1697 return;
1698
1699 iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
1700 kmem_free(ii_handler, iih_size);
1701 }
1702
1703 static int
1704 interrupt_distribute_locked(struct intr_source *is, const kcpuset_t *newset,
1705 kcpuset_t *oldset)
1706 {
1707 int error;
1708
1709 KASSERT(mutex_owned(&cpu_lock));
1710
1711 if (is->is_ipl == IPL_NONE || IST_PERCPU_P(is->is_ist))
1712 return EINVAL;
1713
1714 mutex_enter(&e500_intr_lock);
1715 if (oldset != NULL)
1716 e500_intr_get_affinity(is, oldset);
1717 error = e500_intr_set_affinity(is, newset);
1718 mutex_exit(&e500_intr_lock);
1719
1720 return error;
1721 }
1722
1723 int
1724 interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
1725 {
1726 int error;
1727
1728 mutex_enter(&cpu_lock);
1729 error = interrupt_distribute_locked(ich, newset, oldset);
1730 mutex_exit(&cpu_lock);
1731
1732 return error;
1733 }
1734
1735 int
1736 interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
1737 kcpuset_t *oldset)
1738 {
1739 struct intr_source *is;
1740 int error;
1741
1742 is = e500_intr_get_source(intrid);
1743 if (is != NULL) {
1744 mutex_enter(&cpu_lock);
1745 error = interrupt_distribute_locked(is, newset, oldset);
1746 mutex_exit(&cpu_lock);
1747 } else
1748 error = ENOENT;
1749
1750 return error;
1751 }
1752