1 1.9 matt /* $NetBSD: e500_intr.c,v 1.9 2011/06/08 05:13:00 matt Exp $ */
2 1.2 matt /*-
3 1.2 matt * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 1.2 matt * All rights reserved.
5 1.2 matt *
6 1.2 matt * This code is derived from software contributed to The NetBSD Foundation
7 1.2 matt * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 1.2 matt * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 1.2 matt *
10 1.2 matt * This material is based upon work supported by the Defense Advanced Research
11 1.2 matt * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 1.2 matt * Contract No. N66001-09-C-2073.
13 1.2 matt * Approved for Public Release, Distribution Unlimited
14 1.2 matt *
15 1.2 matt * Redistribution and use in source and binary forms, with or without
16 1.2 matt * modification, are permitted provided that the following conditions
17 1.2 matt * are met:
18 1.2 matt * 1. Redistributions of source code must retain the above copyright
19 1.2 matt * notice, this list of conditions and the following disclaimer.
20 1.2 matt * 2. Redistributions in binary form must reproduce the above copyright
21 1.2 matt * notice, this list of conditions and the following disclaimer in the
22 1.2 matt * documentation and/or other materials provided with the distribution.
23 1.2 matt *
24 1.2 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 1.2 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 1.2 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 1.2 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 1.2 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 1.2 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 1.2 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 1.2 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 1.2 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 1.2 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 1.2 matt * POSSIBILITY OF SUCH DAMAGE.
35 1.2 matt */
36 1.2 matt
37 1.3 matt #include "opt_mpc85xx.h"
38 1.3 matt
39 1.2 matt #define __INTR_PRIVATE
40 1.2 matt
41 1.2 matt #include <sys/param.h>
42 1.2 matt #include <sys/proc.h>
43 1.2 matt #include <sys/intr.h>
44 1.2 matt #include <sys/cpu.h>
45 1.2 matt #include <sys/kmem.h>
46 1.2 matt #include <sys/atomic.h>
47 1.2 matt #include <sys/bus.h>
48 1.8 matt #include <sys/xcall.h>
49 1.8 matt #include <sys/bitops.h>
50 1.2 matt
51 1.2 matt #include <uvm/uvm_extern.h>
52 1.2 matt
53 1.2 matt #include <powerpc/spr.h>
54 1.2 matt #include <powerpc/booke/spr.h>
55 1.2 matt
56 1.2 matt #include <powerpc/booke/cpuvar.h>
57 1.2 matt #include <powerpc/booke/e500reg.h>
58 1.2 matt #include <powerpc/booke/e500var.h>
59 1.2 matt #include <powerpc/booke/openpicreg.h>
60 1.2 matt
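/*
* The OpenPIC current task priority register (CTPR) holds a priority
* from 0 to 15 and blocks delivery of any source at or below it.
* IPL_HIGH maps to CTPR 15 and the lower IPLs follow linearly below it,
* so the conversion in either direction is just an offset of (15 - IPL_HIGH).
*/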
61 1.2 matt #define IPL2CTPR(ipl) ((ipl) + 15 - IPL_HIGH)
62 1.2 matt #define CTPR2IPL(ctpr) ((ctpr) - (15 - IPL_HIGH))
63 1.2 matt
64 1.2 matt #define IST_PERCPU_P(ist) ((ist) >= IST_TIMER)
65 1.2 matt
66 1.8 matt #ifdef __HAVE_PREEMPTION
67 1.8 matt #define IPL_PREEMPT_SOFTMASK (1 << IPL_NONE)
68 1.8 matt #else
69 1.8 matt #define IPL_PREEMPT_SOFTMASK 0
70 1.8 matt #endif
71 1.8 matt
72 1.2 matt #define IPL_SOFTMASK \
73 1.2 matt ((1 << IPL_SOFTSERIAL) | (1 << IPL_SOFTNET ) \
74 1.8 matt |(1 << IPL_SOFTBIO ) | (1 << IPL_SOFTCLOCK ) \
75 1.8 matt |IPL_PREEMPT_SOFTMASK)
76 1.2 matt
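/*
* SOFTINT2IPL_MAP packs the IPL corresponding to each SOFTINT_* level
* into a 4-bit nibble indexed by that level, so SOFTINT2IPL() is a
* shift and mask instead of a lookup table.
*/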
77 1.2 matt #define SOFTINT2IPL_MAP \
78 1.2 matt ((IPL_SOFTSERIAL << (4*SOFTINT_SERIAL)) \
79 1.2 matt |(IPL_SOFTNET << (4*SOFTINT_NET )) \
80 1.2 matt |(IPL_SOFTBIO << (4*SOFTINT_BIO )) \
81 1.2 matt |(IPL_SOFTCLOCK << (4*SOFTINT_CLOCK )))
82 1.2 matt #define SOFTINT2IPL(si_level) ((SOFTINT2IPL_MAP >> (4 * (si_level))) & 0x0f)
83 1.2 matt
84 1.2 matt struct e500_intr_irq_info {
85 1.2 matt bus_addr_t irq_vpr;
86 1.2 matt bus_addr_t irq_dr;
87 1.2 matt u_int irq_vector;
88 1.2 matt };
89 1.2 matt
90 1.2 matt struct intr_source {
91 1.2 matt int (*is_func)(void *);
92 1.2 matt void *is_arg;
93 1.2 matt int8_t is_ipl;
94 1.2 matt uint8_t is_ist;
95 1.2 matt uint8_t is_irq;
96 1.2 matt bus_size_t is_vpr;
97 1.2 matt bus_size_t is_dr;
98 1.2 matt };
99 1.2 matt
100 1.2 matt #define INTR_SOURCE_INITIALIZER \
101 1.2 matt { .is_func = e500_intr_spurious, .is_arg = NULL, \
102 1.2 matt .is_irq = -1, .is_ipl = IPL_NONE, .is_ist = IST_NONE, }
103 1.2 matt
104 1.2 matt struct e500_intr_name {
105 1.2 matt uint8_t in_irq;
106 1.2 matt const char in_name[15];
107 1.2 matt };
108 1.2 matt
109 1.2 matt static const struct e500_intr_name e500_onchip_intr_names[] = {
110 1.2 matt { ISOURCE_L2, "l2" },
111 1.2 matt { ISOURCE_ECM, "ecm" },
112 1.2 matt { ISOURCE_DDR, "ddr" },
113 1.2 matt { ISOURCE_LBC, "lbc" },
114 1.2 matt { ISOURCE_DMA_CHAN1, "dma-chan1" },
115 1.2 matt { ISOURCE_DMA_CHAN2, "dma-chan2" },
116 1.2 matt { ISOURCE_DMA_CHAN3, "dma-chan3" },
117 1.2 matt { ISOURCE_DMA_CHAN4, "dma-chan4" },
118 1.2 matt { ISOURCE_PCI1, "pci1" },
119 1.2 matt { ISOURCE_PCIEX2, "pcie2" },
120 1.2 matt { ISOURCE_PCIEX , "pcie1" },
121 1.2 matt { ISOURCE_PCIEX3, "pcie3" },
122 1.3 matt { ISOURCE_USB1, "usb1" },
123 1.2 matt { ISOURCE_ETSEC1_TX, "etsec1-tx" },
124 1.2 matt { ISOURCE_ETSEC1_RX, "etsec1-rx" },
125 1.2 matt { ISOURCE_ETSEC3_TX, "etsec3-tx" },
126 1.2 matt { ISOURCE_ETSEC3_RX, "etsec3-rx" },
127 1.2 matt { ISOURCE_ETSEC3_ERR, "etsec3-err" },
128 1.2 matt { ISOURCE_ETSEC1_ERR, "etsec1-err" },
129 1.2 matt { ISOURCE_ETSEC2_TX, "etsec2-tx" },
130 1.2 matt { ISOURCE_ETSEC2_RX, "etsec2-rx" },
131 1.2 matt { ISOURCE_ETSEC4_TX, "etsec4-tx" },
132 1.2 matt { ISOURCE_ETSEC4_RX, "etsec4-rx" },
133 1.2 matt { ISOURCE_ETSEC4_ERR, "etsec4-err" },
134 1.2 matt { ISOURCE_ETSEC2_ERR, "etsec2-err" },
135 1.2 matt { ISOURCE_DUART, "duart" },
136 1.2 matt { ISOURCE_I2C, "i2c" },
137 1.2 matt { ISOURCE_PERFMON, "perfmon" },
138 1.2 matt { ISOURCE_SECURITY1, "sec1" },
139 1.3 matt { ISOURCE_GPIO, "gpio" },
140 1.2 matt { ISOURCE_SRIO_EWPU, "srio-ewpu" },
141 1.2 matt { ISOURCE_SRIO_ODBELL, "srio-odbell" },
142 1.2 matt { ISOURCE_SRIO_IDBELL, "srio-idbell" },
143 1.2 matt { ISOURCE_SRIO_OMU1, "srio-omu1" },
144 1.2 matt { ISOURCE_SRIO_IMU1, "srio-imu1" },
145 1.2 matt { ISOURCE_SRIO_OMU2, "srio-omu2" },
146 1.7 matt { ISOURCE_SRIO_IMU2, "srio-imu2" },
147 1.2 matt { ISOURCE_SECURITY2, "sec2" },
148 1.2 matt { ISOURCE_SPI, "spi" },
149 1.2 matt { ISOURCE_ETSEC1_PTP, "etsec1-ptp" },
150 1.3 matt { ISOURCE_ETSEC2_PTP, "etsec2-ptp" },
151 1.2 matt { ISOURCE_ETSEC3_PTP, "etsec3-ptp" },
152 1.3 matt { ISOURCE_ETSEC4_PTP, "etsec4-ptp" },
153 1.2 matt { ISOURCE_ESDHC, "esdhc" },
154 1.2 matt { 0, "" },
155 1.2 matt };
156 1.2 matt
157 1.3 matt const struct e500_intr_name default_external_intr_names[] = {
158 1.2 matt { 0, "" },
159 1.2 matt };
160 1.2 matt
161 1.2 matt static const struct e500_intr_name e500_msigroup_intr_names[] = {
162 1.2 matt { 0, "msigroup0" },
163 1.2 matt { 1, "msigroup1" },
164 1.2 matt { 2, "msigroup2" },
165 1.2 matt { 3, "msigroup3" },
166 1.2 matt { 4, "msigroup4" },
167 1.2 matt { 5, "msigroup5" },
168 1.2 matt { 6, "msigroup6" },
169 1.2 matt { 7, "msigroup7" },
170 1.2 matt { 0, "" },
171 1.2 matt };
172 1.2 matt
173 1.2 matt static const struct e500_intr_name e500_timer_intr_names[] = {
174 1.2 matt { 0, "timer0" },
175 1.2 matt { 1, "timer1" },
176 1.2 matt { 2, "timer2" },
177 1.2 matt { 3, "timer3" },
178 1.2 matt { 0, "" },
179 1.2 matt };
180 1.2 matt
181 1.2 matt static const struct e500_intr_name e500_ipi_intr_names[] = {
182 1.2 matt { 0, "ipi0" },
183 1.2 matt { 1, "ipi1" },
184 1.2 matt { 2, "ipi2" },
185 1.2 matt { 3, "ipi3" },
186 1.2 matt { 0, "" },
187 1.2 matt };
188 1.2 matt
189 1.2 matt static const struct e500_intr_name e500_mi_intr_names[] = {
190 1.2 matt { 0, "mi0" },
191 1.2 matt { 1, "mi1" },
192 1.2 matt { 2, "mi2" },
193 1.2 matt { 3, "mi3" },
194 1.2 matt { 0, "" },
195 1.2 matt };
196 1.2 matt
197 1.2 matt struct e500_intr_info {
198 1.2 matt u_int ii_external_sources;
199 1.2 matt uint32_t ii_onchip_bitmap[2];
200 1.2 matt u_int ii_onchip_sources;
201 1.2 matt u_int ii_msigroup_sources;
202 1.2 matt u_int ii_ipi_sources; /* per-cpu */
203 1.2 matt u_int ii_timer_sources; /* per-cpu */
204 1.2 matt u_int ii_mi_sources; /* per-cpu */
205 1.2 matt u_int ii_percpu_sources;
206 1.2 matt const struct e500_intr_name *ii_external_intr_names;
207 1.2 matt const struct e500_intr_name *ii_onchip_intr_names;
208 1.2 matt u_int8_t ii_ist_vectors[IST_MAX+1];
209 1.2 matt };
210 1.2 matt
211 1.3 matt static kmutex_t e500_intr_lock __cacheline_aligned;
212 1.2 matt static struct e500_intr_info e500_intr_info;
213 1.2 matt
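/*
* INTR_INFO_DECL builds a per-chip e500_intr_info.  The ii_ist_vectors
* table lays out the interrupt vector space in the fixed order
* external, on-chip, MSI group, then the per-cpu timer, IPI and
* message sources, each entry holding the first vector of its class.
*/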
214 1.3 matt #define INTR_INFO_DECL(lc_chip, UC_CHIP) \
215 1.3 matt static const struct e500_intr_info lc_chip##_intr_info = { \
216 1.3 matt .ii_external_sources = UC_CHIP ## _EXTERNALSOURCES, \
217 1.3 matt .ii_onchip_bitmap = UC_CHIP ## _ONCHIPBITMAP, \
218 1.3 matt .ii_onchip_sources = UC_CHIP ## _ONCHIPSOURCES, \
219 1.3 matt .ii_msigroup_sources = UC_CHIP ## _MSIGROUPSOURCES, \
220 1.3 matt .ii_timer_sources = UC_CHIP ## _TIMERSOURCES, \
221 1.3 matt .ii_ipi_sources = UC_CHIP ## _IPISOURCES, \
222 1.3 matt .ii_mi_sources = UC_CHIP ## _MISOURCES, \
223 1.3 matt .ii_percpu_sources = UC_CHIP ## _TIMERSOURCES \
224 1.3 matt + UC_CHIP ## _IPISOURCES + UC_CHIP ## _MISOURCES, \
225 1.3 matt .ii_external_intr_names = lc_chip ## _external_intr_names, \
226 1.3 matt .ii_onchip_intr_names = lc_chip ## _onchip_intr_names, \
227 1.3 matt .ii_ist_vectors = { \
228 1.3 matt [IST_NONE] = ~0, \
229 1.3 matt [IST_EDGE] = 0, \
230 1.3 matt [IST_LEVEL_LOW] = 0, \
231 1.3 matt [IST_LEVEL_HIGH] = 0, \
232 1.3 matt [IST_ONCHIP] = UC_CHIP ## _EXTERNALSOURCES, \
233 1.3 matt [IST_MSIGROUP] = UC_CHIP ## _EXTERNALSOURCES \
234 1.3 matt + UC_CHIP ## _ONCHIPSOURCES, \
235 1.3 matt [IST_TIMER] = UC_CHIP ## _EXTERNALSOURCES \
236 1.3 matt + UC_CHIP ## _ONCHIPSOURCES \
237 1.3 matt + UC_CHIP ## _MSIGROUPSOURCES, \
238 1.3 matt [IST_IPI] = UC_CHIP ## _EXTERNALSOURCES \
239 1.3 matt + UC_CHIP ## _ONCHIPSOURCES \
240 1.3 matt + UC_CHIP ## _MSIGROUPSOURCES \
241 1.3 matt + UC_CHIP ## _TIMERSOURCES, \
242 1.3 matt [IST_MI] = UC_CHIP ## _EXTERNALSOURCES \
243 1.3 matt + UC_CHIP ## _ONCHIPSOURCES \
244 1.3 matt + UC_CHIP ## _MSIGROUPSOURCES \
245 1.3 matt + UC_CHIP ## _TIMERSOURCES \
246 1.3 matt + UC_CHIP ## _IPISOURCES, \
247 1.3 matt [IST_MAX] = UC_CHIP ## _EXTERNALSOURCES \
248 1.3 matt + UC_CHIP ## _ONCHIPSOURCES \
249 1.3 matt + UC_CHIP ## _MSIGROUPSOURCES \
250 1.3 matt + UC_CHIP ## _TIMERSOURCES \
251 1.3 matt + UC_CHIP ## _IPISOURCES \
252 1.3 matt + UC_CHIP ## _MISOURCES, \
253 1.3 matt }, \
254 1.3 matt }
255 1.3 matt
256 1.3 matt #ifdef MPC8536
257 1.3 matt #define mpc8536_external_intr_names default_external_intr_names
258 1.3 matt const struct e500_intr_name mpc8536_onchip_intr_names[] = {
259 1.3 matt { ISOURCE_SATA2, "sata2" },
260 1.3 matt { ISOURCE_USB2, "usb2" },
261 1.3 matt { ISOURCE_USB3, "usb3" },
262 1.3 matt { ISOURCE_SATA1, "sata1" },
263 1.3 matt { 0, "" },
264 1.3 matt };
265 1.3 matt
266 1.3 matt INTR_INFO_DECL(mpc8536, MPC8536);
267 1.3 matt #endif
268 1.3 matt
269 1.3 matt #ifdef MPC8544
270 1.3 matt #define mpc8544_external_intr_names default_external_intr_names
271 1.3 matt const struct e500_intr_name mpc8544_onchip_intr_names[] = {
272 1.3 matt { 0, "" },
273 1.3 matt };
274 1.3 matt
275 1.3 matt INTR_INFO_DECL(mpc8544, MPC8544);
276 1.3 matt #endif
277 1.3 matt #ifdef MPC8548
278 1.3 matt #define mpc8548_external_intr_names default_external_intr_names
279 1.3 matt const struct e500_intr_name mpc8548_onchip_intr_names[] = {
280 1.3 matt { ISOURCE_PCI1, "pci1" },
281 1.3 matt { ISOURCE_PCI2, "pci2" },
282 1.3 matt { 0, "" },
283 1.2 matt };
284 1.2 matt
285 1.3 matt INTR_INFO_DECL(mpc8548, MPC8548);
286 1.3 matt #endif
287 1.3 matt #ifdef MPC8555
288 1.3 matt #define mpc8555_external_intr_names default_external_intr_names
289 1.3 matt const struct e500_intr_name mpc8555_onchip_intr_names[] = {
290 1.3 matt { ISOURCE_PCI2, "pci2" },
291 1.3 matt { ISOURCE_CPM, "CPM" },
292 1.3 matt { 0, "" },
293 1.3 matt };
294 1.3 matt
295 1.3 matt INTR_INFO_DECL(mpc8555, MPC8555);
296 1.3 matt #endif
297 1.3 matt #ifdef MPC8568
298 1.3 matt #define mpc8568_external_intr_names default_external_intr_names
299 1.3 matt const struct e500_intr_name mpc8568_onchip_intr_names[] = {
300 1.3 matt { ISOURCE_QEB_LOW, "QEB low" },
301 1.3 matt { ISOURCE_QEB_PORT, "QEB port" },
302 1.3 matt { ISOURCE_QEB_IECC, "QEB iram ecc" },
303 1.3 matt { ISOURCE_QEB_MUECC, "QEB ram ecc" },
304 1.3 matt { ISOURCE_TLU1, "tlu1" },
305 1.3 matt { ISOURCE_QEB_HIGH, "QEB high" },
306 1.3 matt { 0, "" },
307 1.3 matt };
308 1.3 matt
309 1.3 matt INTR_INFO_DECL(mpc8568, MPC8568);
310 1.3 matt #endif
311 1.3 matt #ifdef MPC8572
312 1.3 matt #define mpc8572_external_intr_names default_external_intr_names
313 1.3 matt const struct e500_intr_name mpc8572_onchip_intr_names[] = {
314 1.3 matt { ISOURCE_PCIEX3_MPC8572, "pcie3" },
315 1.3 matt { ISOURCE_FEC, "fec" },
316 1.3 matt { ISOURCE_PME_GENERAL, "pme" },
317 1.3 matt { ISOURCE_TLU1, "tlu1" },
318 1.3 matt { ISOURCE_TLU2, "tlu2" },
319 1.3 matt { ISOURCE_PME_CHAN1, "pme-chan1" },
320 1.3 matt { ISOURCE_PME_CHAN2, "pme-chan2" },
321 1.3 matt { ISOURCE_PME_CHAN3, "pme-chan3" },
322 1.3 matt { ISOURCE_PME_CHAN4, "pme-chan4" },
323 1.3 matt { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
324 1.3 matt { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
325 1.3 matt { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
326 1.3 matt { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
327 1.3 matt { 0, "" },
328 1.2 matt };
329 1.2 matt
330 1.3 matt INTR_INFO_DECL(mpc8572, MPC8572);
331 1.3 matt #endif
332 1.3 matt #ifdef P2020
333 1.3 matt #define p20x0_external_intr_names default_external_intr_names
334 1.3 matt const struct e500_intr_name p20x0_onchip_intr_names[] = {
335 1.3 matt { ISOURCE_PCIEX3_MPC8572, "pcie3" },
336 1.3 matt { ISOURCE_DMA2_CHAN1, "dma2-chan1" },
337 1.3 matt { ISOURCE_DMA2_CHAN2, "dma2-chan2" },
338 1.3 matt { ISOURCE_DMA2_CHAN3, "dma2-chan3" },
339 1.3 matt { ISOURCE_DMA2_CHAN4, "dma2-chan4" },
340 1.3 matt { 0, "" },
341 1.2 matt };
342 1.2 matt
343 1.3 matt INTR_INFO_DECL(p20x0, P20x0);
344 1.3 matt #endif
345 1.3 matt
346 1.2 matt static const char ist_names[][12] = {
347 1.2 matt [IST_NONE] = "none",
348 1.2 matt [IST_EDGE] = "edge",
349 1.2 matt [IST_LEVEL_LOW] = "level-",
350 1.2 matt [IST_LEVEL_HIGH] = "level+",
351 1.2 matt [IST_MSI] = "msi",
352 1.2 matt [IST_ONCHIP] = "onchip",
353 1.2 matt [IST_MSIGROUP] = "msigroup",
354 1.2 matt [IST_TIMER] = "timer",
355 1.2 matt [IST_IPI] = "ipi",
356 1.2 matt [IST_MI] = "msgint",
357 1.2 matt };
358 1.2 matt
359 1.2 matt static struct intr_source *e500_intr_sources;
360 1.2 matt static const struct intr_source *e500_intr_last_source;
361 1.2 matt
362 1.2 matt static void *e500_intr_establish(int, int, int, int (*)(void *), void *);
363 1.2 matt static void e500_intr_disestablish(void *);
364 1.8 matt static void e500_intr_cpu_attach(struct cpu_info *ci);
365 1.8 matt static void e500_intr_cpu_hatch(struct cpu_info *ci);
366 1.8 matt static void e500_intr_cpu_send_ipi(cpuid_t, uintptr_t);
367 1.2 matt static void e500_intr_init(void);
368 1.2 matt static const char *e500_intr_string(int, int);
369 1.2 matt static void e500_critintr(struct trapframe *tf);
370 1.2 matt static void e500_decrintr(struct trapframe *tf);
371 1.2 matt static void e500_extintr(struct trapframe *tf);
372 1.2 matt static void e500_fitintr(struct trapframe *tf);
373 1.2 matt static void e500_wdogintr(struct trapframe *tf);
374 1.2 matt static void e500_spl0(void);
375 1.2 matt static int e500_splraise(int);
376 1.2 matt static void e500_splx(int);
377 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
378 1.2 matt static void e500_softint_init_md(lwp_t *l, u_int si_level, uintptr_t *machdep_p);
379 1.2 matt static void e500_softint_trigger(uintptr_t machdep);
380 1.2 matt #endif
381 1.2 matt
382 1.2 matt const struct intrsw e500_intrsw = {
383 1.2 matt .intrsw_establish = e500_intr_establish,
384 1.2 matt .intrsw_disestablish = e500_intr_disestablish,
385 1.2 matt .intrsw_init = e500_intr_init,
386 1.8 matt .intrsw_cpu_attach = e500_intr_cpu_attach,
387 1.8 matt .intrsw_cpu_hatch = e500_intr_cpu_hatch,
388 1.8 matt .intrsw_cpu_send_ipi = e500_intr_cpu_send_ipi,
389 1.2 matt .intrsw_string = e500_intr_string,
390 1.2 matt
391 1.2 matt .intrsw_critintr = e500_critintr,
392 1.2 matt .intrsw_decrintr = e500_decrintr,
393 1.2 matt .intrsw_extintr = e500_extintr,
394 1.2 matt .intrsw_fitintr = e500_fitintr,
395 1.2 matt .intrsw_wdogintr = e500_wdogintr,
396 1.2 matt
397 1.2 matt .intrsw_splraise = e500_splraise,
398 1.2 matt .intrsw_splx = e500_splx,
399 1.2 matt .intrsw_spl0 = e500_spl0,
400 1.2 matt
401 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
402 1.2 matt .intrsw_softint_init_md = e500_softint_init_md,
403 1.2 matt .intrsw_softint_trigger = e500_softint_trigger,
404 1.2 matt #endif
405 1.2 matt };
406 1.2 matt
407 1.2 matt static inline uint32_t
408 1.2 matt openpic_read(struct cpu_softc *cpu, bus_size_t offset)
409 1.2 matt {
410 1.2 matt
411 1.2 matt return bus_space_read_4(cpu->cpu_bst, cpu->cpu_bsh,
412 1.2 matt OPENPIC_BASE + offset);
413 1.2 matt }
414 1.2 matt
415 1.2 matt static inline void
416 1.2 matt openpic_write(struct cpu_softc *cpu, bus_size_t offset, uint32_t val)
417 1.2 matt {
418 1.2 matt
419 1.2 matt bus_space_write_4(cpu->cpu_bst, cpu->cpu_bsh,
420 1.2 matt OPENPIC_BASE + offset, val);
421 1.2 matt }
422 1.2 matt
423 1.2 matt static const char *
424 1.2 matt e500_intr_external_name_lookup(int irq)
425 1.2 matt {
426 1.2 matt prop_array_t extirqs = board_info_get_object("external-irqs");
427 1.2 matt prop_string_t irqname = prop_array_get(extirqs, irq);
428 1.2 matt KASSERT(irqname != NULL);
429 1.2 matt KASSERT(prop_object_type(irqname) == PROP_TYPE_STRING);
430 1.2 matt
431 1.2 matt return prop_string_cstring_nocopy(irqname);
432 1.2 matt }
433 1.2 matt
434 1.2 matt static const char *
435 1.2 matt e500_intr_name_lookup(const struct e500_intr_name *names, int irq)
436 1.2 matt {
437 1.2 matt for (; names->in_name[0] != '\0'; names++) {
438 1.2 matt if (names->in_irq == irq)
439 1.2 matt return names->in_name;
440 1.2 matt }
441 1.2 matt
442 1.2 matt return NULL;
443 1.2 matt }
444 1.2 matt
445 1.2 matt static const char *
446 1.2 matt e500_intr_onchip_name_lookup(int irq)
447 1.2 matt {
448 1.2 matt const char *name;
449 1.2 matt
450 1.5 matt name = e500_intr_name_lookup(e500_intr_info.ii_onchip_intr_names, irq);
451 1.5 matt if (name == NULL)
452 1.5 matt name = e500_intr_name_lookup(e500_onchip_intr_names, irq);
453 1.2 matt
454 1.5 matt return name;
455 1.2 matt }
456 1.2 matt
457 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
458 1.2 matt static inline void
459 1.2 matt e500_softint_deliver(struct cpu_info *ci, struct cpu_softc *cpu,
460 1.2 matt int ipl, int si_level)
461 1.2 matt {
462 1.2 matt KASSERT(ci->ci_data.cpu_softints & (1 << ipl));
463 1.2 matt ci->ci_data.cpu_softints ^= 1 << ipl;
464 1.2 matt softint_fast_dispatch(cpu->cpu_softlwps[si_level], ipl);
465 1.2 matt KASSERT(cpu->cpu_softlwps[si_level]->l_ctxswtch == 0);
466 1.2 matt KASSERTMSG(ci->ci_cpl == IPL_HIGH,
467 1.2 matt ("%s: cpl (%d) != HIGH", __func__, ci->ci_cpl));
468 1.2 matt }
469 1.2 matt
470 1.2 matt static inline void
471 1.8 matt e500_softint(struct cpu_info *ci, struct cpu_softc *cpu, int old_ipl,
472 1.8 matt vaddr_t pc)
473 1.2 matt {
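/*
* Compute the set of soft IPLs above old_ipl: the soft IPLs are
* contiguous, so shifting the mask left by old_ipl and re-masking
* leaves only the higher levels.  Then dispatch pending softints from
* the highest level down until none remain.
*/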
474 1.2 matt const u_int softint_mask = (IPL_SOFTMASK << old_ipl) & IPL_SOFTMASK;
475 1.2 matt u_int softints;
476 1.2 matt
477 1.2 matt KASSERT(ci->ci_mtx_count == 0);
478 1.2 matt KASSERT(ci->ci_cpl == IPL_HIGH);
479 1.2 matt while ((softints = (ci->ci_data.cpu_softints & softint_mask)) != 0) {
480 1.2 matt KASSERT(old_ipl < IPL_SOFTSERIAL);
481 1.2 matt if (softints & (1 << IPL_SOFTSERIAL)) {
482 1.2 matt e500_softint_deliver(ci, cpu, IPL_SOFTSERIAL,
483 1.2 matt SOFTINT_SERIAL);
484 1.2 matt continue;
485 1.2 matt }
486 1.2 matt KASSERT(old_ipl < IPL_SOFTNET);
487 1.2 matt if (softints & (1 << IPL_SOFTNET)) {
488 1.2 matt e500_softint_deliver(ci, cpu, IPL_SOFTNET,
489 1.2 matt SOFTINT_NET);
490 1.2 matt continue;
491 1.2 matt }
492 1.2 matt KASSERT(old_ipl < IPL_SOFTBIO);
493 1.2 matt if (softints & (1 << IPL_SOFTBIO)) {
494 1.2 matt e500_softint_deliver(ci, cpu, IPL_SOFTBIO,
495 1.2 matt SOFTINT_BIO);
496 1.2 matt continue;
497 1.2 matt }
498 1.2 matt KASSERT(old_ipl < IPL_SOFTCLOCK);
499 1.2 matt if (softints & (1 << IPL_SOFTCLOCK)) {
500 1.2 matt e500_softint_deliver(ci, cpu, IPL_SOFTCLOCK,
501 1.2 matt SOFTINT_CLOCK);
502 1.2 matt continue;
503 1.2 matt }
504 1.8 matt #ifdef __HAVE_PREEMPTION
505 1.8 matt KASSERT(old_ipl == IPL_NONE);
506 1.8 matt if (softints & (1 << IPL_NONE)) {
507 1.8 matt ci->ci_data.cpu_softints ^= (1 << IPL_NONE);
508 1.8 matt kpreempt(pc);
509 1.8 matt }
510 1.8 matt #endif
511 1.2 matt }
512 1.2 matt }
513 1.2 matt #endif /* __HAVE_FAST_SOFTINTS */
514 1.2 matt
515 1.2 matt static inline void
516 1.2 matt e500_splset(struct cpu_info *ci, int ipl)
517 1.2 matt {
518 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
519 1.2 matt //KASSERT(!cpu_intr_p() || ipl >= IPL_VM);
520 1.2 matt KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE);
521 1.2 matt #if 0
522 1.2 matt u_int ctpr = ipl;
523 1.2 matt KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ci->ci_cpl);
524 1.2 matt #elif 0
525 1.2 matt u_int old_ctpr = (ci->ci_cpl >= IPL_VM ? 15 : ci->ci_cpl);
526 1.2 matt u_int ctpr = (ipl >= IPL_VM ? 15 : ipl);
527 1.2 matt KASSERT(openpic_read(cpu, OPENPIC_CTPR) == old_ctpr);
528 1.2 matt #else
529 1.6 dyoung #ifdef DIAGNOSTIC
530 1.2 matt u_int old_ctpr = IPL2CTPR(ci->ci_cpl);
531 1.6 dyoung #endif
532 1.2 matt u_int ctpr = IPL2CTPR(ipl);
533 1.2 matt KASSERT(openpic_read(cpu, OPENPIC_CTPR) == old_ctpr);
534 1.2 matt #endif
535 1.2 matt openpic_write(cpu, OPENPIC_CTPR, ctpr);
536 1.2 matt KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ctpr);
537 1.2 matt ci->ci_cpl = ipl;
538 1.2 matt }
539 1.2 matt
540 1.2 matt static void
541 1.2 matt e500_spl0(void)
542 1.2 matt {
543 1.2 matt struct cpu_info * const ci = curcpu();
544 1.2 matt
545 1.2 matt wrtee(0);
546 1.2 matt
547 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
548 1.2 matt if (__predict_false(ci->ci_data.cpu_softints != 0)) {
549 1.2 matt e500_splset(ci, IPL_HIGH);
550 1.8 matt e500_softint(ci, ci->ci_softc, IPL_NONE,
551 1.8 matt (vaddr_t)__builtin_return_address(0));
552 1.2 matt }
553 1.2 matt #endif /* __HAVE_FAST_SOFTINTS */
554 1.2 matt e500_splset(ci, IPL_NONE);
555 1.2 matt
556 1.2 matt wrtee(PSL_EE);
557 1.2 matt }
558 1.2 matt
559 1.2 matt static void
560 1.2 matt e500_splx(int ipl)
561 1.2 matt {
562 1.2 matt struct cpu_info * const ci = curcpu();
563 1.2 matt const int old_ipl = ci->ci_cpl;
564 1.2 matt
565 1.2 matt KASSERT(mfmsr() & PSL_CE);
566 1.2 matt
567 1.2 matt if (ipl == old_ipl)
568 1.2 matt return;
569 1.2 matt
570 1.2 matt if (__predict_false(ipl > old_ipl)) {
571 1.2 matt printf("%s: %p: cpl=%u: ignoring splx(%u) to raise ipl\n",
572 1.2 matt __func__, __builtin_return_address(0), old_ipl, ipl);
573 1.2 matt if (old_ipl == IPL_NONE)
574 1.2 matt Debugger();
575 1.2 matt }
576 1.2 matt
577 1.2 matt // const
578 1.2 matt register_t msr = wrtee(0);
579 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
580 1.2 matt const u_int softints = (ci->ci_data.cpu_softints << ipl) & IPL_SOFTMASK;
581 1.2 matt if (__predict_false(softints != 0)) {
582 1.2 matt e500_splset(ci, IPL_HIGH);
583 1.8 matt e500_softint(ci, ci->ci_softc, ipl,
584 1.8 matt (vaddr_t)__builtin_return_address(0));
585 1.2 matt }
586 1.2 matt #endif /* __HAVE_FAST_SOFTINTS */
587 1.2 matt e500_splset(ci, ipl);
588 1.2 matt #if 1
589 1.2 matt if (ipl < IPL_VM && old_ipl >= IPL_VM)
590 1.2 matt msr = PSL_EE;
591 1.2 matt #endif
592 1.2 matt wrtee(msr);
593 1.2 matt }
594 1.2 matt
595 1.2 matt static int
596 1.2 matt e500_splraise(int ipl)
597 1.2 matt {
598 1.2 matt struct cpu_info * const ci = curcpu();
599 1.2 matt const int old_ipl = ci->ci_cpl;
600 1.2 matt
601 1.2 matt KASSERT(mfmsr() & PSL_CE);
602 1.2 matt
603 1.2 matt if (old_ipl < ipl) {
604 1.2 matt //const
605 1.2 matt register_t msr = wrtee(0);
606 1.2 matt e500_splset(ci, ipl);
607 1.2 matt #if 1
608 1.2 matt if (old_ipl < IPL_VM && ipl >= IPL_VM)
609 1.2 matt msr = 0;
610 1.2 matt #endif
611 1.2 matt wrtee(msr);
612 1.2 matt } else if (ipl == IPL_NONE) {
613 1.2 matt panic("%s: %p: cpl=%u: attempt to splraise(IPL_NONE)",
614 1.2 matt __func__, __builtin_return_address(0), old_ipl);
615 1.2 matt #if 0
616 1.2 matt } else if (old_ipl > ipl) {
617 1.2 matt printf("%s: %p: cpl=%u: ignoring splraise(%u) to lower ipl\n",
618 1.2 matt __func__, __builtin_return_address(0), old_ipl, ipl);
619 1.2 matt #endif
620 1.2 matt }
621 1.2 matt
622 1.2 matt return old_ipl;
623 1.2 matt }
624 1.2 matt
625 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
626 1.2 matt static void
627 1.2 matt e500_softint_init_md(lwp_t *l, u_int si_level, uintptr_t *machdep_p)
628 1.2 matt {
629 1.2 matt struct cpu_info * const ci = l->l_cpu;
630 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
631 1.2 matt
632 1.2 matt *machdep_p = 1 << SOFTINT2IPL(si_level);
633 1.2 matt KASSERT(*machdep_p & IPL_SOFTMASK);
634 1.2 matt cpu->cpu_softlwps[si_level] = l;
635 1.2 matt }
636 1.2 matt
637 1.2 matt static void
638 1.2 matt e500_softint_trigger(uintptr_t machdep)
639 1.2 matt {
640 1.2 matt struct cpu_info * const ci = curcpu();
641 1.2 matt
642 1.2 matt atomic_or_uint(&ci->ci_data.cpu_softints, machdep);
643 1.2 matt }
644 1.2 matt #endif /* __HAVE_FAST_SOFTINTS */
645 1.2 matt
646 1.2 matt static int
647 1.2 matt e500_intr_spurious(void *arg)
648 1.2 matt {
649 1.2 matt return 0;
650 1.2 matt }
651 1.2 matt
652 1.2 matt static bool
653 1.2 matt e500_intr_irq_info_get(struct cpu_info *ci, u_int irq, int ipl, int ist,
654 1.2 matt struct e500_intr_irq_info *ii)
655 1.2 matt {
656 1.2 matt const struct e500_intr_info * const info = &e500_intr_info;
657 1.2 matt bool ok;
658 1.2 matt
659 1.2 matt #if DEBUG > 2
660 1.2 matt printf("%s(%p,irq=%u,ipl=%u,ist=%u,%p)\n", __func__, ci, irq, ipl, ist, ii);
661 1.2 matt #endif
662 1.2 matt
663 1.2 matt if (ipl < IPL_VM || ipl > IPL_HIGH) {
664 1.2 matt #if DEBUG > 2
665 1.2 matt printf("%s:%d ipl=%u\n", __func__, __LINE__, ipl);
666 1.2 matt #endif
667 1.2 matt return false;
668 1.2 matt }
669 1.2 matt
670 1.2 matt if (ist <= IST_NONE || ist >= IST_MAX) {
671 1.2 matt #if DEBUG > 2
672 1.2 matt printf("%s:%d ist=%u\n", __func__, __LINE__, ist);
673 1.2 matt #endif
674 1.2 matt return false;
675 1.2 matt }
676 1.2 matt
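/*
* Convert (ist, irq) into a flat vector number: each IST class starts
* at the offset recorded in ii_ist_vectors[], and the per-cpu timer
* and message classes (IPIs use the same vectors on every cpu) are
* further offset by this cpu's block of per-cpu vectors.
*/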
677 1.2 matt ii->irq_vector = irq + info->ii_ist_vectors[ist];
678 1.8 matt if (IST_PERCPU_P(ist) && ist != IST_IPI)
679 1.2 matt ii->irq_vector += ci->ci_cpuid * info->ii_percpu_sources;
680 1.2 matt
681 1.2 matt switch (ist) {
682 1.2 matt default:
683 1.2 matt ii->irq_vpr = OPENPIC_EIVPR(irq);
684 1.2 matt ii->irq_dr = OPENPIC_EIDR(irq);
685 1.2 matt ok = irq < info->ii_external_sources
686 1.2 matt && (ist == IST_EDGE
687 1.2 matt || ist == IST_LEVEL_LOW
688 1.2 matt || ist == IST_LEVEL_HIGH);
689 1.2 matt break;
690 1.2 matt case IST_ONCHIP:
691 1.2 matt ii->irq_vpr = OPENPIC_IIVPR(irq);
692 1.2 matt ii->irq_dr = OPENPIC_IIDR(irq);
693 1.2 matt ok = irq < 32 * __arraycount(info->ii_onchip_bitmap);
694 1.2 matt #if DEBUG > 2
695 1.2 matt printf("%s: irq=%u: ok=%u\n", __func__, irq, ok);
696 1.2 matt #endif
697 1.2 matt ok = ok && (info->ii_onchip_bitmap[irq/32] & (1 << (irq & 31)));
698 1.2 matt #if DEBUG > 2
699 1.2 matt printf("%s: %08x%08x -> %08x%08x: ok=%u\n", __func__,
700 1.2 matt irq < 32 ? 0 : (1 << (irq - 32)), irq < 32 ? (1 << irq) : 0,
701 1.2 matt info->ii_onchip_bitmap[1], info->ii_onchip_bitmap[0],
702 1.2 matt ok);
703 1.2 matt #endif
704 1.2 matt break;
705 1.2 matt case IST_MSIGROUP:
706 1.2 matt ii->irq_vpr = OPENPIC_MSIVPR(irq);
707 1.2 matt ii->irq_dr = OPENPIC_MSIDR(irq);
708 1.2 matt ok = irq < info->ii_msigroup_sources
709 1.2 matt && ipl == IPL_VM;
710 1.2 matt break;
711 1.2 matt case IST_TIMER:
712 1.2 matt ii->irq_vpr = OPENPIC_GTVPR(ci->ci_cpuid, irq);
713 1.2 matt ii->irq_dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
714 1.2 matt ok = irq < info->ii_timer_sources;
715 1.2 matt #if DEBUG > 2
716 1.2 matt printf("%s: IST_TIMER irq=%u: ok=%u\n", __func__, irq, ok);
717 1.2 matt #endif
718 1.2 matt break;
719 1.2 matt case IST_IPI:
720 1.2 matt ii->irq_vpr = OPENPIC_IPIVPR(irq);
721 1.2 matt ii->irq_dr = OPENPIC_IPIDR(irq);
722 1.2 matt ok = irq < info->ii_ipi_sources;
723 1.2 matt break;
724 1.2 matt case IST_MI:
725 1.2 matt ii->irq_vpr = OPENPIC_MIVPR(irq);
726 1.2 matt ii->irq_dr = OPENPIC_MIDR(irq);
727 1.2 matt ok = irq < info->ii_mi_sources;
728 1.2 matt break;
729 1.2 matt }
730 1.2 matt
731 1.2 matt return ok;
732 1.2 matt }
733 1.2 matt
734 1.2 matt static const char *
735 1.2 matt e500_intr_string(int irq, int ist)
736 1.2 matt {
737 1.2 matt struct cpu_info * const ci = curcpu();
738 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
739 1.2 matt struct e500_intr_irq_info ii;
740 1.2 matt
741 1.2 matt if (!e500_intr_irq_info_get(ci, irq, IPL_VM, ist, &ii))
742 1.2 matt return NULL;
743 1.2 matt
744 1.2 matt return cpu->cpu_evcnt_intrs[ii.irq_vector].ev_name;
745 1.2 matt }
746 1.2 matt
747 1.2 matt static void *
748 1.2 matt e500_intr_cpu_establish(struct cpu_info *ci, int irq, int ipl, int ist,
749 1.2 matt int (*handler)(void *), void *arg)
750 1.2 matt {
751 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
752 1.2 matt struct e500_intr_irq_info ii;
753 1.2 matt
754 1.2 matt KASSERT(ipl >= IPL_VM && ipl <= IPL_HIGH);
755 1.2 matt KASSERT(ist > IST_NONE && ist < IST_MAX && ist != IST_MSI);
756 1.2 matt
757 1.2 matt if (!e500_intr_irq_info_get(ci, irq, ipl, ist, &ii)) {
758 1.2 matt printf("%s: e500_intr_irq_info_get(%p,%u,%u,%u,%p) failed\n",
759 1.2 matt __func__, ci, irq, ipl, ist, &ii);
760 1.2 matt return NULL;
761 1.2 matt }
762 1.2 matt
763 1.2 matt struct intr_source * const is = &e500_intr_sources[ii.irq_vector];
764 1.2 matt mutex_enter(&e500_intr_lock);
765 1.2 matt if (is->is_ipl != IPL_NONE) {
mutex_exit(&e500_intr_lock);
766 1.2 matt return NULL;
}
767 1.2 matt
768 1.2 matt is->is_func = handler;
769 1.2 matt is->is_arg = arg;
770 1.2 matt is->is_ipl = ipl;
771 1.2 matt is->is_ist = ist;
772 1.2 matt is->is_irq = irq;
773 1.2 matt is->is_vpr = ii.irq_vpr;
774 1.2 matt is->is_dr = ii.irq_dr;
775 1.2 matt
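/*
* Build the vector/priority register value: the priority field is the
* CTPR equivalent of the requested IPL, the vector field encodes
* ((vector + 1) << 4) | ipl so e500_extintr() can recover both from
* the IACK value, and the sense/polarity bits are picked from the
* interrupt type.
*/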
776 1.2 matt uint32_t vpr = VPR_PRIORITY_MAKE(IPL2CTPR(ipl))
777 1.2 matt | VPR_VECTOR_MAKE(((ii.irq_vector + 1) << 4) | ipl)
778 1.2 matt | (ist == IST_LEVEL_LOW
779 1.2 matt ? VPR_LEVEL_LOW
780 1.2 matt : (ist == IST_LEVEL_HIGH
781 1.2 matt ? VPR_LEVEL_HIGH
782 1.2 matt : (ist == IST_ONCHIP
783 1.2 matt ? VPR_P_HIGH
784 1.2 matt : 0)));
785 1.2 matt
786 1.2 matt /*
787 1.2 matt * All interrupts go to the primary except per-cpu interrupts which get
788 1.2 matt * routed to the appropriate cpu.
789 1.2 matt */
790 1.8 matt uint32_t dr = openpic_read(cpu, ii.irq_dr);
791 1.8 matt
792 1.8 matt dr |= 1 << (IST_PERCPU_P(ist) ? ci->ci_cpuid : 0);
793 1.2 matt
794 1.2 matt /*
795 1.2 matt * Update the vector/priority and destination registers keeping the
796 1.2 matt * interrupt masked.
797 1.2 matt */
798 1.2 matt const register_t msr = wrtee(0); /* disable interrupts */
799 1.2 matt openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
800 1.2 matt openpic_write(cpu, ii.irq_dr, dr);
801 1.2 matt
802 1.2 matt /*
803 1.2 matt * Now unmask the interrupt.
804 1.2 matt */
805 1.2 matt openpic_write(cpu, ii.irq_vpr, vpr);
806 1.2 matt
807 1.2 matt wrtee(msr); /* re-enable interrupts */
808 1.2 matt
809 1.2 matt mutex_exit(&e500_intr_lock);
810 1.2 matt
811 1.2 matt return is;
812 1.2 matt }
813 1.2 matt
814 1.2 matt static void *
815 1.2 matt e500_intr_establish(int irq, int ipl, int ist,
816 1.2 matt int (*handler)(void *), void *arg)
817 1.2 matt {
818 1.2 matt return e500_intr_cpu_establish(curcpu(), irq, ipl, ist, handler, arg);
819 1.2 matt }
820 1.2 matt
821 1.2 matt static void
822 1.2 matt e500_intr_disestablish(void *vis)
823 1.2 matt {
824 1.2 matt struct cpu_softc * const cpu = curcpu()->ci_softc;
825 1.2 matt struct intr_source * const is = vis;
826 1.2 matt struct e500_intr_irq_info ii;
827 1.2 matt
828 1.2 matt KASSERT(e500_intr_sources <= is);
829 1.2 matt KASSERT(is < e500_intr_last_source);
830 1.2 matt KASSERT(!cpu_intr_p());
831 1.2 matt
832 1.2 matt bool ok = e500_intr_irq_info_get(curcpu(), is->is_irq, is->is_ipl,
833 1.2 matt is->is_ist, &ii);
834 1.2 matt (void)ok; /* appease gcc */
835 1.2 matt KASSERT(ok);
836 1.2 matt KASSERT(is - e500_intr_sources == ii.irq_vector);
837 1.2 matt
838 1.2 matt mutex_enter(&e500_intr_lock);
839 1.2 matt /*
840 1.2 matt * Mask the source using the mask (MSK) bit in the vector/priority reg.
841 1.2 matt */
842 1.2 matt uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
843 1.2 matt openpic_write(cpu, ii.irq_vpr, VPR_MSK | vpr);
844 1.2 matt
845 1.2 matt /*
846 1.2 matt * Wait for the Activity (A) bit for the source to be cleared.
847 1.2 matt */
848 1.2 matt while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
849 1.2 matt ;
850 1.2 matt
851 1.2 matt /*
852 1.2 matt * Now the source can be modified.
853 1.2 matt */
854 1.2 matt openpic_write(cpu, ii.irq_dr, 0); /* stop delivery */
855 1.2 matt openpic_write(cpu, ii.irq_vpr, VPR_MSK); /* mask/reset it */
856 1.2 matt
857 1.2 matt *is = (struct intr_source)INTR_SOURCE_INITIALIZER;
858 1.2 matt
859 1.2 matt mutex_exit(&e500_intr_lock);
860 1.2 matt }
861 1.2 matt
862 1.2 matt static void
863 1.2 matt e500_critintr(struct trapframe *tf)
864 1.2 matt {
865 1.2 matt panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
866 1.2 matt }
867 1.2 matt
868 1.2 matt static void
869 1.2 matt e500_decrintr(struct trapframe *tf)
870 1.2 matt {
871 1.2 matt panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
872 1.2 matt }
873 1.2 matt
874 1.2 matt static void
875 1.2 matt e500_fitintr(struct trapframe *tf)
876 1.2 matt {
877 1.2 matt panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
878 1.2 matt }
879 1.2 matt
880 1.2 matt static void
881 1.2 matt e500_wdogintr(struct trapframe *tf)
882 1.2 matt {
883 1.2 matt mtspr(SPR_TSR, TSR_ENW|TSR_WIS);
884 1.2 matt panic("%s: tf=%p tb=%"PRId64" srr0/srr1=%#lx/%#lx", __func__, tf,
885 1.2 matt mftb(), tf->tf_srr0, tf->tf_srr1);
886 1.2 matt }
887 1.2 matt
888 1.2 matt static void
889 1.2 matt e500_extintr(struct trapframe *tf)
890 1.2 matt {
891 1.2 matt struct cpu_info * const ci = curcpu();
892 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
893 1.2 matt const int old_ipl = ci->ci_cpl;
894 1.2 matt
895 1.2 matt KASSERT(mfmsr() & PSL_CE);
896 1.2 matt
897 1.2 matt #if 0
898 1.2 matt // printf("%s(%p): idepth=%d enter\n", __func__, tf, ci->ci_idepth);
899 1.2 matt if ((register_t)tf >= (register_t)curlwp->l_addr + USPACE
900 1.2 matt || (register_t)tf < (register_t)curlwp->l_addr + NBPG) {
901 1.2 matt printf("%s(entry): pid %d.%d (%s): srr0/srr1=%#lx/%#lx: invalid tf addr %p\n",
902 1.2 matt __func__, curlwp->l_proc->p_pid, curlwp->l_lid,
903 1.2 matt curlwp->l_proc->p_comm, tf->tf_srr0, tf->tf_srr1, tf);
904 1.2 matt }
905 1.2 matt #endif
906 1.2 matt
907 1.2 matt
908 1.2 matt ci->ci_data.cpu_nintr++;
909 1.2 matt tf->tf_cf.cf_idepth = ci->ci_idepth++;
910 1.2 matt cpu->cpu_pcpls[ci->ci_idepth] = old_ipl;
911 1.2 matt #if 1
912 1.2 matt if (mfmsr() & PSL_EE)
913 1.2 matt panic("%s(%p): MSR[EE] is on (%#lx)!", __func__, tf, mfmsr());
914 1.2 matt if (old_ipl == IPL_HIGH
915 1.2 matt || IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
916 1.2 matt panic("%s(%p): old_ipl(%u) == IPL_HIGH(%u) "
917 1.2 matt "|| old_ipl + %u != OPENPIC_CTPR (%u)",
918 1.2 matt __func__, tf, old_ipl, IPL_HIGH,
919 1.2 matt 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
920 1.2 matt #else
921 1.2 matt if (old_ipl >= IPL_VM)
922 1.2 matt panic("%s(%p): old_ipl(%u) >= IPL_VM(%u) CTPR=%u",
923 1.2 matt __func__, tf, old_ipl, IPL_VM, openpic_read(cpu, OPENPIC_CTPR));
924 1.2 matt #endif
925 1.2 matt
926 1.2 matt for (;;) {
927 1.2 matt /*
928 1.2 matt * Find out the pending interrupt.
929 1.2 matt */
930 1.2 matt if (mfmsr() & PSL_EE)
931 1.2 matt panic("%s(%p): MSR[EE] turned on (%#lx)!", __func__, tf, mfmsr());
932 1.2 matt if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
933 1.2 matt panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
934 1.2 matt __func__, tf, __LINE__, old_ipl,
935 1.2 matt 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
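/*
* Acknowledge the highest-priority pending interrupt.  The value read
* from IACK is the vector programmed at establish time: the low four
* bits hold the IPL and the remaining bits hold the vector number
* plus one, with IRQ_SPURIOUS meaning nothing was pending.
*/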
936 1.2 matt const uint32_t iack = openpic_read(cpu, OPENPIC_IACK);
937 1.6 dyoung #ifdef DIAGNOSTIC
938 1.2 matt const int ipl = iack & 0xf;
939 1.6 dyoung #endif
940 1.2 matt const int irq = (iack >> 4) - 1;
941 1.2 matt #if 0
942 1.2 matt printf("%s: iack=%d ipl=%d irq=%d <%s>\n",
943 1.2 matt __func__, iack, ipl, irq,
944 1.2 matt (iack != IRQ_SPURIOUS ?
945 1.2 matt cpu->cpu_evcnt_intrs[irq].ev_name : "spurious"));
946 1.2 matt #endif
947 1.2 matt if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
948 1.2 matt panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
949 1.2 matt __func__, tf, __LINE__, old_ipl,
950 1.2 matt 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
951 1.2 matt if (iack == IRQ_SPURIOUS)
952 1.2 matt break;
953 1.2 matt
954 1.2 matt struct intr_source * const is = &e500_intr_sources[irq];
955 1.2 matt if (__predict_true(is < e500_intr_last_source)) {
956 1.2 matt /*
957 1.2 matt * Timer interrupts get their argument overridden with
958 1.2 matt * the pointer to the trapframe.
959 1.2 matt */
960 1.2 matt KASSERT(is->is_ipl == ipl);
961 1.2 matt void *arg = (is->is_ist == IST_TIMER ? tf : is->is_arg);
962 1.2 matt if (is->is_ipl <= old_ipl)
963 1.2 matt panic("%s(%p): %s (%u): is->is_ipl (%u) <= old_ipl (%u)\n",
964 1.2 matt __func__, tf,
965 1.2 matt cpu->cpu_evcnt_intrs[irq].ev_name, irq,
966 1.2 matt is->is_ipl, old_ipl);
967 1.2 matt KASSERT(is->is_ipl > old_ipl);
968 1.2 matt e500_splset(ci, is->is_ipl); /* change IPL */
969 1.2 matt if (__predict_false(is->is_func == NULL)) {
970 1.2 matt aprint_error_dev(ci->ci_dev,
971 1.2 matt "interrupt from unestablished irq %d\n",
972 1.2 matt irq);
973 1.2 matt } else {
974 1.2 matt int (*func)(void *) = is->is_func;
975 1.2 matt wrtee(PSL_EE);
976 1.2 matt int rv = (*func)(arg);
977 1.2 matt wrtee(0);
978 1.2 matt #if DEBUG > 2
979 1.2 matt printf("%s: %s handler %p(%p) returned %d\n",
980 1.2 matt __func__,
981 1.2 matt cpu->cpu_evcnt_intrs[irq].ev_name,
982 1.2 matt func, arg, rv);
983 1.2 matt #endif
984 1.2 matt if (rv == 0)
985 1.2 matt cpu->cpu_evcnt_spurious_intr.ev_count++;
986 1.2 matt }
987 1.2 matt e500_splset(ci, old_ipl); /* restore IPL */
988 1.2 matt cpu->cpu_evcnt_intrs[irq].ev_count++;
989 1.2 matt } else {
990 1.2 matt aprint_error_dev(ci->ci_dev,
991 1.2 matt "interrupt from illegal irq %d\n", irq);
992 1.2 matt cpu->cpu_evcnt_spurious_intr.ev_count++;
993 1.2 matt }
994 1.2 matt /*
995 1.2 matt * If this is a nested interrupt, simply ack it and exit
996 1.2 matt * because the loop we interrupted will complete looking
997 1.2 matt * for interrupts.
998 1.2 matt */
999 1.2 matt if (mfmsr() & PSL_EE)
1000 1.2 matt panic("%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
1001 1.2 matt if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1002 1.2 matt panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1003 1.2 matt __func__, tf, __LINE__, old_ipl,
1004 1.2 matt 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1005 1.2 matt
1006 1.2 matt openpic_write(cpu, OPENPIC_EOI, 0);
1007 1.2 matt if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1008 1.2 matt panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1009 1.2 matt __func__, tf, __LINE__, old_ipl,
1010 1.2 matt 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1011 1.2 matt if (ci->ci_idepth > 0)
1012 1.2 matt break;
1013 1.2 matt }
1014 1.2 matt
1015 1.2 matt ci->ci_idepth--;
1016 1.2 matt
1017 1.2 matt #ifdef __HAVE_FAST_SOFTINTS
1018 1.2 matt /*
1019 1.2 matt * Before exiting, deal with any softints that need to be dealt with.
1020 1.2 matt */
1021 1.2 matt const u_int softints = (ci->ci_data.cpu_softints << old_ipl) & IPL_SOFTMASK;
1022 1.2 matt if (__predict_false(softints != 0)) {
1023 1.2 matt KASSERT(old_ipl < IPL_VM);
1024 1.2 matt e500_splset(ci, IPL_HIGH); /* pop to high */
1025 1.8 matt e500_softint(ci, cpu, old_ipl, /* deal with them */
1026 1.8 matt tf->tf_srr0);
1027 1.2 matt e500_splset(ci, old_ipl); /* and drop back */
1028 1.2 matt }
1029 1.2 matt #endif /* __HAVE_FAST_SOFTINTS */
1030 1.2 matt #if 1
1031 1.2 matt KASSERT(ci->ci_cpl == old_ipl);
1032 1.2 matt #else
1033 1.2 matt e500_splset(ci, old_ipl); /* and drop back */
1034 1.2 matt #endif
1035 1.2 matt
1036 1.2 matt // printf("%s(%p): idepth=%d exit\n", __func__, tf, ci->ci_idepth);
1037 1.2 matt }
1038 1.2 matt
1039 1.2 matt static void
1040 1.2 matt e500_intr_init(void)
1041 1.2 matt {
1042 1.2 matt struct cpu_info * const ci = curcpu();
1043 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
1044 1.2 matt const uint32_t frr = openpic_read(cpu, OPENPIC_FRR);
1045 1.2 matt const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1046 1.2 matt // const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1047 1.2 matt struct intr_source *is;
1048 1.2 matt struct e500_intr_info * const ii = &e500_intr_info;
1049 1.2 matt
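/*
* Identify the SoC from the upper half of the SVR.  Bit 0x80000 only
* marks the security-engine ("E") variant of a part, so it is masked
* off and both variants match the same case below.
*/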
1050 1.4 matt const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
1051 1.3 matt switch (svr) {
1052 1.3 matt #ifdef MPC8536
1053 1.3 matt case SVR_MPC8536v1 >> 16:
1054 1.3 matt *ii = mpc8536_intr_info;
1055 1.3 matt break;
1056 1.3 matt #endif
1057 1.3 matt #ifdef MPC8544
1058 1.3 matt case SVR_MPC8544v1 >> 16:
1059 1.3 matt *ii = mpc8544_intr_info;
1060 1.3 matt break;
1061 1.3 matt #endif
1062 1.3 matt #ifdef MPC8548
1063 1.3 matt case SVR_MPC8543v1 >> 16:
1064 1.3 matt case SVR_MPC8548v1 >> 16:
1065 1.2 matt *ii = mpc8548_intr_info;
1066 1.2 matt break;
1067 1.3 matt #endif
1068 1.3 matt #ifdef MPC8555
1069 1.3 matt case SVR_MPC8541v1 >> 16:
1070 1.3 matt case SVR_MPC8555v1 >> 16:
1071 1.3 matt *ii = mpc8555_intr_info;
1072 1.3 matt break;
1073 1.3 matt #endif
1074 1.3 matt #ifdef MPC8568
1075 1.3 matt case SVR_MPC8568v1 >> 16:
1076 1.3 matt *ii = mpc8568_intr_info;
1077 1.2 matt break;
1078 1.3 matt #endif
1079 1.3 matt #ifdef MPC8572
1080 1.3 matt case SVR_MPC8572v1 >> 16:
1081 1.2 matt *ii = mpc8572_intr_info;
1082 1.2 matt break;
1083 1.3 matt #endif
1084 1.3 matt #ifdef P2020
1085 1.3 matt case SVR_P2010v2 >> 16:
1086 1.3 matt case SVR_P2020v2 >> 16:
1087 1.3 matt *ii = p20x0_intr_info;
1088 1.3 matt break;
1089 1.3 matt #endif
1090 1.2 matt default:
1091 1.3 matt panic("%s: don't know how to deal with SVR %#lx",
1092 1.3 matt __func__, mfspr(SPR_SVR));
1093 1.2 matt }
1094 1.2 matt
1095 1.2 matt /*
1096 1.2 matt * We need to be in mixed mode.
1097 1.2 matt */
1098 1.2 matt openpic_write(cpu, OPENPIC_GCR, GCR_M);
1099 1.2 matt
1100 1.2 matt /*
1101 1.2 matt * Make sure we and the openpic both agree about the current SPL level.
1102 1.2 matt */
1103 1.2 matt e500_splset(ci, ci->ci_cpl);
1104 1.2 matt
1105 1.2 matt /*
1106 1.2 matt * Allocate the required number of interrupt sources.
1107 1.2 matt */
1108 1.2 matt is = kmem_zalloc(nirq * sizeof(*is), KM_SLEEP);
1109 1.2 matt KASSERT(is);
1110 1.2 matt e500_intr_sources = is;
1111 1.2 matt e500_intr_last_source = is + nirq;
1112 1.2 matt
1113 1.2 matt /*
1114 1.2 matt * Initialize all the external interrupts as active low.
1115 1.2 matt */
1116 1.2 matt for (u_int irq = 0; irq < e500_intr_info.ii_external_sources; irq++) {
1117 1.2 matt openpic_write(cpu, OPENPIC_EIVPR(irq),
1118 1.2 matt VPR_VECTOR_MAKE(irq) | VPR_LEVEL_LOW);
1119 1.2 matt }
1120 1.2 matt }
1121 1.2 matt
1122 1.2 matt static void
1123 1.9 matt e500_idlespin(void)
1124 1.9 matt {
1125 1.9 matt KASSERTMSG(curcpu()->ci_cpl == IPL_NONE,
1126 1.9 matt ("%s: cpu%u: ci_cpl (%d) != 0", __func__, cpu_number(),
1127 1.9 matt curcpu()->ci_cpl));
1128 1.9 matt KASSERTMSG(CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)) == IPL_NONE,
1129 1.9 matt ("%s: cpu%u: CTPR (%d) != IPL_NONE", __func__, cpu_number(),
1130 1.9 matt CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR))));
1131 1.9 matt KASSERT(mfmsr() & PSL_EE);
1132 1.9 matt }
1133 1.9 matt
1134 1.9 matt static void
1135 1.8 matt e500_intr_cpu_attach(struct cpu_info *ci)
1136 1.2 matt {
1137 1.2 matt struct cpu_softc * const cpu = ci->ci_softc;
1138 1.2 matt const char * const xname = device_xname(ci->ci_dev);
1139 1.2 matt
1140 1.2 matt const u_int32_t frr = openpic_read(cpu, OPENPIC_FRR);
1141 1.2 matt const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1142 1.2 matt // const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1143 1.2 matt
1144 1.2 matt const struct e500_intr_info * const info = &e500_intr_info;
1145 1.2 matt
1146 1.2 matt cpu->cpu_clock_gtbcr = OPENPIC_GTBCR(ci->ci_cpuid, E500_CLOCK_TIMER);
1147 1.2 matt
1148 1.2 matt cpu->cpu_evcnt_intrs =
1149 1.2 matt kmem_zalloc(nirq * sizeof(cpu->cpu_evcnt_intrs[0]), KM_SLEEP);
1150 1.2 matt KASSERT(cpu->cpu_evcnt_intrs);
1151 1.2 matt
1152 1.2 matt struct evcnt *evcnt = cpu->cpu_evcnt_intrs;
1153 1.2 matt for (size_t j = 0; j < info->ii_external_sources; j++, evcnt++) {
1154 1.2 matt const char *name = e500_intr_external_name_lookup(j);
1155 1.2 matt evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, xname, name);
1156 1.2 matt }
1157 1.2 matt KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_ONCHIP]);
1158 1.2 matt for (size_t j = 0; j < info->ii_onchip_sources; j++, evcnt++) {
1159 1.5 matt if (info->ii_onchip_bitmap[j / 32] & __BIT(j & 31)) {
1160 1.5 matt const char *name = e500_intr_onchip_name_lookup(j);
1161 1.5 matt if (name != NULL) {
1162 1.5 matt evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1163 1.5 matt NULL, xname, name);
1164 1.5 matt #ifdef DIAGNOSTIC
1165 1.5 matt } else {
1166 1.5 matt printf("%s: missing evcnt for onchip irq %zu\n",
1167 1.5 matt __func__, j);
1168 1.5 matt #endif
1169 1.5 matt }
1170 1.2 matt }
1171 1.2 matt }
1172 1.2 matt
1173 1.2 matt KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_MSIGROUP]);
1174 1.2 matt for (size_t j = 0; j < info->ii_msigroup_sources; j++, evcnt++) {
1175 1.2 matt evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1176 1.2 matt NULL, xname, e500_msigroup_intr_names[j].in_name);
1177 1.2 matt }
1178 1.2 matt
1179 1.2 matt KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_TIMER]);
1180 1.2 matt evcnt += ci->ci_cpuid * info->ii_percpu_sources;
1181 1.2 matt for (size_t j = 0; j < info->ii_timer_sources; j++, evcnt++) {
1182 1.2 matt evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1183 1.2 matt NULL, xname, e500_timer_intr_names[j].in_name);
1184 1.2 matt }
1185 1.2 matt
1186 1.2 matt for (size_t j = 0; j < info->ii_ipi_sources; j++, evcnt++) {
1187 1.2 matt evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1188 1.2 matt NULL, xname, e500_ipi_intr_names[j].in_name);
1189 1.2 matt }
1190 1.2 matt
1191 1.2 matt for (size_t j = 0; j < info->ii_mi_sources; j++, evcnt++) {
1192 1.2 matt evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1193 1.2 matt NULL, xname, e500_mi_intr_names[j].in_name);
1194 1.2 matt }
1195 1.9 matt
1196 1.9 matt ci->ci_idlespin = e500_idlespin;
1197 1.8 matt }
1198 1.8 matt
1199 1.8 matt static void
1200 1.8 matt e500_intr_cpu_send_ipi(cpuid_t target, uintptr_t ipimsg)
1201 1.8 matt {
1202 1.8 matt struct cpu_info * const ci = curcpu();
1203 1.8 matt struct cpu_softc * const cpu = ci->ci_softc;
1204 1.8 matt uint32_t dstmask;
1205 1.8 matt
1206 1.8 matt if (target >= ncpu) {
1207 1.8 matt CPU_INFO_ITERATOR cii;
1208 1.8 matt struct cpu_info *dst_ci;
1209 1.8 matt
1210 1.8 matt KASSERT(target == IPI_DST_NOTME || target == IPI_DST_ALL);
1211 1.8 matt
1212 1.8 matt dstmask = 0;
1213 1.8 matt for (CPU_INFO_FOREACH(cii, dst_ci)) {
1214 1.8 matt if (target == IPI_DST_ALL || ci != dst_ci) {
1215 1.8 matt dstmask |= 1 << cpu_index(dst_ci);
1216 1.8 matt if (ipimsg)
1217 1.8 matt atomic_or_32(&dst_ci->ci_pending_ipis,
1218 1.8 matt ipimsg);
1219 1.8 matt }
1220 1.8 matt }
1221 1.8 matt } else {
1222 1.8 matt struct cpu_info * const dst_ci = cpu_lookup(target);
1223 1.8 matt KASSERT(target == cpu_index(dst_ci));
1224 1.8 matt dstmask = (1 << target);
1225 1.8 matt if (ipimsg)
1226 1.8 matt atomic_or_32(&dst_ci->ci_pending_ipis, ipimsg);
1227 1.8 matt }
1228 1.8 matt
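/*
* Writing the destination mask to IPI dispatch register 0 raises
* IPI 0 on every cpu whose bit is set; the message to act on was
* already posted to each target's ci_pending_ipis above.
*/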
1229 1.8 matt openpic_write(cpu, OPENPIC_IPIDR(0), dstmask);
1230 1.8 matt }
1231 1.8 matt
1232 1.8 matt typedef void (*ipifunc_t)(void);
1233 1.8 matt
1234 1.8 matt #ifdef __HAVE_PREEMPTION
1235 1.8 matt static void
1236 1.8 matt e500_ipi_kpreempt(void)
1237 1.8 matt {
1238 1.8 matt e500_softint_trigger(1 << IPL_NONE);
1239 1.8 matt }
1240 1.8 matt #endif
1241 1.8 matt
1242 1.8 matt static const ipifunc_t e500_ipifuncs[] = {
1243 1.8 matt [ilog2(IPI_XCALL)] = xc_ipi_handler,
1244 1.8 matt [ilog2(IPI_HALT)] = e500_ipi_halt,
1245 1.8 matt #ifdef __HAVE_PREEMPTION
1246 1.8 matt [ilog2(IPI_KPREEMPT)] = e500_ipi_kpreempt,
1247 1.8 matt #endif
1248 1.8 matt [ilog2(IPI_TLB1SYNC)] = e500_tlb1_sync,
1249 1.8 matt };
1250 1.8 matt
1251 1.8 matt static int
1252 1.8 matt e500_ipi_intr(void *v)
1253 1.8 matt {
1254 1.8 matt struct cpu_info * const ci = curcpu();
1255 1.8 matt
1256 1.8 matt ci->ci_ev_ipi.ev_count++;
1257 1.8 matt
1258 1.8 matt uint32_t pending_ipis = atomic_swap_32(&ci->ci_pending_ipis, 0);
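/*
* Scan the pending IPI messages from the most significant bit down,
* using count-leading-zeros to jump to each set bit, and invoke the
* handler registered for that message.
*/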
1259 1.8 matt for (u_int ipi = 31; pending_ipis != 0; ipi--, pending_ipis <<= 1) {
1260 1.8 matt const u_int bits = __builtin_clz(pending_ipis);
1261 1.8 matt ipi -= bits;
1262 1.8 matt pending_ipis <<= bits;
1263 1.8 matt KASSERT(e500_ipifuncs[ipi] != NULL);
1264 1.8 matt (*e500_ipifuncs[ipi])();
1265 1.8 matt }
1266 1.8 matt
1267 1.8 matt return 1;
1268 1.8 matt }
1269 1.2 matt
1270 1.8 matt static void
1271 1.8 matt e500_intr_cpu_hatch(struct cpu_info *ci)
1272 1.8 matt {
1273 1.2 matt /*
1274 1.8 matt * Establish the clock interrupt for this CPU.
1275 1.2 matt */
1276 1.2 matt if (e500_intr_cpu_establish(ci, E500_CLOCK_TIMER, IPL_CLOCK, IST_TIMER,
1277 1.2 matt e500_clock_intr, NULL) == NULL)
1278 1.2 matt panic("%s: failed to establish clock interrupt!", __func__);
1279 1.2 matt
1280 1.2 matt /*
1281 1.8 matt * Establish the IPI interrupts for this CPU.
1282 1.8 matt */
1283 1.8 matt if (e500_intr_cpu_establish(ci, 0, IPL_VM, IST_IPI, e500_ipi_intr,
1284 1.8 matt NULL) == NULL)
1285 1.8 matt panic("%s: failed to establish ipi interrupt!", __func__);
1286 1.8 matt
1287 1.8 matt /*
1288 1.2 matt * Enable watchdog interrupts.
1289 1.2 matt */
1290 1.2 matt uint32_t tcr = mfspr(SPR_TCR);
1291 1.2 matt tcr |= TCR_WIE;
1292 1.2 matt mtspr(SPR_TCR, tcr);
1293 1.2 matt }
1294