/*	$NetBSD: i80321_icu.c,v 1.9.16.4 2008/01/21 09:35:51 yamt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.9.16.4 2008/01/21 09:35:51 yamt Exp $");

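/*
 * Ensure EVBARM_SPL_NOINLINE is defined before <machine/intr.h> is
 * pulled in, so that the spl*() operations are declared as functions;
 * the out-of-line splx()/_spllower()/_splraise() are provided below.
 */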
#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX will need to revisit this if those bits are
 * ever used in future steppings).
 */
#ifdef __HAVE_FAST_SOFTINTS
static const uint32_t si_to_irqbit[4] = {
	ICU_INT_bit26,		/* SI_SOFTCLOCK */
	ICU_INT_bit22,		/* SI_SOFTBIO */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[4] = {
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTBIO,		/* SI_SOFTBIO */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
#endif

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

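/* Read the interrupt source register (IINTSRC) via coprocessor 6. */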
static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

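/* Push the software copy of the FIQ/IRQ steering mask to the ICU (coprocessor 6). */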
static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

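/* Unmask the given IRQ in the software copy and update the hardware mask. */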
static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

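/* Mask the given IRQ in the software copy and update the hardware mask. */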
static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	i80321_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
#ifdef __HAVE_FAST_SOFTINTS
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);
#endif

	i80321_imask[IPL_SOFTBIO] |= i80321_imask[IPL_SOFTCLOCK];
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTBIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_VM] |= i80321_imask[IPL_SOFTSERIAL];
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

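/*
 * i80321_do_pending:
 *
 *	Run any pending software interrupts that are not masked at the
 *	current spl.
 */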
void
i80321_do_pending(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

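	/*
	 * DO_SOFTINT: if soft interrupt `si' is pending and not masked,
	 * clear it, raise the effective spl to its IPL, restore the
	 * saved CPSR interrupt state, and dispatch its handlers.
	 */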
#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= i80321_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTBIO);
	DO_SOFTINT(SI_SOFTCLOCK);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
#endif /* __HAVE_FAST_SOFTINTS */
}

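/*
 * splx/_spllower/_splraise:
 *
 *	Out-of-line spl operations (EVBARM_SPL_NOINLINE); they simply
 *	wrap the corresponding i80321_spl*() routines.
 */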
void
splx(int new)
{

	i80321_splx(new);
}

int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}

#ifdef __HAVE_FAST_SOFTINTS
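/*
 * _setsoftintr:
 *
 *	Mark a software interrupt pending and, if it is not masked at
 *	the current spl, process pending soft interrupts now.
 */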
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
#endif /* __HAVE_FAST_SOFTINTS */

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

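/*
 * i80321_intr_establish:
 *
 *	Register an interrupt handler for the given IRQ at the given
 *	IPL.  Returns a cookie that can later be passed to
 *	i80321_intr_disestablish().
 */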
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

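/*
 * i80321_intr_disestablish:
 *
 *	Remove a handler previously registered with
 *	i80321_intr_establish().
 */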
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;
	struct cpu_info *ci;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif

	ci = curcpu();
	ci->ci_idepth++;
	pcpl = current_spl_level;
	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI.  We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx().  Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
#ifdef I80321_HPI_ENABLED
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
#ifdef I80321_HPI_ENABLED
		else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI.  Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's been cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~pcpl);
	}
	ci->ci_idepth--;

	/* Check for pending soft intrs. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_SOFT* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_NET* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
	}
}