/*	$NetBSD: ep93xx_intr.c,v 1.12 2008/04/27 18:58:44 matt Exp $	*/
2
3 /*
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jesse Off
9 *
10 * This code is derived from software contributed to The NetBSD Foundation
11 * by Ichiro FUKUHARA and Naoto Shimazaki.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by the NetBSD
24 * Foundation, Inc. and its contributors.
25 * 4. Neither the name of The NetBSD Foundation nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
31 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
32 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
33 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
37 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.12 2008/04/27 18:58:44 matt Exp $");
44
45 /*
46 * Interrupt support for the Cirrus Logic EP93XX
47 */
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
52 #include <sys/termios.h>
53
54 #include <uvm/uvm_extern.h>
55
56 #include <machine/bus.h>
57 #include <machine/intr.h>
58
59 #include <arm/cpufunc.h>
60
61 #include <arm/ep93xx/ep93xxreg.h>
62 #include <arm/ep93xx/ep93xxvar.h>
63
/* Interrupt handler queues, one per IRQ source (VIC1 then VIC2). */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level, one bitmap per VIC. */
static u_int32_t vic1_imask[NIPL];
static u_int32_t vic2_imask[NIPL];

/* Current interrupt priority level reflected in the hardware mask. */
volatile int hardware_spl_level;

/*
 * Software copy of the IRQs we have enabled; needed because the VIC
 * enable register is write-to-set and cannot be read back usefully
 * after masking.
 */
volatile u_int32_t vic1_intr_enabled;
volatile u_int32_t vic2_intr_enabled;

/* Interrupts pending.  NOTE(review): not referenced in this file's
 * visible code — possibly vestigial; confirm before removing. */
static volatile int ipending;

void ep93xx_intr_dispatch(struct irqframe *frame);

/*
 * Register accessors for the two vectored interrupt controllers;
 * (reg) is a byte offset from the respective VIC's base address.
 */
#define VIC1REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC1 + (reg)))
#define VIC2REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC2 + (reg)))
87
/*
 * ep93xx_set_intrmask:
 *
 *	Mask the IRQ sources in vic1_irqs/vic2_irqs and enable every
 *	software-enabled source not in those sets.  Each VIC is cleared
 *	before its enable register is written so a source never becomes
 *	transiently enabled while it is supposed to be masked.
 */
static void
ep93xx_set_intrmask(u_int32_t vic1_irqs, u_int32_t vic2_irqs)
{
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}
96
97 static void
98 ep93xx_enable_irq(int irq)
99 {
100 if (irq < VIC_NIRQ) {
101 vic1_intr_enabled |= (1U << irq);
102 VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
103 } else {
104 vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
105 VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
106 }
107 }
108
109 static inline void
110 ep93xx_disable_irq(int irq)
111 {
112 if (irq < VIC_NIRQ) {
113 vic1_intr_enabled &= ~(1U << irq);
114 VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
115 } else {
116 vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
117 VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
118 }
119 }
120
121 /*
122 * NOTE: This routine must be called with interrupts disabled in the CPSR.
123 */
124 static void
125 ep93xx_intr_calculate_masks(void)
126 {
127 struct intrq *iq;
128 struct intrhand *ih;
129 int irq, ipl;
130
131 /* First, figure out which IPLs each IRQ has. */
132 for (irq = 0; irq < NIRQ; irq++) {
133 int levels = 0;
134 iq = &intrq[irq];
135 ep93xx_disable_irq(irq);
136 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
137 ih = TAILQ_NEXT(ih, ih_list))
138 levels |= (1U << ih->ih_ipl);
139 iq->iq_levels = levels;
140 }
141
142 /* Next, figure out which IRQs are used by each IPL. */
143 for (ipl = 0; ipl < NIPL; ipl++) {
144 int vic1_irqs = 0;
145 int vic2_irqs = 0;
146 for (irq = 0; irq < VIC_NIRQ; irq++) {
147 if (intrq[irq].iq_levels & (1U << ipl))
148 vic1_irqs |= (1U << irq);
149 }
150 vic1_imask[ipl] = vic1_irqs;
151 for (irq = 0; irq < VIC_NIRQ; irq++) {
152 if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
153 vic2_irqs |= (1U << irq);
154 }
155 vic2_imask[ipl] = vic2_irqs;
156 }
157
158 KASSERT(vic1_imask[IPL_NONE] == 0);
159 KASSERT(vic2_imask[IPL_NONE] == 0);
160
161 /*
162 * splclock() must block anything that uses the scheduler.
163 */
164 vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM];
165 vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM];
166
167 /*
168 * splhigh() must block "everything".
169 */
170 vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED];
171 vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED];
172
173 /*
174 * Now compute which IRQs must be blocked when servicing any
175 * given IRQ.
176 */
177 for (irq = 0; irq < NIRQ; irq++) {
178 int vic1_irqs;
179 int vic2_irqs;
180
181 if (irq < VIC_NIRQ) {
182 vic1_irqs = (1U << irq);
183 vic2_irqs = 0;
184 } else {
185 vic1_irqs = 0;
186 vic2_irqs = (1U << (irq - VIC_NIRQ));
187 }
188 iq = &intrq[irq];
189 if (TAILQ_FIRST(&iq->iq_list) != NULL)
190 ep93xx_enable_irq(irq);
191 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
192 ih = TAILQ_NEXT(ih, ih_list)) {
193 vic1_irqs |= vic1_imask[ih->ih_ipl];
194 vic2_irqs |= vic2_imask[ih->ih_ipl];
195 }
196 iq->iq_vic1_mask = vic1_irqs;
197 iq->iq_vic2_mask = vic2_irqs;
198 }
199 }
200
201 inline void
202 splx(int new)
203 {
204 int old;
205 u_int oldirqstate;
206
207 oldirqstate = disable_interrupts(I32_bit);
208 old = curcpl();
209 set_curcpl(new);
210 if (new != hardware_spl_level) {
211 hardware_spl_level = new;
212 ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
213 }
214 restore_interrupts(oldirqstate);
215
216 #ifdef __HAVE_FAST_SOFTINTS
217 cpu_dosoftints();
218 #endif
219 }
220
221 int
222 _splraise(int ipl)
223 {
224 int old;
225 u_int oldirqstate;
226
227 oldirqstate = disable_interrupts(I32_bit);
228 old = curcpl();
229 set_curcpl(ipl);
230 restore_interrupts(oldirqstate);
231 return (old);
232 }
233
/*
 * _spllower:
 *
 *	Lower the interrupt priority level to "ipl" if it is currently
 *	higher; return the previous level either way.
 */
int
_spllower(int ipl)
{
	const int prev = curcpl();

	if (prev > ipl)
		splx(ipl);

	return prev;
}
244
245 /*
246 * ep93xx_intr_init:
247 *
248 * Initialize the rest of the interrupt subsystem, making it
249 * ready to handle interrupts from devices.
250 */
251 void
252 ep93xx_intr_init(void)
253 {
254 struct intrq *iq;
255 int i;
256
257 vic1_intr_enabled = 0;
258 vic2_intr_enabled = 0;
259
260 for (i = 0; i < NIRQ; i++) {
261 iq = &intrq[i];
262 TAILQ_INIT(&iq->iq_list);
263
264 sprintf(iq->iq_name, "irq %d", i);
265 evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
266 NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
267 iq->iq_name);
268 }
269 curcpu()->ci_intr_depth = 0;
270 set_curcpl(0);
271 hardware_spl_level = 0;
272
273 /* All interrupts should use IRQ not FIQ */
274 VIC1REG(EP93XX_VIC_IntSelect) = 0;
275 VIC2REG(EP93XX_VIC_IntSelect) = 0;
276
277 ep93xx_intr_calculate_masks();
278
279 /* Enable IRQs (don't yet use FIQs). */
280 enable_interrupts(I32_bit);
281 }
282
283 void *
284 ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
285 {
286 struct intrq* iq;
287 struct intrhand* ih;
288 u_int oldirqstate;
289
290 if (irq < 0 || irq > NIRQ)
291 panic("ep93xx_intr_establish: IRQ %d out of range", irq);
292 if (ipl < 0 || ipl > NIPL)
293 panic("ep93xx_intr_establish: IPL %d out of range", ipl);
294
295 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
296 if (ih == NULL)
297 return (NULL);
298
299 ih->ih_func = ih_func;
300 ih->ih_arg = arg;
301 ih->ih_irq = irq;
302 ih->ih_ipl = ipl;
303
304 iq = &intrq[irq];
305
306 oldirqstate = disable_interrupts(I32_bit);
307 TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
308 ep93xx_intr_calculate_masks();
309 restore_interrupts(oldirqstate);
310
311 return (ih);
312 }
313
314 void
315 ep93xx_intr_disestablish(void *cookie)
316 {
317 struct intrhand* ih = cookie;
318 struct intrq* iq = &intrq[ih->ih_irq];
319 u_int oldirqstate;
320
321 oldirqstate = disable_interrupts(I32_bit);
322 TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
323 ep93xx_intr_calculate_masks();
324 restore_interrupts(oldirqstate);
325 }
326
/*
 * ep93xx_intr_dispatch:
 *
 *	IRQ entry point, called with the interrupt frame.  Services the
 *	lowest-numbered pending, unblocked source from VIC1 (VIC2 is
 *	consulted only when VIC1 has nothing deliverable), running each
 *	of its handlers at the handler's IPL with CPU interrupts
 *	re-enabled, then restores the caller's spl and hardware mask.
 */
void
ep93xx_intr_dispatch(struct irqframe *frame)
{
	struct intrq* iq;
	struct intrhand* ih;
	u_int oldirqstate;
	int pcpl;
	u_int32_t vic1_hwpend;
	u_int32_t vic2_hwpend;
	int irq;

	pcpl = curcpl();

	/* Snapshot the raw pending status of both VICs. */
	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	/*
	 * Mask everything currently pending plus everything blocked at
	 * the current IPL, so handlers may re-enable CPU interrupts
	 * without immediately re-taking this interrupt.
	 */
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
		vic2_imask[pcpl] | vic2_hwpend);

	/* Discard sources that are blocked at the current IPL. */
	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		irq = ffs(vic1_hwpend) - 1;	/* lowest pending VIC1 source */

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			/* Run at the handler's IPL, interrupts enabled. */
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			/* A NULL arg means "pass the interrupt frame". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;	/* lowest pending VIC2 source */

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			/* Run at the handler's IPL, interrupts enabled. */
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			/* A NULL arg means "pass the interrupt frame". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	/* Restore the caller's spl and the matching hardware mask. */
	set_curcpl(pcpl);
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
384