/*	$NetBSD: ep93xx_intr.c,v 1.22.6.2 2015/06/06 14:39:55 skrll Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.22.6.2 2015/06/06 14:39:55 skrll Exp $");

/*
 * Interrupt support for the Cirrus Logic EP93XX
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>
#include <sys/lwp.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <arm/locore.h>

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/ep93xxvar.h>

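/*
 * The EP93xx provides two vectored interrupt controllers: IRQs
 * 0..VIC_NIRQ-1 live on VIC1 and IRQs VIC_NIRQ..NIRQ-1 live on VIC2,
 * shifted down by VIC_NIRQ whenever the VIC2 registers are programmed.
 */
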
/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static uint32_t vic1_imask[NIPL];
static uint32_t vic2_imask[NIPL];

/* The IPL whose masks are currently programmed into the VICs. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t vic1_intr_enabled;
volatile uint32_t vic2_intr_enabled;

void	ep93xx_intr_dispatch(struct trapframe *);

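/*
 * VIC1REG()/VIC2REG() expand to volatile dereferences into the two VIC
 * register banks in the virtually mapped AHB space, so the plain
 * assignments below are real device register reads and writes.
 */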
#define VIC1REG(reg)	*((volatile uint32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC1 + (reg)))
#define VIC2REG(reg)	*((volatile uint32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC2 + (reg)))

static void
ep93xx_set_intrmask(uint32_t vic1_irqs, uint32_t vic2_irqs)
{
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}

static void
ep93xx_enable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled |= (1U << irq);
		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
	} else {
		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
	}
}

static inline void
ep93xx_disable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled &= ~(1U << irq);
		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
	} else {
		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

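	/*
	 * Software interrupt levels must never have hardware IRQs
	 * registered at them, so their masks stay empty.
	 */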
	KASSERT(vic1_imask[IPL_NONE] == 0);
	KASSERT(vic2_imask[IPL_NONE] == 0);
	KASSERT(vic1_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic2_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic1_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic2_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic1_imask[IPL_SOFTNET] == 0);
	KASSERT(vic2_imask[IPL_SOFTNET] == 0);
	KASSERT(vic1_imask[IPL_SOFTSERIAL] == 0);
	KASSERT(vic2_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
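	/*
	 * Each IRQ's mask is its own bit plus the union of the imasks
	 * of every IPL with a handler on that queue.  As a hypothetical
	 * example, an IRQ on VIC1 whose only handler runs at IPL_VM
	 * ends up with iq_vic1_mask == (1U << irq) | vic1_imask[IPL_VM].
	 */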
	for (irq = 0; irq < NIRQ; irq++) {
		int vic1_irqs;
		int vic2_irqs;

		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}

inline void
splx(int new)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}
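
/*
 * A typical (hypothetical) use of the spl API implemented above; the
 * variable and IPL names are illustrative only:
 *
 *	int s;
 *
 *	s = _splraise(IPL_VM);	(block IPL_VM and lower hardware IRQs)
 *	... touch data shared with an interrupt handler ...
 *	splx(s);		(drop back, reprogramming the VICs if needed)
 *
 * _splraise() only updates the software level; the hardware masks are
 * tightened lazily, either in splx() or on entry to
 * ep93xx_intr_dispatch(), which never runs handlers blocked at the
 * current level.
 */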

/*
 * ep93xx_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ep93xx_intr_init(void)
{
	struct intrq *iq;
	int i;

	vic1_intr_enabled = 0;
	vic2_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
		    iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	set_curcpl(0);
	hardware_spl_level = 0;

	/* All interrupts should use IRQ not FIQ */
	VIC1REG(EP93XX_VIC_IntSelect) = 0;
	VIC2REG(EP93XX_VIC_IntSelect) = 0;

	ep93xx_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

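/*
 * ep93xx_intr_establish:
 *
 *	Register the handler ih_func/arg on the given IRQ at the given
 *	IPL and recompute the interrupt masks.  Returns an opaque cookie
 *	for ep93xx_intr_disestablish(), or NULL if memory allocation
 *	fails.  A (hypothetical) driver attachment might look like:
 *
 *		sc->sc_ih = ep93xx_intr_establish(irq, IPL_VM,
 *		    foo_intr, sc);
 *
 *	where foo_intr and sc stand in for the driver's own handler and
 *	softc.
 */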
void *
ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ep93xx_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

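/*
 * ep93xx_intr_disestablish:
 *
 *	Remove a handler previously registered with
 *	ep93xx_intr_establish(), using the cookie it returned, and
 *	recompute the interrupt masks.  Note that the intrhand storage
 *	is not freed here.
 */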
void
ep93xx_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

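/*
 * ep93xx_intr_dispatch:
 *
 *	Hardware IRQ dispatch, entered from the IRQ trap path with
 *	interrupts disabled in the CPSR.  Masks everything blocked at
 *	the current IPL plus all pending sources, services the
 *	lowest-numbered unblocked IRQ (VIC1 before VIC2) with interrupts
 *	re-enabled, and finally restores the previous IPL's masks.
 */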
void
ep93xx_intr_dispatch(struct trapframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;
	int pcpl;
	uint32_t vic1_hwpend;
	uint32_t vic2_hwpend;
	int irq;

	pcpl = curcpl();

	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	/* Mask what the current IPL blocks, plus all pending sources. */
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
	    vic2_imask[pcpl] | vic2_hwpend);

	/* Only service sources that are not blocked at the current IPL. */
	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	/* Restore the previous IPL and its interrupt masks. */
	set_curcpl(pcpl);
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}