/*	$NetBSD: ep93xx_intr.c,v 1.10 2008/01/06 01:37:53 matt Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.10 2008/01/06 01:37:53 matt Exp $");

/*
 * Interrupt support for the Cirrus Logic EP93XX
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/ep93xxvar.h>
/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t vic1_imask[NIPL];
static u_int32_t vic2_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t vic1_intr_enabled;
volatile u_int32_t vic2_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

#ifdef __HAVE_FAST_SOFTINTS
#define	SI_SOFTCLOCK	0
#define	SI_SOFTBIO	1
#define	SI_SOFTNET	2
#define	SI_SOFTSERIAL	3
/*
 * Map each software interrupt queue index to one of the unused bits in
 * the VIC1 register (XXX: this will need to be revisited if those bits
 * are ever used in future steppings).
 */
static const u_int32_t si_to_irqbit[] = {
	[SI_SOFTCLOCK]	= EP93XX_INTR_bit30,
	[SI_SOFTBIO]	= EP93XX_INTR_bit29,
	[SI_SOFTNET]	= EP93XX_INTR_bit28,
	[SI_SOFTSERIAL]	= EP93XX_INTR_bit27,
};

#define	INT_SWMASK \
	((1U << EP93XX_INTR_bit30) | (1U << EP93XX_INTR_bit29) | \
	 (1U << EP93XX_INTR_bit28) | (1U << EP93XX_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[] = {
	[SI_SOFTCLOCK]	= IPL_SOFTCLOCK,
	[SI_SOFTBIO]	= IPL_SOFTBIO,
	[SI_SOFTNET]	= IPL_SOFTNET,
	[SI_SOFTSERIAL]	= IPL_SOFTSERIAL,
};
#endif /* __HAVE_FAST_SOFTINTS */

void	ep93xx_intr_dispatch(struct irqframe *frame);

#define	VIC1REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC1 + (reg)))
#define	VIC2REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC2 + (reg)))

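/*
 * ep93xx_set_intrmask:
 *
 *	Mask off the given sets of VIC1/VIC2 interrupt sources in
 *	hardware, then re-enable every source that is in the software
 *	enable mask but not in the set being masked.
 */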
static void
ep93xx_set_intrmask(u_int32_t vic1_irqs, u_int32_t vic2_irqs)
{
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}

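/*
 * ep93xx_enable_irq:
 *
 *	Enable the given IRQ source and record it in the software copy
 *	of the enable mask.  IRQs below VIC_NIRQ live on VIC1; the rest
 *	live on VIC2.
 */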
static void
ep93xx_enable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled |= (1U << irq);
		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
	} else {
		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
	}
}

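/*
 * ep93xx_disable_irq:
 *
 *	Disable the given IRQ source and remove it from the software
 *	copy of the enable mask.
 */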
static inline void
ep93xx_disable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled &= ~(1U << irq);
		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
	} else {
		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

	KASSERT(vic1_imask[IPL_NONE] == 0);
	KASSERT(vic2_imask[IPL_NONE] == 0);

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	vic1_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	vic1_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	vic1_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	vic1_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftbio() must also block splsoftclock().
	 */
	vic1_imask[IPL_SOFTBIO] |= vic1_imask[IPL_SOFTCLOCK];
	vic2_imask[IPL_SOFTBIO] |= vic2_imask[IPL_SOFTCLOCK];

	/*
	 * splsoftnet() must also block splsoftbio() (and, transitively,
	 * splsoftclock()), since we don't want timer-driven network
	 * events to occur while we're processing incoming packets.
	 */
	vic1_imask[IPL_SOFTNET] |= vic1_imask[IPL_SOFTBIO];
	vic2_imask[IPL_SOFTNET] |= vic2_imask[IPL_SOFTBIO];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	vic1_imask[IPL_SOFTSERIAL] |= vic1_imask[IPL_SOFTNET];
	vic2_imask[IPL_SOFTSERIAL] |= vic2_imask[IPL_SOFTNET];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	vic1_imask[IPL_VM] |= vic1_imask[IPL_SOFTSERIAL];
	vic2_imask[IPL_VM] |= vic2_imask[IPL_SOFTSERIAL];
#endif /* __HAVE_FAST_SOFTINTS */

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_CLOCK] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_CLOCK] |= vic2_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_CLOCK];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_CLOCK];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int vic1_irqs;
		int vic2_irqs;

		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}

#ifdef __HAVE_FAST_SOFTINTS
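/*
 * ep93xx_do_pending:
 *
 *	Run any pending soft interrupts that are not masked at the
 *	current priority level, each at its own IPL.  A simple lock
 *	prevents recursive invocation.
 */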
static void
ep93xx_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new;
	u_int oldirqstate, oldirqstate2;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~vic1_imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		oldirqstate2 = enable_interrupts(I32_bit);		\
		softintr_dispatch(si);					\
		restore_interrupts(oldirqstate2);			\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTBIO);
	DO_SOFTINT(SI_SOFTCLOCK);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
#endif

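/*
 * splx:
 *
 *	Set the interrupt priority level to the given value, reprogram
 *	the hardware interrupt mask if the level changed, and run any
 *	soft interrupts left unmasked by the new level.
 */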
inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~vic1_imask[new])
		ep93xx_do_pending();
#endif
}

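/*
 * _splraise:
 *
 *	Set the software interrupt priority level to ipl and return the
 *	previous level.  The hardware mask is not reprogrammed here; it
 *	is updated lazily by splx() or by the dispatcher.
 */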
int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = ipl;
	restore_interrupts(oldirqstate);
	return (old);
}

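/*
 * _spllower:
 *
 *	Lower the interrupt priority level, returning the previous one.
 *	splx() does the actual work when the new level is lower.
 */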
int
_spllower(int ipl)
{
	int old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

#ifdef __HAVE_FAST_SOFTINTS
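/*
 * _setsoftintr:
 *
 *	Mark the given soft interrupt pending and run it immediately if
 *	the current priority level leaves it unmasked.
 */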
void
_setsoftintr(int si)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~vic1_imask[current_spl_level])
		ep93xx_do_pending();
}
#endif

/*
 * ep93xx_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ep93xx_intr_init(void)
{
	struct intrq *iq;
	int i;

	vic1_intr_enabled = 0;
	vic2_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
		    iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;
	hardware_spl_level = 0;

	/* All interrupts should use IRQ not FIQ */
	VIC1REG(EP93XX_VIC_IntSelect) = 0;
	VIC2REG(EP93XX_VIC_IntSelect) = 0;

	ep93xx_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

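/*
 * ep93xx_intr_establish:
 *
 *	Register an interrupt handler for the given IRQ at the given
 *	IPL and recompute the interrupt masks.  Returns an opaque
 *	cookie for ep93xx_intr_disestablish(), or NULL if the handler
 *	could not be allocated.
 *
 *	Typical driver attach-time usage (illustrative only; the
 *	"xxx_intr"/"sc" names and the IRQ/IPL chosen here are
 *	hypothetical, not defined in this file):
 *
 *		sc->sc_ih = ep93xx_intr_establish(sc->sc_irq, IPL_NET,
 *		    xxx_intr, sc);
 *		if (sc->sc_ih == NULL)
 *			printf("%s: unable to establish interrupt\n",
 *			    sc->sc_dev.dv_xname);
 */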
void *
ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ep93xx_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

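/*
 * ep93xx_intr_disestablish:
 *
 *	Remove a previously established interrupt handler and recompute
 *	the interrupt masks.
 */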
void
ep93xx_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	/* The handler was allocated in ep93xx_intr_establish(); free it. */
	free(ih, M_DEVBUF);
}

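/*
 * ep93xx_intr_dispatch:
 *
 *	Main hardware interrupt dispatch routine.  Reads the pending
 *	status from both VICs, masks those sources together with
 *	everything blocked at the interrupted IPL, then runs the
 *	handlers for the lowest-numbered pending IRQ with interrupts
 *	re-enabled.  The previous IPL and hardware mask are restored
 *	before returning, and pending soft interrupts are processed.
 */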
void
ep93xx_intr_dispatch(struct irqframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	u_int32_t		vic1_hwpend;
	u_int32_t		vic2_hwpend;
	int			irq;

	pcpl = current_spl_level;

	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
	    vic2_imask[pcpl] | vic2_hwpend);

	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			current_spl_level = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			current_spl_level = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	current_spl_level = pcpl;
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	/* Check for pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~vic1_imask[pcpl]) {
		ep93xx_do_pending();
	}
#endif
}