/*	$Id: at91aic.c,v 1.1.2.1 2007/11/10 02:56:27 matt Exp $	*/
/*	$NetBSD: at91aic.c,v 1.1.2.1 2007/11/10 02:56:27 matt Exp $	*/
3
4 /*
5 * Copyright (c) 2007 Embedtronics Oy.
6 * All rights reserved.
7 *
8 * Based on ep93xx_intr.c
9 * Copyright (c) 2002 The NetBSD Foundation, Inc.
10 * All rights reserved.
11 *
12 * This code is derived from software contributed to The NetBSD Foundation
13 * by Jesse Off
14 *
15 * This code is derived from software contributed to The NetBSD Foundation
16 * by Ichiro FUKUHARA and Naoto Shimazaki.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. All advertising materials mentioning features or use of this software
27 * must display the following acknowledgement:
28 * This product includes software developed by the NetBSD
29 * Foundation, Inc. and its contributors.
30 * 4. Neither the name of The NetBSD Foundation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
35 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
36 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
37 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
38 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
41 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
42 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
43 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
44 * POSSIBILITY OF SUCH DAMAGE.
45 */
46
47
/*
 * Interrupt support for the Atmel AT91xx9xxx family of interrupt controllers.
 */
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/malloc.h>
55 #include <sys/termios.h>
56
57 #include <uvm/uvm_extern.h>
58
59 #include <machine/bus.h>
60 #include <machine/intr.h>
61
62 #include <arm/cpufunc.h>
63
64 #include <arm/at91/at91reg.h>
65 #include <arm/at91/at91var.h>
66 #include <arm/at91/at91aicreg.h>
67 #include <arm/at91/at91aicvar.h>
68
/* Number of software-managed interrupt queues (one per AIC source bit). */
#define NIRQ 32

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t aic_imask[NIPL];	/* hardware (AIC) sources */
static u_int32_t swi_imask[NIPL];	/* software interrupt mask */

/* Current interrupt priority level. */
volatile int current_spl_level = 0;

/* Software copy of the IRQs we have enabled (mirrors AIC_IECR state). */
volatile u_int32_t aic_intr_enabled;

/* Software interrupts pending (bitmask of SI_* queues). */
static volatile int swipending;

/* Convert a software-interrupt queue number to its pending-mask bit. */
#define SI_TO_IRQBIT(si) (1U << (si))

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/* Access an AIC register by offset; the AIC is memory-mapped at AT91AIC_BASE. */
#define AICREG(reg) *((volatile u_int32_t*) (AT91AIC_BASE + (reg)))

static int at91aic_match(struct device *, struct cfdata *, void *);
static void at91aic_attach(struct device *, struct device *, void *);

CFATTACH_DECL(at91aic, sizeof(struct device),
	      at91aic_match, at91aic_attach, NULL, NULL);
106
107 static int
108 at91aic_match(struct device *parent, struct cfdata *match, void *aux)
109 {
110 if (strcmp(match->cf_name, "at91aic") == 0)
111 return 2;
112 return 0;
113 }
114
/*
 * at91aic_attach:
 *
 *	Autoconfiguration attach.  No per-device state is needed;
 *	just terminate the autoconf banner line.
 */
static void
at91aic_attach(struct device *parent, struct device *self, void *aux)
{
	(void)parent;
	(void)self;
	(void)aux;

	printf("\n");
}
121
/*
 * at91_set_intrmask:
 *
 *	Mask off the sources in "aic_irqs" and re-enable every other
 *	source recorded in the software enable copy.  The disable write
 *	goes first so no masked source can fire in between.  Caller
 *	must have CPU interrupts disabled.
 */
static inline void
at91_set_intrmask(u_int32_t aic_irqs)
{
	AICREG(AIC_IDCR) = aic_irqs;
	AICREG(AIC_IECR) = aic_intr_enabled & ~aic_irqs;
}
128
/*
 * at91_enable_irq:
 *
 *	Enable interrupt source "irq": record it in the software copy,
 *	then enable it in the controller.
 */
static inline void
at91_enable_irq(int irq)
{
	aic_intr_enabled |= (1U << irq);
	AICREG(AIC_IECR) = (1U << irq);
}
135
/*
 * at91_disable_irq:
 *
 *	Disable interrupt source "irq": clear it from the software
 *	copy, then disable it in the controller.
 */
static inline void
at91_disable_irq(int irq)
{
	aic_intr_enabled &= ~(1U << irq);
	AICREG(AIC_IDCR) = (1U << irq);
}
142
143 /*
144 * NOTE: This routine must be called with interrupts disabled in the CPSR.
145 */
146 static void
147 at91aic_calculate_masks(void)
148 {
149 struct intrq *iq;
150 struct intrhand *ih;
151 int irq, ipl;
152
153 /* First, figure out which IPLs each IRQ has. */
154 for (irq = 0; irq < NIRQ; irq++) {
155 int levels = 0;
156 iq = &intrq[irq];
157 at91_disable_irq(irq);
158 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
159 ih = TAILQ_NEXT(ih, ih_list))
160 levels |= (1U << ih->ih_ipl);
161 iq->iq_levels = levels;
162 }
163
164 /* Next, figure out which IRQs are used by each IPL. */
165 for (ipl = 0; ipl < NIPL; ipl++) {
166 int aic_irqs = 0;
167 for (irq = 0; irq < AIC_NIRQ; irq++) {
168 if (intrq[irq].iq_levels & (1U << ipl))
169 aic_irqs |= (1U << irq);
170 }
171 aic_imask[ipl] = aic_irqs;
172 }
173
174 aic_imask[IPL_NONE] = 0;
175 swi_imask[IPL_NONE] = 0;
176
177 /*
178 * Initialize the soft interrupt masks to block themselves.
179 */
180 swi_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
181 swi_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
182 swi_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
183 swi_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);
184
185 /*
186 * splsoftclock() is the only interface that users of the
187 * generic software interrupt facility have to block their
188 * soft intrs, so splsoftclock() must also block IPL_SOFT.
189 */
190 aic_imask[IPL_SOFTCLOCK] |= aic_imask[IPL_SOFT];
191 swi_imask[IPL_SOFTCLOCK] |= swi_imask[IPL_SOFT];
192
193 /*
194 * splsoftnet() must also block splsoftclock(), since we don't
195 * want timer-driven network events to occur while we're
196 * processing incoming packets.
197 */
198 aic_imask[IPL_SOFTNET] |= aic_imask[IPL_SOFTCLOCK];
199 swi_imask[IPL_SOFTNET] |= swi_imask[IPL_SOFTCLOCK];
200
201 /*
202 * Enforce a hierarchy that gives "slow" device (or devices with
203 * limited input buffer space/"real-time" requirements) a better
204 * chance at not dropping data.
205 */
206 aic_imask[IPL_BIO] |= aic_imask[IPL_SOFTNET];
207 swi_imask[IPL_BIO] |= swi_imask[IPL_SOFTNET];
208 aic_imask[IPL_NET] |= aic_imask[IPL_BIO];
209 swi_imask[IPL_NET] |= swi_imask[IPL_BIO];
210 aic_imask[IPL_SOFTSERIAL] |= aic_imask[IPL_NET];
211 swi_imask[IPL_SOFTSERIAL] |= swi_imask[IPL_NET];
212 aic_imask[IPL_TTY] |= aic_imask[IPL_SOFTSERIAL];
213 swi_imask[IPL_TTY] |= swi_imask[IPL_SOFTSERIAL];
214
215 /*
216 * splvm() blocks all interrupts that use the kernel memory
217 * allocation facilities.
218 */
219 aic_imask[IPL_VM] |= aic_imask[IPL_TTY];
220 swi_imask[IPL_VM] |= swi_imask[IPL_TTY];
221
222 /*
223 * Audio devices are not allowed to perform memory allocation
224 * in their interrupt routines, and they have fairly "real-time"
225 * requirements, so give them a high interrupt priority.
226 */
227 aic_imask[IPL_AUDIO] |= aic_imask[IPL_VM];
228 swi_imask[IPL_AUDIO] |= swi_imask[IPL_VM];
229
230 /*
231 * splclock() must block anything that uses the scheduler.
232 */
233 aic_imask[IPL_CLOCK] |= aic_imask[IPL_AUDIO];
234 swi_imask[IPL_CLOCK] |= swi_imask[IPL_AUDIO];
235
236 /*
237 * No separate statclock on the AT91?
238 */
239 aic_imask[IPL_STATCLOCK] |= aic_imask[IPL_CLOCK];
240 swi_imask[IPL_STATCLOCK] |= swi_imask[IPL_CLOCK];
241
242 /*
243 * serial uarts have small buffers that need low-latency servicing
244 */
245 aic_imask[IPL_SERIAL] |= aic_imask[IPL_STATCLOCK];
246 swi_imask[IPL_SERIAL] |= swi_imask[IPL_STATCLOCK];
247
248 /*
249 * splhigh() must block "everything".
250 */
251 aic_imask[IPL_HIGH] |= aic_imask[IPL_SERIAL];
252 swi_imask[IPL_HIGH] |= swi_imask[IPL_SERIAL];
253
254 /*
255 * Now compute which IRQs must be blocked when servicing any
256 * given IRQ.
257 */
258 for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
259 iq = &intrq[irq];
260 if (TAILQ_FIRST(&iq->iq_list) != NULL)
261 at91_enable_irq(irq);
262 }
263 /*
264 * update current mask
265 */
266 at91_set_intrmask(aic_imask[current_spl_level]);
267 }
268
/*
 * at91_do_pending:
 *
 *	Dispatch pending software interrupts that are not blocked at
 *	the current spl level.  A simple lock guards against recursive
 *	entry: if another invocation is already running, return and let
 *	it pick up any work we just made pending.
 */
static void
at91_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, cur;
	u_int oldirqstate, oldirqstate2;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

	/*
	 * Run one soft interrupt queue if pending and unmasked:
	 * clear its pending bit, raise the spl to the queue's IPL,
	 * re-enable CPU interrupts around the dispatch, then restore
	 * the original spl level and mask.
	 */
#define DO_SOFTINT(si) \
	if ((swipending & ~swi_imask[new]) & SI_TO_IRQBIT(si)) { \
		swipending &= ~SI_TO_IRQBIT(si); \
		current_spl_level = cur = si_to_ipl[(si)]; \
		at91_set_intrmask(aic_imask[cur]); \
		oldirqstate2 = enable_interrupts(I32_bit); \
		softintr_dispatch(si); \
		restore_interrupts(oldirqstate2); \
		current_spl_level = new; \
		at91_set_intrmask(aic_imask[new]); \
	}

	/* Highest-priority soft interrupts first. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
304
/*
 * splx:
 *
 *	Set the interrupt priority level to "new" and program the AIC
 *	mask to match, then run any soft interrupts that the new
 *	(presumably lower) level has unblocked.
 */
inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	if (old != new) {
		current_spl_level = new;
		at91_set_intrmask(aic_imask[new]);
	}
	restore_interrupts(oldirqstate);

	/* If there are software interrupts to process, do it. */
	if (swipending & ~swi_imask[new])
		at91_do_pending();
}
323
/*
 * _splraise:
 *
 *	Set the interrupt priority level to "ipl" and return the
 *	previous level.
 *
 *	NOTE(review): the level is set unconditionally, so this also
 *	*lowers* the spl when ipl < current_spl_level; splraise
 *	semantics usually forbid lowering.  Presumably callers only
 *	ever pass a higher level -- confirm against the port's spl(9)
 *	wrappers.
 */
int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	if (old != ipl) {
		current_spl_level = ipl;
		at91_set_intrmask(aic_imask[ipl]);
	}
	restore_interrupts(oldirqstate);

	return (old);
}
340
341 int
342 _spllower(int ipl)
343 {
344 int old = current_spl_level;
345
346 if (old <= ipl)
347 return (old);
348 splx(ipl);
349 return (old);
350 }
351
/*
 * _setsoftintr:
 *
 *	Mark software interrupt queue "si" pending, then dispatch it
 *	immediately unless the current spl level blocks it.
 */
void
_setsoftintr(int si)
{
	u_int oldirqstate;

	/* Update the pending mask atomically w.r.t. interrupts. */
	oldirqstate = disable_interrupts(I32_bit);
	swipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if (swipending & ~swi_imask[current_spl_level])
		at91_do_pending();
}
365
/*
 * at91aic_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
at91aic_init(void)
{
	struct intrq *iq;
	int i;

	aic_intr_enabled = 0;

	/* disable interrupts: */
	AICREG(AIC_IDCR) = -1;

	/* Initialize the handler queues and attach event counters. */
	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "aic", iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;

	/* All interrupts should use IRQ not FIQ */

	AICREG(AIC_IDCR) = -1;	/* disable interrupts */
	AICREG(AIC_ICCR) = -1;	/* clear all interrupts */
	AICREG(AIC_DCR) = 0;	/* not in debug mode, just to make sure */
	for (i = 0; i < NIRQ; i++) {
		AICREG(AIC_SMR(i)) = 0;		/* disable interrupt */
		/* Vector register holds the source's queue pointer; the
		 * dispatcher reads it back from AIC_IVR. */
		AICREG(AIC_SVR(i)) = (u_int32_t)&intrq[i];
	}
	AICREG(AIC_FVR) = 0;	/* fast interrupt vector (FIQ unused) */
	AICREG(AIC_SPU) = 0;	/* spurious interrupt vector */

	/* Two EOI writes -- presumably to fully unwind the AIC's
	 * internal priority state; confirm against the datasheet. */
	AICREG(AIC_EOICR) = 0;	/* clear logic... */
	AICREG(AIC_EOICR) = 0;	/* clear logic... */

	at91aic_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
414
/*
 * at91aic_intr_establish:
 *
 *	Register handler "ih_func" with argument "arg" for source "irq"
 *	at priority "ipl" and trigger mode "type".  Returns an opaque
 *	cookie for at91aic_intr_disestablish(), or NULL on failure
 *	(allocation failure, or the source is already established with
 *	an incompatible trigger type).  Panics on out-of-range irq/ipl
 *	or an unknown trigger type.
 */
void *
at91aic_intr_establish(int irq, int ipl, int type, int (*ih_func)(void *), void *arg)
{
	struct intrq* iq;
	struct intrhand* ih;
	u_int oldirqstate;
	unsigned ok;
	uint32_t smr;

	if (irq < 0 || irq >= NIRQ)
		panic("intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("intr_establish: IPL %d out of range", ipl);

	/* Build the Source Mode Register value: trigger type plus
	 * a fixed AIC hardware priority of one for every source. */
	smr = 1;	// all interrupts have priority one.. ok?
	switch (type) {
	case _INTR_LOW_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_LO;
		break;
	case INTR_HIGH_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_HI;
		break;
	case INTR_FALLING_EDGE:
		smr |= AIC_SMR_SRCTYPE_FALLING;
		break;
	case INTR_RISING_EDGE:
		smr |= AIC_SMR_SRCTYPE_RISING;
		break;
	default:
		panic("intr_establish: interrupt type %d is invalid", type);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	/*
	 * Accept the handler if the queue is empty, or if the existing
	 * trigger type is a bitwise subset of the new one.
	 * NOTE(review): the INTR_* trigger types look like enum values
	 * rather than bit masks, so this subset test may not express
	 * the intended "same type" check -- confirm against
	 * <arm/at91/at91aicvar.h>.
	 */
	if (TAILQ_FIRST(&iq->iq_list) == NULL || (iq->iq_type & ~type) == 0) {
		AICREG(AIC_SMR(irq)) = smr;
		iq->iq_type = type;
		TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
		at91aic_calculate_masks();
		ok = 1;
	} else
		ok = 0;
	restore_interrupts(oldirqstate);

	if (ok) {
#ifdef	AT91AIC_DEBUG
		int i;
		printf("\n");
		for (i = 0; i < NIPL; i++) {
			printf("IPL%d: aic_imask=0x%08X swi_imask=0x%08X\n",
			       i, aic_imask[i], swi_imask[i]);
		}
#endif
	} else {
		free(ih, M_DEVBUF);
		ih = NULL;
	}

	return (ih);
}
485
486 void
487 at91aic_intr_disestablish(void *cookie)
488 {
489 struct intrhand* ih = cookie;
490 struct intrq* iq = &intrq[ih->ih_irq];
491 u_int oldirqstate;
492
493 oldirqstate = disable_interrupts(I32_bit);
494 TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
495 at91aic_calculate_masks();
496 restore_interrupts(oldirqstate);
497 }
498
499 #include <arm/at91/at91reg.h>
500 #include <arm/at91/at91dbgureg.h>
501 #include <arm/at91/at91pdcreg.h>
502
static inline void intr_process(struct intrq *iq, int pcpl, struct irqframe *frame);

/*
 * intr_process:
 *
 *	Run every handler registered on interrupt queue "iq".  The spl
 *	is raised to each handler's IPL and CPU interrupts are
 *	re-enabled around the call so higher-priority sources may
 *	preempt.  "pcpl" is the spl level to restore afterwards;
 *	"frame" is passed to handlers that registered a NULL argument.
 */
static inline void
intr_process(struct intrq *iq, int pcpl, struct irqframe *frame)
{
	struct intrhand* ih;
	u_int oldirqstate, intr;

	/* Queue index doubles as the IRQ number. */
	intr = iq - intrq;

	iq->iq_ev.ev_count++;
	uvmexp.intrs++;

	/* Sanity: a source masked at this level must never fire. */
	if ((1U << intr) & aic_imask[pcpl]) {
		panic("interrupt %d should be masked! (aic_imask=0x%X)", intr, aic_imask[pcpl]);
	}

	/* Sanity: detect unexpected re-entry on the same source. */
	if (iq->iq_busy) {
		panic("interrupt %d busy!", intr);
	}

	iq->iq_busy = 1;

	for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
	     ih = TAILQ_NEXT(ih, ih_list)) {
		current_spl_level = ih->ih_ipl;
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		oldirqstate = enable_interrupts(I32_bit);
		(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		restore_interrupts(oldirqstate);
	}

	/* Sanity: the busy flag must not have been clobbered. */
	if (!iq->iq_busy) {
		panic("interrupt %d not busy!", intr);
	}
	iq->iq_busy = 0;

	/* Restore the caller's spl level and matching hardware mask. */
	current_spl_level = pcpl;
	at91_set_intrmask(aic_imask[pcpl]);
}
543
/*
 * at91aic_intr_dispatch:
 *
 *	IRQ entry point, called from the exception handler with
 *	"frame" describing the interrupted context.  Reading AIC_IVR
 *	acknowledges the highest-priority pending source and yields the
 *	vector we stored in at91aic_init() -- a pointer to that
 *	source's interrupt queue.  The EOI write must follow once the
 *	source has been serviced.
 */
void
at91aic_intr_dispatch(struct irqframe *frame)
{
	struct intrq* iq;
	int pcpl = current_spl_level;

	iq = (struct intrq *)AICREG(AIC_IVR);	// get current queue

	// OK, service interrupt
	if (iq)
		intr_process(iq, pcpl, frame);

	AICREG(AIC_EOICR) = 0;			// end of interrupt

	/* Check for pending soft intrs. */
	if (swipending & ~swi_imask[pcpl]) {
		at91_do_pending();
	}
}
563
#if 0
/*
 * Old IRQ-number-based polling interface, superseded by the
 * cookie-based at91aic_intr_poll() below.  Dead code kept for
 * reference only (compiled out).
 */
void
at91aic_intr_poll(int irq)
{
	u_int oldirqstate;
	uint32_t ipr;
	int pcpl = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq) & ~aic_imask[pcpl]))
		intr_process(&intrq[irq], pcpl, NULL);
	restore_interrupts(oldirqstate);

	/* Check for pending soft intrs. */
	if (swipending & ~swi_imask[pcpl]) {
		at91_do_pending();
	}
}
#endif
584
/*
 * at91aic_intr_poll:
 *
 *	Manually service the source associated with handler cookie
 *	"ihp" if its bit is pending in AIC_IPR.  When "flags" is
 *	non-zero the current spl mask is ignored (forced poll).
 *	Pending soft interrupts are dispatched afterwards.
 */
void
at91aic_intr_poll(void *ihp, int flags)
{
	struct intrhand* ih = ihp;
	u_int oldirqstate, irq = ih->ih_irq;
	uint32_t ipr;
	int pcpl = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq))
	    && (flags || !(aic_imask[pcpl] & (1U << irq)))) {
		/* Raise the spl to the handler's IPL around the call. */
		current_spl_level = ih->ih_ipl;
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		(void)enable_interrupts(I32_bit);
		(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : NULL);
		(void)disable_interrupts(I32_bit);
		current_spl_level = pcpl;
		at91_set_intrmask(aic_imask[pcpl]);
	}
	restore_interrupts(oldirqstate);

	/* Check for pending soft intrs. */
	if (swipending & ~swi_imask[pcpl]) {
		at91_do_pending();
	}
}
612