/*	$Id: at91aic.c,v 1.2 2008/07/03 01:15:38 matt Exp $	*/
/*	$NetBSD: at91aic.c,v 1.2 2008/07/03 01:15:38 matt Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy.
 * All rights reserved.
 *
 * Based on ep93xx_intr.c
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for Atmel's AT91xx9xxx family of interrupt controllers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/at91/at91reg.h>
#include <arm/at91/at91var.h>
#include <arm/at91/at91aicreg.h>
#include <arm/at91/at91aicvar.h>

#define	NIRQ	32

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t aic_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t aic_intr_enabled;

#define	AICREG(reg)	*((volatile u_int32_t*) (AT91AIC_BASE + (reg)))

static int	at91aic_match(device_t, cfdata_t, void *);
static void	at91aic_attach(device_t, device_t, void *);

CFATTACH_DECL(at91aic, sizeof(struct device),
	at91aic_match, at91aic_attach, NULL, NULL);

static int
at91aic_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91aic") == 0)
		return 2;
	return 0;
}

static void
at91aic_attach(device_t parent, device_t self, void *aux)
{
	(void)parent; (void)self; (void)aux;
	printf("\n");
}
102
103 static inline void
104 at91_set_intrmask(u_int32_t aic_irqs)
105 {
106 AICREG(AIC_IDCR) = aic_irqs;
107 AICREG(AIC_IECR) = aic_intr_enabled & ~aic_irqs;
108 }
109
110 static inline void
111 at91_enable_irq(int irq)
112 {
113 aic_intr_enabled |= (1U << irq);
114 AICREG(AIC_IECR) = (1U << irq);
115 }
116
117 static inline void
118 at91_disable_irq(int irq)
119 {
120 aic_intr_enabled &= ~(1U << irq);
121 AICREG(AIC_IDCR) = (1U << irq);
122 }
123

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	aic_imask[IPL_NONE] = 0;

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	aic_imask[IPL_VM] |= aic_imask[IPL_NONE];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	aic_imask[IPL_CLOCK] |= aic_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	aic_imask[IPL_HIGH] |= aic_imask[IPL_CLOCK];

	/*
	 * Finally, enable the IRQs that have at least one handler
	 * attached.
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}
	/*
	 * Update the hardware mask for the current IPL.
	 */
	at91_set_intrmask(aic_imask[curcpl()]);
}

inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != new) {
		set_curcpl(new);
		at91_set_intrmask(aic_imask[new]);
	}
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != ipl) {
		set_curcpl(ipl);
		at91_set_intrmask(aic_imask[ipl]);
	}
	restore_interrupts(oldirqstate);

	return (old);
}

int
_spllower(int ipl)
{
	int old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
	return (old);
}
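
/*
 * Illustrative note (not part of the original file): callers do not
 * normally invoke _splraise()/_spllower() directly; they use the spl*()
 * macros from <machine/intr.h>, which on this port end up in the routines
 * above.  The usual pattern brackets a critical section, e.g. (IPL_VM and
 * splvm() are just an example level here):
 *
 *	s = splvm();		raise IPL, remember the previous level
 *	... touch data shared with interrupt handlers ...
 *	splx(s);		drop back to the previous level
 */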

/*
 * at91aic_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
at91aic_init(void)
{
	struct intrq *iq;
	int i;

	aic_intr_enabled = 0;

	// disable interrupts:
	AICREG(AIC_IDCR) = -1;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "aic", iq->iq_name);
	}

	/* All interrupts should use IRQ, not FIQ. */

	AICREG(AIC_IDCR) = -1;		/* disable interrupts */
	AICREG(AIC_ICCR) = -1;		/* clear all interrupts */
	AICREG(AIC_DCR) = 0;		/* not in debug mode, just to make sure */
	for (i = 0; i < NIRQ; i++) {
		AICREG(AIC_SMR(i)) = 0;	/* disable interrupt */
		AICREG(AIC_SVR(i)) = (u_int32_t)&intrq[i];	// address of interrupt queue
	}
	AICREG(AIC_FVR) = 0;		// fast interrupt...
	AICREG(AIC_SPU) = 0;		// spurious interrupt vector

	AICREG(AIC_EOICR) = 0;		/* clear logic... */
	AICREG(AIC_EOICR) = 0;		/* clear logic... */

	at91aic_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
at91aic_intr_establish(int irq, int ipl, int type, int (*ih_func)(void *), void *arg)
{
	struct intrq* iq;
	struct intrhand* ih;
	u_int oldirqstate;
	unsigned ok;
	uint32_t smr;

	if (irq < 0 || irq >= NIRQ)
		panic("intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("intr_establish: IPL %d out of range", ipl);

	smr = 1;	// all interrupts have priority one.. ok?
	switch (type) {
	case _INTR_LOW_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_LO;
		break;
	case INTR_HIGH_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_HI;
		break;
	case INTR_FALLING_EDGE:
		smr |= AIC_SMR_SRCTYPE_FALLING;
		break;
	case INTR_RISING_EDGE:
		smr |= AIC_SMR_SRCTYPE_RISING;
		break;
	default:
		panic("intr_establish: interrupt type %d is invalid", type);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	if (TAILQ_FIRST(&iq->iq_list) == NULL || (iq->iq_type & ~type) == 0) {
		AICREG(AIC_SMR(irq)) = smr;
		iq->iq_type = type;
		TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
		at91aic_calculate_masks();
		ok = 1;
	} else
		ok = 0;
	restore_interrupts(oldirqstate);

	if (ok) {
#ifdef AT91AIC_DEBUG
		int i;
		printf("\n");
		for (i = 0; i < NIPL; i++) {
			printf("IPL%d: aic_imask=0x%08X\n", i, aic_imask[i]);
		}
#endif
	} else {
		free(ih, M_DEVBUF);
		ih = NULL;
	}

	return (ih);
}

void
at91aic_intr_disestablish(void *cookie)
{
	struct intrhand* ih = cookie;
	struct intrq* iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	at91aic_calculate_masks();
	restore_interrupts(oldirqstate);
}
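
/*
 * Illustrative sketch (not from the original file): a device driver would
 * typically hook and later unhook its handler roughly like this.  The IRQ
 * number, IPL, handler and softc names below are hypothetical placeholders:
 *
 *	sc->sc_ih = at91aic_intr_establish(sa->sa_irq, IPL_TTY,
 *	    INTR_HIGH_LEVEL, mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		printf("%s: could not establish interrupt\n", ...);
 *	...
 *	at91aic_intr_disestablish(sc->sc_ih);
 */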

#include <arm/at91/at91reg.h>
#include <arm/at91/at91dbgureg.h>
#include <arm/at91/at91pdcreg.h>

static inline void intr_process(struct intrq *iq, int pcpl, struct irqframe *frame);

static inline void
intr_process(struct intrq *iq, int pcpl, struct irqframe *frame)
{
	struct intrhand* ih;
	u_int oldirqstate, intr;

	intr = iq - intrq;

	iq->iq_ev.ev_count++;
	uvmexp.intrs++;

	if ((1U << intr) & aic_imask[pcpl]) {
		panic("interrupt %d should be masked! (aic_imask=0x%X)", intr, aic_imask[pcpl]);
	}

	if (iq->iq_busy) {
		panic("interrupt %d busy!", intr);
	}

	iq->iq_busy = 1;

	for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
	     ih = TAILQ_NEXT(ih, ih_list)) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		oldirqstate = enable_interrupts(I32_bit);
		(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		restore_interrupts(oldirqstate);
	}

	if (!iq->iq_busy) {
		panic("interrupt %d not busy!", intr);
	}
	iq->iq_busy = 0;

	set_curcpl(pcpl);
	at91_set_intrmask(aic_imask[pcpl]);
}
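
/*
 * Dispatch path (summary of what the code below and at91aic_init() set up):
 * each Source Vector Register was loaded with the address of the matching
 * intrq entry, so reading AIC_IVR here yields a pointer to the queue of the
 * highest-priority pending source.  Writing AIC_EOICR afterwards tells the
 * AIC that servicing of that interrupt is complete.
 */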
void
at91aic_intr_dispatch(struct irqframe *frame)
{
	struct intrq* iq;
	int pcpl = curcpl();

	iq = (struct intrq *)AICREG(AIC_IVR);	// get current queue

	// OK, service interrupt
	if (iq)
		intr_process(iq, pcpl, frame);

	AICREG(AIC_EOICR) = 0;			// end of interrupt
}

#if 0
void
at91aic_intr_poll(int irq)
{
	u_int oldirqstate;
	uint32_t ipr;
	int pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq) & ~aic_imask[pcpl]))
		intr_process(&intrq[irq], pcpl, NULL);
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
#endif

void
at91aic_intr_poll(void *ihp, int flags)
{
	struct intrhand* ih = ihp;
	u_int oldirqstate, irq = ih->ih_irq;
	uint32_t ipr;
	int pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq))
	    && (flags || !(aic_imask[pcpl] & (1U << irq)))) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		(void)enable_interrupts(I32_bit);
		(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : NULL);
		(void)disable_interrupts(I32_bit);
		set_curcpl(pcpl);
		at91_set_intrmask(aic_imask[pcpl]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}