/*	$NetBSD: i80321_icu.c,v 1.3 2002/07/30 04:45:41 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Interrupt support for the Intel i80321 I/O Processor.
40 */
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45
46 #include <uvm/uvm_extern.h>
47
48 #include <machine/bus.h>
49 #include <machine/intr.h>
50
51 #include <arm/cpufunc.h>
52
53 #include <arm/xscale/i80321reg.h>
54 #include <arm/xscale/i80321var.h>
55
/* Interrupt handler queues, one per ICU interrupt source. */
struct intrq intrq[NIRQ];

/* Interrupts (IRQ bits) to mask at each interrupt priority level. */
static int imask[NIPL];

/* Current interrupt priority level (a mask of blocked IRQ bits). */
__volatile int current_spl_level;

/* Interrupts pending (both hardware bits and soft-interrupt bits). */
static __volatile int ipending;

/* Software copy of the IRQs we have enabled (mirrors INTCTL). */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs (mirrors INTSTR). */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers -- XXX will need to revisit this if those bits are
 * ever used in future steppings.
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

/* All of the soft-interrupt bits above, as a single mask. */
#define	INT_SWMASK							\
	((1U << ICU_INT_bit26) | (1U << ICU_INT_bit22) |		\
	 (1U << ICU_INT_bit5)  | (1U << ICU_INT_bit4))

/* Convert a soft-interrupt queue index into its IRQ bit. */
#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/*
 * Interrupt bit names, indexed by IRQ number (for event counters).
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);
141
/*
 * i80321_iintsrc_read:
 *
 *	Read the ICU interrupt source register (cp6, c8) and return
 *	only the bits we consider enabled.
 */
static __inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm __volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}
157
/*
 * i80321_set_intrmask:
 *
 *	Push the software copy of the enabled IRQs (intr_enabled),
 *	restricted to the valid hardware bits, into the ICU interrupt
 *	control register (cp6, c0).
 */
static __inline void
i80321_set_intrmask(void)
{

	__asm __volatile("mcr p6, 0, %0, c0, c0, 0"
		:
		: "r" (intr_enabled & ICU_INT_HWMASK));
}
166
/*
 * i80321_set_intrsteer:
 *
 *	Push the IRQ/FIQ steering mask (intr_steer), restricted to the
 *	valid hardware bits, into the ICU steering register (cp6, c4).
 */
static __inline void
i80321_set_intrsteer(void)
{

	__asm __volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}
175
/*
 * i80321_enable_irq:
 *
 *	Enable the given IRQ in the software mask and write it out
 *	to the hardware.
 */
static __inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}
183
/*
 * i80321_disable_irq:
 *
 *	Disable the given IRQ in the software mask and write it out
 *	to the hardware.
 */
static __inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}
191
/*
 * i80321_intr_calculate_masks:
 *
 *	Recompute imask[] (the IRQ bits blocked at each IPL) and each
 *	queue's iq_mask from the currently-established handlers, then
 *	re-enable exactly those IRQs that have handlers attached.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Keep the IRQ disabled while the masks are inconsistent. */
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
	}

	/* IPL_NONE blocks nothing. */
	imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IQ80310.
	 * (NOTE(review): comment mentions IQ80310 in i80321 code --
	 * presumably inherited; confirm it applies to i80321 boards.)
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		/* Re-enable only IRQs that actually have handlers. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
306
/*
 * i80321_do_pending:
 *
 *	Dispatch any pending soft interrupts that are not masked by
 *	the current spl level.  A simple lock guards against recursive
 *	entry (e.g. via splx() from within a soft interrupt handler).
 */
static void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	/* Someone is already processing pending soft interrupts. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

	/*
	 * For one soft interrupt queue: if it is pending and unmasked,
	 * clear its pending bit, raise the spl to its level, run the
	 * handlers with CPU interrupts enabled, then restore the spl
	 * with interrupts disabled again.
	 */
#define	DO_SOFTINT(si)							\
	if ((ipending & ~new) & SI_TO_IRQBIT(si)) {			\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level |= imask[si_to_ipl[(si)]];		\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	/* Dispatch in priority order, highest first. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
339
340 int
341 _splraise(int ipl)
342 {
343 int old, oldirqstate;
344
345 oldirqstate = disable_interrupts(I32_bit);
346 old = current_spl_level;
347 current_spl_level |= imask[ipl];
348
349 restore_interrupts(oldirqstate);
350
351 return (old);
352 }
353
/*
 * splx:
 *
 *	Set the current interrupt priority level to `new', then deliver
 *	any hardware and software interrupts that became unmasked.
 */
__inline void
splx(int new)
{
	int oldirqstate, hwpend;

	current_spl_level = new;

	/*
	 * If there are pending HW interrupts which are being
	 * unmasked, then enable them in the INTCTL register.
	 * This will cause them to come flooding in.
	 */
	hwpend = (ipending & ICU_INT_HWMASK) & ~new;
	if (hwpend != 0) {
		oldirqstate = disable_interrupts(I32_bit);
		intr_enabled |= hwpend;
		i80321_set_intrmask();
		restore_interrupts(oldirqstate);
	}

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~new)
		i80321_do_pending();
}
378
379 int
380 _spllower(int ipl)
381 {
382 int old = current_spl_level;
383
384 splx(imask[ipl]);
385 return (old);
386 }
387
388 void
389 _setsoftintr(int si)
390 {
391 int oldirqstate;
392
393 oldirqstate = disable_interrupts(I32_bit);
394 ipending |= SI_TO_IRQBIT(si);
395 restore_interrupts(oldirqstate);
396
397 /* Process unmasked pending soft interrupts. */
398 if ((ipending & INT_SWMASK) & ~current_spl_level)
399 i80321_do_pending();
400 }
401
402 /*
403 * i80321_icu_init:
404 *
405 * Initialize the i80321 ICU. Called early in bootstrap
406 * to make sure the ICU is in a pristine state.
407 */
408 void
409 i80321_icu_init(void)
410 {
411
412 intr_enabled = 0; /* All interrupts disabled */
413 i80321_set_intrmask();
414
415 intr_steer = 0; /* All interrupts steered to IRQ */
416 i80321_set_intrsteer();
417 }
418
419 /*
420 * i80321_intr_init:
421 *
422 * Initialize the rest of the interrupt subsystem, making it
423 * ready to handle interrupts from devices.
424 */
425 void
426 i80321_intr_init(void)
427 {
428 struct intrq *iq;
429 int i;
430
431 intr_enabled = 0;
432
433 for (i = 0; i < NIRQ; i++) {
434 iq = &intrq[i];
435 TAILQ_INIT(&iq->iq_list);
436
437 evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
438 NULL, "iop321", i80321_irqnames[i]);
439 }
440
441 i80321_intr_calculate_masks();
442
443 /* Enable IRQs (don't yet use FIQs). */
444 enable_interrupts(I32_bit);
445 }
446
447 void *
448 i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
449 {
450 struct intrq *iq;
451 struct intrhand *ih;
452 u_int oldirqstate;
453
454 if (irq < 0 || irq > NIRQ)
455 panic("i80321_intr_establish: IRQ %d out of range", irq);
456
457 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
458 if (ih == NULL)
459 return (NULL);
460
461 ih->ih_func = func;
462 ih->ih_arg = arg;
463 ih->ih_ipl = ipl;
464 ih->ih_irq = irq;
465
466 iq = &intrq[irq];
467
468 /* All IOP321 interrupts are level-triggered. */
469 iq->iq_ist = IST_LEVEL;
470
471 oldirqstate = disable_interrupts(I32_bit);
472
473 TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
474
475 i80321_intr_calculate_masks();
476
477 restore_interrupts(oldirqstate);
478
479 return (ih);
480 }
481
482 void
483 i80321_intr_disestablish(void *cookie)
484 {
485 struct intrhand *ih = cookie;
486 struct intrq *iq = &intrq[ih->ih_irq];
487 int oldirqstate;
488
489 oldirqstate = disable_interrupts(I32_bit);
490
491 TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
492
493 i80321_intr_calculate_masks();
494
495 restore_interrupts(oldirqstate);
496 }
497
/*
 * i80321_intr_dispatch:
 *
 *	Main hardware interrupt dispatcher, called from the IRQ
 *	trap frame handler.  Services each pending, unmasked IRQ in
 *	ascending bit order, running its handlers at the queue's spl
 *	with CPU interrupts re-enabled.
 */
void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* Remember the spl level at entry; restored after each IRQ. */
	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

	while (hwpend != 0) {
		/* Lowest set bit is the next IRQ to service. */
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			ipending |= ibit;
			continue;
		}

		ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Block everything this queue's handlers must block. */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* NULL ih_arg means "pass the clock frame". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();
	}

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
		restore_interrupts(oldirqstate);
	}
}
558