/*	$NetBSD: i80321_icu.c,v 1.14.30.1 2007/11/09 05:37:44 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.14.30.1 2007/11/09 05:37:44 matt Exp $");

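/*
 * Note: EVBARM_SPL_NOINLINE is forced on here so that splx(),
 * _spllower(), and _splraise() below are compiled as real (out-of-line)
 * functions wrapping the i80321 inline versions, rather than being
 * inlined away.
 */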
#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
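/*
 * Illustrative note (assuming, as their names suggest, that the
 * ICU_INT_bitNN constants from i80321reg.h are the bit positions
 * themselves): SI_TO_IRQBIT(SI_SOFTCLOCK) expands to
 * (1U << ICU_INT_bit22), i.e. the otherwise unused bit 22 of the
 * interrupt source word stands in for the softclock queue.
 */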

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	i80321_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	i80321_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	i80321_imask[IPL_SOFTCLOCK] |= i80321_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	i80321_imask[IPL_BIO] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_NET] |= i80321_imask[IPL_BIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_NET];
	i80321_imask[IPL_TTY] |= i80321_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	i80321_imask[IPL_VM] |= i80321_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	i80321_imask[IPL_AUDIO] |= i80321_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	i80321_imask[IPL_CLOCK] |= i80321_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the i80321.
	 */
	i80321_imask[IPL_STATCLOCK] |= i80321_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	i80321_imask[IPL_SERIAL] |= i80321_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
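
/*
 * Illustrative example of the computation above: if the only handler
 * registered on IRQ 9 (TMR0) runs at IPL_CLOCK, that queue's iq_mask
 * becomes (1U << 9) | i80321_imask[IPL_CLOCK], so servicing the timer
 * blocks the timer itself plus everything splclock() blocks.
 */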
297
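/*
 * i80321_do_pending:
 *
 *	Dispatch any pending soft interrupts that are not masked by
 *	the current priority level.  The simple lock prevents recursive
 *	re-entry while a batch of soft interrupts is being processed.
 */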
void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = curcpl();

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		set_curcpl(new | i80321_imask[si_to_ipl[(si)]]);	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		set_curcpl(new);					\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

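/*
 * Out-of-line spl(9) entry points.  These exist because
 * EVBARM_SPL_NOINLINE is defined above; each simply calls the
 * corresponding i80321 implementation.
 */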
void
splx(int new)
{

	i80321_splx(new);
}

int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}

void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~curcpl())
		i80321_do_pending();
}
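
/*
 * Usage sketch (illustrative; the "mydrv" names are hypothetical):
 * drivers normally reach _setsoftintr() through the MI softintr(9)
 * facility rather than calling it directly:
 *
 *	sc->sc_si = softintr_establish(IPL_SOFTNET, mydrv_softintr, sc);
 *	...
 *	softintr_schedule(sc->sc_si);	(from the hard interrupt handler)
 */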

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

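/*
 * i80321_intr_establish:
 *
 *	Register an interrupt handler on the given IRQ at the given
 *	IPL.  Returns an opaque cookie for i80321_intr_disestablish(),
 *	or NULL if the handler structure could not be allocated.
 */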
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
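
/*
 * Usage sketch (illustrative): callers hook a handler at attach time,
 * much as the on-chip timer code does, keeping the cookie in case the
 * handler must later be removed:
 *
 *	void *ih;
 *
 *	ih = i80321_intr_establish(ICU_INT_TMR0, IPL_CLOCK,
 *	    clockhandler, NULL);
 *	if (ih == NULL)
 *		panic("cannot establish TMR0 interrupt");
 */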

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	/* The handler is off the queue; release its memory. */
	free(ih, M_DEVBUF);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif

	pcpl = curcpl();

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI.  We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx().  Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		set_curcpl(pcpl | iq->iq_mask);
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
			oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
#ifdef I80321_HPI_ENABLED
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
			restore_interrupts(oldirqstate);
#ifdef I80321_HPI_ENABLED
		else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI.  Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		set_curcpl(pcpl);

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~pcpl);
	}

	/* Check for pending soft intrs. */
	if ((i80321_ipending & INT_SWMASK) & ~curcpl()) {
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_SOFT* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
			oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_NET* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
			restore_interrupts(oldirqstate);
	}
}