/*	$NetBSD: i80321_icu.c,v 1.2.4.1 2002/08/30 00:19:18 gehenna Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Interrupt support for the Intel i80321 I/O Processor.
40 */
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45
46 #include <uvm/uvm_extern.h>
47
48 #include <machine/bus.h>
49 #include <machine/intr.h>
50
51 #include <arm/cpufunc.h>
52
53 #include <arm/xscale/i80321reg.h>
54 #include <arm/xscale/i80321var.h>
55
/* Interrupt handler queues, one per hardware interrupt line. */
struct intrq intrq[NIRQ];

/* Mask of IRQ bits to block at each interrupt priority level. */
int i80321_imask[NIPL];

/*
 * Current interrupt priority level, expressed as the mask of IRQ
 * bits currently blocked (not an IPL_* index).
 */
__volatile int current_spl_level;

/* IRQ bits that fired while masked and are awaiting service. */
__volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled (mirror of INTCTL). */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs (written to the steer register). */
uint32_t intr_steer;
73
/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers -- XXX will need to revisit this if those bits are
 * ever used in future steppings.
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};
85
/*
 * Mask of all ICU bits reserved for software interrupts; must stay
 * in sync with si_to_irqbit[] above.
 */
#define	INT_SWMASK							\
	((1U << ICU_INT_bit26) | (1U << ICU_INT_bit22) |		\
	 (1U << ICU_INT_bit5)  | (1U << ICU_INT_bit4))

/* Convert a software interrupt queue index to its ICU bit mask. */
#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
91
/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
101
/*
 * Interrupt bit names, indexed by ICU bit number (0..31); used as
 * event-counter group names in i80321_intr_init().  32 entries --
 * presumably NIRQ == 32; confirm against i80321var.h.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};
139
140 void i80321_intr_dispatch(struct clockframe *frame);
141
142 static __inline uint32_t
143 i80321_iintsrc_read(void)
144 {
145 uint32_t iintsrc;
146
147 __asm __volatile("mrc p6, 0, %0, c8, c0, 0"
148 : "=r" (iintsrc));
149
150 /*
151 * The IINTSRC register shows bits that are active even
152 * if they are masked in INTCTL, so we have to mask them
153 * off with the interrupts we consider enabled.
154 */
155 return (iintsrc & intr_enabled);
156 }
157
#if defined(EVBARM_SPL_NOINLINE)
/*
 * i80321_set_intrmask:
 *
 *	Push the software copy of the enabled-IRQ set out to the
 *	INTCTL register (cp6 c0).  Bits outside ICU_INT_HWMASK are
 *	software-interrupt bits and must never reach the hardware.
 *
 *	The block-scope `extern __volatile uint32_t intr_enabled;'
 *	declaration was removed: the variable is already defined at
 *	file scope above, so the redeclaration was redundant.
 *
 *	(When EVBARM_SPL_NOINLINE is not defined, an inline version
 *	is presumably provided by a header -- confirm in
 *	machine/intr.h.)
 */
static __inline void
i80321_set_intrmask(void)
{

	__asm __volatile("mcr p6, 0, %0, c0, c0, 0"
		:
		: "r" (intr_enabled & ICU_INT_HWMASK));
}
#endif
169
/*
 * i80321_set_intrsteer:
 *
 *	Push the FIQ steering mask out to the steer register (cp6 c4).
 *	Only hardware interrupt bits are ever written.
 */
static __inline void
i80321_set_intrsteer(void)
{

	__asm __volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}
178
179 static __inline void
180 i80321_enable_irq(int irq)
181 {
182
183 intr_enabled |= (1U << irq);
184 i80321_set_intrmask();
185 }
186
187 static __inline void
188 i80321_disable_irq(int irq)
189 {
190
191 intr_enabled &= ~(1U << irq);
192 i80321_set_intrmask();
193 }
194
/*
 * i80321_intr_calculate_masks:
 *
 *	Rebuild i80321_imask[] and every queue's iq_mask from the
 *	currently-established handlers, then enable exactly those
 *	IRQs that have at least one handler attached.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR
 * (it transiently disables every IRQ while the masks are inconsistent).
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	/* IPL_NONE blocks nothing. */
	i80321_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	i80321_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	i80321_imask[IPL_SOFTCLOCK] |= i80321_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	i80321_imask[IPL_BIO] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_NET] |= i80321_imask[IPL_BIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_NET];
	i80321_imask[IPL_TTY] |= i80321_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	i80321_imask[IPL_IMP] |= i80321_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	i80321_imask[IPL_AUDIO] |= i80321_imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	i80321_imask[IPL_CLOCK] |= i80321_imask[IPL_AUDIO];

	/*
	 * No separate statclock.  (NOTE(review): comment originally said
	 * "on the IQ80310" -- looks inherited from the IQ80310 ICU code;
	 * confirm it applies to the i80321 as well.)
	 */
	i80321_imask[IPL_STATCLOCK] |= i80321_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	i80321_imask[IPL_SERIAL] |= i80321_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ: the IRQ itself plus everything blocked by the
	 * IPL of each of its handlers.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		/* Re-enable only IRQs that have a handler attached. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
309
/*
 * i80321_do_pending:
 *
 *	Run all pending software interrupts that are not blocked at
 *	the current spl.  A simple lock guards against reentry; a
 *	nested caller simply returns and lets the outer invocation
 *	finish the work.
 */
__inline void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	/* Snapshot the spl in effect on entry; restored after each queue. */
	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

	/*
	 * For each queue: if its bit is pending and not blocked at the
	 * entry spl, clear the pending bit, raise the spl to the
	 * queue's level, and run the handlers with interrupts enabled.
	 */
#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= i80321_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	/* Service queues from highest to lowest priority. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
342
343 #if defined(EVBARM_SPL_NOINLINE)
344
/*
 * splx:
 *
 *	Set the interrupt priority level to `new' (a blocked-IRQ mask),
 *	then unmask any hardware IRQs held pending that are no longer
 *	blocked, and run any newly-unblocked soft interrupts.
 */
__inline void
splx(int new)
{
	int oldirqstate, hwpend;

	current_spl_level = new;

	/* Hardware IRQs that were held pending and are now unblocked. */
	hwpend = (i80321_ipending & ICU_INT_HWMASK) & ~new;
	if (hwpend != 0) {
		oldirqstate = disable_interrupts(I32_bit);
		intr_enabled |= hwpend;
		i80321_set_intrmask();
		restore_interrupts(oldirqstate);
	}

	/* Run any soft interrupts that are now unblocked. */
	if ((i80321_ipending & INT_SWMASK) & ~new)
		i80321_do_pending();
}
363
364 int
365 _splraise(int ipl)
366 {
367 int old;
368
369 old = current_spl_level;
370 current_spl_level |= i80321_imask[ipl];
371
372 return (old);
373 }
374
375 int
376 _spllower(int ipl)
377 {
378 int old = current_spl_level;
379
380 splx(i80321_imask[ipl]);
381 return(old);
382 }
383
384 #else /* EVBARM_SPL_NOINLINE */
385
#undef splx
/*
 * Out-of-line splx(); presumably splx is otherwise a macro wrapping
 * the inline i80321_splx() -- confirm in machine/intr.h.
 */
__inline void
splx(int new)
{
	i80321_splx(new);
}
392
#undef _spllower
/*
 * Out-of-line _spllower(); presumably _spllower is otherwise a macro
 * wrapping the inline i80321_spllower() -- confirm in machine/intr.h.
 */
int
_spllower(int ipl)
{
	return i80321_spllower(ipl);
}
399
#undef _splraise
/*
 * Out-of-line _splraise(); presumably _splraise is otherwise a macro
 * wrapping the inline i80321_splraise() -- confirm in machine/intr.h.
 */
int
_splraise(int ipl)
{
	return i80321_splraise(ipl);
}
406
407 #endif /* else EVBARM_SPL_NOINLINE */
408
/*
 * _setsoftintr:
 *
 *	Mark software interrupt queue `si' pending, and service it
 *	immediately if the current spl does not block it.
 */
void
_setsoftintr(int si)
{
	int oldirqstate;

	/* Atomically (w.r.t. IRQs) set the pending bit. */
	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
422
/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state: everything
 *	masked, everything steered to IRQ.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}
439
440 /*
441 * i80321_intr_init:
442 *
443 * Initialize the rest of the interrupt subsystem, making it
444 * ready to handle interrupts from devices.
445 */
446 void
447 i80321_intr_init(void)
448 {
449 struct intrq *iq;
450 int i;
451
452 intr_enabled = 0;
453
454 for (i = 0; i < NIRQ; i++) {
455 iq = &intrq[i];
456 TAILQ_INIT(&iq->iq_list);
457
458 evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
459 NULL, "iop321", i80321_irqnames[i]);
460 }
461
462 i80321_intr_calculate_masks();
463
464 /* Enable IRQs (don't yet use FIQs). */
465 enable_interrupts(I32_bit);
466 }
467
468 void *
469 i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
470 {
471 struct intrq *iq;
472 struct intrhand *ih;
473 u_int oldirqstate;
474
475 if (irq < 0 || irq > NIRQ)
476 panic("i80321_intr_establish: IRQ %d out of range", irq);
477
478 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
479 if (ih == NULL)
480 return (NULL);
481
482 ih->ih_func = func;
483 ih->ih_arg = arg;
484 ih->ih_ipl = ipl;
485 ih->ih_irq = irq;
486
487 iq = &intrq[irq];
488
489 /* All IOP321 interrupts are level-triggered. */
490 iq->iq_ist = IST_LEVEL;
491
492 oldirqstate = disable_interrupts(I32_bit);
493
494 TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
495
496 i80321_intr_calculate_masks();
497
498 restore_interrupts(oldirqstate);
499
500 return (ih);
501 }
502
/*
 * i80321_intr_disestablish:
 *
 *	Remove a handler previously registered with
 *	i80321_intr_establish() (identified by its cookie) and
 *	recompute the interrupt masks.  The handler record is not
 *	freed here -- NOTE(review): looks like the malloc'd intrhand
 *	leaks; confirm whether callers are expected to free it.
 */
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}
518
/*
 * i80321_intr_dispatch:
 *
 *	Main hardware interrupt dispatch, entered from the exception
 *	vector with `frame' describing the interrupted context.
 *	Services every pending, unblocked IRQ in ascending bit order,
 *	then runs any unblocked soft interrupts.
 */
void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* Remember the spl that was in effect when we were entered. */
	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;	/* lowest pending bit number */
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			i80321_ipending |= ibit;
			continue;
		}

		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Block everything this IRQ's handlers require. */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* A NULL ih_arg means "pass the clockframe". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's been serviced. */
		intr_enabled |= ibit;
		i80321_set_intrmask();
	}

	/* Check for pending soft intrs. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
		restore_interrupts(oldirqstate);
	}
}
579