/*	$NetBSD: i80321_icu.c,v 1.2.6.2 2002/11/18 01:45:19 he Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static int imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Interrupts pending. */
static __volatile int ipending;

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK						\
	((1U << ICU_INT_bit26) | (1U << ICU_INT_bit22) |	\
	 (1U << ICU_INT_bit5)  | (1U << ICU_INT_bit4))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
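
/*
 * For example, SI_TO_IRQBIT(SI_SOFTNET) evaluates to (1U << ICU_INT_bit5):
 * the otherwise-unused hardware bit 5 stands in for the soft network
 * interrupt in ipending and in the imask[] tables computed below.
 */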

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

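
/*
 * The ICU lives on coprocessor 6: the mrc/mcr sequences below read the
 * interrupt source register (IINTSRC) via c8, write the interrupt
 * control (mask) register via c0, and write the interrupt steering
 * register via c4.
 */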
static __inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm __volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static __inline void
i80321_set_intrmask(void)
{

	__asm __volatile("mcr p6, 0, %0, c0, c0, 0"
		:
		: "r" (intr_enabled & ICU_INT_HWMASK));
}

static __inline void
i80321_set_intrsteer(void)
{

	__asm __volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static __inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static __inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
	}

	imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * No separate statclock on the i80321.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
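
/*
 * To illustrate the result: a handler established at IPL_NET gets an
 * iq_mask that also blocks IPL_BIO, IPL_SOFTNET, IPL_SOFTCLOCK, and
 * IPL_SOFT, since each of those masks was folded into imask[IPL_NET]
 * by the hierarchy built above.
 */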

static void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	/* Another invocation is already running; avoid recursion. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)						\
	if ((ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);		\
		softintr_dispatch(si);				\
		oldirqstate = disable_interrupts(I32_bit);	\
		current_spl_level = new;			\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

int
_splraise(int ipl)
{
	int old;

	old = current_spl_level;
	current_spl_level |= imask[ipl];

	return (old);
}
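
/*
 * Typical usage, as a sketch (assuming the usual NetBSD arrangement where
 * the spl*() macros in <machine/intr.h> expand to _splraise() and friends):
 *
 *	s = _splraise(IPL_BIO);		-- block disk interrupts
 *	... modify data shared with an IPL_BIO handler ...
 *	splx(s);			-- drop back to the previous level
 */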

__inline void
splx(int new)
{
	int oldirqstate, hwpend;

	current_spl_level = new;

	/*
	 * If there are pending HW interrupts which are being
	 * unmasked, then enable them in the INTCTL register.
	 * This will cause them to come flooding in.
	 */
	hwpend = (ipending & ICU_INT_HWMASK) & ~new;
	if (hwpend != 0) {
		oldirqstate = disable_interrupts(I32_bit);
		intr_enabled |= hwpend;
		i80321_set_intrmask();
		restore_interrupts(oldirqstate);
	}

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~new)
		i80321_do_pending();
}

int
_spllower(int ipl)
{
	int old = current_spl_level;

	splx(imask[ipl]);
	return (old);
}

void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
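
/*
 * This is the MD hook for the generic soft interrupt facility: scheduling
 * a soft interrupt is expected to funnel into _setsoftintr() with one of
 * the SI_* constants, which latches the corresponding fake IRQ bit in
 * ipending; i80321_do_pending() then hands the queue to softintr_dispatch()
 * once the current spl no longer masks it.
 */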

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
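
/*
 * A driver would hook an interrupt roughly as follows; the IRQ number,
 * handler, and softc names here are illustrative only, not part of this
 * file:
 *
 *	sc->sc_ih = i80321_intr_establish(MYDEV_IRQ, IPL_BIO,
 *	    mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		panic("%s: can't establish interrupt",
 *		    sc->sc_dev.dv_xname);
 *
 * The handler is removed again with i80321_intr_disestablish(sc->sc_ih).
 */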

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			ipending |= ibit;
			continue;
		}

		ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();
	}

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
		restore_interrupts(oldirqstate);
	}
}