/*	$NetBSD: becc_icu.c,v 1.7.36.1 2008/02/18 21:04:24 mjf Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the ADI Engineering Big Endian Companion Chip.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: becc_icu.c,v 1.7.36.1 2008/02/18 21:04:24 mjf Exp $");

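/*
 * Define EVBARM_SPL_NOINLINE so that the spl entry points below (splx(),
 * _spllower(), _splraise()) are compiled as real out-of-line functions
 * here, rather than this file picking up the inline versions from the
 * interrupt headers.
 */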
#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/beccreg.h>
#include <arm/xscale/beccvar.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
uint32_t becc_imask[NIPL];

/* Current interrupt priority level. */
volatile uint32_t current_spl_level;

/* Interrupts pending. */
volatile uint32_t becc_ipending;
volatile uint32_t becc_sipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 * XXX Some of these are BRH-centric.
 */
const char *becc_irqnames[] = {
	"soft",
	"timer A",
	"timer B",
	"irq 3",
	"irq 4",
	"irq 5",
	"irq 6",
	"diagerr",
	"DMA EOT",
	"DMA PERR",
	"DMA TABT",
	"DMA MABT",
	"irq 12",
	"irq 13",
	"irq 14",
	"irq 15",
	"PCI PERR",
	"irq 17",
	"irq 18",
	"PCI SERR",
	"PCI OAPE",
	"PCI OATA",
	"PCI OAMA",
	"irq 23",
	"irq 24",
	"irq 25",
	"irq 26",	/* PCI INTA */
	"irq 27",	/* PCI INTB */
	"irq 28",	/* PCI INTC */
	"irq 29",	/* PCI INTD */
	"pushbutton",
	"irq 31",
};

void	becc_intr_dispatch(struct irqframe *frame);

static inline uint32_t
becc_icsr_read(void)
{
	uint32_t icsr;

	icsr = BECC_CSR_READ(BECC_ICSR);

	/*
	 * The ICSR register shows bits that are active even if they are
	 * masked in ICMR, so we have to mask them off with the interrupts
	 * we consider enabled.
	 */
	return (icsr & intr_enabled);
}

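/*
 * Push the software copy of the FIQ steering mask out to the ICSTR
 * register, restricted to the valid interrupt bits; the read-back
 * makes sure the write has reached the chip before we continue.
 */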
static inline void
becc_set_intrsteer(void)
{

	BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
	(void) BECC_CSR_READ(BECC_ICSTR);
}

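/*
 * Enable or disable a single IRQ source: update the software copy of
 * the enable mask and push the result out to the ICU with
 * becc_set_intrmask().
 */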
static inline void
becc_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	becc_set_intrmask();
}

static inline void
becc_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	becc_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
becc_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		becc_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		becc_imask[ipl] = irqs;
	}

	becc_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 * Note they all come in at the same physical IRQ.
	 */
	becc_imask[IPL_SOFT] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTCLOCK] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTNET] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTSERIAL] = (1U << ICU_SOFT);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	becc_imask[IPL_SOFTCLOCK] |= becc_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	becc_imask[IPL_SOFTNET] |= becc_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	becc_imask[IPL_BIO] |= becc_imask[IPL_SOFTNET];
	becc_imask[IPL_NET] |= becc_imask[IPL_BIO];
	becc_imask[IPL_SOFTSERIAL] |= becc_imask[IPL_NET];
	becc_imask[IPL_TTY] |= becc_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	becc_imask[IPL_VM] |= becc_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	becc_imask[IPL_AUDIO] |= becc_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	becc_imask[IPL_CLOCK] |= becc_imask[IPL_AUDIO];

	/*
	 * No separate statclock on this platform.
	 */
	becc_imask[IPL_STATCLOCK] |= becc_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	becc_imask[IPL_HIGH] |= becc_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	becc_imask[IPL_SERIAL] |= becc_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			becc_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= becc_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

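/*
 * Out-of-line spl entry points; these simply wrap the becc_*
 * implementations (see EVBARM_SPL_NOINLINE above).
 */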
void
splx(int new)
{

	becc_splx(new);
}

int
_spllower(int ipl)
{

	return (becc_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (becc_splraise(ipl));
}

#ifdef __HAVE_FAST_SOFTINTS
void
_setsoftintr(int si)
{

	becc_setsoftintr(si);
}

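/*
 * Map generic software interrupt numbers to the IPLs used to index
 * becc_imask[].
 */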
static const int si_to_ipl[] = {
	[SI_SOFTBIO] = IPL_SOFTBIO,
	[SI_SOFTCLOCK] = IPL_SOFTCLOCK,
	[SI_SOFTNET] = IPL_SOFTNET,
	[SI_SOFTSERIAL] = IPL_SOFTSERIAL,
};

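/*
 * becc_softint:
 *
 *	Handler for the BECC "soft" IRQ.  Dispatches any pending generic
 *	software interrupts, highest priority first, at the appropriate
 *	IPL.  The simple lock guards against re-entering the dispatch
 *	loop while it is already in progress.
 */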
int
becc_softint(void *arg)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t new, oldirqstate;

	/* Clear interrupt */
	BECC_CSR_WRITE(BECC_ICSR, 0);

	if (__cpu_simple_lock_try(&processing) == 0)
		return 0;

	oldirqstate = disable_interrupts(I32_bit);

	new = current_spl_level;

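/*
 * Run the handlers for software interrupt `si' if it is pending: block
 * that soft IPL's sources via becc_imask[], re-enable CPU interrupts
 * around softintr_dispatch(), then restore the previous level.
 */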
#define	DO_SOFTINT(si)							\
	if (becc_sipending & (1 << (si))) {				\
		becc_sipending &= ~(1 << (si));				\
		current_spl_level |= becc_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);

	return 1;
}
#endif

/*
 * becc_icu_init:
 *
 *	Initialize the BECC ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
becc_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	becc_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	becc_set_intrsteer();

	i80200_extirq_dispatch = becc_intr_dispatch;

	i80200_intr_enable(INTCTL_IM);
}

/*
 * becc_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
becc_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "becc", becc_irqnames[i]);
	}

	becc_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

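/*
 * becc_intr_establish:
 *
 *	Register `func' to be called with `arg' when `irq' asserts, at
 *	priority `ipl'.  Recomputes the interrupt masks with CPU
 *	interrupts blocked, and returns a cookie for
 *	becc_intr_disestablish().
 */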
void *
becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("becc_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All BECC interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

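/*
 * becc_intr_disestablish:
 *
 *	Remove a handler previously registered with becc_intr_establish()
 *	and recompute the interrupt masks.
 */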
void
becc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	uint32_t oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

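/*
 * becc_intr_dispatch:
 *
 *	Main hardware interrupt dispatcher, hooked into the i80200
 *	external IRQ path by becc_icu_init().  Masks everything that is
 *	pending, runs the handlers for each source not blocked by the
 *	current spl (re-enabling each source once it has been serviced),
 *	and leaves blocked sources masked and marked in becc_ipending so
 *	they can be handled once the priority level drops.
 */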
void
becc_intr_dispatch(struct irqframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate, pcpl, irq, ibit, hwpend;

	pcpl = current_spl_level;

	hwpend = becc_icsr_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	becc_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			becc_ipending |= ibit;
			continue;
		}

		becc_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		becc_set_intrmask();
	}

	if (becc_ipending & ~pcpl) {
		intr_enabled |= (becc_ipending & ~pcpl);
		becc_set_intrmask();
	}
}