/*	$NetBSD: becc_icu.c,v 1.3 2003/07/15 00:24:52 lukem Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the ADI Engineering Big Endian Companion Chip.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: becc_icu.c,v 1.3 2003/07/15 00:24:52 lukem Exp $");

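/*
 * Use the out-of-line spl functions; the splx(), _spllower(), and
 * _splraise() wrappers below provide them by calling the becc_*()
 * implementations.
 */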
#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/beccreg.h>
#include <arm/xscale/beccvar.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
uint32_t becc_imask[NIPL];

/* Current interrupt priority level. */
__volatile uint32_t current_spl_level;

/* Interrupts pending. */
__volatile uint32_t becc_ipending;
__volatile uint32_t becc_sipending;

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 * XXX Some of these are BRH-centric.
 */
const char *becc_irqnames[] = {
	"soft",
	"timer A",
	"timer B",
	"irq 3",
	"irq 4",
	"irq 5",
	"irq 6",
	"diagerr",
	"DMA EOT",
	"DMA PERR",
	"DMA TABT",
	"DMA MABT",
	"irq 12",
	"irq 13",
	"irq 14",
	"irq 15",
	"PCI PERR",
	"irq 17",
	"irq 18",
	"PCI SERR",
	"PCI OAPE",
	"PCI OATA",
	"PCI OAMA",
	"irq 23",
	"irq 24",
	"irq 25",
	"irq 26",	/* PCI INTA */
	"irq 27",	/* PCI INTB */
	"irq 28",	/* PCI INTC */
	"irq 29",	/* PCI INTD */
	"pushbutton",
	"irq 31",
};

void	becc_intr_dispatch(struct clockframe *frame);

static __inline uint32_t
becc_icsr_read(void)
{
	uint32_t icsr;

	icsr = BECC_CSR_READ(BECC_ICSR);

	/*
	 * The ICSR register shows bits that are active even if they are
	 * masked in ICMR, so we have to mask them off with the interrupts
	 * we consider enabled.
	 */
	return (icsr & intr_enabled);
}

static __inline void
becc_set_intrsteer(void)
{

	BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
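	/* The read back should ensure the write has reached the BECC. */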
	(void) BECC_CSR_READ(BECC_ICSTR);
}

static __inline void
becc_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	becc_set_intrmask();
}

static __inline void
becc_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	becc_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
becc_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		becc_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		becc_imask[ipl] = irqs;
	}

	becc_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 * Note they all come in at the same physical IRQ.
	 */
	becc_imask[IPL_SOFT] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTCLOCK] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTNET] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTSERIAL] = (1U << ICU_SOFT);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	becc_imask[IPL_SOFTCLOCK] |= becc_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	becc_imask[IPL_SOFTNET] |= becc_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	becc_imask[IPL_BIO] |= becc_imask[IPL_SOFTNET];
	becc_imask[IPL_NET] |= becc_imask[IPL_BIO];
	becc_imask[IPL_SOFTSERIAL] |= becc_imask[IPL_NET];
	becc_imask[IPL_TTY] |= becc_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	becc_imask[IPL_VM] |= becc_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	becc_imask[IPL_AUDIO] |= becc_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	becc_imask[IPL_CLOCK] |= becc_imask[IPL_AUDIO];

	/*
	 * There is no separate statclock on this platform.
	 */
	becc_imask[IPL_STATCLOCK] |= becc_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	becc_imask[IPL_HIGH] |= becc_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	becc_imask[IPL_SERIAL] |= becc_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			becc_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= becc_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

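/*
 * Out-of-line spl and soft interrupt entry points (see
 * EVBARM_SPL_NOINLINE above); they simply wrap the becc_*()
 * implementations.
 */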
void
splx(int new)
{

	becc_splx(new);
}

int
_spllower(int ipl)
{

	return (becc_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (becc_splraise(ipl));
}

void
_setsoftintr(int si)
{

	becc_setsoftintr(si);
}

static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

int
becc_softint(void *arg)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t new, oldirqstate;

	/* Clear interrupt */
	BECC_CSR_WRITE(BECC_ICSR, 0);

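	/* Bail out if a soft interrupt pass is already in progress. */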
	if (__cpu_simple_lock_try(&processing) == 0)
		return 0;

	oldirqstate = disable_interrupts(I32_bit);

	new = current_spl_level;

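/*
 * DO_SOFTINT runs one software interrupt queue: if its bit is pending,
 * clear it, raise the spl mask to that queue's IPL, re-enable CPU
 * interrupts so hardware interrupts can preempt the handlers, run
 * softintr_dispatch(), and then restore the previous state.
 */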
#define	DO_SOFTINT(si)							\
	if (becc_sipending & (1 << (si))) {				\
		becc_sipending &= ~(1 << (si));				\
		current_spl_level |= becc_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

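	/* Run the queues from highest to lowest priority. */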
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);

	return 1;
}

/*
 * becc_icu_init:
 *
 *	Initialize the BECC ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
becc_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	becc_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	becc_set_intrsteer();

	i80200_extirq_dispatch = becc_intr_dispatch;

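	/*
	 * Unmask the external IRQ input on the i80200 so that BECC
	 * interrupts reach the dispatcher installed above.
	 */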
	i80200_intr_enable(INTCTL_IM);
}

/*
 * becc_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
becc_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "becc", becc_irqnames[i]);
	}

	becc_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("becc_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All BECC interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
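
/*
 * A typical becc_intr_establish() call from a driver looks like the
 * following (the handler and softc names are hypothetical, for
 * illustration only):
 *
 *	sc->sc_ih = becc_intr_establish(irq, IPL_BIO, foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		printf("%s: unable to establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 */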

void
becc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	uint32_t oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

void
becc_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate, pcpl, irq, ibit, hwpend;

	pcpl = current_spl_level;

	hwpend = becc_icsr_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	becc_set_intrmask();

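	/*
	 * Service each pending interrupt, lowest-numbered IRQ first.
	 */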
	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			becc_ipending |= ibit;
			continue;
		}

		becc_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		becc_set_intrmask();
	}

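	/*
	 * Re-enable any IRQs that were marked pending above but are not
	 * blocked at the current priority level, so they can be taken
	 * again.
	 */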
	if (becc_ipending & ~pcpl) {
		intr_enabled |= (becc_ipending & ~pcpl);
		becc_set_intrmask();
	}
}