/*	$NetBSD: footbridge_irqhandler.c,v 1.6 2002/11/03 21:43:31 chris Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: iomd_irqhandler.c,v 1.16 $
 */

#ifndef ARM_SPL_NOINLINE
#define	ARM_SPL_NOINLINE
#endif

#include <sys/cdefs.h>
#ifndef __lint
__RCSID("$NetBSD: footbridge_irqhandler.c,v 1.6 2002/11/03 21:43:31 chris Exp $");
#endif /* !__lint */

#include "opt_irqstats.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/cpu.h>
#include <arm/footbridge/dc21285mem.h>
#include <arm/footbridge/dc21285reg.h>

#include <dev/pci/pcivar.h>

#include "isa.h"
#if NISA > 0
#include <dev/isa/isavar.h>
#endif

/* Interrupt handler queues. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each level. */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Interrupts pending. */
__volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt __P((void *, pci_intr_handle_t));

void footbridge_do_pending(void);

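/*
 * Map a software interrupt queue to the hardware IRQ number reserved for it.
 */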
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	IRQ_SOFTINT,
	IRQ_RESERVED0,
	IRQ_RESERVED1,
	IRQ_RESERVED2
};

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

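/*
 * Return the event counter for a PCI interrupt handle; handles in the
 * 0x80-0x8f range are ISA interrupts and are passed to the ISA code.
 */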
const struct evcnt *
footbridge_pci_intr_evcnt(pcv, ih)
	void *pcv;
	pci_intr_handle_t ih;
{
	/* XXX check range is valid */
#if NISA > 0
	if (ih >= 0x80 && ih <= 0x8f) {
		return isa_intr_evcnt(NULL, (ih & 0x0f));
	}
#endif
	return &footbridge_intrq[ih].iq_ev;
}

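/*
 * Enable an IRQ in the software copy of the interrupt mask and push
 * the new mask to the hardware.
 */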
static __inline void
footbridge_enable_irq(int irq)
{
	intr_enabled |= (1U << irq);

	footbridge_set_intrmask();
}

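/*
 * Disable an IRQ in the software copy of the interrupt mask and push
 * the new mask to the hardware.
 */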
static __inline void
footbridge_disable_irq(int irq)
{
	intr_enabled &= ~(1U << irq);
	footbridge_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &footbridge_intrq[irq];
		footbridge_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (footbridge_intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		footbridge_imask[ipl] = irqs;
	}

	/* IPL_NONE must open up all interrupts. */
	footbridge_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	footbridge_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	footbridge_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	footbridge_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	footbridge_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	footbridge_imask[IPL_SOFTCLOCK] |= footbridge_imask[IPL_SOFT];
	footbridge_imask[IPL_SOFTNET] |= footbridge_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	footbridge_imask[IPL_BIO] |= footbridge_imask[IPL_SOFTNET];
	footbridge_imask[IPL_NET] |= footbridge_imask[IPL_BIO];
	footbridge_imask[IPL_SOFTSERIAL] |= footbridge_imask[IPL_NET];

	footbridge_imask[IPL_TTY] |= footbridge_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	footbridge_imask[IPL_IMP] |= footbridge_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	footbridge_imask[IPL_AUDIO] |= footbridge_imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	footbridge_imask[IPL_CLOCK] |= footbridge_imask[IPL_AUDIO];

	/*
	 * The footbridge has a separate statclock.
	 */
	footbridge_imask[IPL_STATCLOCK] |= footbridge_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	footbridge_imask[IPL_SERIAL] |= footbridge_imask[IPL_HIGH];

	/*
	 * Calculate the mask of interrupts to block while handling each IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &footbridge_intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			footbridge_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= footbridge_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

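/*
 * Raise the current interrupt priority level, returning the old one.
 */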
int
_splraise(int ipl)
{
	return (footbridge_splraise(ipl));
}

/* This will always take us to the IPL passed in. */
void
splx(int new)
{
	footbridge_splx(new);
}

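/*
 * Lower the current interrupt priority level, returning the old one.
 */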
int
_spllower(int ipl)
{
	return (footbridge_spllower(ipl));
}

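/*
 * Dispatch any pending soft interrupts that are not masked at the
 * current interrupt priority level.  A simple lock ensures only one
 * context processes the pending set at a time.
 */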
__inline void
footbridge_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((footbridge_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		footbridge_ipending &= ~SI_TO_IRQBIT(si);		\
		current_spl_level |= footbridge_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

/* Called from splhigh, so the matching splx will set the interrupt up. */
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	footbridge_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level)
		footbridge_do_pending();
}

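/*
 * Initialize the interrupt system: mask all IRQs, set up the handler
 * queues and their event counters, compute the initial masks and then
 * enable IRQs on the CPU.
 */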
void
footbridge_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	current_spl_level = 0xffffffff;
	footbridge_ipending = 0;
	footbridge_set_intrmask();

	for (i = 0; i < NIRQ; i++) {
		iq = &footbridge_intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "footbridge", iq->iq_name);
	}

	footbridge_intr_calculate_masks();

	/* Enable IRQs; we don't have any FIQs. */
	enable_interrupts(I32_bit);
}

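/*
 * Establish an interrupt handler for the given IRQ at the given IPL.
 * Returns an opaque cookie that can later be passed to
 * footbridge_intr_disestablish().
 */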
void *
footbridge_intr_claim(int irq, int ipl, char *name, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("footbridge_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL) {
		printf("No memory\n");
		return (NULL);
	}

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &footbridge_intrq[irq];

	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	/* Detach the existing event counter and reattach it with the new name. */
	evcnt_detach(&iq->iq_ev);
	evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
	    NULL, "footbridge", name);

	restore_interrupts(oldirqstate);

	return (ih);
}

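/*
 * Remove a previously established interrupt handler, identified by the
 * cookie returned from footbridge_intr_claim().
 */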
void
footbridge_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &footbridge_intrq[ih->ih_irq];
	int oldirqstate;

	/* XXX need to free ih ? */
	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

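/* Read the pending-interrupt status register from the DC21285. */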
static uint32_t footbridge_intstatus(void);

static __inline uint32_t
footbridge_intstatus(void)
{
	return ((__volatile uint32_t *)(DC21285_ARMCSR_VBASE))[IRQ_STATUS >> 2];
}

/* Called with external interrupts disabled. */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	pcpl = current_spl_level;

	hwpend = footbridge_intstatus();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * re-enable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	footbridge_set_intrmask();

	while (hwpend != 0) {
		int intr_rc = 0;
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			footbridge_ipending |= ibit;
			continue;
		}

		footbridge_ipending &= ~ibit;

		iq = &footbridge_intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list);
		     (ih != NULL) && (intr_rc != 1);
		     ih = TAILQ_NEXT(ih, ih_list)) {
			intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's been cleared. */
		intr_enabled |= ibit;
		footbridge_set_intrmask();
	}

	/*
	 * Restore the SPL to its value on entry; this will cause any
	 * pending interrupts, both hard and soft, to be processed.
	 */
	splx(pcpl);
}