iq80310_intr.c revision 1.8 1 /* $NetBSD: iq80310_intr.c,v 1.8 2002/01/30 03:59:42 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Interrupt support for the Intel IQ80310.
40 */
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45
46 #include <uvm/uvm_extern.h>
47
48 #include <machine/bus.h>
49 #include <machine/intr.h>
50
51 #include <arm/cpufunc.h>
52
53 #include <arm/xscale/i80200reg.h>
54 #include <arm/xscale/i80200var.h>
55
56 #include <evbarm/iq80310/iq80310reg.h>
57 #include <evbarm/iq80310/iq80310var.h>
58 #include <evbarm/iq80310/obiovar.h>
59
60 /*
61 * We have 8 interrupt source bits -- 5 in the XINT3 register, and 3
62 * in the XINT0 register (the upper 3). Note that the XINT0 IRQs
63 * (SPCI INTA, INTB, and INTC) are always enabled, since they can not
64 * be masked out in the CPLD (it provides only status, not masking,
65 * for those interrupts).
66 */
67 #define IRQ_BITS 0xff
68 #define IRQ_BITS_ALWAYS_ON 0xe0
69
/* Interrupt handler queues, one per hardware IRQ source. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static int imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/*
 * Interrupts pending.  Hardware IRQ bits live in the low byte
 * (IRQ_BITS); software interrupt bits live at the top of the
 * word (see SI_TO_IRQBIT() below).
 */
static __volatile int ipending;

/* Software copy of the IRQs we have enabled. */
uint32_t intr_enabled;

/*
 * Map a software interrupt queue index (at the top of the word, and
 * highest priority softintr is encountered first in an ffs()).
 */
#define	SI_TO_IRQBIT(si)	(1U << (31 - (si)))

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/* Forward declaration; installed as the i80200 external-IRQ hook. */
void	iq80310_intr_dispatch(struct clockframe *frame);
102
/*
 * iq80310_intstat_read:
 *
 *	Read the CPLD interrupt status registers and return the set of
 *	pending, enabled IRQ bits: XINT3 supplies bits 0-4, XINT0
 *	supplies bits 5-7.
 */
static __inline uint32_t
iq80310_intstat_read(void)
{
	uint32_t intstat;

	intstat = CPLD_READ(IQ80310_XINT3_STATUS) & 0x1f;
	/*
	 * The XINT0 status bits (secondary PCI INTA/INTB/INTC) exist
	 * only on rev F and later boards; a run-time board-revision
	 * check has not been implemented yet, so read unconditionally.
	 */
	if (1/*rev F or later board*/)
		intstat |= (CPLD_READ(IQ80310_XINT0_STATUS) & 0x7) << 5;

	/* XXX Why do we have to mask off? */
	return (intstat & intr_enabled);
}
115
116 static __inline void
117 iq80310_set_intrmask(void)
118 {
119 uint32_t disabled;
120
121 intr_enabled |= IRQ_BITS_ALWAYS_ON;
122
123 /* The XINT_MASK register sets a bit to *disable*. */
124 disabled = (~intr_enabled) & IRQ_BITS;
125
126 CPLD_WRITE(IQ80310_XINT_MASK, disabled & 0x1f);
127 }
128
129 static __inline void
130 iq80310_enable_irq(int irq)
131 {
132
133 intr_enabled |= (1U << irq);
134 iq80310_set_intrmask();
135 }
136
137 static __inline void
138 iq80310_disable_irq(int irq)
139 {
140
141 intr_enabled &= ~(1U << irq);
142 iq80310_set_intrmask();
143 }
144
/*
 * iq80310_intr_calculate_masks:
 *
 *	Recompute imask[] and each queue's iq_mask from the set of
 *	established handlers, and enable exactly those IRQs that have
 *	at least one handler attached.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
iq80310_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Disable while recomputing; re-enabled below if in use. */
		iq80310_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
	}

	imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IQ80310.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		/* Only IRQs with handlers attached are enabled. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			iq80310_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
259
/*
 * iq80310_do_pending:
 *
 *	Dispatch any pending software interrupts that are not blocked
 *	by the current spl.  A simple lock guarantees only one context
 *	runs soft interrupts at a time; a second caller just returns
 *	and lets the first finish the work.
 */
static void
iq80310_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	/* Someone else is already draining the soft interrupts. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

	/*
	 * For each queue: if its bit is pending and not blocked by the
	 * spl in effect on entry (`new'), clear the bit, raise the spl
	 * to that queue's level, and run the dispatcher with hardware
	 * interrupts re-enabled; then restore the entry spl.
	 */
#define	DO_SOFTINT(si)						\
	if ((ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);		\
		softintr_dispatch(si);				\
		oldirqstate = disable_interrupts(I32_bit);	\
		current_spl_level = new;			\
	}

	/* Highest-priority queues first. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
292
293 int
294 _splraise(int ipl)
295 {
296 int old, oldirqstate;
297
298 oldirqstate = disable_interrupts(I32_bit);
299 old = current_spl_level;
300 current_spl_level |= imask[ipl];
301
302 restore_interrupts(oldirqstate);
303
304 return (old);
305 }
306
307 __inline void
308 splx(int new)
309 {
310 int old;
311
312 old = current_spl_level;
313 current_spl_level = new;
314
315 /*
316 * If there are pending hardware interrupts (i.e. the
317 * external interrupt is disabled in the ICU), and all
318 * hardware interrupts are being unblocked, then re-enable
319 * the external hardware interrupt.
320 *
321 * XXX We have to wait for ALL hardware interrupts to
322 * XXX be unblocked, because we currently lose if we
323 * XXX get nested interrupts, and I don't know why yet.
324 */
325 if ((new & IRQ_BITS) == 0 && (ipending & IRQ_BITS))
326 i80200_intr_enable(INTCTL_IM);
327
328 /* If there are software interrupts to process, do it. */
329 if ((ipending & ~IRQ_BITS) & ~new)
330 iq80310_do_pending();
331 }
332
333 int
334 _spllower(int ipl)
335 {
336 int old = current_spl_level;
337
338 splx(imask[ipl]);
339 return (old);
340 }
341
/*
 * _setsoftintr:
 *
 *	Schedule software interrupt queue `si': mark its bit pending
 *	(with hardware interrupts disabled around the read-modify-write
 *	of ipending), then dispatch immediately if the current spl
 *	does not block it.
 */
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & ~IRQ_BITS) & ~current_spl_level)
		iq80310_do_pending();
}
355
356 void
357 iq80310_intr_init(void)
358 {
359 struct intrq *iq;
360 int i;
361
362 /*
363 * The Secondary PCI interrupts INTA, INTB, and INTC
364 * area always enabled, since they cannot be masked
365 * in the CPLD.
366 */
367 intr_enabled |= IRQ_BITS_ALWAYS_ON;
368
369 for (i = 0; i < NIRQ; i++) {
370 iq = &intrq[i];
371 TAILQ_INIT(&iq->iq_list);
372
373 sprintf(iq->iq_name, "irq %d", i);
374 evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
375 NULL, "iq80310", iq->iq_name);
376 }
377
378 iq80310_intr_calculate_masks();
379
380 /* Enable external interrupts on the i80200. */
381 i80200_extirq_dispatch = iq80310_intr_dispatch;
382 i80200_intr_enable(INTCTL_IM);
383
384 /* Enable IRQs (don't yet use FIQs). */
385 enable_interrupts(I32_bit);
386 }
387
388 void *
389 iq80310_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
390 {
391 struct intrq *iq;
392 struct intrhand *ih;
393 u_int oldirqstate;
394
395 if (irq < 0 || irq > NIRQ)
396 panic("iq80310_intr_establish: IRQ %d out of range", irq);
397
398 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
399 if (ih == NULL)
400 return (NULL);
401
402 ih->ih_func = func;
403 ih->ih_arg = arg;
404 ih->ih_ipl = ipl;
405 ih->ih_irq = irq;
406
407 iq = &intrq[irq];
408
409 /* All IQ80310 interrupts are level-triggered. */
410 iq->iq_ist = IST_LEVEL;
411
412 oldirqstate = disable_interrupts(I32_bit);
413
414 TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
415
416 iq80310_intr_calculate_masks();
417
418 restore_interrupts(oldirqstate);
419
420 return (ih);
421 }
422
423 void
424 iq80310_intr_disestablish(void *cookie)
425 {
426 struct intrhand *ih = cookie;
427 struct intrq *iq = &intrq[ih->ih_irq];
428 int oldirqstate;
429
430 oldirqstate = disable_interrupts(I32_bit);
431
432 TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
433
434 iq80310_intr_calculate_masks();
435
436 restore_interrupts(oldirqstate);
437 }
438
/*
 * iq80310_intr_dispatch:
 *
 *	External-IRQ dispatcher, installed as i80200_extirq_dispatch.
 *	Runs the handlers for every pending, unmasked IRQ (lowest bit
 *	first, per ffs()), defers masked IRQs via ipending, then
 *	services soft interrupts and re-enables the external interrupt
 *	when nothing remains blocked.
 */
void
iq80310_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* First, disable external IRQs. */
	i80200_intr_disable(INTCTL_IM);

	/* Spl level at the time we were interrupted. */
	pcpl = current_spl_level;

	for (hwpend = iq80310_intstat_read(); hwpend != 0;) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: external IRQs are already
			 * disabled.
			 */
			ipending |= ibit;
			continue;
		}

		ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Block this IRQ and everything its handlers' IPLs mask. */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* A NULL ih_arg means "pass the clockframe". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;
	}

	/* Check for pending soft intrs. */
	if ((ipending & ~IRQ_BITS) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		iq80310_do_pending();
		restore_interrupts(oldirqstate);
	}

	/*
	 * If no hardware interrupts are masked, re-enable external
	 * interrupts.
	 */
	if ((ipending & IRQ_BITS) == 0)
		i80200_intr_enable(INTCTL_IM);
}
497