/*	$NetBSD: ixp12x0_intr.c,v 1.2.4.3 2002/10/10 18:31:54 jdolecek Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the Intel ixp12x0
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>

#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern u_int32_t	ixpcom_cr;	/* current cr from *_com.c */
extern u_int32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Software copy of the IRQs we have enabled. */
__volatile u_int32_t intr_enabled;
__volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static __volatile int ipending;
/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers.  (XXX will need to revisit this if those bits are
 * ever used in future steppings.)
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	IXP12X0_INTR_bit30,	/* SI_SOFT */
	IXP12X0_INTR_bit29,	/* SI_SOFTCLOCK */
	IXP12X0_INTR_bit28,	/* SI_SOFTNET */
	IXP12X0_INTR_bit27,	/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << IXP12X0_INTR_bit30) | (1U << IXP12X0_INTR_bit29) |	\
	 (1U << IXP12X0_INTR_bit28) | (1U << IXP12X0_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
void	ixp12x0_intr_dispatch(struct clockframe *frame);

static __inline u_int32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static __inline u_int32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

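/*
 * The on-chip UART has no mask bit of its own in the ICU, so it is
 * masked at the device instead: the com driver's current control
 * register value (ixpcom_cr) is rewritten with the receive/transmit
 * interrupt-enable bits cleared or restored, and ixpcom_imask tells
 * the driver which bits are currently masked.
 */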
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	IXPREG(IXPCOM_UART_BASE + IXPCOM_CR) = ixpcom_cr & ~ixpcom_imask;
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	IXPREG(IXPCOM_UART_BASE + IXPCOM_CR) = ixpcom_cr & ~ixpcom_imask;
}

static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

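/*
 * IRQ numbers below SYS_NIRQ name on-chip (system) interrupt sources;
 * numbers from SYS_NIRQ up are PCI interrupts, controlled through the
 * IXPPCI_IRQ_ENABLE_SET/CLEAR registers.
 */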
static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq:bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static __inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	imask[IPL_NONE] = 0;
	pci_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];
	pci_imask[IPL_SOFTCLOCK] |= pci_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
	pci_imask[IPL_SOFTNET] |= pci_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	pci_imask[IPL_BIO] |= pci_imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	pci_imask[IPL_NET] |= pci_imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	pci_imask[IPL_SOFTSERIAL] |= pci_imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];
	pci_imask[IPL_TTY] |= pci_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];
	pci_imask[IPL_IMP] |= pci_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];
	pci_imask[IPL_AUDIO] |= pci_imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];
	pci_imask[IPL_CLOCK] |= pci_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IXP12x0.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];
	pci_imask[IPL_STATCLOCK] |= pci_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];
	pci_imask[IPL_SERIAL] |= pci_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs;
		int pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

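/*
 * Run any pending soft interrupts that are unmasked at the current
 * spl.  The simple lock guards against recursive entry; each soft
 * interrupt handler runs with the spl raised to its own level and
 * with CPU interrupts re-enabled.
 */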
static void
ixp12x0_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new;
	u_int oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

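/*
 * DO_SOFTINT(si): if soft interrupt `si' is pending and unmasked at
 * the current level, clear its pending bit, raise the spl to that
 * queue's IPL, and dispatch it with CPU interrupts re-enabled.
 */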
#define	DO_SOFTINT(si)							\
	if ((ipending & ~imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

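/*
 * splx() sets the spl to `new', reprograms the hardware interrupt
 * masks to match, and then runs any soft interrupts that the new
 * level leaves unmasked.
 */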
__inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	if (current_spl_level == new)
		return;
	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	restore_interrupts(oldirqstate);

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~imask[new])
		ixp12x0_do_pending();
}

int
_splraise(int ipl)
{
	int old = current_spl_level;

	if (old >= ipl)
		return (old);
	splx(ipl);
	return (old);
}

int
_spllower(int ipl)
{
	int old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

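/*
 * Mark a soft interrupt pending, and dispatch it immediately if the
 * current spl does not mask it.
 */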
void
_setsoftintr(int si)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~imask[current_spl_level])
		ixp12x0_do_pending();
}
426
427 /*
428 * ixp12x0_intr_init:
429 *
430 * Initialize the rest of the interrupt subsystem, making it
431 * ready to handle interrupts from devices.
432 */
433 void
434 ixp12x0_intr_init(void)
435 {
436 struct intrq *iq;
437 int i;
438
439 intr_enabled = 0;
440 pci_intr_enabled = 0;
441
442 for (i = 0; i < NIRQ; i++) {
443 iq = &intrq[i];
444 TAILQ_INIT(&iq->iq_list);
445
		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "ixpintr", iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;
#ifdef DEBUG
	printf("ixp12x0_intr_establish(%d, %d, %08x, %08x)\n",
	    irq, ipl, (u_int32_t) ih_func, (u_int32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}
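
/*
 * Usage sketch (hypothetical driver code; `mydev_intr' and `sc' are
 * illustrative, not part of this file): hook a handler for the
 * on-chip UART at tty priority, and detach it again later.
 *
 *	void *ih;
 *
 *	ih = ixp12x0_intr_establish(IXP12X0_INTR_UART, IPL_TTY,
 *	    mydev_intr, sc);
 *	if (ih == NULL)
 *		panic("mydev: can't establish interrupt");
 *	...
 *	ixp12x0_intr_disestablish(ih);
 */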

void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

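/*
 * Hardware interrupt dispatch: read the pending system and PCI
 * interrupt sources, then service each one in ffs() order.  While a
 * handler runs, the spl is raised to the handler's IPL and the
 * hardware masks block that IPL's sources plus everything still
 * pending; CPU interrupts are re-enabled so higher-priority sources
 * can preempt.  Pending soft interrupts are run on the way out.
 */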
void
ixp12x0_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;
	int pcpl;
	u_int32_t hwpend;
	u_int32_t pci_hwpend;
	int irq;
	u_int32_t ibit;

	pcpl = current_spl_level;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int ipl;

			current_spl_level = ipl = ih->ih_ipl;
			ixp12x0_set_intrmask(imask[ipl] | hwpend,
			    pci_imask[ipl] | pci_hwpend);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		/*
		 * Clear the bit after the handler loop, so that a
		 * spurious interrupt with no handlers can't spin here
		 * forever.
		 */
		hwpend &= ~ibit;
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int ipl;

			current_spl_level = ipl = ih->ih_ipl;
			ixp12x0_set_intrmask(imask[ipl] | hwpend,
			    pci_imask[ipl] | pci_hwpend);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		/* As above: clear after the loop, not inside it. */
		pci_hwpend &= ~ibit;
	}

	splx(pcpl);

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~imask[pcpl]) {
		oldirqstate = enable_interrupts(I32_bit);
		ixp12x0_do_pending();
		restore_interrupts(oldirqstate);
	}
}