/*	$NetBSD: ixp12x0_intr.c,v 1.7 2003/03/25 06:12:46 igy Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.7 2003/03/25 06:12:46 igy Exp $");

/*
 * Interrupt support for the Intel ixp12x0
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern u_int32_t	ixpcom_cr;	/* current cr from *_com.c */
extern u_int32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Software copy of the IRQs we have enabled. */
__volatile u_int32_t intr_enabled;
__volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static __volatile int ipending;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const u_int32_t si_to_irqbit[SI_NQUEUES] = {
	IXP12X0_INTR_bit30,	/* SI_SOFT */
	IXP12X0_INTR_bit29,	/* SI_SOFTCLOCK */
	IXP12X0_INTR_bit28,	/* SI_SOFTNET */
	IXP12X0_INTR_bit27,	/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << IXP12X0_INTR_bit30) | (1U << IXP12X0_INTR_bit29) |	\
	 (1U << IXP12X0_INTR_bit28) | (1U << IXP12X0_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
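
/*
 * Taken together, the two tables above give each software interrupt
 * queue both an ICU bit and a priority.  A sketch of how the rest of
 * this file uses them (illustrative only, not an extra code path):
 *
 *	ipending |= SI_TO_IRQBIT(SI_SOFTNET);		mark softnet pending
 *	current_spl_level = si_to_ipl[SI_SOFTNET];	run it at IPL_SOFTNET
 */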

void	ixp12x0_intr_dispatch(struct clockframe *frame);

static __inline u_int32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static __inline u_int32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}
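
/*
 * Note that the UART interrupt is masked at the source rather than in
 * the ICU: disabling it clears the receive and transmit interrupt
 * enables (CR_RIE/CR_XIE) in the ixpcom control register.  ixpcom_cr
 * is the com driver's software copy of that register, and
 * ixpcom_imask tells the driver which bits to keep masked when it
 * rewrites the register itself (see the externs above).
 */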

static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}
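
/*
 * The PCI ICU exposes a write-one-to-set / write-one-to-clear
 * register pair, so applying a mask is two stores: clear the IRQs to
 * be blocked, then re-set whatever remains enabled.  E.g.
 * (illustrative only):
 *
 *	ixp12x0_set_intrmask(imask[IPL_NET], pci_imask[IPL_NET]);
 *
 * blocks everything that a handler running at IPL_NET must be
 * protected from, and nothing more.
 */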

static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq:bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static __inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	imask[IPL_NONE] = 0;
	pci_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];
	pci_imask[IPL_SOFTCLOCK] |= pci_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
	pci_imask[IPL_SOFTNET] |= pci_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	pci_imask[IPL_BIO] |= pci_imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	pci_imask[IPL_NET] |= pci_imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	pci_imask[IPL_SOFTSERIAL] |= pci_imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];
	pci_imask[IPL_TTY] |= pci_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];
	pci_imask[IPL_IMP] |= pci_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];
	pci_imask[IPL_AUDIO] |= pci_imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];
	pci_imask[IPL_CLOCK] |= pci_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IXP12x0.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];
	pci_imask[IPL_STATCLOCK] |= pci_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];
	pci_imask[IPL_SERIAL] |= pci_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs;
		int pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}
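
/*
 * Example of the result (illustrative only): if IRQ 2 carries a
 * single handler at IPL_NET, the final loop above leaves
 *
 *	intrq[2].iq_mask == (1U << 2) | imask[IPL_NET]
 *
 * i.e. while IRQ 2 is being serviced, IRQ 2 itself and every IRQ
 * registered at or below IPL_NET (per the hierarchy built above)
 * stay blocked.
 */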

static void
ixp12x0_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new;
	u_int oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
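
/*
 * Note the shape of DO_SOFTINT above: each pending, unmasked queue is
 * run with current_spl_level raised to its own IPL and with hardware
 * interrupts re-enabled around the softintr_dispatch() call, so soft
 * interrupt processing never keeps the CPU's I-bit masked for long.
 * The simple lock keeps the routine from being entered recursively.
 */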

__inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	if (current_spl_level == new)
		return;
	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	restore_interrupts(oldirqstate);

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~imask[new])
		ixp12x0_do_pending();
}
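
/*
 * Typical spl usage in a (hypothetical) caller: raise the priority
 * around a critical section and restore it afterwards:
 *
 *	int s = _splraise(IPL_TTY);
 *	... touch state shared with a tty interrupt handler ...
 *	splx(s);
 */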

int
_splraise(int ipl)
{
	int old = current_spl_level;

	if (old >= ipl)
		return (old);
	splx(ipl);
	return (old);
}

int
_spllower(int ipl)
{
	int old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

void
_setsoftintr(int si)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~imask[current_spl_level])
		ixp12x0_do_pending();
}
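
/*
 * _setsoftintr() is the machine-dependent hook behind the generic
 * soft interrupt facility: scheduling a soft interrupt just sets its
 * bit in ipending, e.g. (illustrative only):
 *
 *	_setsoftintr(SI_SOFTCLOCK);
 *
 * and softintr_dispatch(SI_SOFTCLOCK) then runs, via
 * ixp12x0_do_pending(), once the spl level drops below the level
 * that masks it.
 */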

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "ixpintr", iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
	       irq, ipl, (u_int32_t) ih_func, (u_int32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}
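
/*
 * Example (hypothetical driver, not code in this file): attach a
 * handler for a PCI interrupt at IPL_NET, passing the softc as its
 * argument, and detach it again on shutdown:
 *
 *	sc->sc_ih = ixp12x0_intr_establish(irqno, IPL_NET,
 *	    mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		printf("%s: can't establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 *	...
 *	ixp12x0_intr_disestablish(sc->sc_ih);
 *
 * mydev_intr() returns nonzero if it handled the interrupt; a NULL
 * arg would make the dispatcher pass the interrupt frame instead.
 */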

void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

void
ixp12x0_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;
	int pcpl;
	u_int32_t hwpend;
	u_int32_t pci_hwpend;
	int irq;
	u_int32_t ibit;

	pcpl = current_spl_level;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int ipl;

			current_spl_level = ipl = ih->ih_ipl;
			ixp12x0_set_intrmask(imask[ipl] | hwpend,
					     pci_imask[ipl] | pci_hwpend);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		/*
		 * Clear the bit whether or not a handler was found,
		 * so a stray IRQ cannot wedge this loop.
		 */
		hwpend &= ~ibit;
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int ipl;

			current_spl_level = ipl = ih->ih_ipl;
			ixp12x0_set_intrmask(imask[ipl] | hwpend,
					     pci_imask[ipl] | pci_hwpend);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		pci_hwpend &= ~ibit;
	}

	splx(pcpl);

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~imask[pcpl]) {
		oldirqstate = enable_interrupts(I32_bit);
		ixp12x0_do_pending();
		restore_interrupts(oldirqstate);
	}
}