/* $NetBSD: ixp12x0_intr.c,v 1.15.30.3 2008/01/28 18:29:06 matt Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.15.30.3 2008/01/28 18:29:06 matt Exp $");

/*
 * Interrupt support for the Intel IXP12x0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/simplelock.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern u_int32_t ixpcom_cr;     /* current CR register value, from *_com.c */
extern u_int32_t ixpcom_imask;  /* mask bits we hand back to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t intr_enabled;
volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

void ixp12x0_intr_dispatch(struct irqframe *frame);

#define IXPREG(reg)     *((volatile u_int32_t *)(reg))

static inline u_int32_t
ixp12x0_irq_read(void)
{
        return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline u_int32_t
ixp12x0_pci_irq_read(void)
{
        return IXPREG(IXPPCI_IRQ_STATUS);
}
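
/*
 * A small illustrative sketch (not used by the code below): each set
 * bit in ixp12x0_irq_read() is a pending system IRQ source, and each
 * set bit in ixp12x0_pci_irq_read() is a pending source behind the
 * PCI unit, which the dispatch loop maps to the IRQ numbers
 * SYS_NIRQ..NIRQ-1:
 *
 *      if (ixp12x0_irq_read() & (1U << IXP12X0_INTR_UART))
 *              ... the UART has an interrupt pending ...
 */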

static void
ixp12x0_enable_uart_irq(void)
{
        ixpcom_imask = 0;
        if (ixpcom_sc)
                bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
                    IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
        ixpcom_imask = CR_RIE | CR_XIE;
        if (ixpcom_sc)
                bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
                    IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
        if (irqs & (1U << IXP12X0_INTR_UART)) {
                ixp12x0_disable_uart_irq();
        } else {
                ixp12x0_enable_uart_irq();
        }
        IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
        IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}
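
/*
 * A minimal usage sketch, assuming the per-IPL mask words already
 * computed by ixp12x0_intr_calculate_masks() below: block everything
 * for splhigh(), then re-open everything for spl0():
 *
 *      ixp12x0_set_intrmask(imask[IPL_HIGH], pci_imask[IPL_HIGH]);
 *      ...
 *      ixp12x0_set_intrmask(imask[IPL_NONE], pci_imask[IPL_NONE]);
 */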

static void
ixp12x0_enable_irq(int irq)
{
        if (irq < SYS_NIRQ) {
                intr_enabled |= (1U << irq);
                switch (irq) {
                case IXP12X0_INTR_UART:
                        ixp12x0_enable_uart_irq();
                        break;

                case IXP12X0_INTR_PCI:
                        /* nothing to do */
                        break;
                default:
                        panic("enable_irq: bad IRQ %d", irq);
                }
        } else {
                pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
                IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
        }
}

static inline void
ixp12x0_disable_irq(int irq)
{
        if (irq < SYS_NIRQ) {
                intr_enabled &= ~(1U << irq);
                switch (irq) {
                case IXP12X0_INTR_UART:
                        ixp12x0_disable_uart_irq();
                        break;

                case IXP12X0_INTR_PCI:
                        /* nothing to do */
                        break;
                default:
                        /* nothing to do */
                        break;
                }
        } else {
                pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
                IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
        }
}
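
/*
 * Numbering convention, with a small hypothetical example: global IRQ
 * numbers below SYS_NIRQ name system sources, the rest name PCI unit
 * sources, so ixp12x0_enable_irq(SYS_NIRQ + 3) sets bit 3 in
 * IXPPCI_IRQ_ENABLE_SET.
 */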

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
        struct intrq *iq;
        struct intrhand *ih;
        int irq, ipl;

        /* First, figure out which IPLs each IRQ has. */
        for (irq = 0; irq < NIRQ; irq++) {
                int levels = 0;
                iq = &intrq[irq];
                ixp12x0_disable_irq(irq);
                for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
                     ih = TAILQ_NEXT(ih, ih_list))
                        levels |= (1U << ih->ih_ipl);
                iq->iq_levels = levels;
        }

        /* Next, figure out which IRQs are used by each IPL. */
        for (ipl = 0; ipl < NIPL; ipl++) {
                int irqs = 0;
                int pci_irqs = 0;
                for (irq = 0; irq < SYS_NIRQ; irq++) {
                        if (intrq[irq].iq_levels & (1U << ipl))
                                irqs |= (1U << irq);
                }
                imask[ipl] = irqs;
                for (irq = 0; irq < SYS_NIRQ; irq++) {
                        if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
                                pci_irqs |= (1U << irq);
                }
                pci_imask[ipl] = pci_irqs;
        }

        KASSERT(imask[IPL_NONE] == 0);
        KASSERT(pci_imask[IPL_NONE] == 0);

        KASSERT(imask[IPL_VM] != 0);
        KASSERT(pci_imask[IPL_VM] != 0);

        /*
         * splclock() must block anything that uses the scheduler.
         */
        imask[IPL_CLOCK] |= imask[IPL_VM];
        pci_imask[IPL_CLOCK] |= pci_imask[IPL_VM];

        /*
         * splhigh() must block "everything".
         */
        imask[IPL_HIGH] |= imask[IPL_CLOCK];
        pci_imask[IPL_HIGH] |= pci_imask[IPL_CLOCK];

        /*
         * Now compute which IRQs must be blocked when servicing any
         * given IRQ.
         */
        for (irq = 0; irq < NIRQ; irq++) {
                int irqs;
                int pci_irqs;

                if (irq < SYS_NIRQ) {
                        irqs = (1U << irq);
                        pci_irqs = 0;
                } else {
                        irqs = 0;
                        pci_irqs = (1U << (irq - SYS_NIRQ));
                }
                iq = &intrq[irq];
                if (TAILQ_FIRST(&iq->iq_list) != NULL)
                        ixp12x0_enable_irq(irq);
                for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
                     ih = TAILQ_NEXT(ih, ih_list)) {
                        irqs |= imask[ih->ih_ipl];
                        pci_irqs |= pci_imask[ih->ih_ipl];
                }
                iq->iq_mask = irqs;
                iq->iq_pci_mask = pci_irqs;
        }
}
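
/*
 * A worked example, assuming a single hypothetical handler at IPL_VM
 * on system IRQ n: the first pass records (1U << IPL_VM) in
 * intrq[n].iq_levels, the second pass sets bit n in imask[IPL_VM],
 * and the ordering constraints above then propagate that bit into
 * imask[IPL_CLOCK] and imask[IPL_HIGH], so splclock() and splhigh()
 * block the device as well.  Finally, intrq[n].iq_mask becomes
 * (1U << n) | imask[IPL_VM], so servicing IRQ n also blocks every
 * other source registered at IPL_VM.
 */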

inline void
splx(int new)
{
        int old;
        u_int oldirqstate;

        oldirqstate = disable_interrupts(I32_bit);
        old = curcpl();
        set_curcpl(new);
        if (new != hardware_spl_level) {
                hardware_spl_level = new;
                ixp12x0_set_intrmask(imask[new], pci_imask[new]);
        }
        restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
        cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
        int old;
        u_int oldirqstate;

        oldirqstate = disable_interrupts(I32_bit);
        old = curcpl();
        set_curcpl(ipl);
        restore_interrupts(oldirqstate);
        return (old);
}

int
_spllower(int ipl)
{
        int old = curcpl();

        if (old <= ipl)
                return (old);
        splx(ipl);
        return (old);
}
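
/*
 * Typical driver-side use of the primitives above goes through the
 * machine-independent spl macros from <machine/intr.h>; a sketch
 * (the protected section is illustrative only):
 *
 *      int s;
 *
 *      s = splvm();
 *      ... touch state shared with an IPL_VM interrupt handler ...
 *      splx(s);
 */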

/*
 * ixp12x0_intr_init:
 *
 *      Initialize the rest of the interrupt subsystem, making it
 *      ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
        struct intrq *iq;
        int i;

        intr_enabled = 0;
        pci_intr_enabled = 0;

        for (i = 0; i < NIRQ; i++) {
                iq = &intrq[i];
                TAILQ_INIT(&iq->iq_list);

                sprintf(iq->iq_name, "irq %d", i);
                evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
                    NULL, "ixpintr", iq->iq_name);
        }
        curcpu()->ci_intr_depth = 0;
        curcpu()->ci_cpl = 0;
        hardware_spl_level = 0;

        ixp12x0_intr_calculate_masks();

        /* Enable IRQs (don't yet use FIQs). */
        enable_interrupts(I32_bit);
}

void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
        struct intrq *iq;
        struct intrhand *ih;
        u_int oldirqstate;

#ifdef DEBUG
        printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
            irq, ipl, (u_int32_t)ih_func, (u_int32_t)arg);
#endif
        if (irq < 0 || irq >= NIRQ)
                panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
        if (ipl < 0 || ipl >= NIPL)
                panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

        ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
        if (ih == NULL)
                return (NULL);

        ih->ih_func = ih_func;
        ih->ih_arg = arg;
        ih->ih_irq = irq;
        ih->ih_ipl = ipl;

        iq = &intrq[irq];
        iq->iq_ist = IST_LEVEL;

        oldirqstate = disable_interrupts(I32_bit);
        TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
        ixp12x0_intr_calculate_masks();
        restore_interrupts(oldirqstate);

        return (ih);
}
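
/*
 * A hypothetical attach-time usage ("mydev_intr" and "sc" are
 * illustrative, not part of this file): register a handler at IPL_VM
 * and keep the cookie for later removal:
 *
 *      sc->sc_ih = ixp12x0_intr_establish(IXP12X0_INTR_UART, IPL_VM,
 *          mydev_intr, sc);
 *      if (sc->sc_ih == NULL)
 *              ... attach fails; malloc(9) returned NULL ...
 *
 * The returned cookie is the argument for ixp12x0_intr_disestablish().
 */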

void
ixp12x0_intr_disestablish(void *cookie)
{
        struct intrhand *ih = cookie;
        struct intrq *iq = &intrq[ih->ih_irq];
        u_int oldirqstate;

        oldirqstate = disable_interrupts(I32_bit);
        TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
        ixp12x0_intr_calculate_masks();
        restore_interrupts(oldirqstate);

        /* Release the handler allocated in ixp12x0_intr_establish(). */
        free(ih, M_DEVBUF);
}

void
ixp12x0_intr_dispatch(struct irqframe *frame)
{
        struct intrq *iq;
        struct intrhand *ih;
        struct cpu_info * const ci = curcpu();
        const int ppl = ci->ci_cpl;
        u_int oldirqstate;
        u_int32_t hwpend;
        u_int32_t pci_hwpend;
        int irq;
        u_int32_t ibit;

        hwpend = ixp12x0_irq_read();
        pci_hwpend = ixp12x0_pci_irq_read();

        hardware_spl_level = ppl;
        ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);

        hwpend &= ~imask[ppl];
        pci_hwpend &= ~pci_imask[ppl];

        while (hwpend) {
                irq = ffs(hwpend) - 1;
                ibit = (1U << irq);
                hwpend &= ~ibit;

                iq = &intrq[irq];
                iq->iq_ev.ev_count++;
                uvmexp.intrs++;
                TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
                        ci->ci_cpl = ih->ih_ipl;
                        oldirqstate = enable_interrupts(I32_bit);
                        (void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
                        restore_interrupts(oldirqstate);
                }
        }
        while (pci_hwpend) {
                irq = ffs(pci_hwpend) - 1;
                ibit = (1U << irq);
                pci_hwpend &= ~ibit;

                iq = &intrq[irq + SYS_NIRQ];
                iq->iq_ev.ev_count++;
                uvmexp.intrs++;
                TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
                        ci->ci_cpl = ih->ih_ipl;
                        oldirqstate = enable_interrupts(I32_bit);
                        (void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
                        restore_interrupts(oldirqstate);
                }
        }

        ci->ci_cpl = ppl;
        hardware_spl_level = ppl;
        ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);

#ifdef __HAVE_FAST_SOFTINTS
        cpu_dosoftints();
#endif
}