/*	$NetBSD: ixp12x0_intr.c,v 1.25 2013/08/18 15:58:19 matt Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.25 2013/08/18 15:58:19 matt Exp $");

/*
 * Interrupt support for the Intel ixp12x0
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <arm/locore.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern uint32_t	ixpcom_cr;	/* current cr from *_com.c */
extern uint32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static uint32_t imask[NIPL];
static uint32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;
volatile uint32_t pci_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

void	ixp12x0_intr_dispatch(struct trapframe *);

#define	IXPREG(reg)	*((volatile uint32_t *)(reg))

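/*
 * Status-register readers: ixp12x0_irq_read() returns the pending
 * on-chip IRQ sources, masked down to the bits this driver manages;
 * ixp12x0_pci_irq_read() returns the pending PCI-unit IRQ sources.
 */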
static inline uint32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline uint32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

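/*
 * The UART has no enable bit in the IRQ controller itself; its
 * interrupt is gated through the RIE/XIE (receive/transmit interrupt
 * enable) bits of the com control register.  ixpcom_cr shadows the
 * current CR contents and ixpcom_imask tells the com driver which
 * enable bits are presently masked off.
 */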
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
		    IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
		    IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

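/*
 * Apply a new interrupt mask.  The UART source is masked via the com
 * control register as above; PCI-unit sources are masked by writing
 * the IRQ enable-clear and enable-set registers.
 */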
static void
ixp12x0_set_intrmask(uint32_t irqs, uint32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

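/*
 * Enable/disable delivery of a single IRQ.  Core IRQs
 * (irq < SYS_NIRQ) have no per-source enable bits other than the
 * UART's, so for them only the software copy in intr_enabled is
 * updated; PCI-unit IRQs are switched through the enable-set and
 * enable-clear registers.
 */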
static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq: bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

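/*
 * ixp12x0_intr_calculate_masks:
 *
 *	Recompute imask[]/pci_imask[] and each queue's iq_mask and
 *	iq_pci_mask from the registered handlers, and enable exactly
 *	those IRQs that have at least one handler attached.
 */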
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);
	KASSERT(imask[IPL_SOFTCLOCK] == 0);
	KASSERT(pci_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(imask[IPL_SOFTBIO] == 0);
	KASSERT(pci_imask[IPL_SOFTBIO] == 0);
	KASSERT(imask[IPL_SOFTNET] == 0);
	KASSERT(pci_imask[IPL_SOFTNET] == 0);
	KASSERT(imask[IPL_SOFTSERIAL] == 0);
	KASSERT(pci_imask[IPL_SOFTSERIAL] == 0);

	KASSERT(imask[IPL_VM] != 0);
	KASSERT(pci_imask[IPL_VM] != 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	imask[IPL_SCHED] |= imask[IPL_VM];
	pci_imask[IPL_SCHED] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_SCHED];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs;
		int pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

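/*
 * spl handling: curcpl() holds the current priority level, while
 * hardware_spl_level tracks the level the mask registers were last
 * programmed for.  Raising the level (_splraise) only updates the
 * software copy; the hardware mask is rewritten lazily, by splx()
 * when the level changes and by the dispatcher on entry.
 */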
inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "ixpintr", iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	curcpu()->ci_cpl = 0;
	hardware_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

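/*
 * ixp12x0_intr_establish:
 *
 *	Register a handler for the given IRQ at the given IPL and
 *	recompute the interrupt masks.  Returns a cookie for
 *	ixp12x0_intr_disestablish(), or NULL if the handler cannot
 *	be allocated.
 */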
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%p, arg=%p)\n",
	    irq, ipl, (void *)ih_func, arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

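/*
 * ixp12x0_intr_disestablish:
 *
 *	Remove a handler previously registered with
 *	ixp12x0_intr_establish() and recompute the interrupt masks.
 */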
void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
	free(ih, M_DEVBUF);
}

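/*
 * ixp12x0_intr_dispatch:
 *
 *	IRQ entry point from the exception handler.  Masks everything
 *	pending or blocked at the entry level, runs the queued handlers
 *	at their own IPL with CPU interrupts re-enabled, then restores
 *	the entry level and its mask.
 */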
void
ixp12x0_intr_dispatch(struct trapframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	u_int oldirqstate;
	uint32_t hwpend;
	uint32_t pci_hwpend;
	int irq;
	uint32_t ibit;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);

	hwpend &= ~imask[ppl];
	pci_hwpend &= ~pci_imask[ppl];

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		hwpend &= ~ibit;
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		pci_hwpend &= ~ibit;
	}

	ci->ci_cpl = ppl;
	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}