/*	$NetBSD: ifpga_intr.c,v 1.5.28.1 2008/01/09 01:45:46 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Integrator FPGA.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

#include <arm/cpufunc.h>

#include <evbarm/ifpga/ifpgareg.h>
#include <evbarm/ifpga/ifpgavar.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int ifpga_imask[NIPL];

/* Interrupts pending. */
volatile int ifpga_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[] = {
	[SI_SOFTCLOCK]	= IFPGA_INTR_bit31,
	[SI_SOFTBIO]	= IFPGA_INTR_bit30,
	[SI_SOFTNET]	= IFPGA_INTR_bit29,
	[SI_SOFTSERIAL]	= IFPGA_INTR_bit28,
};

#define	SI_TO_IRQBIT(si)	(si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[] = {
	[SI_SOFTCLOCK]	= IPL_SOFTCLOCK,
	[SI_SOFTBIO]	= IPL_SOFTBIO,
	[SI_SOFTNET]	= IPL_SOFTNET,
	[SI_SOFTSERIAL]	= IPL_SOFTSERIAL,
};
#endif

/*
 * Interrupt bit names.
 */
const char * const ifpga_irqnames[] = {
	"soft",		/* 0 */
	"uart 0",	/* 1 */
	"uart 1",	/* 2 */
	"kbd",		/* 3 */
	"mouse",	/* 4 */
	"tmr 0",	/* 5 */
	"tmr 1 hard",	/* 6 */
	"tmr 2 stat",	/* 7 */
	"rtc",		/* 8 */
	"exp 0",	/* 9 */
	"exp 1",	/* 10 */
	"exp 2",	/* 11 */
	"exp 3",	/* 12 */
	"pci 0",	/* 13 */
	"pci 1",	/* 14 */
	"pci 2",	/* 15 */
	"pci 3",	/* 16 */
	"V3 br",	/* 17 */
	"deg",		/* 18 */
	"enum",		/* 19 */
	"pci lb",	/* 20 */
	"autoPC",	/* 21 */
	"irq 22",	/* 22 */
	"irq 23",	/* 23 */
	"irq 24",	/* 24 */
	"irq 25",	/* 25 */
	"irq 26",	/* 26 */
	"irq 27",	/* 27 */
	"irq 28",	/* 28 */
	"irq 29",	/* 29 */
	"irq 30",	/* 30 */
	"irq 31",	/* 31 */
};

void	ifpga_intr_dispatch(struct clockframe *frame);

extern struct ifpga_softc *ifpga_sc;

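/*
 * ifpga_iintsrc_read:
 *
 *	Read the FPGA's IRQ status register to find which interrupts
 *	are currently asserted.
 */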
static inline uint32_t
ifpga_iintsrc_read(void)
{
	return bus_space_read_4(ifpga_sc->sc_iot, ifpga_sc->sc_irq_ioh,
	    IFPGA_INTR_STATUS);
}

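/*
 * ifpga_enable_irq/ifpga_disable_irq:
 *
 *	Update the software copy of the enabled-IRQ mask and push it
 *	out to the interrupt controller.
 */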
static inline void
ifpga_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	ifpga_set_intrmask();
}

static inline void
ifpga_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	ifpga_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ifpga_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ifpga_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		ifpga_imask[ipl] = irqs;
	}

	KASSERT(ifpga_imask[IPL_NONE] == 0);

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	ifpga_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	ifpga_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	ifpga_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	ifpga_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);
#endif

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	ifpga_imask[IPL_SOFTBIO] |= ifpga_imask[IPL_SOFTCLOCK];
	ifpga_imask[IPL_SOFTNET] |= ifpga_imask[IPL_SOFTBIO];
	ifpga_imask[IPL_SOFTSERIAL] |= ifpga_imask[IPL_SOFTNET];
	ifpga_imask[IPL_VM] |= ifpga_imask[IPL_SOFTSERIAL];
	ifpga_imask[IPL_SCHED] |= ifpga_imask[IPL_VM];
	ifpga_imask[IPL_HIGH] |= ifpga_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ifpga_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= ifpga_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

#ifdef __HAVE_FAST_SOFTINTS
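/*
 * ifpga_do_pending:
 *
 *	Run any pending soft interrupts that are not masked by the
 *	current priority level, raising the cpl around each dispatch.
 */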
void
ifpga_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	struct cpu_info * const ci = curcpu();
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = ci->ci_cpl;

	oldirqstate = disable_interrupts(I32_bit);

	/*
	 * Dispatch one class of soft interrupt if it is pending and
	 * not masked by the saved priority level.
	 */
#define	DO_SOFTINT(si)							\
	if ((ifpga_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		ifpga_ipending &= ~SI_TO_IRQBIT(si);			\
		ci->ci_cpl |= ifpga_imask[si_to_ipl[(si)]];		\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		ci->ci_cpl = new;					\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTBIO);
	DO_SOFTINT(SI_SOFTCLOCK);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
#endif

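/*
 * splx/_spllower/_splraise:
 *
 *	Out-of-line spl entry points, implemented in terms of the
 *	ifpga_spl*() primitives.
 */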
void
splx(int new)
{

	ifpga_splx(new);
}

int
_spllower(int ipl)
{

	return (ifpga_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (ifpga_splraise(ipl));
}

#ifdef __HAVE_FAST_SOFTINTS
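/*
 * _setsoftintr:
 *
 *	Mark a soft interrupt as pending and dispatch it immediately
 *	if it is not masked at the current priority level.
 */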
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ifpga_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ifpga_ipending & INT_SWMASK) & ~curcpl())
		ifpga_do_pending();
}
#endif

/*
 * ifpga_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ifpga_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "ifpga", ifpga_irqnames[i]);
	}
}

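/*
 * ifpga_intr_postinit:
 *
 *	Compute the initial interrupt masks now that the handler
 *	queues exist, then enable IRQs in the CPSR.
 */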
void
ifpga_intr_postinit(void)
{
	ifpga_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

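/*
 * ifpga_intr_establish:
 *
 *	Establish an interrupt handler for the given IRQ at the given
 *	IPL and recompute the masks.  Returns a cookie for
 *	ifpga_intr_disestablish(), or NULL if the handler structure
 *	cannot be allocated.
 */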
void *
ifpga_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("ifpga_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All Integrator FPGA interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	ifpga_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

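/*
 * ifpga_intr_disestablish:
 *
 *	Remove a previously established handler and recompute the masks.
 */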
void
ifpga_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	ifpga_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

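/*
 * ifpga_intr_dispatch:
 *
 *	Main hardware interrupt dispatch routine.  Pending IRQs are
 *	first masked at the controller; each unmasked IRQ's handlers
 *	are then run with the cpl raised to that IRQ's mask, while
 *	IRQs blocked by the current priority are left marked in
 *	ifpga_ipending for later processing.
 */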
void
ifpga_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;
	struct cpu_info * const ci = curcpu();

	pcpl = ci->ci_cpl;

	hwpend = ifpga_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	ifpga_set_intrmask();

	/* Wait for these interrupts to be suppressed. */
	while ((ifpga_iintsrc_read() & hwpend) != 0)
		;

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			ifpga_ipending |= ibit;
			continue;
		}

		ifpga_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		ci->ci_cpl |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);
		ci->ci_cpl = pcpl;

		hwpend |= (ifpga_ipending & IFPGA_INTR_HWMASK) & ~pcpl;

		/* Re-enable this interrupt now that it's been cleared. */
		intr_enabled |= ibit;
		ifpga_set_intrmask();
	}

#ifdef __HAVE_FAST_SOFTINTS
	/* Check for pending soft intrs. */
	if ((ifpga_ipending & INT_SWMASK) & ~ci->ci_cpl) {
		oldirqstate = enable_interrupts(I32_bit);
		ifpga_do_pending();
		restore_interrupts(oldirqstate);
	}
#endif
}