/*	$NetBSD: i80321_icu.c,v 1.23 2012/02/12 16:31:01 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.23 2012/02/12 16:31:01 matt Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask if interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

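	/*
	 * Read the interrupt source register (IINTSRC) via coprocessor 6.
	 * Each set bit corresponds to a source that is currently asserting.
	 */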
	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

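	/*
	 * Write the steering mask to the interrupt steering register via
	 * coprocessor 6; a set bit steers that source to FIQ instead of IRQ.
	 */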
	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	KASSERT(i80321_imask[IPL_NONE] == 0);
	KASSERT(i80321_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(i80321_imask[IPL_SOFTBIO] == 0);
	KASSERT(i80321_imask[IPL_SOFTNET] == 0);
	KASSERT(i80321_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */

#if 0
	/*
	 * This assert might be useful, but only after some interrupts
	 * are configured.  As it stands now, it will always fire early
	 * in the initialization phase.  If it's useful enough to re-
	 * enable, it should be conditionalized on something else like
	 * having at least something in the levels/irqs above.
	 */
	KASSERT(i80321_imask[IPL_VM] != 0);
#endif
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

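/*
 * Out-of-line spl entry points.  EVBARM_SPL_NOINLINE is forced on above,
 * so the i80321_splx/i80321_spllower/i80321_splraise operations are not
 * inlined at their call sites; these wrappers provide the kernel's
 * splx/_spllower/_splraise interface.
 */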
void
splx(int new)
{
	i80321_splx(new);
}

int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

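/*
 * i80321_intr_evcnt_attach:
 *
 *	Attach an event counter to each IRQ, named after the interrupt
 *	source, so per-interrupt statistics are kept.
 */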
void
i80321_intr_evcnt_attach(void)
{
	for (u_int i = 0; i < NIRQ; i++) {
		struct intrq *iq = &intrq[i];
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}
}

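/*
 * i80321_intr_establish:
 *
 *	Register an interrupt handler for the given IRQ at the given IPL
 *	and return an opaque cookie for i80321_intr_disestablish().
 *	Typical driver usage looks roughly like the sketch below (the
 *	IRQ macro, handler, and softc names are only illustrative):
 *
 *		sc->sc_ih = i80321_intr_establish(MYDEV_IRQ, IPL_NET,
 *		    mydev_intr, sc);
 *		if (sc->sc_ih == NULL)
 *			aprint_error_dev(sc->sc_dev,
 *			    "unable to establish interrupt\n");
 */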
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

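/*
 * i80321_intr_disestablish:
 *
 *	Remove a previously established handler, using the cookie
 *	returned by i80321_intr_establish(), and recompute the masks.
 */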
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = i80321_imask[ppl];

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

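	/*
	 * Service pending sources in ascending IRQ order (ffs(3) yields
	 * the 1-based index of the lowest set bit), except that a pending
	 * HPI is always taken first when HPI support is enabled.
	 */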
	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI.  We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx().  Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
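		/*
		 * Run every handler registered on this IRQ at its own IPL,
		 * with IRQs enabled so higher-priority sources can still
		 * be taken while it runs.
		 */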
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI.  Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}