/*	$NetBSD: i80321_icu.c,v 1.2.2.2 2002/04/01 07:39:16 nathanw Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static int imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Interrupts pending. */
static __volatile int ipending;

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << ICU_INT_bit26) | (1U << ICU_INT_bit22) |		\
	 (1U << ICU_INT_bit5)  | (1U << ICU_INT_bit4))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
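
/*
 * For example, SI_TO_IRQBIT(SI_SOFTNET) expands to (1U << ICU_INT_bit5):
 * the soft-net queue is tracked in ipending and in the imask[] table
 * through ICU bit 5, one of the bits the hardware leaves unused.
 */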

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

void	i80321_intr_dispatch(struct clockframe *frame);

static __inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm __volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static __inline void
i80321_set_intrmask(void)
{

	__asm __volatile("mcr p6, 0, %0, c0, c0, 0"
		:
		: "r" (intr_enabled & ICU_INT_HWMASK));
}

static __inline void
i80321_set_intrsteer(void)
{

	__asm __volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static __inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static __inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
	}

	imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * No separate statclock on the i80321.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
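
/*
 * The net effect of the ordering above is that each mask is a superset
 * of the one below it:
 *
 *	imask[IPL_NONE] <= imask[IPL_SOFT] <= imask[IPL_SOFTCLOCK]
 *	    <= imask[IPL_SOFTNET] <= imask[IPL_BIO] <= imask[IPL_NET]
 *	    <= imask[IPL_SOFTSERIAL] <= imask[IPL_TTY] <= imask[IPL_IMP]
 *	    <= imask[IPL_AUDIO] <= imask[IPL_CLOCK] <= imask[IPL_STATCLOCK]
 *	    <= imask[IPL_HIGH] <= imask[IPL_SERIAL]
 *
 * ("<=" meaning "is a subset of"), so raising the spl to a higher level
 * always blocks at least everything the lower levels block.
 */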

static void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~new) & SI_TO_IRQBIT(si)) {			\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level |= imask[si_to_ipl[(si)]];		\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

int
_splraise(int ipl)
{
	int old, oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level |= imask[ipl];

	restore_interrupts(oldirqstate);

	return (old);
}

__inline void
splx(int new)
{
	int oldirqstate, hwpend;

	current_spl_level = new;

	/*
	 * If there are pending HW interrupts which are being
	 * unmasked, then enable them in the INTCTL register.
	 * This will cause them to come flooding in.
	 */
	hwpend = (ipending & ICU_INT_HWMASK) & ~new;
	if (hwpend != 0) {
		oldirqstate = disable_interrupts(I32_bit);
		intr_enabled |= hwpend;
		i80321_set_intrmask();
		restore_interrupts(oldirqstate);
	}

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~new)
		i80321_do_pending();
}

int
_spllower(int ipl)
{
	int old = current_spl_level;

	splx(imask[ipl]);
	return (old);
}
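
/*
 * Usage sketch (illustrative; splnet() is assumed to be the usual
 * <machine/intr.h> wrapper around _splraise(IPL_NET), and is not
 * defined in this file):
 *
 *	int s;
 *
 *	s = splnet();
 *	... touch state shared with a network interrupt handler ...
 *	splx(s);
 */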

void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
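
/*
 * Sketch of how this is normally reached (illustrative; the names are
 * from the MI softintr(9) framework, and the handler/softc here are
 * hypothetical):
 *
 *	void *sih = softintr_establish(IPL_SOFTNET, exampledev_soft, sc);
 *	...
 *	softintr_schedule(sih);
 *
 * softintr_schedule() marks the handler pending and, on this port, is
 * expected to end up calling _setsoftintr() with the corresponding
 * SI_* queue index.
 */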

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", iq->iq_name);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
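
/*
 * Example usage from a driver attach routine (illustrative; the IRQ
 * macro, handler, and softc names below are hypothetical/assumed and
 * are not defined in this file):
 *
 *	sc->sc_ih = i80321_intr_establish(ICU_INT_XINT(0), IPL_NET,
 *	    exampledev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		printf("exampledev: unable to establish interrupt\n");
 *
 * The returned cookie is later handed to i80321_intr_disestablish()
 * to unhook the handler.
 */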

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			ipending |= ibit;
			continue;
		}

		ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();
	}

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
		restore_interrupts(oldirqstate);
	}
}