/*	$NetBSD: i80321_icu.c,v 1.26 2020/11/20 18:49:45 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.26 2020/11/20 18:49:45 thorpej Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	KASSERT(i80321_imask[IPL_NONE] == 0);
	KASSERT(i80321_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(i80321_imask[IPL_SOFTBIO] == 0);
	KASSERT(i80321_imask[IPL_SOFTNET] == 0);
	KASSERT(i80321_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */

#if 0
	/*
	 * This assert might be useful, but only after some interrupts
	 * are configured.  As it stands now, it will always fire early
	 * in the initialization phase.  If it's useful enough to re-
	 * enable, it should be conditionalized on something else like
	 * having at least something in the levels/irqs above.
	 */
	KASSERT(i80321_imask[IPL_VM] != 0);
#endif
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

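/*
 * spl entry points.  EVBARM_SPL_NOINLINE is defined at the top of this
 * file so that <arm/xscale/i80321var.h> does not substitute its inline
 * versions here; the functions below supply the out-of-line versions
 * used by the rest of the kernel.
 */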
void
splx(int new)
{
	i80321_splx(new);
}

int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}

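/*
 * A minimal usage sketch (not part of this file): driver code brackets
 * a critical section against interrupts with the usual spl pattern,
 * which lands in the wrappers above:
 *
 *	int s = splvm();	raise to IPL_VM
 *	... touch state shared with an interrupt handler ...
 *	splx(s);		restore the previous level
 */
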
/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

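/*
 * i80321_intr_evcnt_attach:
 *
 *	Attach an event counter to each IRQ so that per-interrupt
 *	statistics (e.g. "vmstat -i") are available.
 */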
void
i80321_intr_evcnt_attach(void)
{
	for (u_int i = 0; i < NIRQ; i++) {
		struct intrq *iq = &intrq[i];
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}
}

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

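/*
 * A minimal usage sketch (mydev_intr and sc are hypothetical names; the
 * irq argument is the interrupt source's bit number, i.e. one of the
 * ICU_INT_* values from i80321reg.h):
 *
 *	sc->sc_ih = i80321_intr_establish(irq, IPL_BIO, mydev_intr, sc);
 *	...
 *	i80321_intr_disestablish(sc->sc_ih);
 */
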
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = i80321_imask[ppl];

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI.  We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx().  Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_tf.tf_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				ci->ci_cpl = ih->ih_ipl;
				oldirqstate = enable_interrupts(I32_bit);
				(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
				restore_interrupts(oldirqstate);
			}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI.  Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_tf.tf_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}