/*	$NetBSD: i80321_icu.c,v 1.27 2021/08/06 09:01:36 rin Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.27 2021/08/06 09:01:36 rin Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;
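
/*
 * i80321_set_intrmask(), used throughout this file, is a static inline
 * provided by i80321var.h.  As a rough sketch (not the authoritative
 * definition), it pushes the software copy above out to the ICU's
 * INTCTL register via coprocessor 6, in the same style as the
 * IINTSRC/INTSTR accessors below:
 *
 *	static inline void
 *	i80321_set_intrmask(void)
 *	{
 *		__asm volatile("mcr p6, 0, %0, c0, c0, 0"
 *		    :
 *		    : "r" (intr_enabled & ICU_INT_HWMASK));
 *	}
 */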

/*
 * Interrupt bit names.
 */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
	    : "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
	    :
	    : "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* Disable all IRQs. */
	for (irq = 0; irq < NIRQ; irq++)
		i80321_disable_irq(irq);

	/* Figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++)
		i80321_imask[ipl] = 0;
	for (irq = 0; irq < NIRQ; irq++) {
		iq = &intrq[irq];
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list)
			i80321_imask[ih->ih_ipl] |= (1U << irq);
	}

	/* All IPLs block everything blocked by any lower IPL. */
	for (ipl = 1; ipl < NIPL; ipl++)
		i80321_imask[ipl] |= i80321_imask[ipl - 1];

	KASSERT(i80321_imask[IPL_NONE] == 0);
	KASSERT(i80321_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(i80321_imask[IPL_SOFTBIO] == 0);
	KASSERT(i80321_imask[IPL_SOFTNET] == 0);
	KASSERT(i80321_imask[IPL_SOFTSERIAL] == 0);

	/* Enable IRQs in use. */
	for (irq = 0; irq < NIRQ; irq++) {
		iq = &intrq[irq];
		if (!TAILQ_EMPTY(&iq->iq_list))
			i80321_enable_irq(irq);
	}
}

void
splx(int new)
{
	i80321_splx(new);
}

int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}
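
/*
 * Worked example for i80321_intr_calculate_masks() above (the handler
 * assignments are hypothetical, for illustration only): with TMR0
 * (irq 9) established at IPL_CLOCK and I2C0 (irq 11) at IPL_VM, the
 * first pass yields
 *
 *	i80321_imask[IPL_VM]    = 0x00000800	(I2C0)
 *	i80321_imask[IPL_CLOCK] = 0x00000200	(TMR0)
 *
 * and the accumulation pass then folds the lower levels in:
 *
 *	i80321_imask[IPL_CLOCK] = 0x00000a00	(TMR0 | I2C0)
 *
 * so raising to IPL_CLOCK blocks both sources, while raising to
 * IPL_VM blocks only I2C0.
 */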

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void
i80321_intr_evcnt_attach(void)
{
	for (u_int i = 0; i < NIRQ; i++) {
		struct intrq *iq = &intrq[i];
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}
}

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	/* The handler is now unlinked; free it. */
	kmem_free(ih, sizeof(*ih));
}
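
/*
 * Usage sketch (illustrative only; "mydev" and its softc are
 * hypothetical): a driver hooking the SSP source (irq 25) at IPL_BIO.
 * Establishment must happen in thread context (KM_SLEEP above), and
 * the handler must clear the level-triggered source before returning:
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		... acknowledge and service the device ...
 *		return 1;	(claimed)
 *	}
 *
 *	sc->sc_ih = i80321_intr_establish(25, IPL_BIO, mydev_intr, sc);
 *	...
 *	i80321_intr_disestablish(sc->sc_ih);
 */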

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = i80321_imask[ppl];

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI.  We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx().  Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_tf.tf_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				ci->ci_cpl = ih->ih_ipl;
				oldirqstate = enable_interrupts(I32_bit);
				(void)(*ih->ih_func)(ih->ih_arg ?
				    ih->ih_arg : frame);
				restore_interrupts(oldirqstate);
			}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI.  Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_tf.tf_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
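
/*
 * For reference, a simplified sketch (not the authoritative definition)
 * of the pending-interrupt replay that the HPI comment above relies on.
 * The real i80321_splx() lives in i80321var.h and additionally handles
 * fast soft interrupts and the HPI frame fixup.  After recording the
 * new cpl, any bits left in i80321_ipending that are no longer blocked
 * at the new IPL are re-enabled in INTCTL:
 *
 *	hwpend = (i80321_ipending & ICU_INT_HWMASK) & ~i80321_imask[new];
 *	if (hwpend != 0) {
 *		oldirqstate = disable_interrupts(I32_bit);
 *		intr_enabled |= hwpend;
 *		i80321_set_intrmask();
 *		restore_interrupts(oldirqstate);
 *	}
 *
 * Because the sources are level-triggered, unmasking them in INTCTL is
 * enough to make them re-assert and be dispatched at the lower IPL.
 */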