/*	$NetBSD: i80321_icu.c,v 1.9.16.4 2008/01/21 09:35:51 yamt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.9.16.4 2008/01/21 09:35:51 yamt Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
#ifdef __HAVE_FAST_SOFTINTS
static const uint32_t si_to_irqbit[4] = {
	ICU_INT_bit26,		/* SI_SOFTCLOCK */
	ICU_INT_bit22,		/* SI_SOFTBIO */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
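
/*
 * For example, SI_TO_IRQBIT(SI_SOFTNET) expands to (1U << ICU_INT_bit5),
 * i.e. the soft interrupt borrows what is presumably bit 5 of the ICU
 * mask word.
 */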

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[4] = {
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTBIO,		/* SI_SOFTBIO */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
#endif

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};
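
/*
 * The table above is indexed by ICU bit number; NIRQ (from i80321var.h)
 * is assumed to be 32 to match.
 */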

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

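	/* Read CP6 register c8, the IRQ interrupt source register (IINTSRC). */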
	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

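	/*
	 * Write CP6 register c4, the interrupt steering register
	 * (presumably INTSTR); a set bit steers the corresponding
	 * interrupt to FIQ.
	 */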
	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	i80321_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
#ifdef __HAVE_FAST_SOFTINTS
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);
#endif

	i80321_imask[IPL_SOFTBIO] |= i80321_imask[IPL_SOFTCLOCK];
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTBIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_VM] |= i80321_imask[IPL_SOFTSERIAL];
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
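
/*
 * A worked example of the mask computation above (IRQ numbers are
 * illustrative): if IRQ 11 ("I2C0") has a single handler at IPL_VM,
 * then i80321_imask[IPL_VM] gains bit 11, the hierarchy above
 * propagates that bit up into IPL_SCHED and IPL_HIGH, and
 * intrq[11].iq_mask becomes (1U << 11) | i80321_imask[IPL_VM] -- so
 * while IRQ 11 is being serviced, every IRQ with a handler at or
 * below IPL_VM is blocked.
 */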

void
i80321_do_pending(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

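/*
 * DO_SOFTINT(si): if soft interrupt `si' is pending and not masked at
 * the spl we entered with, clear its pending bit, raise the current
 * level so further interrupts of that class stay blocked, and run its
 * handlers via softintr_dispatch() with hardware interrupts re-enabled.
 */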
#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= i80321_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTBIO);
	DO_SOFTINT(SI_SOFTCLOCK);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
#endif	/* __HAVE_FAST_SOFTINTS */
}

void
splx(int new)
{

	i80321_splx(new);
}

int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}
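
/*
 * Typical driver usage goes through the splfoo()/splx() macros in
 * <machine/intr.h>, which bottom out in the functions above, e.g.
 * (sketch only):
 *
 *	int s = splvm();
 *	...touch state shared with the interrupt handler...
 *	splx(s);
 */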

#ifdef __HAVE_FAST_SOFTINTS
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
#endif /* __HAVE_FAST_SOFTINTS */

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
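
/*
 * i80321_icu_init() is expected to run very early in bootstrap (before
 * interrupts are first enabled), with i80321_intr_init() following once
 * evcnt and the interrupt queues can be set up; the exact call sites
 * are board specific.
 */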

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
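
/*
 * Example (hypothetical driver attach; the ICU_INT_* constants come
 * from i80321reg.h, and the handler/softc names are illustrative):
 *
 *	sc->sc_ih = i80321_intr_establish(ICU_INT_TMR0, IPL_CLOCK,
 *	    myintr, sc);
 *	if (sc->sc_ih == NULL)
 *		panic("can't establish TMR0 interrupt");
 */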

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame. This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.) Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled. If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards. This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway. The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;
	struct cpu_info *ci;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif

	ci = curcpu();
	ci->ci_idepth++;
	pcpl = current_spl_level;
	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
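		/* ffs() returns 1 + the index of the lowest set bit, hence the -1. */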
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI. We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx(). Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
#ifdef I80321_HPI_ENABLED
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
#ifdef I80321_HPI_ENABLED
		else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI. Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		current_spl_level = pcpl;
		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~pcpl);
	}
	ci->ci_idepth--;

	/* Check for pending soft intrs. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_SOFT* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_NET* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
	}
}