/*	$NetBSD: i80321_icu.c,v 1.20 2010/06/13 02:11:23 tsutsui Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.20 2010/06/13 02:11:23 tsutsui Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

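/*
 * i80321_iintsrc_read:
 *
 *	Read the CP6 interrupt source register (IINTSRC) and return
 *	the pending sources, masked with our software interrupt mask.
 */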
static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

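/*
 * i80321_set_intrsteer:
 *
 *	Push the software steering mask out to the CP6 steering
 *	register, selecting which sources are routed to FIQ instead
 *	of IRQ.
 */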
static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

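/*
 * i80321_enable_irq/i80321_disable_irq:
 *
 *	Set or clear a source in the software copy of the interrupt
 *	mask and push the change out to the interrupt controller.
 */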
static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

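/*
 * i80321_intr_calculate_masks:
 *
 *	Recompute, from the established handlers, the set of IPLs used
 *	by each IRQ (iq_levels), the set of IRQs blocked at each IPL
 *	(i80321_imask[]), and the set of IRQs blocked while servicing
 *	each IRQ (iq_mask).
 */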
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	KASSERT(i80321_imask[IPL_NONE] == 0);
	KASSERT(i80321_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(i80321_imask[IPL_SOFTBIO] == 0);
	KASSERT(i80321_imask[IPL_SOFTNET] == 0);
	KASSERT(i80321_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices
	 * with limited input buffer space or "real-time" requirements)
	 * a better chance of not dropping data.
	 */

#if 0
	/*
	 * This assert might be useful, but only after some interrupts
	 * are configured.  As it stands now, it will always fire early
	 * in the initialization phase.  If it's useful enough to re-
	 * enable, it should be conditionalized on something else like
	 * having at least something in the levels/irqs above.
	 */
	KASSERT(i80321_imask[IPL_VM] != 0);
#endif
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

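/*
 * splx/_spllower/_splraise:
 *
 *	Out-of-line spl entry points; since EVBARM_SPL_NOINLINE is
 *	defined above, these simply wrap the i80321_spl*() primitives.
 */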
void
splx(int new)
{
	i80321_splx(new);
}

int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

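/*
 * i80321_intr_establish:
 *
 *	Register "func" to be called with "arg" (or with the clock
 *	frame, if "arg" is NULL) at the given IPL whenever the given
 *	IRQ asserts.  Returns an opaque cookie for
 *	i80321_intr_disestablish(), or NULL if allocation fails.
 *	A hypothetical caller (sketch only; the constant and driver
 *	names below are illustrative, not from this file):
 *
 *		sc->sc_ih = i80321_intr_establish(ICU_INT_TMR1, IPL_VM,
 *		    mydev_intr, sc);
 *		if (sc->sc_ih == NULL)
 *			panic("%s: can't establish IRQ",
 *			    device_xname(self));
 */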
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

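/*
 * i80321_intr_disestablish:
 *
 *	Remove the handler identified by the cookie returned from
 *	i80321_intr_establish() and recompute the interrupt masks.
 */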
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame. This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.) Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled. If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards. This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway. The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = i80321_imask[ppl];

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI. We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx(). Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				ci->ci_cpl = ih->ih_ipl;
				oldirqstate = enable_interrupts(I32_bit);
				(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
				restore_interrupts(oldirqstate);
			}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI. Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}