/*	$NetBSD: i80321_icu.c,v 1.13 2006/11/08 23:45:41 scw Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.13 2006/11/08 23:45:41 scw Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
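
/*
 * For example, SI_TO_IRQBIT(SI_SOFTNET) expands to
 * (1U << ICU_INT_bit5); assuming ICU_INT_bit5 names bit 5 (as the
 * "irq 5" entry in i80321_irqnames[] below suggests), the soft
 * network interrupt is tracked by the otherwise-unused bit 5 in
 * i80321_ipending and in the i80321_imask[] entries.
 */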

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}
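
/*
 * The companion i80321_set_intrmask() used throughout this file is
 * defined in i80321var.h, not here.  A minimal sketch of what it is
 * expected to do, assuming INTCTL is coprocessor 6 register c0 (the
 * same cp6 layout as IINTSRC above and INTSTR below):
 *
 *	static inline void
 *	i80321_set_intrmask(void)
 *	{
 *		__asm volatile("mcr p6, 0, %0, c0, c0, 0"
 *			:
 *			: "r" (intr_enabled & ICU_INT_HWMASK));
 *	}
 */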

static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	i80321_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	i80321_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface the generic software
	 * interrupt facility gives its users to block their soft
	 * interrupts, so splsoftclock() must also block IPL_SOFT.
	 */
	i80321_imask[IPL_SOFTCLOCK] |= i80321_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	i80321_imask[IPL_BIO] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_NET] |= i80321_imask[IPL_BIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_NET];
	i80321_imask[IPL_TTY] |= i80321_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	i80321_imask[IPL_VM] |= i80321_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	i80321_imask[IPL_AUDIO] |= i80321_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	i80321_imask[IPL_CLOCK] |= i80321_imask[IPL_AUDIO];

	/*
	 * No separate statclock on i80321-based boards.
	 */
	i80321_imask[IPL_STATCLOCK] |= i80321_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	i80321_imask[IPL_SERIAL] |= i80321_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
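
/*
 * A worked example (hypothetical configuration): if the only handler
 * on IRQ 9 (TMR0) is registered at IPL_CLOCK, the first pass sets
 * intrq[9].iq_levels = (1U << IPL_CLOCK); the second pass folds bit 9
 * into i80321_imask[IPL_CLOCK] (and, via the |= cascade above, into
 * IPL_STATCLOCK, IPL_HIGH and IPL_SERIAL); and the final pass leaves
 * intrq[9].iq_mask = (1U << 9) | i80321_imask[IPL_CLOCK], so while
 * TMR0 is being serviced everything splclock() blocks is blocked.
 */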

void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= i80321_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

void
splx(int new)
{

	i80321_splx(new);
}

int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}

void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
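
/*
 * Usage sketch: _setsoftintr() is normally reached via the MI
 * softintr(9) facility rather than called directly.  A hypothetical
 * driver would do something like:
 *
 *	void *sih = softintr_establish(IPL_SOFTNET, mydrv_softrx, sc);
 *	...
 *	softintr_schedule(sih);
 *
 * which eventually marks SI_SOFTNET pending here and, if unmasked,
 * dispatches it through i80321_do_pending().
 */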

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
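
/*
 * Bootstrap ordering sketch: board-specific startup code is expected
 * to call the two init routines above in this order, e.g.:
 *
 *	i80321_icu_init();	(early: all sources masked/steered to IRQ)
 *	...
 *	i80321_intr_init();	(later: evcnts attached, CPU IRQs enabled)
 */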

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
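
/*
 * Usage sketch (hypothetical caller, cf. the timer code elsewhere in
 * this directory): establish a handler at IPL_CLOCK for TMR0.  Note
 * that a NULL arg makes the dispatcher hand the handler the current
 * clockframe instead (see i80321_intr_dispatch() below):
 *
 *	void *ih = i80321_intr_establish(ICU_INT_TMR0, IPL_CLOCK,
 *	    clockhandler, NULL);
 *	if (ih == NULL)
 *		panic("unable to establish TMR0 interrupt");
 */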

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame. This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.) Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled. If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards. This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway. The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif

	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI. We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx(). Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
#ifdef I80321_HPI_ENABLED
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
#ifdef I80321_HPI_ENABLED
		else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI. Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~pcpl);
	}

	/* Check for pending soft intrs. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_SOFT* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_NET* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
	}
}
    618