Home | History | Annotate | Line # | Download | only in xscale
becc_icu.c revision 1.8
      1 /*	$NetBSD: becc_icu.c,v 1.8 2007/12/11 17:03:35 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2002 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Interrupt support for the ADI Engineering Big Endian Companion Chip.
     40  */
     41 
     42 #include <sys/cdefs.h>
     43 __KERNEL_RCSID(0, "$NetBSD: becc_icu.c,v 1.8 2007/12/11 17:03:35 ad Exp $");
     44 
     45 #ifndef EVBARM_SPL_NOINLINE
     46 #define	EVBARM_SPL_NOINLINE
     47 #endif
     48 
     49 #include <sys/param.h>
     50 #include <sys/systm.h>
     51 #include <sys/malloc.h>
     52 #include <sys/bus.h>
     53 #include <sys/intr.h>
     54 
     55 #include <uvm/uvm_extern.h>
     56 
     57 #include <arm/cpufunc.h>
     58 
     59 #include <arm/xscale/beccreg.h>
     60 #include <arm/xscale/beccvar.h>
     61 
     62 #include <arm/xscale/i80200reg.h>
     63 #include <arm/xscale/i80200var.h>
     64 
/* Interrupt handler queues, one per BECC interrupt source. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level (indexed by IPL_*). */
uint32_t becc_imask[NIPL];

/*
 * Current interrupt priority level, kept as the mask of IRQ bits
 * blocked at that level (see becc_intr_dispatch()).
 */
volatile uint32_t current_spl_level;

/* Interrupts pending. */
volatile uint32_t becc_ipending;	/* hardware IRQs deferred while masked */
volatile uint32_t becc_sipending;	/* software interrupt queues (SI_*) */

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;
     83 
/*
 * Interrupt bit names, indexed by IRQ number (0..31); used to label
 * the per-IRQ event counters attached in becc_intr_init().
 * XXX Some of these are BRH-centric.
 */
const char *becc_irqnames[] = {
	"soft",
	"timer A",
	"timer B",
	"irq 3",
	"irq 4",
	"irq 5",
	"irq 6",
	"diagerr",
	"DMA EOT",
	"DMA PERR",
	"DMA TABT",
	"DMA MABT",
	"irq 12",
	"irq 13",
	"irq 14",
	"irq 15",
	"PCI PERR",
	"irq 17",
	"irq 18",
	"PCI SERR",
	"PCI OAPE",
	"PCI OATA",
	"PCI OAMA",
	"irq 23",
	"irq 24",
	"irq 25",
	"irq 26",	/* PCI INTA */
	"irq 27",	/* PCI INTB */
	"irq 28",	/* PCI INTC */
	"irq 29",	/* PCI INTD */
	"pushbutton",
	"irq 31",
};
    122 
    123 void	becc_intr_dispatch(struct irqframe *frame);
    124 
    125 static inline uint32_t
    126 becc_icsr_read(void)
    127 {
    128 	uint32_t icsr;
    129 
    130 	icsr = BECC_CSR_READ(BECC_ICSR);
    131 
    132 	/*
    133 	 * The ICSR register shows bits that are active even if they are
    134 	 * masked in ICMR, so we have to mask them off with the interrupts
    135 	 * we consider enabled.
    136 	 */
    137 	return (icsr & intr_enabled);
    138 }
    139 
/*
 * becc_set_intrsteer:
 *
 *	Push the software steering mask (intr_steer) out to the
 *	BECC's ICSTR register; only bits covered by ICU_VALID_MASK
 *	are written.
 */
static inline void
becc_set_intrsteer(void)
{

	BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
	/* Read back, presumably to force the posted write to complete. */
	(void) BECC_CSR_READ(BECC_ICSTR);
}
    147 
/*
 * becc_enable_irq:
 *
 *	Enable the given IRQ: set its bit in the software enable
 *	mask, then propagate the mask to the hardware.
 */
static inline void
becc_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	becc_set_intrmask();
}
    155 
/*
 * becc_disable_irq:
 *
 *	Disable the given IRQ: clear its bit in the software enable
 *	mask, then propagate the mask to the hardware.
 */
static inline void
becc_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	becc_set_intrmask();
}
    163 
    164 /*
    165  * NOTE: This routine must be called with interrupts disabled in the CPSR.
    166  */
    167 static void
    168 becc_intr_calculate_masks(void)
    169 {
    170 	struct intrq *iq;
    171 	struct intrhand *ih;
    172 	int irq, ipl;
    173 
    174 	/* First, figure out which IPLs each IRQ has. */
    175 	for (irq = 0; irq < NIRQ; irq++) {
    176 		int levels = 0;
    177 		iq = &intrq[irq];
    178 		becc_disable_irq(irq);
    179 		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
    180 		     ih = TAILQ_NEXT(ih, ih_list))
    181 			levels |= (1U << ih->ih_ipl);
    182 		iq->iq_levels = levels;
    183 	}
    184 
    185 	/* Next, figure out which IRQs are used by each IPL. */
    186 	for (ipl = 0; ipl < NIPL; ipl++) {
    187 		int irqs = 0;
    188 		for (irq = 0; irq < NIRQ; irq++) {
    189 			if (intrq[irq].iq_levels & (1U << ipl))
    190 				irqs |= (1U << irq);
    191 		}
    192 		becc_imask[ipl] = irqs;
    193 	}
    194 
    195 	becc_imask[IPL_NONE] = 0;
    196 
    197 	/*
    198 	 * Enforce a hierarchy that gives "slow" device (or devices with
    199 	 * limited input buffer space/"real-time" requirements) a better
    200 	 * chance at not dropping data.
    201 	 */
    202 	becc_imask[IPL_SOFTCLOCK] = (1U << ICU_SOFT);
    203 	becc_imask[IPL_SOFTNET] = (1U << ICU_SOFT);
    204 	becc_imask[IPL_SOFTBIO] = (1U << ICU_SOFT);
    205 	becc_imask[IPL_SOFTSERIAL] = (1U << ICU_SOFT);
    206 	becc_imask[IPL_VM] |= becc_imask[IPL_SOFTSERIAL];
    207 	becc_imask[IPL_SCHED] |= becc_imask[IPL_VM];
    208 	becc_imask[IPL_HIGH] |= becc_imask[IPL_SCHED];
    209 
    210 	/*
    211 	 * Now compute which IRQs must be blocked when servicing any
    212 	 * given IRQ.
    213 	 */
    214 	for (irq = 0; irq < NIRQ; irq++) {
    215 		int irqs = (1U << irq);
    216 		iq = &intrq[irq];
    217 		if (TAILQ_FIRST(&iq->iq_list) != NULL)
    218 			becc_enable_irq(irq);
    219 		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
    220 		     ih = TAILQ_NEXT(ih, ih_list))
    221 			irqs |= becc_imask[ih->ih_ipl];
    222 		iq->iq_mask = irqs;
    223 	}
    224 }
    225 
/*
 * splx:
 *
 *	Set the interrupt priority level; thin wrapper around the
 *	BECC implementation.
 */
void
splx(int new)
{

	becc_splx(new);
}
    232 
/*
 * _spllower:
 *
 *	Lower the interrupt priority level to "ipl"; returns the
 *	previous level.  Thin wrapper around the BECC implementation.
 */
int
_spllower(int ipl)
{

	return (becc_spllower(ipl));
}
    239 
/*
 * _splraise:
 *
 *	Raise the interrupt priority level to "ipl"; returns the
 *	previous level.  Thin wrapper around the BECC implementation.
 */
int
_splraise(int ipl)
{

	return (becc_splraise(ipl));
}
    246 
/*
 * _setsoftintr:
 *
 *	Schedule software interrupt queue "si"; thin wrapper around
 *	the BECC implementation.
 */
void
_setsoftintr(int si)
{

	becc_setsoftintr(si);
}
    253 
/*
 * Map software interrupt queue numbers (SI_*) to the IPL they are
 * dispatched at; indexed by queue number in becc_softint().
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTBIO,		/* SI_SOFTBIO */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
    260 
/*
 * becc_softint:
 *
 *	Handler for the BECC soft interrupt: drains the pending
 *	software interrupt queues, highest priority first.  Does
 *	real work only when __HAVE_FAST_SOFTINTS is configured;
 *	"arg" is unused.  Always reports the interrupt as handled.
 */
int
becc_softint(void *arg)
{
#ifdef __HAVE_FAST_SOFTINTS
	/* Serializes queue draining; only one context runs the loop. */
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t	new, oldirqstate;

	/* Clear interrupt */
	BECC_CSR_WRITE(BECC_ICSR, 0);

	/* Someone else is already draining the queues; let them finish. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return 0;

	oldirqstate = disable_interrupts(I32_bit);

	/* spl mask to restore after each queue is dispatched. */
	new = current_spl_level;

	/*
	 * Run one queue if pending: raise the spl mask to the queue's
	 * IPL, re-enable CPU IRQs around the dispatch, then restore
	 * the saved spl with IRQs disabled again.
	 */
#define DO_SOFTINT(si)							\
	if (becc_sipending & (1 << (si))) {				\
		becc_sipending &= ~(1 << (si));				\
		current_spl_level |= becc_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	/* Highest-priority queue first. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
#endif

	return 1;
}
    300 
/*
 * becc_icu_init:
 *
 *	Initialize the BECC ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
becc_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	becc_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	becc_set_intrsteer();

	/* Route external IRQs from the i80200 core through our dispatcher. */
	i80200_extirq_dispatch = becc_intr_dispatch;

	/* Enable the external interrupt input at the i80200 core. */
	i80200_intr_enable(INTCTL_IM);
}
    321 
    322 /*
    323  * becc_intr_init:
    324  *
    325  *	Initialize the rest of the interrupt subsystem, making it
    326  *	ready to handle interrupts from devices.
    327  */
    328 void
    329 becc_intr_init(void)
    330 {
    331 	struct intrq *iq;
    332 	int i;
    333 
    334 	intr_enabled = 0;
    335 
    336 	for (i = 0; i < NIRQ; i++) {
    337 		iq = &intrq[i];
    338 		TAILQ_INIT(&iq->iq_list);
    339 
    340 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
    341 		    NULL, "becc", becc_irqnames[i]);
    342 	}
    343 
    344 	becc_intr_calculate_masks();
    345 
    346 	/* Enable IRQs (don't yet use FIQs). */
    347 	enable_interrupts(I32_bit);
    348 }
    349 
    350 void *
    351 becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
    352 {
    353 	struct intrq *iq;
    354 	struct intrhand *ih;
    355 	uint32_t oldirqstate;
    356 
    357 	if (irq < 0 || irq > NIRQ)
    358 		panic("becc_intr_establish: IRQ %d out of range", irq);
    359 
    360 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
    361 	if (ih == NULL)
    362 		return (NULL);
    363 
    364 	ih->ih_func = func;
    365 	ih->ih_arg = arg;
    366 	ih->ih_ipl = ipl;
    367 	ih->ih_irq = irq;
    368 
    369 	iq = &intrq[irq];
    370 
    371 	/* All BECC interrupts are level-triggered. */
    372 	iq->iq_ist = IST_LEVEL;
    373 
    374 	oldirqstate = disable_interrupts(I32_bit);
    375 
    376 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
    377 
    378 	becc_intr_calculate_masks();
    379 
    380 	restore_interrupts(oldirqstate);
    381 
    382 	return (ih);
    383 }
    384 
    385 void
    386 becc_intr_disestablish(void *cookie)
    387 {
    388 	struct intrhand *ih = cookie;
    389 	struct intrq *iq = &intrq[ih->ih_irq];
    390 	uint32_t oldirqstate;
    391 
    392 	oldirqstate = disable_interrupts(I32_bit);
    393 
    394 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
    395 
    396 	becc_intr_calculate_masks();
    397 
    398 	restore_interrupts(oldirqstate);
    399 }
    400 
/*
 * becc_intr_dispatch:
 *
 *	External IRQ dispatcher, hooked into the i80200 interrupt
 *	path via i80200_extirq_dispatch (set in becc_icu_init()).
 *	Assumed to be entered with CPU IRQs disabled — IRQs are
 *	explicitly re-enabled only around the handler calls below.
 */
void
becc_intr_dispatch(struct irqframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate, pcpl, irq, ibit, hwpend;
	struct cpu_info *ci;

	ci = curcpu();
	ci->ci_idepth++;		/* track interrupt nesting depth */
	pcpl = current_spl_level;	/* spl mask in force on entry */
	hwpend = becc_icsr_read();	/* active-and-enabled sources */

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 * (BECC interrupts are level-triggered, so a still-asserted
	 * source would otherwise re-interrupt immediately.)
	 */
	intr_enabled &= ~hwpend;
	becc_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;	/* lowest-numbered pending IRQ */
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			becc_ipending |= ibit;
			continue;
		}

		becc_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;	/* per-IRQ event counter */
		uvmexp.intrs++;		/* system-wide interrupt count */
		/*
		 * Raise the spl mask to block everything this IRQ's
		 * handlers require, then run the handler chain with
		 * CPU IRQs re-enabled.
		 */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* A NULL ih_arg means "pass the interrupt frame". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that's it's cleared. */
		intr_enabled |= ibit;
		becc_set_intrmask();
	}

	/*
	 * Re-enable any deferred source that is not masked at the spl
	 * we are returning to; being level-triggered, it will assert
	 * again and be dispatched.
	 */
	if (becc_ipending & ~pcpl) {
		intr_enabled |= (becc_ipending & ~pcpl);
		becc_set_intrmask();
	}

	ci->ci_idepth--;
}
    463