/*	$NetBSD: i80321_icu.c,v 1.2.8.2 2002/06/23 17:34:57 jdolecek Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static int imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Interrupts pending. */
static __volatile int ipending;

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << ICU_INT_bit26) | (1U << ICU_INT_bit22) |		\
	 (1U << ICU_INT_bit5)  | (1U << ICU_INT_bit4))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

void	i80321_intr_dispatch(struct clockframe *frame);

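/*
 * i80321_iintsrc_read:
 *
 *	Read the pending-interrupt source register (IINTSRC, cp6 c8)
 *	and return only the bits we have actually enabled.
 */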
static __inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm __volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

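/*
 * i80321_set_intrmask:
 *
 *	Push the software copy of the enabled-IRQ mask out to the
 *	interrupt control register (INTCTL, cp6 c0).
 */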
static __inline void
i80321_set_intrmask(void)
{

	__asm __volatile("mcr p6, 0, %0, c0, c0, 0"
		:
		: "r" (intr_enabled & ICU_INT_HWMASK));
}

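/*
 * i80321_set_intrsteer:
 *
 *	Push the IRQ/FIQ steering mask out to the interrupt steering
 *	register (cp6 c4); a set bit steers that source to FIQ.
 */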
static __inline void
i80321_set_intrsteer(void)
{

	__asm __volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

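/*
 * i80321_enable_irq/i80321_disable_irq:
 *
 *	Set or clear a single IRQ in the software mask and write the
 *	result out to the hardware.
 */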
static __inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static __inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * i80321_intr_calculate_masks:
 *
 *	Recompute, from the lists of established handlers, which IPLs
 *	each IRQ interrupts at (iq_levels), which IRQs each IPL must
 *	block (imask[]), and which IRQs must be blocked while servicing
 *	each IRQ (iq_mask); enable exactly those IRQs that have
 *	handlers attached.
 *
 *	NOTE: This routine must be called with interrupts disabled in
 *	the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
	}

	imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * No separate statclock on the i80321.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

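/*
 * i80321_do_pending:
 *
 *	Run any pending soft interrupts that the current spl level no
 *	longer masks, highest priority first.  The simple lock keeps
 *	this routine from being re-entered while a soft interrupt
 *	handler is running.
 */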
static void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~new) & SI_TO_IRQBIT(si)) {			\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level |= imask[si_to_ipl[(si)]];		\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

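/*
 * _splraise:
 *
 *	Raise the current spl level to (at least) "ipl" and return the
 *	previous level; never lowers the level.
 */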
int
_splraise(int ipl)
{
	int old, oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level |= imask[ipl];

	restore_interrupts(oldirqstate);

	return (old);
}

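/*
 * splx:
 *
 *	Restore a saved spl level, re-enabling any hardware interrupts
 *	that were held pending and running any newly unmasked soft
 *	interrupts.
 */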
__inline void
splx(int new)
{
	int oldirqstate, hwpend;

	current_spl_level = new;

	/*
	 * If there are pending HW interrupts which are being
	 * unmasked, then enable them in the INTCTL register.
	 * This will cause them to come flooding in.
	 */
	hwpend = (ipending & ICU_INT_HWMASK) & ~new;
	if (hwpend != 0) {
		oldirqstate = disable_interrupts(I32_bit);
		intr_enabled |= hwpend;
		i80321_set_intrmask();
		restore_interrupts(oldirqstate);
	}

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~new)
		i80321_do_pending();
}

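/*
 * _spllower:
 *
 *	Drop the spl level to "ipl" and return the previous level.
 */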
int
_spllower(int ipl)
{
	int old = current_spl_level;

	splx(imask[ipl]);
	return (old);
}

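/*
 * _setsoftintr:
 *
 *	Mark soft interrupt queue "si" pending, and dispatch it
 *	immediately if the current spl level allows.
 */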
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", iq->iq_name);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

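/*
 * i80321_intr_establish:
 *
 *	Register handler "func" (called with "arg", or with the
 *	clockframe if "arg" is NULL) to run at priority "ipl" when
 *	"irq" asserts.  Returns an opaque cookie for
 *	i80321_intr_disestablish(), or NULL if allocation fails.
 *
 *	A sketch of typical use from a hypothetical driver attach
 *	routine (sc_ih, sc_irq, and mydev_intr are illustrative names,
 *	not part of this API):
 *
 *		sc->sc_ih = i80321_intr_establish(sc->sc_irq, IPL_BIO,
 *		    mydev_intr, sc);
 *		if (sc->sc_ih == NULL)
 *			printf("%s: unable to establish interrupt\n",
 *			    sc->sc_dev.dv_xname);
 */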
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

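/*
 * i80321_intr_disestablish:
 *
 *	Remove the handler identified by "cookie" (as returned by
 *	i80321_intr_establish()) and recompute the masks.
 */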
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

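/*
 * i80321_intr_dispatch:
 *
 *	Called from the IRQ exception path to service all pending,
 *	unmasked hardware interrupts, then any unmasked soft
 *	interrupts.  Masked IRQs are left disabled and marked pending;
 *	splx() re-enables them when the mask drops.
 */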
void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			ipending |= ibit;
			continue;
		}

		ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();
	}

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
		restore_interrupts(oldirqstate);
	}
}