/*	$NetBSD: footbridge_irqhandler.c,v 1.21 2008/04/27 18:58:44 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

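/*
 * Build the spl(9) operations as the out-of-line functions defined in
 * this file (_splraise(), splx() and _spllower() below) rather than as
 * inlines.
 */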
#ifndef ARM_SPL_NOINLINE
#define	ARM_SPL_NOINLINE
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0,"$NetBSD: footbridge_irqhandler.c,v 1.21 2008/04/27 18:58:44 matt Exp $");

#include "opt_irqstats.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/cpu.h>
#include <arm/footbridge/dc21285mem.h>
#include <arm/footbridge/dc21285reg.h>

#include <dev/pci/pcivar.h>

#include "isa.h"
#if NISA > 0
#include <dev/isa/isavar.h>
#endif

/* Interrupt handler queues. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each level. */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Interrupts pending. */
volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt(void *, pci_intr_handle_t);

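/*
 * Return the event counter for a PCI interrupt handle.  Handles in the
 * 0x80-0x8f range are ISA interrupts and are handed to the ISA code;
 * all other handles index the footbridge IRQ queues directly.
 */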
const struct evcnt *
footbridge_pci_intr_evcnt(void *pcv, pci_intr_handle_t ih)
{
	/* XXX check range is valid */
#if NISA > 0
	if (ih >= 0x80 && ih <= 0x8f) {
		return isa_intr_evcnt(NULL, (ih & 0x0f));
	}
#endif
	return &footbridge_intrq[ih].iq_ev;
}

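/*
 * Enable or disable a single IRQ source by updating the software copy
 * of the enable mask and pushing it out to the interrupt controller.
 */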
static inline void
footbridge_enable_irq(int irq)
{
	intr_enabled |= (1U << irq);
	footbridge_set_intrmask();
}

static inline void
footbridge_disable_irq(int irq)
{
	intr_enabled &= ~(1U << irq);
	footbridge_set_intrmask();
}

/*
 * Recompute the per-IPL masks and each IRQ's dispatch mask from the
 * lists of established handlers.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &footbridge_intrq[irq];
		footbridge_disable_irq(irq);
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			levels |= (1U << ih->ih_ipl);
		}
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (footbridge_intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		footbridge_imask[ipl] = irqs;
	}

	/* IPL_NONE must open up all interrupts. */
	KASSERT(footbridge_imask[IPL_NONE] == 0);
	KASSERT(footbridge_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(footbridge_imask[IPL_SOFTBIO] == 0);
	KASSERT(footbridge_imask[IPL_SOFTNET] == 0);
	KASSERT(footbridge_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	KASSERT(footbridge_imask[IPL_VM] != 0);
	footbridge_imask[IPL_SCHED] |= footbridge_imask[IPL_VM];
	footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_SCHED];

	/*
	 * Calculate the ipl to raise to when handling each interrupt.
	 */
	for (irq = 0, iq = footbridge_intrq; irq < NIRQ; irq++, iq++) {
		int irqs = (1U << irq);
		if (!TAILQ_EMPTY(&iq->iq_list)) {
			footbridge_enable_irq(irq);
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				irqs |= footbridge_imask[ih->ih_ipl];
			}
		}
		iq->iq_mask = irqs;
	}
}

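/*
 * MI spl(9) entry points; thin wrappers around the footbridge spl
 * primitives.
 */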
int
_splraise(int ipl)
{
	return (footbridge_splraise(ipl));
}

/* This will always take us to the ipl passed in. */
void
splx(int new)
{
	footbridge_splx(new);
}

int
_spllower(int ipl)
{
	return (footbridge_spllower(ipl));
}

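/*
 * Initialize the interrupt system: mask every source, attach an event
 * counter to each IRQ queue, compute the initial masks and finally
 * enable IRQs at the CPU.
 */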
void
footbridge_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	set_curcpl(0xffffffff);
	footbridge_ipending = 0;
	footbridge_set_intrmask();

	for (i = 0, iq = footbridge_intrq; i < NIRQ; i++, iq++) {
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "footbridge", iq->iq_name);
	}

	footbridge_intr_calculate_masks();

	/* Enable IRQs; we don't have any FIQs. */
	enable_interrupts(I32_bit);
}

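/*
 * Establish an interrupt handler: queue the handler on the given IRQ at
 * the given IPL, recompute the masks and rename the IRQ's event counter
 * after the attaching device.
 */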
void *
footbridge_intr_claim(int irq, int ipl, const char *name,
    int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("footbridge_intr_claim: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL) {
		printf("footbridge_intr_claim: could not allocate handler\n");
		return (NULL);
	}

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &footbridge_intrq[irq];

	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	/* Detach the existing event counter and add the new name. */
	evcnt_detach(&iq->iq_ev);
	evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
	    NULL, "footbridge", name);

	restore_interrupts(oldirqstate);

	return (ih);
}

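/*
 * Remove a previously established handler and recompute the masks.
 */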
void
footbridge_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &footbridge_intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	/* The handler was allocated in footbridge_intr_claim(); free it. */
	free(ih, M_DEVBUF);
}

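/* Read the raw interrupt status from the DC21285 IRQ_STATUS register. */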
static inline uint32_t
footbridge_intstatus(void)
{
	return ((volatile uint32_t *)(DC21285_ARMCSR_VBASE))[IRQ_STATUS>>2];
}

/*
 * Main interrupt dispatcher, called from the IRQ vector with external
 * interrupts disabled.  Each pending source is masked in hardware, its
 * handlers are run at the appropriate IPL with interrupts re-enabled,
 * and the source is unmasked again once its handlers have completed.
 */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const int imask = footbridge_imask[ppl];

	hwpend = footbridge_intstatus();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	footbridge_set_intrmask();

	while (hwpend != 0) {
		int intr_rc = 0;
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			footbridge_ipending |= ibit;
			continue;
		}

		footbridge_ipending &= ~ibit;

		iq = &footbridge_intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
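		/*
		 * Run the handlers for this IRQ at their IPL, with
		 * interrupts re-enabled around each call; stop at the
		 * first handler that does not return 1.
		 */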
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
			if (intr_rc != 1)
				break;
		}

		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		footbridge_set_intrmask();

		/*
		 * Also check for any new interrupts that may have arrived
		 * and that we can handle at this spl level.
		 */
		hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~imask;
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif /* __HAVE_FAST_SOFTINTS */
}