/* $NetBSD: ixp12x0_intr.c,v 1.17.8.1 2008/05/18 12:31:37 yamt Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.17.8.1 2008/05/18 12:31:37 yamt Exp $");

/*
 * Interrupt support for the Intel ixp12x0
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/simplelock.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern u_int32_t	ixpcom_cr;	/* current CR value, from *_com.c */
extern u_int32_t	ixpcom_imask;	/* interrupt mask bits handed to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t intr_enabled;
volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

void	ixp12x0_intr_dispatch(struct irqframe *frame);

#define IXPREG(reg)	*((volatile u_int32_t*) (reg))

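/*
 * Read the pending interrupt sources.  The system IRQ status is read
 * through the mapped register at IXP12X0_IRQ_VBASE (only the bits in
 * IXP12X0_INTR_MASK are valid sources); PCI IRQ status comes from
 * IXPPCI_IRQ_STATUS.
 */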
static inline u_int32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline u_int32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

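/*
 * The UART interrupt is masked at the device rather than in the
 * interrupt controller: while it is to be blocked, the receive and
 * transmit interrupt enables (CR_RIE/CR_XIE) are cleared in the
 * ixpcom control register, and ixpcom_imask tells *_com.c which
 * bits are currently masked.
 */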
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

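/*
 * ixp12x0_set_intrmask:
 *
 *	Apply the given masks of system IRQs and PCI IRQs to the
 *	hardware.  The UART interrupt is masked at the device (see
 *	above); PCI interrupts are masked and re-enabled through the
 *	ENABLE_CLEAR/ENABLE_SET registers.
 */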
static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

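/*
 * ixp12x0_enable_irq / ixp12x0_disable_irq:
 *
 *	Enable or disable a single interrupt source.  IRQ numbers
 *	below SYS_NIRQ are system interrupts (UART, PCI); numbers at
 *	or above SYS_NIRQ index the PCI interrupt enable registers.
 */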
static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq:bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);	/* clear, don't toggle */
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

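/*
 * ixp12x0_intr_calculate_masks:
 *
 *	Recompute imask[]/pci_imask[] for every IPL and the per-IRQ
 *	blocking masks from the list of established handlers, then
 *	enable exactly those IRQs that have at least one handler.
 */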
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);

	KASSERT(imask[IPL_VM] != 0);
	KASSERT(pci_imask[IPL_VM] != 0);

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_VM];
	pci_imask[IPL_CLOCK] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_CLOCK];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_CLOCK];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

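/*
 * splx:
 *
 *	Set the interrupt priority level to "new".  The hardware
 *	interrupt mask is only rewritten when the level actually
 *	changes; pending soft interrupts are run once the level has
 *	been lowered.
 */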
inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

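/*
 * _splraise/_spllower:
 *
 *	Raising the IPL only updates the software copy of the level;
 *	reprogramming the hardware mask is deferred until splx() (or
 *	the dispatcher) brings the level back down.  Lowering goes
 *	through splx() so the hardware mask is updated immediately.
 */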
int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int	old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "ipl %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "ixpintr", iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	curcpu()->ci_cpl = 0;
	hardware_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

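/*
 * ixp12x0_intr_establish:
 *
 *	Register an interrupt handler for "irq" at priority "ipl".
 *	All sources are treated as level-triggered (IST_LEVEL).
 *	Returns an opaque cookie for ixp12x0_intr_disestablish(),
 *	or NULL if the handler could not be allocated.
 *
 *	A driver would typically hook its interrupt like this (a
 *	sketch only; "softc" and "foo_intr" are hypothetical names):
 *
 *		softc->sc_ih = ixp12x0_intr_establish(IXP12X0_INTR_PCI,
 *		    IPL_VM, foo_intr, softc);
 *		if (softc->sc_ih == NULL)
 *			panic("foo_attach: can't establish interrupt");
 */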
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
	       irq, ipl, (u_int32_t) ih_func, (u_int32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

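/*
 * ixp12x0_intr_disestablish:
 *
 *	Remove a handler previously established with
 *	ixp12x0_intr_establish() and recompute the interrupt masks.
 */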
void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];	/* index by IRQ, not IPL */
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	free(ih, M_DEVBUF);
}

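/*
 * ixp12x0_intr_dispatch:
 *
 *	IRQ dispatcher, presumably invoked from the IRQ entry path
 *	with the interrupted frame.  Reads the pending system and PCI
 *	interrupt bits, masks everything that must be blocked at the
 *	current IPL, runs the handlers for each remaining source at
 *	their own IPL with interrupts re-enabled, and finally
 *	restores the previous IPL and hardware mask.
 */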
void
ixp12x0_intr_dispatch(struct irqframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	struct cpu_info* const	ci = curcpu();
	const int		ppl = ci->ci_cpl;
	u_int			oldirqstate;
	u_int32_t		hwpend;
	u_int32_t		pci_hwpend;
	int			irq;
	u_int32_t		ibit;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);

	hwpend &= ~imask[ppl];
	pci_hwpend &= ~pci_imask[ppl];

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);
		hwpend &= ~ibit;	/* clear even if the queue is empty */

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);
		pci_hwpend &= ~ibit;

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	ci->ci_cpl = ppl;
	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}