Home | History | Annotate | Line # | Download | only in ixp12x0
      1 /* $NetBSD: ixp12x0_intr.c,v 1.33 2020/11/20 18:26:26 thorpej Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Ichiro FUKUHARA and Naoto Shimazaki.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.33 2020/11/20 18:26:26 thorpej Exp $");
     34 
     35 /*
     36  * Interrupt support for the Intel ixp12x0
     37  */
     38 
     39 #include <sys/param.h>
     40 #include <sys/systm.h>
     41 #include <sys/kmem.h>
     42 #include <sys/termios.h>
     43 #include <sys/bus.h>
     44 #include <sys/intr.h>
     45 #include <sys/lwp.h>
     46 
     47 #include <arm/locore.h>
     48 
     49 #include <arm/ixp12x0/ixp12x0reg.h>
     50 #include <arm/ixp12x0/ixp12x0var.h>
     51 #include <arm/ixp12x0/ixp12x0_comreg.h>
     52 #include <arm/ixp12x0/ixp12x0_comvar.h>
     53 #include <arm/ixp12x0/ixp12x0_pcireg.h>
     54 
     55 
/* UART state shared with ixp12x0_com.c. */
extern uint32_t	ixpcom_cr;	/* current cr from *_com.c */
extern uint32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues, one per IRQ source (system + PCI). */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level (system and PCI banks). */
static uint32_t imask[NIPL];
static uint32_t pci_imask[NIPL];

/* Current interrupt priority level actually programmed into hardware. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;
volatile uint32_t pci_intr_enabled;

void	ixp12x0_intr_dispatch(struct trapframe *);

/* Access a memory-mapped 32-bit device register at virtual address "reg". */
#define IXPREG(reg)	*((volatile uint32_t*) (reg))
     76 
     77 static inline uint32_t
     78 ixp12x0_irq_read(void)
     79 {
     80 	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
     81 }
     82 
     83 static inline uint32_t
     84 ixp12x0_pci_irq_read(void)
     85 {
     86 	return IXPREG(IXPPCI_IRQ_STATUS);
     87 }
     88 
     89 static void
     90 ixp12x0_enable_uart_irq(void)
     91 {
     92 	ixpcom_imask = 0;
     93 	if (ixpcom_sc)
     94 		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
     95 				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
     96 }
     97 
     98 static void
     99 ixp12x0_disable_uart_irq(void)
    100 {
    101 	ixpcom_imask = CR_RIE | CR_XIE;
    102 	if (ixpcom_sc)
    103 		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
    104 				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
    105 }
    106 
/*
 * ixp12x0_set_intrmask:
 *
 *	Program the hardware so that the sources in "irqs" (system bank)
 *	and "pci_irqs" (PCI bank) are masked off.
 */
static void
ixp12x0_set_intrmask(uint32_t irqs, uint32_t pci_irqs)
{
	/*
	 * The UART is the only system source masked here; it is masked
	 * at the device itself via its control register.
	 */
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	/*
	 * Disable the requested PCI sources first, then re-enable every
	 * other source that software currently has enabled.
	 */
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}
    118 
    119 static void
    120 ixp12x0_enable_irq(int irq)
    121 {
    122 	if (irq < SYS_NIRQ) {
    123 		intr_enabled |= (1U << irq);
    124 		switch (irq) {
    125 		case IXP12X0_INTR_UART:
    126 			ixp12x0_enable_uart_irq();
    127 			break;
    128 
    129 		case IXP12X0_INTR_PCI:
    130 			/* nothing to do */
    131 			break;
    132 		default:
    133 			panic("enable_irq:bad IRQ %d", irq);
    134 		}
    135 	} else {
    136 		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
    137 		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
    138 	}
    139 }
    140 
    141 static inline void
    142 ixp12x0_disable_irq(int irq)
    143 {
    144 	if (irq < SYS_NIRQ) {
    145 		intr_enabled ^= ~(1U << irq);
    146 		switch (irq) {
    147 		case IXP12X0_INTR_UART:
    148 			ixp12x0_disable_uart_irq();
    149 			break;
    150 
    151 		case IXP12X0_INTR_PCI:
    152 			/* nothing to do */
    153 			break;
    154 		default:
    155 			/* nothing to do */
    156 			break;
    157 		}
    158 	} else {
    159 		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
    160 		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
    161 	}
    162 }
    163 
    164 /*
    165  * NOTE: This routine must be called with interrupts disabled in the CPSR.
    166  */
/*
 * ixp12x0_intr_calculate_masks:
 *
 *	Recompute imask[]/pci_imask[] and each queue's blocking masks
 *	from the currently-registered handlers, then re-enable exactly
 *	the IRQs that have at least one handler.  Called whenever a
 *	handler is established or disestablished.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Disabled here; re-enabled below once masks are consistent. */
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		/* System sources occupy intrq[0..SYS_NIRQ-1]. */
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		/* PCI sources occupy intrq[SYS_NIRQ..NIRQ-1]. */
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	/* Soft interrupt levels must not block any hardware source. */
	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);
	KASSERT(imask[IPL_SOFTCLOCK] == 0);
	KASSERT(pci_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(imask[IPL_SOFTBIO] == 0);
	KASSERT(pci_imask[IPL_SOFTBIO] == 0);
	KASSERT(imask[IPL_SOFTNET] == 0);
	KASSERT(pci_imask[IPL_SOFTNET] == 0);
	KASSERT(imask[IPL_SOFTSERIAL] == 0);
	KASSERT(pci_imask[IPL_SOFTSERIAL] == 0);

	KASSERT(imask[IPL_VM] != 0);
	KASSERT(pci_imask[IPL_VM] != 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	imask[IPL_SCHED] |= imask[IPL_VM];
	pci_imask[IPL_SCHED] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_SCHED];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		/* Each IRQ at least blocks itself, in its own bank. */
		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		/* Only re-enable sources that have a handler attached. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		/* ...plus everything blocked by its handlers' IPLs. */
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}
    254 
    255 inline void
    256 splx(int new)
    257 {
    258 	u_int	oldirqstate;
    259 
    260 	oldirqstate = disable_interrupts(I32_bit);
    261 	set_curcpl(new);
    262 	if (new != hardware_spl_level) {
    263 		hardware_spl_level = new;
    264 		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
    265 	}
    266 	restore_interrupts(oldirqstate);
    267 
    268 #ifdef __HAVE_FAST_SOFTINTS
    269 	cpu_dosoftints();
    270 #endif
    271 }
    272 
    273 int
    274 _splraise(int ipl)
    275 {
    276 	int	old;
    277 	u_int	oldirqstate;
    278 
    279 	oldirqstate = disable_interrupts(I32_bit);
    280 	old = curcpl();
    281 	set_curcpl(ipl);
    282 	restore_interrupts(oldirqstate);
    283 	return (old);
    284 }
    285 
/*
 * _spllower:
 *
 *	Lower the interrupt priority level to "ipl" (a no-op if the
 *	current level is already at or below it) and return the
 *	previous level.
 */
int
_spllower(int ipl)
{
	const int old = curcpl();

	if (old > ipl)
		splx(ipl);
	return old;
}
    296 
    297 /*
    298  * ixp12x0_intr_init:
    299  *
    300  *	Initialize the rest of the interrupt subsystem, making it
    301  *	ready to handle interrupts from devices.
    302  */
    303 void
    304 ixp12x0_intr_init(void)
    305 {
    306 	struct intrq *iq;
    307 	int i;
    308 
    309 	intr_enabled = 0;
    310 	pci_intr_enabled = 0;
    311 
    312 	for (i = 0; i < NIRQ; i++) {
    313 		iq = &intrq[i];
    314 		TAILQ_INIT(&iq->iq_list);
    315 
    316 		snprintf(iq->iq_name, sizeof(iq->iq_name), "ipl %d", i);
    317 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
    318 				     NULL, "ixpintr", iq->iq_name);
    319 	}
    320 	curcpu()->ci_intr_depth = 0;
    321 	curcpu()->ci_cpl = 0;
    322 	hardware_spl_level = 0;
    323 
    324 	ixp12x0_intr_calculate_masks();
    325 
    326 	/* Enable IRQs (don't yet use FIQs). */
    327 	enable_interrupts(I32_bit);
    328 }
    329 
    330 void *
    331 ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
    332 {
    333 	struct intrq*		iq;
    334 	struct intrhand*	ih;
    335 	u_int			oldirqstate;
    336 #ifdef DEBUG
    337 	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
    338 	       irq, ipl, (uint32_t) ih_func, (uint32_t) arg);
    339 #endif
    340 	if (irq < 0 || irq > NIRQ)
    341 		panic("ixp12x0_intr_establish: IRQ %d out of range", ipl);
    342 	if (ipl < 0 || ipl > NIPL)
    343 		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);
    344 
    345 	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
    346 	ih->ih_func = ih_func;
    347 	ih->ih_arg = arg;
    348 	ih->ih_irq = irq;
    349 	ih->ih_ipl = ipl;
    350 
    351 	iq = &intrq[irq];
    352 	iq->iq_ist = IST_LEVEL;
    353 
    354 	oldirqstate = disable_interrupts(I32_bit);
    355 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
    356 	ixp12x0_intr_calculate_masks();
    357 	restore_interrupts(oldirqstate);
    358 
    359 	return (ih);
    360 }
    361 
    362 void
    363 ixp12x0_intr_disestablish(void *cookie)
    364 {
    365 	struct intrhand*	ih = cookie;
    366 	struct intrq*		iq = &intrq[ih->ih_ipl];
    367 	u_int			oldirqstate;
    368 
    369 	oldirqstate = disable_interrupts(I32_bit);
    370 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
    371 	ixp12x0_intr_calculate_masks();
    372 	restore_interrupts(oldirqstate);
    373 }
    374 
    375 void
    376 ixp12x0_intr_dispatch(struct trapframe *frame)
    377 {
    378 	struct intrq*		iq;
    379 	struct intrhand*	ih;
    380 	struct cpu_info* const	ci = curcpu();
    381 	const int		ppl = ci->ci_cpl;
    382 	u_int			oldirqstate;
    383 	uint32_t		hwpend;
    384 	uint32_t		pci_hwpend;
    385 	int			irq;
    386 	uint32_t		ibit;
    387 
    388 
    389 	hwpend = ixp12x0_irq_read();
    390 	pci_hwpend = ixp12x0_pci_irq_read();
    391 
    392 	hardware_spl_level = ppl;
    393 	ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);
    394 
    395 	hwpend &= ~imask[ppl];
    396 	pci_hwpend &= ~pci_imask[ppl];
    397 
    398 	while (hwpend) {
    399 		irq = ffs(hwpend) - 1;
    400 		ibit = (1U << irq);
    401 
    402 		iq = &intrq[irq];
    403 		iq->iq_ev.ev_count++;
    404 		ci->ci_data.cpu_nintr++;
    405 		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
    406 			ci->ci_cpl = ih->ih_ipl;
    407 			oldirqstate = enable_interrupts(I32_bit);
    408 			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
    409 			restore_interrupts(oldirqstate);
    410 			hwpend &= ~ibit;
    411 		}
    412 	}
    413 	while (pci_hwpend) {
    414 		irq = ffs(pci_hwpend) - 1;
    415 		ibit = (1U << irq);
    416 
    417 		iq = &intrq[irq + SYS_NIRQ];
    418 		iq->iq_ev.ev_count++;
    419 		ci->ci_data.cpu_nintr++;
    420 		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
    421 			ci->ci_cpl = ih->ih_ipl;
    422 			oldirqstate = enable_interrupts(I32_bit);
    423 			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
    424 			restore_interrupts(oldirqstate);
    425 		}
    426 		pci_hwpend &= ~ibit;
    427 	}
    428 
    429 	ci->ci_cpl = ppl;
    430 	hardware_spl_level = ppl;
    431 	ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);
    432 
    433 #ifdef __HAVE_FAST_SOFTINTS
    434 	cpu_dosoftints();
    435 #endif
    436 }
    437