Home | History | Annotate | Line # | Download | only in rmi
rmixl_intr.c revision 1.1.2.2
      1 /*	$NetBSD: rmixl_intr.c,v 1.1.2.2 2009/09/25 22:22:09 cliff Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or
      8  * without modification, are permitted provided that the following
      9  * conditions are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above
     13  *    copyright notice, this list of conditions and the following
     14  *    disclaimer in the documentation and/or other materials provided
     15  *    with the distribution.
     16  * 3. The names of the authors may not be used to endorse or promote
     17  *    products derived from this software without specific prior
     18  *    written permission.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
     21  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
     23  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     26  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
     27  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
     29  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
     31  * OF SUCH DAMAGE.
     32  */
     33 /*-
     34  * Copyright (c) 2001 The NetBSD Foundation, Inc.
     35  * All rights reserved.
     36  *
     37  * This code is derived from software contributed to The NetBSD Foundation
     38  * by Jason R. Thorpe.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59  * POSSIBILITY OF SUCH DAMAGE.
     60  */
     61 
     62 /*
     63  * Platform-specific interrupt support for the RMI XLP, XLR, XLS
     64  */
     65 
     66 #include <sys/cdefs.h>
     67 __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.2 2009/09/25 22:22:09 cliff Exp $");
     68 
     69 #include "opt_ddb.h"
     70 
     71 #include <sys/param.h>
     72 #include <sys/queue.h>
     73 #include <sys/malloc.h>
     74 #include <sys/systm.h>
     75 #include <sys/device.h>
     76 #include <sys/kernel.h>
     77 
     78 #include <machine/bus.h>
     79 #include <machine/intr.h>
     80 
     81 #include <mips/locore.h>
     82 #include <mips/rmi/rmixlreg.h>
     83 #include <mips/rmi/rmixlvar.h>
     84 
     85 #include <dev/pci/pcireg.h>
     86 #include <dev/pci/pcivar.h>
     87 
     88 /*
     89  * This is a mask of bits to clear in the SR when we go to a
     90  * given hardware interrupt priority level.
     91  */
/*
 * For each IPL, the set of CAUSE/SR interrupt-mask bits that must be
 * BLOCKED while running at that level.  Each entry is a superset of
 * the one below it, so raising the IPL only ever masks more sources.
 * IPL_SCHED masks everything: both soft-interrupt bits and all six
 * hardware interrupt lines.
 */
const uint32_t ipl_sr_bits[_IPL_N] = {
	[IPL_NONE] = 0,
	[IPL_SOFTCLOCK] =
		MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =
		MIPS_SOFT_INT_MASK_0
	      | MIPS_SOFT_INT_MASK_1,
	[IPL_VM] =
		MIPS_SOFT_INT_MASK_0
	      | MIPS_SOFT_INT_MASK_1
	      | MIPS_INT_MASK_0,
	[IPL_SCHED] =
		MIPS_SOFT_INT_MASK_0
	      | MIPS_SOFT_INT_MASK_1
	      | MIPS_INT_MASK_0
	      | MIPS_INT_MASK_1
	      | MIPS_INT_MASK_2
	      | MIPS_INT_MASK_3
	      | MIPS_INT_MASK_4
	      | MIPS_INT_MASK_5,
};
    113 
    114 /*
 * 'IRQs' here are individual interrupt sources
    116  * each has a slot in the Interrupt Redirection Table (IRT)
    117  * in the order listed
    118  *
    119  * NOTE: many irq sources depend on the chip family
    120  * XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx
    121  * so just use generic names where they diverge
    122  */
#define	NIRQS	32

/*
 * Human-readable name for each of the NIRQS interrupt sources,
 * indexed by IRT slot number; used when attaching the per-IRQ
 * event counters.  Sources whose meaning varies across XLS chip
 * families keep a generic "irqNN" name.
 */
static const char *rmixl_irqnames[NIRQS] = {
	[0]  = "int 0 (watchdog)",
	[1]  = "int 1 (timer0)",
	[2]  = "int 2 (timer1)",
	[3]  = "int 3 (timer2)",
	[4]  = "int 4 (timer3)",
	[5]  = "int 5 (timer4)",
	[6]  = "int 6 (timer5)",
	[7]  = "int 7 (timer6)",
	[8]  = "int 8 (timer7)",
	[9]  = "int 9 (uart0)",
	[10] = "int 10 (uart1)",
	[11] = "int 11 (i2c0)",
	[12] = "int 12 (i2c1)",
	[13] = "int 13 (pcmcia)",
	[14] = "int 14 (gpio_a)",
	[15] = "int 15 (irq15)",
	[16] = "int 16 (bridge_tb)",
	[17] = "int 17 (gmac0)",
	[18] = "int 18 (gmac1)",
	[19] = "int 19 (gmac2)",
	[20] = "int 20 (gmac3)",
	[21] = "int 21 (irq21)",
	[22] = "int 22 (irq22)",
	[23] = "int 23 (irq23)",
	[24] = "int 24 (irq24)",
	[25] = "int 25 (bridge_err)",
	[26] = "int 26 (pcie_link0)",
	[27] = "int 27 (pcie_link1)",
	[28] = "int 28 (irq28)",
	[29] = "int 29 (irq29)",
	[30] = "int 30 (gpio_b)",
	[31] = "int 31 (usb)",
};
    158 
    159 /*
    160  * per-IRQ event stats
    161  */
/*
 * Per-IRQ state: an event counter for dispatch statistics and the
 * (single) established handler, or NULL when the IRQ is free.
 * A non-NULL irq_ih also serves as the "busy" marker checked by
 * rmixl_intr_establish().
 */
struct rmixl_irqtab {
	struct evcnt irq_count;	/* dispatch count for this IRQ */
	void *irq_ih;		/* established handler cookie, NULL if free */
};
static struct rmixl_irqtab rmixl_irqtab[NIRQS];
    167 
    168 
    169 /*
    170  * 'vectors' here correspond to IRT Entry vector numbers
    171  * - IRT Entry vector# is bit# in EIRR
    172  * - note that EIRR[7:0] == CAUSE[15:8]
    173  * - we actually only use the first _IPL_N bits
    174  *   (less than 8)
    175  *
    176  * each IRT entry gets routed to a vector
    177  * (if and when that interrupt is established)
    178  * the vectors are shared on a per-IPL basis
    179  * which simplifies dispatch
    180  *
    181  * XXX use of mips64 extended IRQs is TBD
    182  */
/* one EIRR vector per IPL level actually used for dispatch */
#define	NINTRVECS	_IPL_N

/*
 * translate IPL to vector number
 *
 * IPL_NONE has no dispatch vector; it maps to the sentinel -1,
 * which callers must not pass through to the hardware.
 */
static const int rmixl_iplvec[_IPL_N] = {
	[IPL_NONE] = 		-1,	/* XXX */
	[IPL_SOFTCLOCK] =	 0,
	[IPL_SOFTNET] =		 1,
	[IPL_VM] =		 2,
	[IPL_SCHED] =		 3,
};
    195 
    196 /*
    197  * list and ref count manage sharing of each vector
    198  */
/*
 * Per-vector state: the list of interrupt handlers sharing the
 * vector, plus a reference count of how many are established.
 */
struct rmixl_intrvec {
	LIST_HEAD(, evbmips_intrhand) iv_list;	/* handlers on this vector */
	u_int iv_refcnt;			/* # of established handlers */
};
static struct rmixl_intrvec rmixl_intrvec[NINTRVECS];
    204 
    205 
    206 /*
    207  * register byte order is BIG ENDIAN regardless of code model
    208  */
/*
 * REG_DEREF(o): lvalue for PIC register at byte offset 'o', accessed
 * uncached through KSEG1.  PIC registers are big-endian regardless of
 * the kernel's byte order, so REG_READ/REG_WRITE swap as needed.
 */
#define REG_DEREF(o)					\
	*((volatile uint32_t *)MIPS_PHYS_TO_KSEG1( 	\
		rmixl_configuration.rc_io_pbase 	\
		+ RMIXL_IO_DEV_PIC + (o)))

#define REG_READ(o)	be32toh(REG_DEREF(o))
#define REG_WRITE(o,v)	REG_DEREF(o) = htobe32(v)
    216 
/*
 * evbmips_intr_init:
 *	one-time platform interrupt initialization.
 *
 *	- attach a dynamic event counter per IRQ and mark all IRQs free
 *	- initialize the per-vector handler lists
 *	- quiesce the PIC (clear all non-reserved PIC_CONTROL bits,
 *	  which disables watchdog, watchdog NMI and timers)
 *	- invalidate every IRT entry (clear non-reserved bits of the
 *	  high word, C1) and set the Thread#0 mask bit in the low
 *	  word (C0)
 */
void
evbmips_intr_init(void)
{
	uint32_t r;
	int i;

	for (i=0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&rmixl_irqtab[i].irq_count,
			EVCNT_TYPE_INTR, NULL, "rmixl", rmixl_irqnames[i]);
		rmixl_irqtab[i].irq_ih = NULL;
	}

	for (i=0; i < NINTRVECS; i++) {
		LIST_INIT(&rmixl_intrvec[i].iv_list);
		rmixl_intrvec[i].iv_refcnt = 0;
	}

	/*
	 * disable watchdog, watchdog NMI, timers
	 * (keep only the reserved bits of PIC_CONTROL)
	 */
	r = REG_READ(RMIXL_PIC_CONTROL);
	r &= RMIXL_PIC_CONTROL_RESV;
	REG_WRITE(RMIXL_PIC_CONTROL, r);

	/*
	 * invalidate all IRT Entries
	 * permanently unmask Thread#0 in low word
	 * (assume we only have 1 thread)
	 */
	for (i=0; i < NIRQS; i++) {

		/* high word: clearing non-reserved bits drops VALID */
		r = REG_READ(RMIXL_PIC_IRTENTRYC1(i));
		r &= RMIXL_PIC_IRTENTRYC1_RESV;
		REG_WRITE(RMIXL_PIC_IRTENTRYC1(i), r);

		/* low word */
		r = REG_READ(RMIXL_PIC_IRTENTRYC0(i));
		r &= RMIXL_PIC_IRTENTRYC0_RESV;
		r |= 1;					/* Thread Mask */
		REG_WRITE(RMIXL_PIC_IRTENTRYC0(i), r);
	}
}
    260 
    261 void *
    262 rmixl_intr_establish(int irq, int ipl, rmixl_intr_trigger_t trigger,
    263 	rmixl_intr_polarity_t polarity, int (*func)(void *), void *arg)
    264 {
    265 	struct evbmips_intrhand *ih;
    266 	uint32_t irtc1;
    267 	int vec;
    268 	int s;
    269 
    270 	/*
    271 	 * check args and assemble an IRT Entry
    272 	 */
    273 	if (irq < 0 || irq >= NIRQS)
    274 		panic("%s: irq %d out of range, max %d",
    275 			__func__, irq, NIRQS - 1);
    276 	if (ipl < 0 || ipl >= _IPL_N)
    277 		panic("%s: ipl %d out of range, max %d",
    278 			__func__, ipl, _IPL_N - 1);
    279 	if (rmixl_irqtab[irq].irq_ih != NULL)
    280 		panic("%s: irq %d busy", __func__, irq);
    281 
    282 	irtc1  = RMIXL_PIC_IRTENTRYC1_VALID;
    283 	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */
    284 
    285 	switch (trigger) {
    286 	case RMIXL_INTR_EDGE:
    287 		break;
    288 	case RMIXL_INTR_LEVEL:
    289 		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
    290 		break;
    291 	default:
    292 		panic("%s: bad trigger %d\n", __func__, trigger);
    293 	}
    294 
    295 	switch (polarity) {
    296 	case RMIXL_INTR_RISING:
    297 	case RMIXL_INTR_HIGH:
    298 		break;
    299 	case RMIXL_INTR_FALLING:
    300 	case RMIXL_INTR_LOW:
    301 		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
    302 		break;
    303 	default:
    304 		panic("%s: bad polarity %d\n", __func__, polarity);
    305 	}
    306 
    307 	/*
    308 	 * ipl determines which vector to use
    309 	 */
    310 	vec = rmixl_iplvec[ipl];
    311 printf("%s: ipl=%d, vec=%d\n", __func__, ipl, vec);
    312 	KASSERT((vec & ~RMIXL_PIC_IRTENTRYC1_INTVEC) == 0);
    313 	irtc1 |= vec;
    314 
    315 	/*
    316 	 * allocate and initialize an interrupt handle
    317 	 */
    318 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
    319 	if (ih == NULL)
    320 		return NULL;
    321 
    322 	ih->ih_func = func;
    323 	ih->ih_arg = arg;
    324 	ih->ih_irq = irq;
    325 	ih->ih_ipl = ipl;
    326 
    327 	s = splhigh();
    328 
    329 	/*
    330 	 * mark this irq as established, busy
    331 	 */
    332 	rmixl_irqtab[irq].irq_ih = ih;
    333 
    334 	/*
    335 	 * link this ih into the tables and bump reference count
    336 	 */
    337 	LIST_INSERT_HEAD(&rmixl_intrvec[vec].iv_list, ih, ih_q);
    338 	rmixl_intrvec[vec].iv_refcnt++;
    339 
    340 	/*
    341 	 * establish IRT Entry (low word only)
    342 	 */
    343 	REG_WRITE(RMIXL_PIC_IRTENTRYC1(irq), irtc1);
    344 
    345 	splx(s);
    346 
    347 	return ih;
    348 }
    349 
/*
 * rmixl_intr_disestablish:
 *	tear down a handler previously returned by rmixl_intr_establish().
 *
 *	Unlinks the handler from its vector list, clears the IRT entry's
 *	high word (C1) — dropping the VALID bit so the source no longer
 *	fires — marks the IRQ free, and frees the handle.
 */
void
rmixl_intr_disestablish(void *cookie)
{
	struct evbmips_intrhand *ih = cookie;
	uint32_t r;
	int irq;
	int vec;
	int s;

	irq = ih->ih_irq;
	vec = rmixl_iplvec[ih->ih_ipl];

	s = splhigh();

	/*
	 * remove from the table and adjust the reference count
	 */
	LIST_REMOVE(ih, ih_q);
	rmixl_intrvec[vec].iv_refcnt--;

	/*
	 * disable the IRT Entry: clear all non-reserved bits of the
	 * high word (C1), including VALID
	 */
	r = REG_READ(RMIXL_PIC_IRTENTRYC1(irq));
	r &= RMIXL_PIC_IRTENTRYC1_RESV;
	REG_WRITE(RMIXL_PIC_IRTENTRYC1(irq), r);

	/*
	 * this irq now disestablished, not busy
	 */
	rmixl_irqtab[irq].irq_ih = NULL;

	splx(s);

	free(ih, M_DEVBUF);
}
    386 
    387 void
    388 evbmips_iointr(uint32_t status, uint32_t cause, uint32_t pc, uint32_t ipending)
    389 {
    390 	struct evbmips_intrhand *ih;
    391 	uint64_t eirr;
    392 	uint64_t eimr;
    393 	uint32_t sr;
    394 	int vec;
    395 
    396 	printf("\n%s: status: %#x, cause %#x\n", __func__, status, cause);
    397 	asm volatile ("mfc0 %0, $9, 6;" :"=r"(sr));
    398 	printf("%s:%d: SR: %#x\n", __func__, __LINE__, sr);
    399 	asm volatile ("dmfc0 %0, $9, 7;" :"=r"(eimr));
    400 	printf("%s: EIMR: %#lx\n", __func__, eimr);
    401 
    402 	for (vec = NINTRVECS - 1; vec >= 0; vec--) {
    403 		if ((ipending & (MIPS_SOFT_INT_MASK_0 << vec)) == 0)
    404 			continue;
    405 
    406 		/* ack this vec in the EIRR */
    407 		eirr = (1 << vec);
    408 		asm volatile ("dmtc0 %0, $9, 6;" :: "r"(eirr));
    409 
    410 		LIST_FOREACH(ih, &rmixl_intrvec[vec].iv_list, ih_q) {
    411 			if ((*ih->ih_func)(ih->ih_arg) != 0)
    412 				rmixl_irqtab[ih->ih_irq].irq_count.ev_count++;
    413 		}
    414 		cause &= ~(MIPS_SOFT_INT_MASK_0 << vec);
    415 	}
    416 
    417 	/* Re-enable anything that we have processed. */
    418 	printf("%s:%d: re-enable: %#x\n", __func__, __LINE__,
    419 		MIPS_SR_INT_IE | ((status & ~cause) & MIPS_HARD_INT_MASK));
    420 	_splset(MIPS_SR_INT_IE | ((status & ~cause) & MIPS_HARD_INT_MASK));
    421 
    422 	asm volatile ("mfc0 %0, $9, 6;" :"=r"(sr));
    423 	printf("%s: SR: %#x\n", __func__, sr);
    424 
    425 	asm volatile ("dmfc0 %0, $9, 6;" :"=r"(eirr));
    426 	printf("%s: EIRR: %#lx\n", __func__, eirr);
    427 
    428 	asm volatile ("dmfc0 %0, $9, 7;" :"=r"(eimr));
    429 	printf("%s: EIMR: %#lx\n", __func__, eimr);
    430 }
    431