Home | History | Annotate | Line # | Download | only in cavium
octeon_intr.c revision 1.3.2.2
      1 /*	$NetBSD: octeon_intr.c,v 1.3.2.2 2015/06/06 14:40:01 skrll Exp $	*/
      2 /*
      3  * Copyright 2001, 2002 Wasabi Systems, Inc.
      4  * All rights reserved.
      5  *
      6  * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *      This product includes software developed for the NetBSD Project by
     19  *      Wasabi Systems, Inc.
     20  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     21  *    or promote products derived from this software without specific prior
     22  *    written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34  * POSSIBILITY OF SUCH DAMAGE.
     35  */
     36 
/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */
     40 
     41 #include "opt_octeon.h"
     42 #define __INTR_PRIVATE
     43 
     44 #include <sys/cdefs.h>
     45 __KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.3.2.2 2015/06/06 14:40:01 skrll Exp $");
     46 
     47 #include <sys/param.h>
     48 #include <sys/cpu.h>
     49 #include <sys/systm.h>
     50 #include <sys/device.h>
     51 #include <sys/intr.h>
     52 #include <sys/kernel.h>
     53 #include <sys/kmem.h>
     54 #include <sys/atomic.h>
     55 
     56 #include <lib/libkern/libkern.h>
     57 
     58 #include <mips/locore.h>
     59 
     60 #include <mips/cavium/dev/octeon_ciureg.h>
     61 #include <mips/cavium/octeonvar.h>
     62 
/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 *
 * CR int 0 carries IPL_VM work, int 5 (CP0 count/compare) is
 * blocked from IPL_SCHED up, and int 1 is additionally blocked
 * at IPL_DDB; IPL_HIGH masks every interrupt source.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};
     80 
/*
 * Human-readable names for the NIRQS CIU interrupt sources, indexed
 * by their bit position in the CIU_INTx_SUM0 registers.  Used as the
 * event names for the per-CPU evcnt counters attached in
 * octeon_intr_init().
 */
const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};
    147 
/*
 * One registered interrupt handler.  Allocated in
 * octeon_intr_establish() and stored in octeon_ciu_intrs[] at the
 * slot matching its CIU bit.
 */
struct octeon_intrhand {
	int (*ih_func)(void *);		/* handler; returns nonzero if claimed */
	void *ih_arg;			/* opaque argument passed to ih_func */
	int ih_irq;			/* CIU source bit number (0..NIRQS-1) */
	int ih_ipl;			/* IPL the handler was established at */
};
    154 
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

/*
 * Statically-allocated handlers for the two CIU mailbox interrupt
 * sources used for IPIs.  The ih_arg is the mask of mailbox bits
 * each handler services: bits 15:0 at IPL_SCHED and bits 31:16 at
 * IPL_HIGH (the latter used for urgent IPIs such as suspend; see
 * octeon_send_ipi()).
 */
struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif
    174 
/*
 * Handler table indexed by CIU source bit.  Only the IPI mailbox
 * slots are populated statically; everything else is filled in by
 * octeon_intr_establish().  Readers in octeon_iointr() run lock-free;
 * writers hold octeon_intr_lock and publish with membar_producer().
 */
struct octeon_intrhand *octeon_ciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};

/* Serializes establish/disestablish and the enable-mask shadows. */
kmutex_t octeon_intr_lock;
    183 
/* Map a CIU register's physical address into XKPHYS uncached space. */
#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

/*
 * Per-CPU interrupt state for core 0.  The three sum/enable pairs
 * feed MIPS hard interrupt lines 0/1/2 and are used for IPL_VM,
 * IPL_SCHED, and IPL_HIGH sources respectively (see the switch in
 * octeon_intr_establish()).  The EN1/INT32 registers are written to
 * zero at init and otherwise unused here.
 */
struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};
    207 
#ifdef MULTIPROCESSOR
/*
 * Per-CPU interrupt state for core 1, using the second bank of CIU
 * registers.  Note .cpu_ci is not set here; octeon_intr_init() links
 * it via ci->ci_softc when the secondary CPU attaches.
 */
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif

#undef X
    230 
/*
 * Per-CPU interrupt initialization, called once per core during
 * attach.  CPU0 additionally initializes the global lock, installs
 * the IPL-to-SR map, and (MP) hooks the IPI send routine.  Finishes
 * by programming the CIU enable registers from the softc shadows,
 * clearing any stale mailbox bits, and attaching evcnt counters for
 * every CIU source.
 */
void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu;

	ipl_sr_map = octeon_ipl_sr_map;

	if (ci->ci_cpuid == 0) {
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
		cpu = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
	} else {
		/* Only a two-core configuration is supported. */
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		cpu = &octeon_cpu1_softc;
#else
		/*
		 * NOTE(review): cpu is NULL here and dereferenced
		 * below; a non-MP kernel presumably never reaches
		 * this branch — confirm.
		 */
		cpu = NULL;
#endif
	}
	ci->ci_softc = cpu;

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	// NOTE(review): ipi_intrhands[0] is IPL_SCHED, yet mbox 15:0
	// is enabled via int0 (the IPL_VM path in
	// octeon_intr_establish) — confirm this is intended.
	cpu->cpu_int0_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
#endif

	/* Push the shadow enable masks out to the CIU. */
	mips64_sd_a64(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips64_sd_a64(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips64_sd_a64(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips64_sd_a64(cpu->cpu_int32_en, 0);

	mips64_sd_a64(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips64_sd_a64(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips64_sd_a64(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	/* Discard any mailbox bits left over from before reset. */
	mips64_sd_a64(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}
    281 
    282 void
    283 octeon_cal_timer(int corefreq)
    284 {
    285 	/* Compute the number of cycles per second. */
    286 	curcpu()->ci_cpu_freq = corefreq;
    287 
    288 	/* Compute the number of ticks for hz. */
    289 	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
    290 
    291 	/* Compute the delay divisor and reciprical. */
    292 	curcpu()->ci_divisor_delay =
    293 	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
    294 #if 0
    295 	MIPS_SET_CI_RECIPRICAL(curcpu());
    296 #endif
    297 
    298 	mips3_cp0_count_write(0);
    299 	mips3_cp0_compare_write(0);
    300 }
    301 
    302 void *
    303 octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
    304 {
    305 	struct octeon_intrhand *ih;
    306 
    307 	if (irq >= NIRQS)
    308 		panic("octeon_intr_establish: bogus IRQ %d", irq);
    309 	if (ipl < IPL_VM)
    310 		panic("octeon_intr_establish: bogus IPL %d", ipl);
    311 
    312 	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
    313 	if (ih == NULL)
    314 		return (NULL);
    315 
    316 	ih->ih_func = func;
    317 	ih->ih_arg = arg;
    318 	ih->ih_irq = irq;
    319 	ih->ih_ipl = ipl;
    320 
    321 	mutex_enter(&octeon_intr_lock);
    322 
    323 	/*
    324 	 * First, make it known.
    325 	 */
    326 	KASSERTMSG(octeon_ciu_intrs[irq] == NULL, "irq %d in use! (%p)",
    327 	    irq, octeon_ciu_intrs[irq]);
    328 
    329 	octeon_ciu_intrs[irq] = ih;
    330 	membar_producer();
    331 
    332 	/*
    333 	 * Now enable it.
    334 	 */
    335 	const uint64_t irq_mask = __BIT(irq);
    336 	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
    337 #if MULTIPROCESSOR
    338 	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
    339 #endif
    340 
    341 	switch (ipl) {
    342 	case IPL_VM:
    343 		cpu0->cpu_int0_enable0 |= irq_mask;
    344 		mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
    345 		break;
    346 
    347 	case IPL_SCHED:
    348 		cpu0->cpu_int1_enable0 |= irq_mask;
    349 		mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
    350 #ifdef MULTIPROCESSOR
    351 		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
    352 		mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
    353 #endif
    354 		break;
    355 
    356 	case IPL_DDB:
    357 	case IPL_HIGH:
    358 		cpu0->cpu_int2_enable0 |= irq_mask;
    359 		mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
    360 #ifdef MULTIPROCESSOR
    361 		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
    362 		mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
    363 #endif
    364 		break;
    365 	}
    366 
    367 	mutex_exit(&octeon_intr_lock);
    368 
    369 	return ih;
    370 }
    371 
    372 void
    373 octeon_intr_disestablish(void *cookie)
    374 {
    375 	struct octeon_intrhand * const ih = cookie;
    376 	const int irq = ih->ih_irq & (NIRQS-1);
    377 	const int ipl = ih->ih_ipl;
    378 
    379 	mutex_enter(&octeon_intr_lock);
    380 
    381 	/*
    382 	 * First disable it.
    383 	 */
    384 	const uint64_t irq_mask = ~__BIT(irq);
    385 	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
    386 #if MULTIPROCESSOR
    387 	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
    388 #endif
    389 
    390 	switch (ipl) {
    391 	case IPL_VM:
    392 		cpu0->cpu_int0_enable0 &= ~irq_mask;
    393 		mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
    394 		break;
    395 
    396 	case IPL_SCHED:
    397 		cpu0->cpu_int1_enable0 &= ~irq_mask;
    398 		mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
    399 #ifdef MULTIPROCESSOR
    400 		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
    401 		mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
    402 #endif
    403 		break;
    404 
    405 	case IPL_DDB:
    406 	case IPL_HIGH:
    407 		cpu0->cpu_int2_enable0 &= ~irq_mask;
    408 		mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
    409 #ifdef MULTIPROCESSOR
    410 		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
    411 		mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
    412 #endif
    413 		break;
    414 	}
    415 
    416 	/*
    417 	 * Now remove it since we shouldn't get interrupts for it.
    418 	 */
    419 	octeon_ciu_intrs[irq] = NULL;
    420 
    421 	mutex_exit(&octeon_intr_lock);
    422 
    423 	kmem_free(ih, sizeof(*ih));
    424 }
    425 
/*
 * Hardware interrupt dispatcher, called from the MIPS interrupt
 * trampoline with the pending cause bits in `ipending'.  Reads the
 * CIU summary register for the highest-priority pending line
 * (int2 = IPL_HIGH, int1 = IPL_SCHED, int0 = IPL_VM), masks it with
 * our enable shadow, and runs each pending handler in ascending bit
 * order, counting events as it goes.
 */
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	/* Service only the highest-priority pending CIU line. */
	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips64_ld_a64(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips64_ld_a64(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips64_ld_a64(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octeon_ciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
			/*
			 * The interleaved #ifdefs below select one of
			 * two shapes: on MP kernels, IPL_VM handlers
			 * run under the big KERNEL_LOCK while higher
			 * IPLs run without it; on non-MP kernels the
			 * handler is simply called directly.
			 */
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
		}
	}
}
    469 
#ifdef MULTIPROCESSOR
/* Each IPI gets one mailbox bit in each 16-bit half-word. */
__CTASSERT(NIPIS < 16);

/*
 * Mailbox (IPI) interrupt handler.  `arg' is the mask of mailbox
 * bits this instance services (15:0 at IPL_SCHED, 31:16 at
 * IPL_HIGH; see ipi_intrhands[]).  Acknowledges the mailbox bits,
 * folds the high half down onto the low half so both map onto the
 * same IPI numbers, and processes any still-requested IPIs.
 * Returns 1 if any IPI was handled, 0 if it was already processed.
 */
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = (uintptr_t) arg;

	/* Latch and acknowledge only the bits we own. */
	ipi_mask &= mips64_ld_a64(cpu->cpu_mbox_set);
	mips64_sd_a64(cpu->cpu_mbox_clr, ipi_mask);

	/* Fold bits 31:16 onto 15:0 to recover IPI numbers. */
	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ci->ci_cpl >= IPL_SCHED);
	KASSERT(ipi_mask < __BIT(NIPIS));

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	/* Mark active before clearing the request to avoid a window. */
	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}
    502 
    503 int
    504 octeon_send_ipi(struct cpu_info *ci, int req)
    505 {
    506 
    507 	KASSERT(req < NIPIS);
    508 	if (ci == NULL) {
    509 		// only deals with 2 CPUs
    510 		ci = cpuid_infos[(cpu_number() == 0) ? 1 : 0];
    511 	}
    512 
    513 	struct cpu_softc * const cpu = ci->ci_softc;
    514 	uint64_t ipi_mask = __BIT(req);
    515 
    516 	if (req == IPI_SUSPEND) {
    517 		ipi_mask <<= 16;
    518 	}
    519 
    520 	mips64_sd_a64(cpu->cpu_mbox_set, ipi_mask);
    521 	return 0;
    522 }
    523 #endif	/* MULTIPROCESSOR */
    524