/*	$NetBSD: octeon_intr.c,v 1.3 2015/06/01 22:55:12 matt Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_octeon.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.3 2015/06/01 22:55:12 matt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_2
				    | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};
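/*
 * How this table lines up with the rest of the file (a sketch, not
 * chip documentation): the generic MIPS spl code clears the listed SR
 * bits when raising to an IPL, and the grouping mirrors the routing
 * used below: CIU "int0" sources arrive on MIPS_INT_MASK_0 (IPL_VM),
 * "int1" sources on MIPS_INT_MASK_1 (IPL_SCHED), "int2" sources on
 * MIPS_INT_MASK_2 (IPL_DDB/IPL_HIGH), and the CP0 count/compare clock
 * on MIPS_INT_MASK_5.
 */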

const char * const octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

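/*
 * Interrupt handler record, one per established CIU source.
 * octeon_intr_establish() returns a pointer to one of these as its
 * cookie; the statically initialized ipi_intrhands[] below use the
 * same layout for the mailbox (IPI) sources.
 */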
struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

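/*
 * IPIs travel over the CIU mailbox bits.  The low 16 mailbox bits are
 * handled at IPL_SCHED and the high 16 at IPL_HIGH; octeon_send_ipi()
 * below shifts IPI_SUSPEND into the high half, presumably so it can be
 * taken even while the target core is running at splsched().  ih_arg
 * is the mask of mailbox bits each handler instance owns.
 */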
#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = _CIU_INT_MBOX_15_0_SHIFT,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = _CIU_INT_MBOX_31_16_SHIFT,
		.ih_ipl = IPL_HIGH,
	},
};
#endif

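/*
 * Dispatch table of established handlers, indexed by CIU bit number
 * (the same indexing as octeon_intrnames[]).  octeon_iointr() walks
 * the pending summary bits and calls through this table.
 */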
struct octeon_intrhand *octeon_ciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[_CIU_INT_MBOX_15_0_SHIFT] = &ipi_intrhands[0],
	[_CIU_INT_MBOX_31_16_SHIFT] = &ipi_intrhands[1],
#endif
};

kmutex_t octeon_intr_lock;

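/*
 * Per-core views of the CIU.  X() turns a CIU register's physical
 * address into an XKPHYS address (uncached, per OCTEON_CCA_NONE) so it
 * can be accessed directly with mips64_ld_a64()/mips64_sd_a64().
 * Core 0 uses the INT0/INT1/INT4 register bank below; core 1 (only
 * present on MULTIPROCESSOR kernels) uses INT2/INT3 and the second set
 * of INT4 enables.
 */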
#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};

#ifdef MULTIPROCESSOR
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif

#undef X

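/*
 * Per-CPU interrupt initialization, presumably called once per core
 * during early machine-dependent startup: install the IPL-to-SR map,
 * point ci_softc at this core's CIU register view, enable the mailbox
 * (IPI) sources on MULTIPROCESSOR kernels, push the initial enable
 * masks out to the CIU, and attach an event counter for every CIU
 * source (named from octeon_intrnames[]) so they show up in vmstat -i.
 */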
void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu;

	ipl_sr_map = octeon_ipl_sr_map;

	if (ci->ci_cpuid == 0) {
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
		cpu = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
	} else {
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		cpu = &octeon_cpu1_softc;
#else
		cpu = NULL;
#endif
	}
	ci->ci_softc = cpu;

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_int0_enable0 |= __BIT(_CIU_INT_MBOX_15_0_SHIFT);
	cpu->cpu_int2_enable0 |= __BIT(_CIU_INT_MBOX_31_16_SHIFT);
#endif

	mips64_sd_a64(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips64_sd_a64(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips64_sd_a64(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips64_sd_a64(cpu->cpu_int32_en, 0);

	mips64_sd_a64(cpu->cpu_int0_en1, 0);	// WDOG IPL2
	mips64_sd_a64(cpu->cpu_int1_en1, 0);	// WDOG IPL3
	mips64_sd_a64(cpu->cpu_int2_en1, 0);	// WDOG IPL4

#ifdef MULTIPROCESSOR
	mips64_sd_a64(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (size_t i = 0; i < NIRQS; i++) {
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
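	/*
	 * Worked example (illustrative numbers, not read from hardware):
	 * with corefreq = 500000000 (500 MHz) and hz = 100 this gives
	 * ci_cycles_per_hz = 5000000 and ci_divisor_delay = 500, i.e.
	 * 500 CP0 count ticks per microsecond for delay().
	 */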
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

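/*
 * Register an interrupt handler for one CIU source and unmask it at
 * the requested IPL: IPL_VM sources are enabled on cpu0's int0 line
 * only, while IPL_SCHED and IPL_DDB/IPL_HIGH sources are mirrored to
 * cpu1 (int1 and int2) on MULTIPROCESSOR kernels.  Returns an opaque
 * cookie for octeon_intr_disestablish(), or NULL if the allocation
 * fails.
 *
 * Illustrative call from a driver attach function; "CIU_INT_UART_0"
 * stands in for whatever octeon_ciureg.h actually names the "uart 0"
 * source, and foo_intr/sc are hypothetical:
 *
 *	sc->sc_ih = octeon_intr_establish(CIU_INT_UART_0, IPL_VM,
 *	    foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error(": unable to establish interrupt\n");
 */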
void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octeon_ciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octeon_ciu_intrs[irq]);

	octeon_ciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 |= irq_mask;
		mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 |= irq_mask;
		mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 |= irq_mask;
		mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}

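/*
 * Undo octeon_intr_establish(): mask the source in the same enable
 * register(s) it was enabled in, unhook it from octeon_ciu_intrs[],
 * and free the cookie.
 */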
void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const uint64_t irq_mask = __BIT(irq);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		cpu0->cpu_int0_enable0 &= ~irq_mask;
		mips64_sd_a64(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		break;

	case IPL_SCHED:
		cpu0->cpu_int1_enable0 &= ~irq_mask;
		mips64_sd_a64(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
		mips64_sd_a64(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		break;

	case IPL_DDB:
	case IPL_HIGH:
		cpu0->cpu_int2_enable0 &= ~irq_mask;
		mips64_sd_a64(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
		cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
		mips64_sd_a64(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octeon_ciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

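/*
 * Hardware interrupt dispatch, presumably called from the port's MIPS
 * interrupt glue with the CP0 cause bits that fired.  The highest
 * pending line is served first (int2, then int1, then int0) and picks
 * the CIU summary register to read; every bit that is both pending and
 * enabled has its event counter bumped and its handler run.  On
 * MULTIPROCESSOR kernels only IPL_VM handlers take the big kernel
 * lock; IPL_SCHED and higher handlers run without it.
 */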
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;

	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend = 0;

	if (ipending & MIPS_INT_MASK_2) {
		hwpend = mips64_ld_a64(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend = mips64_ld_a64(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend = mips64_ld_a64(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	while (hwpend != 0) {
		const int irq = ffs64(hwpend) - 1;
		hwpend &= ~__BIT(irq);

		struct octeon_intrhand * const ih = octeon_ciu_intrs[irq];
		cpu->cpu_intr_evs[irq].ev_count++;
		if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
			if (ipl == IPL_VM) {
				KERNEL_LOCK(1, NULL);
#endif
				(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
				KERNEL_UNLOCK_ONE(NULL);
			} else {
				(*ih->ih_func)(ih->ih_arg);
			}
#endif
		}
	}
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

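/*
 * Mailbox (IPI) interrupt handler; one instance per mailbox half, see
 * ipi_intrhands[].  arg is the mask of mailbox bits this instance
 * owns.  The bits found set are acknowledged by writing them to the
 * mailbox-clear register, folded down into the low 16 bits, and then
 * matched against the IPIs requested for this CPU before being
 * processed.
 */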
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = (uintptr_t) arg;

	ipi_mask &= mips64_ld_a64(cpu->cpu_mbox_set);
	mips64_sd_a64(cpu->cpu_mbox_clr, ipi_mask);

	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ci->ci_cpl >= IPL_SCHED);
	KASSERT(ipi_mask < __BIT(NIPIS));

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

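/*
 * Send an IPI by setting a bit in the target core's mailbox.  A NULL
 * ci means "the other CPU"; this code only copes with two cores.
 * IPI_SUSPEND is shifted into the upper mailbox half so it is taken at
 * IPL_HIGH rather than IPL_SCHED.
 */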
int
octeon_send_ipi(struct cpu_info *ci, int req)
{

	KASSERT(req < NIPIS);
	if (ci == NULL) {
		// only deals with 2 CPUs
		ci = cpuid_infos[(cpu_number() == 0) ? 1 : 0];
	}

	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = __BIT(req);

	if (req == IPI_SUSPEND) {
		ipi_mask <<= 16;
	}

	mips64_sd_a64(cpu->cpu_mbox_set, ipi_mask);
	return 0;
}
#endif	/* MULTIPROCESSOR */