/*	$NetBSD: octeon_intr.c,v 1.15 2020/07/16 21:33:50 jmcneill Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.15 2020/07/16 21:33:50 jmcneill Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] =		0,
	[IPL_SOFTCLOCK] =	MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] =		MIPS_SOFT_INT_MASK,
	[IPL_VM] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	[IPL_SCHED] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_DDB] =		MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_HIGH] =		MIPS_INT_MASK,
    },
};
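
/*
 * Illustration only (not compiled): a minimal sketch of how the table
 * above is consumed.  For a given IPL, the listed bits are cleared from
 * the CP0 status register, which disables those interrupt sources.  The
 * real masking is done by the common MIPS spl code; the helper name
 * below is hypothetical.
 */
#if 0
static inline uint32_t
octeon_example_sr_for_ipl(uint32_t sr, int ipl)
{

	/* Clear the interrupt-enable bits listed for this level. */
	return sr & ~octeon_ipl_sr_map.sr_bits[ipl];
}
#endif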

const char * octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_SCHED,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = CIU_INT_MBOX_31_16,
		.ih_ipl = IPL_HIGH,
	},
};
#endif

struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
	[CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};

kmutex_t octeon_intr_lock;

#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

struct cpu_softc octeon_cpu0_softc = {
	.cpu_ci = &cpu_info_store,
	.cpu_int0_sum0 = X(CIU_INT0_SUM0),
	.cpu_int1_sum0 = X(CIU_INT1_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM0),

	.cpu_int_sum1 = X(CIU_INT_SUM1),

	.cpu_int0_en0 = X(CIU_INT0_EN0),
	.cpu_int1_en0 = X(CIU_INT1_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN00),

	.cpu_int0_en1 = X(CIU_INT0_EN1),
	.cpu_int1_en1 = X(CIU_INT1_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN01),

	.cpu_int32_en = X(CIU_INT32_EN0),

	.cpu_wdog = X(CIU_WDOG0),
	.cpu_pp_poke = X(CIU_PP_POKE0),

#ifdef MULTIPROCESSOR
	.cpu_mbox_set = X(CIU_MBOX_SET0),
	.cpu_mbox_clr = X(CIU_MBOX_CLR0),
#endif
};

#ifdef MULTIPROCESSOR
/* XXX limit of two CPUs ... */
struct cpu_softc octeon_cpu1_softc = {
	.cpu_int0_sum0 = X(CIU_INT2_SUM0),
	.cpu_int1_sum0 = X(CIU_INT3_SUM0),
	.cpu_int2_sum0 = X(CIU_INT4_SUM1),

	.cpu_int_sum1 = X(CIU_INT_SUM1),

	.cpu_int0_en0 = X(CIU_INT2_EN0),
	.cpu_int1_en0 = X(CIU_INT3_EN0),
	.cpu_int2_en0 = X(CIU_INT4_EN10),

	.cpu_int0_en1 = X(CIU_INT2_EN1),
	.cpu_int1_en1 = X(CIU_INT3_EN1),
	.cpu_int2_en1 = X(CIU_INT4_EN11),

	.cpu_int32_en = X(CIU_INT32_EN1),

	.cpu_wdog = X(CIU_WDOG(1)),
	.cpu_pp_poke = X(CIU_PP_POKE1),

	.cpu_mbox_set = X(CIU_MBOX_SET1),
	.cpu_mbox_clr = X(CIU_MBOX_CLR1),
};
#endif

#ifdef DEBUG
static void
octeon_mbox_test(void)
{
	const uint64_t mbox_clr0 = X(CIU_MBOX_CLR0);
	const uint64_t mbox_clr1 = X(CIU_MBOX_CLR1);
	const uint64_t mbox_set0 = X(CIU_MBOX_SET0);
	const uint64_t mbox_set1 = X(CIU_MBOX_SET1);
	const uint64_t int_sum0 = X(CIU_INT0_SUM0);
	const uint64_t int_sum1 = X(CIU_INT2_SUM0);
	const uint64_t sum_mbox_lo = __BIT(CIU_INT_MBOX_15_0);
	const uint64_t sum_mbox_hi = __BIT(CIU_INT_MBOX_31_16);

	mips3_sd(mbox_clr0, ~0ULL);
	mips3_sd(mbox_clr1, ~0ULL);

	uint32_t mbox0 = mips3_ld(mbox_set0);
	uint32_t mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	mips3_sd(mbox_set0, __BIT(0));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == 1, "mbox0 %#x mbox1 %#x", mbox0, mbox1);
	KDASSERTMSG(mbox1 == 0, "mbox0 %#x mbox1 %#x", mbox0, mbox1);

	uint64_t sum0 = mips3_ld(int_sum0);
	uint64_t sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) != 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) == 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);

	mips3_sd(mbox_clr0, mbox0);
	mbox0 = mips3_ld(mbox_set0);
	KDASSERTMSG(mbox0 == 0, "mbox0 %#x", mbox0);

	mips3_sd(mbox_set0, __BIT(16));

	mbox0 = mips3_ld(mbox_set0);
	mbox1 = mips3_ld(mbox_set1);

	KDASSERTMSG(mbox0 == __BIT(16), "mbox0 %#x", mbox0);
	KDASSERTMSG(mbox1 == 0, "mbox1 %#x", mbox1);

	sum0 = mips3_ld(int_sum0);
	sum1 = mips3_ld(int_sum1);

	KDASSERTMSG((sum0 & sum_mbox_lo) == 0, "sum0 %#"PRIx64, sum0);
	KDASSERTMSG((sum0 & sum_mbox_hi) != 0, "sum0 %#"PRIx64, sum0);

	KDASSERTMSG((sum1 & sum_mbox_lo) == 0, "sum1 %#"PRIx64, sum1);
	KDASSERTMSG((sum1 & sum_mbox_hi) == 0, "sum1 %#"PRIx64, sum1);
}
#endif

#undef X

void
octeon_intr_init(struct cpu_info *ci)
{
#ifdef DIAGNOSTIC
	const int cpunum = cpu_index(ci);
#endif
	const char * const xname = cpu_name(ci);
	struct cpu_softc *cpu = ci->ci_softc;

	if (ci->ci_cpuid == 0) {
		KASSERT(ci->ci_softc == &octeon_cpu0_softc);
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif
#ifdef DEBUG
		octeon_mbox_test();
#endif
	} else {
		KASSERT(cpunum == 1);
#ifdef MULTIPROCESSOR
		KASSERT(ci->ci_softc == &octeon_cpu1_softc);
#endif
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_int1_enable0 |= __BIT(CIU_INT_MBOX_15_0);
	cpu->cpu_int2_enable0 |= __BIT(CIU_INT_MBOX_31_16);
#endif

	if (ci->ci_dev)
		aprint_verbose_dev(ci->ci_dev,
		    "enabling intr masks %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
		    cpu->cpu_int0_enable0, cpu->cpu_int1_enable0,
		    cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int0_en0, cpu->cpu_int0_enable0);
	mips3_sd(cpu->cpu_int1_en0, cpu->cpu_int1_enable0);
	mips3_sd(cpu->cpu_int2_en0, cpu->cpu_int2_enable0);

	mips3_sd(cpu->cpu_int32_en, 0);

	mips3_sd(cpu->cpu_int0_en1, cpu->cpu_int0_enable1);
	mips3_sd(cpu->cpu_int1_en1, cpu->cpu_int1_enable1);
	mips3_sd(cpu->cpu_int2_en1, cpu->cpu_int2_enable1);

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (int i = 0; i < NIRQS; i++) {
		if (octeon_intrnames[i] == NULL)
			octeon_intrnames[i] = kmem_asprintf("irq %d", i);
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}
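
/*
 * Worked example (illustrative numbers only): with corefreq = 500000000
 * (500 MHz) and hz = 100, ci_cycles_per_hz becomes
 * (500000000 + 50) / 100 = 5000000, and ci_divisor_delay becomes
 * (500000000 + 500000) / 1000000 = 500, i.e. roughly 500 CP0 count
 * ticks per microsecond of delay().
 */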

void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octciu_intrs[irq]);

	octciu_intrs[irq] = ih;
	membar_producer();

	/*
	 * Now enable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		if (bank == 0) {
			cpu0->cpu_int0_enable0 |= irq_mask;
			mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		} else {
			cpu0->cpu_int0_enable1 |= irq_mask;
			mips3_sd(cpu0->cpu_int0_en1, cpu0->cpu_int0_enable1);
		}
		break;

	case IPL_SCHED:
		if (bank == 0) {
			cpu0->cpu_int1_enable0 |= irq_mask;
			mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
			mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		} else {
			cpu0->cpu_int1_enable1 |= irq_mask;
			mips3_sd(cpu0->cpu_int1_en1, cpu0->cpu_int1_enable1);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int1_enable1 = cpu0->cpu_int1_enable1;
			mips3_sd(cpu1->cpu_int1_en1, cpu1->cpu_int1_enable1);
#endif
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		if (bank == 0) {
			cpu0->cpu_int2_enable0 |= irq_mask;
			mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
			mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		} else {
			cpu0->cpu_int2_enable1 |= irq_mask;
			mips3_sd(cpu0->cpu_int2_en1, cpu0->cpu_int2_enable1);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int2_enable1 = cpu0->cpu_int2_enable1;
			mips3_sd(cpu1->cpu_int2_en1, cpu1->cpu_int2_enable1);
#endif
		}
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}
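
/*
 * Illustration only (not compiled): how a driver would typically hook
 * and later unhook a CIU interrupt.  The IRQ constant, handler and
 * softc names below are hypothetical.
 */
#if 0
	sc->sc_ih = octeon_intr_establish(CIU_INT_EXAMPLE, IPL_VM,
	    example_intr, sc);
	if (sc->sc_ih == NULL)
		aprint_error_dev(self, "unable to establish interrupt\n");

	/* and on detach: */
	octeon_intr_disestablish(sc->sc_ih);
#endif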

void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);
	struct cpu_softc * const cpu0 = &octeon_cpu0_softc;
#ifdef MULTIPROCESSOR
	struct cpu_softc * const cpu1 = &octeon_cpu1_softc;
#endif

	switch (ipl) {
	case IPL_VM:
		if (bank == 0) {
			cpu0->cpu_int0_enable0 &= ~irq_mask;
			mips3_sd(cpu0->cpu_int0_en0, cpu0->cpu_int0_enable0);
		} else {
			cpu0->cpu_int0_enable1 &= ~irq_mask;
			mips3_sd(cpu0->cpu_int0_en1, cpu0->cpu_int0_enable1);
		}
		break;

	case IPL_SCHED:
		if (bank == 0) {
			cpu0->cpu_int1_enable0 &= ~irq_mask;
			mips3_sd(cpu0->cpu_int1_en0, cpu0->cpu_int1_enable0);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int1_enable0 = cpu0->cpu_int1_enable0;
			mips3_sd(cpu1->cpu_int1_en0, cpu1->cpu_int1_enable0);
#endif
		} else {
			cpu0->cpu_int1_enable1 &= ~irq_mask;
			mips3_sd(cpu0->cpu_int1_en1, cpu0->cpu_int1_enable1);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int1_enable1 = cpu0->cpu_int1_enable1;
			mips3_sd(cpu1->cpu_int1_en1, cpu1->cpu_int1_enable1);
#endif
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		if (bank == 0) {
			cpu0->cpu_int2_enable0 &= ~irq_mask;
			mips3_sd(cpu0->cpu_int2_en0, cpu0->cpu_int2_enable0);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int2_enable0 = cpu0->cpu_int2_enable0;
			mips3_sd(cpu1->cpu_int2_en0, cpu1->cpu_int2_enable0);
#endif
		} else {
			cpu0->cpu_int2_enable1 &= ~irq_mask;
			mips3_sd(cpu0->cpu_int2_en1, cpu0->cpu_int2_enable1);
#ifdef MULTIPROCESSOR
			cpu1->cpu_int2_enable1 = cpu0->cpu_int2_enable1;
			mips3_sd(cpu1->cpu_int2_en1, cpu1->cpu_int2_enable1);
#endif
		}
		break;
	}

	/*
	 * Now remove it since we shouldn't get interrupts for it.
	 */
	octciu_intrs[irq] = NULL;

	mutex_exit(&octeon_intr_lock);

	kmem_free(ih, sizeof(*ih));
}

void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	int bank;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend[2] = { 0, 0 };

	const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);

	if (ipending & MIPS_INT_MASK_2) {
		hwpend[0] = mips3_ld(cpu->cpu_int2_sum0)
		    & cpu->cpu_int2_enable0;
		hwpend[1] = sum1 & cpu->cpu_int2_enable1;
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend[0] = mips3_ld(cpu->cpu_int1_sum0)
		    & cpu->cpu_int1_enable0;
		hwpend[1] = sum1 & cpu->cpu_int1_enable1;
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend[0] = mips3_ld(cpu->cpu_int0_sum0)
		    & cpu->cpu_int0_enable0;
		hwpend[1] = sum1 & cpu->cpu_int0_enable1;
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	for (bank = 0; bank <= 1; bank++) {
		while (hwpend[bank] != 0) {
			const int bit = ffs64(hwpend[bank]) - 1;
			const int irq = (bank * 64) + bit;
			hwpend[bank] &= ~__BIT(bit);

			struct octeon_intrhand * const ih = octciu_intrs[irq];
			cpu->cpu_intr_evs[irq].ev_count++;
			if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
				if (ipl == IPL_VM) {
					KERNEL_LOCK(1, NULL);
#endif
					(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
					KERNEL_UNLOCK_ONE(NULL);
				} else {
					(*ih->ih_func)(ih->ih_arg);
				}
#endif
				KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
			}
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
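
/*
 * Worked example for the decode above: if bit 6 of the bank-1
 * (CIU_INT_SUM1) word is pending, it decodes to irq = 1 * 64 + 6 = 70,
 * which is the index used for both octciu_intrs[] and cpu_intr_evs[]
 * (assuming NIRQS covers both 64-bit banks).
 */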

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint32_t ipi_mask = (uintptr_t) arg;

	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;

	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	ipi_mask |= (ipi_mask >> 16);
	ipi_mask &= __BITS(15,0);

	KASSERT(ipi_mask < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((ci->ci_request_ipis & ipi_mask) == 0)
		return 0;

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, ipi_mask);

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t ipi_mask = __BIT(req);

	atomic_or_64(&ci->ci_request_ipis, ipi_mask);
	if (req == IPI_SUSPEND || req == IPI_WDOG) {
		ipi_mask <<= 16;
	}

	mips3_sd(cpu->cpu_mbox_set, ipi_mask);
	return 0;
}
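
/*
 * Illustration only (not compiled): the mailbox bit chosen above for a
 * given request.  Requests normally travel in the low 16 mailbox bits
 * (handled at IPL_SCHED); IPI_SUSPEND and IPI_WDOG are shifted into the
 * high 16 bits so they are handled at IPL_HIGH (see ipi_intrhands[]
 * above).  The helper name is hypothetical.
 */
#if 0
static inline uint64_t
octeon_example_mbox_bit(int req)
{
	uint64_t bit = __BIT(req);

	if (req == IPI_SUSPEND || req == IPI_WDOG)
		bit <<= 16;
	return bit;
}
#endif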
#endif	/* MULTIPROCESSOR */