rmixl_intr.c revision 1.15
      1  1.15     skrll /*	$NetBSD: rmixl_intr.c,v 1.15 2022/09/29 07:00:47 skrll Exp $	*/
      2   1.2      matt 
      3   1.2      matt /*-
      4   1.2      matt  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
      5   1.2      matt  * All rights reserved.
      6   1.2      matt  *
      7   1.2      matt  * Redistribution and use in source and binary forms, with or
      8   1.2      matt  * without modification, are permitted provided that the following
      9   1.2      matt  * conditions are met:
     10   1.2      matt  * 1. Redistributions of source code must retain the above copyright
     11   1.2      matt  *    notice, this list of conditions and the following disclaimer.
     12   1.2      matt  * 2. Redistributions in binary form must reproduce the above
     13   1.2      matt  *    copyright notice, this list of conditions and the following
     14   1.2      matt  *    disclaimer in the documentation and/or other materials provided
     15   1.2      matt  *    with the distribution.
     16   1.2      matt  * 3. The names of the authors may not be used to endorse or promote
     17   1.2      matt  *    products derived from this software without specific prior
     18   1.2      matt  *    written permission.
     19   1.2      matt  *
     20   1.2      matt  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
     21   1.2      matt  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     22   1.2      matt  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
     23   1.2      matt  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
     24   1.2      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     25   1.2      matt  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     26   1.2      matt  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
     27   1.2      matt  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     28   1.2      matt  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
     29   1.2      matt  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     30   1.2      matt  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
     31   1.2      matt  * OF SUCH DAMAGE.
     32   1.2      matt  */
     33   1.2      matt /*-
     34   1.2      matt  * Copyright (c) 2001 The NetBSD Foundation, Inc.
     35   1.2      matt  * All rights reserved.
     36   1.2      matt  *
     37   1.2      matt  * This code is derived from software contributed to The NetBSD Foundation
     38   1.2      matt  * by Jason R. Thorpe.
     39   1.2      matt  *
     40   1.2      matt  * Redistribution and use in source and binary forms, with or without
     41   1.2      matt  * modification, are permitted provided that the following conditions
     42   1.2      matt  * are met:
     43   1.2      matt  * 1. Redistributions of source code must retain the above copyright
     44   1.2      matt  *    notice, this list of conditions and the following disclaimer.
     45   1.2      matt  * 2. Redistributions in binary form must reproduce the above copyright
     46   1.2      matt  *    notice, this list of conditions and the following disclaimer in the
     47   1.2      matt  *    documentation and/or other materials provided with the distribution.
     48   1.2      matt  *
     49   1.2      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50   1.2      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51   1.2      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52   1.2      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53   1.2      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54   1.2      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55   1.2      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56   1.2      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57   1.2      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58   1.2      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59   1.2      matt  * POSSIBILITY OF SUCH DAMAGE.
     60   1.2      matt  */
     61   1.2      matt 
     62   1.2      matt /*
     63   1.2      matt  * Platform-specific interrupt support for the RMI XLP, XLR, XLS
     64   1.2      matt  */
     65   1.2      matt 
     66   1.2      matt #include <sys/cdefs.h>
     67  1.15     skrll __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.15 2022/09/29 07:00:47 skrll Exp $");
     68   1.2      matt 
     69   1.2      matt #include "opt_ddb.h"
     70   1.3      matt #include "opt_multiprocessor.h"
     71   1.3      matt #define	__INTR_PRIVATE
     72   1.2      matt 
     73   1.2      matt #include <sys/param.h>
     74   1.7      matt #include <sys/atomic.h>
     75   1.7      matt #include <sys/bus.h>
     76   1.7      matt #include <sys/cpu.h>
     77   1.2      matt #include <sys/device.h>
     78   1.7      matt #include <sys/intr.h>
     79   1.2      matt #include <sys/kernel.h>
     80   1.3      matt #include <sys/mutex.h>
     81   1.7      matt #include <sys/systm.h>
     82   1.2      matt 
     83   1.2      matt #include <mips/locore.h>
     84   1.2      matt 
     85   1.2      matt #include <mips/rmi/rmixlreg.h>
     86   1.2      matt #include <mips/rmi/rmixlvar.h>
     87   1.2      matt 
     88   1.3      matt #include <mips/rmi/rmixl_cpuvar.h>
     89   1.3      matt #include <mips/rmi/rmixl_intr.h>
     90   1.3      matt 
     91   1.2      matt #include <dev/pci/pcireg.h>
     92   1.2      matt #include <dev/pci/pcivar.h>
     93   1.2      matt 
     94   1.3      matt //#define IOINTR_DEBUG	1
     95   1.2      matt #ifdef IOINTR_DEBUG
     96   1.2      matt int iointr_debug = IOINTR_DEBUG;
     97   1.2      matt # define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
     98   1.2      matt #else
     99   1.2      matt # define DPRINTF(x)
    100   1.2      matt #endif
    101   1.2      matt 
    102   1.2      matt #define RMIXL_PICREG_READ(off) \
    103   1.2      matt 	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
    104   1.2      matt #define RMIXL_PICREG_WRITE(off, val) \
    105   1.2      matt 	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))
    106   1.2      matt 
    107   1.2      matt /*
    108   1.3      matt  * do not clear these when acking EIRR
    109   1.3      matt  * (otherwise they get lost)
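                          * (the >> 8 maps the CP0 Cause/Status interrupt bits down
                          *  to their EIRR bit positions)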
    110   1.2      matt  */
    111   1.3      matt #define RMIXL_EIRR_PRESERVE_MASK	\
    112   1.3      matt 		((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
    113   1.2      matt 
    114   1.2      matt /*
     115   1.3      matt  * IRT assignments depend on the RMI chip family
     116   1.3      matt  * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx);
     117   1.3      matt  * use the right display-string table for the CPU that's running.
    118   1.3      matt  */
    119   1.2      matt 
    120   1.2      matt /*
    121   1.3      matt  * rmixl_irtnames_xlrxxx
    122   1.3      matt  * - use for XLRxxx
    123   1.2      matt  */
    124   1.3      matt static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
    125   1.3      matt 	"pic int 0 (watchdog)",		/*  0 */
    126   1.3      matt 	"pic int 1 (timer0)",		/*  1 */
    127   1.3      matt 	"pic int 2 (timer1)",		/*  2 */
    128   1.3      matt 	"pic int 3 (timer2)",		/*  3 */
    129   1.3      matt 	"pic int 4 (timer3)",		/*  4 */
    130   1.3      matt 	"pic int 5 (timer4)",		/*  5 */
    131   1.3      matt 	"pic int 6 (timer5)",		/*  6 */
    132   1.3      matt 	"pic int 7 (timer6)",		/*  7 */
    133   1.3      matt 	"pic int 8 (timer7)",		/*  8 */
    134   1.3      matt 	"pic int 9 (uart0)",		/*  9 */
    135   1.3      matt 	"pic int 10 (uart1)",		/* 10 */
    136   1.3      matt 	"pic int 11 (i2c0)",		/* 11 */
    137   1.3      matt 	"pic int 12 (i2c1)",		/* 12 */
    138   1.3      matt 	"pic int 13 (pcmcia)",		/* 13 */
    139   1.3      matt 	"pic int 14 (gpio)",		/* 14 */
    140   1.3      matt 	"pic int 15 (hyper)",		/* 15 */
    141   1.3      matt 	"pic int 16 (pcix)",		/* 16 */
    142   1.3      matt 	"pic int 17 (gmac0)",		/* 17 */
    143   1.3      matt 	"pic int 18 (gmac1)",		/* 18 */
    144   1.3      matt 	"pic int 19 (gmac2)",		/* 19 */
    145   1.3      matt 	"pic int 20 (gmac3)",		/* 20 */
    146   1.3      matt 	"pic int 21 (xgs0)",		/* 21 */
    147   1.3      matt 	"pic int 22 (xgs1)",		/* 22 */
    148   1.3      matt 	"pic int 23 (irq23)",		/* 23 */
    149   1.3      matt 	"pic int 24 (hyper_fatal)",	/* 24 */
    150   1.3      matt 	"pic int 25 (bridge_aerr)",	/* 25 */
    151   1.3      matt 	"pic int 26 (bridge_berr)",	/* 26 */
    152   1.3      matt 	"pic int 27 (bridge_tb)",	/* 27 */
    153   1.3      matt 	"pic int 28 (bridge_nmi)",	/* 28 */
    154   1.3      matt 	"pic int 29 (bridge_sram_derr)",/* 29 */
    155   1.3      matt 	"pic int 30 (gpio_fatal)",	/* 30 */
    156   1.3      matt 	"pic int 31 (reserved)",	/* 31 */
    157   1.2      matt };
    158   1.2      matt 
    159   1.2      matt /*
    160   1.3      matt  * rmixl_irtnames_xls2xx
    161   1.3      matt  * - use for XLS2xx
    162   1.2      matt  */
    163   1.3      matt static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
    164   1.3      matt 	"pic int 0 (watchdog)",		/*  0 */
    165   1.3      matt 	"pic int 1 (timer0)",		/*  1 */
    166   1.3      matt 	"pic int 2 (timer1)",		/*  2 */
    167   1.3      matt 	"pic int 3 (timer2)",		/*  3 */
    168   1.3      matt 	"pic int 4 (timer3)",		/*  4 */
    169   1.3      matt 	"pic int 5 (timer4)",		/*  5 */
    170   1.3      matt 	"pic int 6 (timer5)",		/*  6 */
    171   1.3      matt 	"pic int 7 (timer6)",		/*  7 */
    172   1.3      matt 	"pic int 8 (timer7)",		/*  8 */
    173   1.3      matt 	"pic int 9 (uart0)",		/*  9 */
    174   1.3      matt 	"pic int 10 (uart1)",		/* 10 */
    175   1.3      matt 	"pic int 11 (i2c0)",		/* 11 */
    176   1.3      matt 	"pic int 12 (i2c1)",		/* 12 */
    177   1.3      matt 	"pic int 13 (pcmcia)",		/* 13 */
    178   1.3      matt 	"pic int 14 (gpio_a)",		/* 14 */
    179   1.3      matt 	"pic int 15 (irq15)",		/* 15 */
    180   1.3      matt 	"pic int 16 (bridge_tb)",	/* 16 */
    181   1.3      matt 	"pic int 17 (gmac0)",		/* 17 */
    182   1.3      matt 	"pic int 18 (gmac1)",		/* 18 */
    183   1.3      matt 	"pic int 19 (gmac2)",		/* 19 */
    184   1.3      matt 	"pic int 20 (gmac3)",		/* 20 */
    185   1.3      matt 	"pic int 21 (irq21)",		/* 21 */
    186   1.3      matt 	"pic int 22 (irq22)",		/* 22 */
    187   1.3      matt 	"pic int 23 (pcie_link2)",	/* 23 */
    188   1.3      matt 	"pic int 24 (pcie_link3)",	/* 24 */
    189   1.3      matt 	"pic int 25 (bridge_err)",	/* 25 */
    190   1.3      matt 	"pic int 26 (pcie_link0)",	/* 26 */
    191   1.3      matt 	"pic int 27 (pcie_link1)",	/* 27 */
    192   1.3      matt 	"pic int 28 (irq28)",		/* 28 */
    193   1.3      matt 	"pic int 29 (pcie_err)",	/* 29 */
    194   1.3      matt 	"pic int 30 (gpio_b)",		/* 30 */
    195   1.3      matt 	"pic int 31 (usb)",		/* 31 */
    196   1.2      matt };
    197   1.2      matt 
    198   1.2      matt /*
    199   1.3      matt  * rmixl_irtnames_xls1xx
    200   1.3      matt  * - use for XLS1xx, XLS4xx-Lite
    201   1.2      matt  */
    202   1.3      matt static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
    203   1.3      matt 	"pic int 0 (watchdog)",		/*  0 */
    204   1.3      matt 	"pic int 1 (timer0)",		/*  1 */
    205   1.3      matt 	"pic int 2 (timer1)",		/*  2 */
    206   1.3      matt 	"pic int 3 (timer2)",		/*  3 */
    207   1.3      matt 	"pic int 4 (timer3)",		/*  4 */
    208   1.3      matt 	"pic int 5 (timer4)",		/*  5 */
    209   1.3      matt 	"pic int 6 (timer5)",		/*  6 */
    210   1.3      matt 	"pic int 7 (timer6)",		/*  7 */
    211   1.3      matt 	"pic int 8 (timer7)",		/*  8 */
    212   1.3      matt 	"pic int 9 (uart0)",		/*  9 */
    213   1.3      matt 	"pic int 10 (uart1)",		/* 10 */
    214   1.3      matt 	"pic int 11 (i2c0)",		/* 11 */
    215   1.3      matt 	"pic int 12 (i2c1)",		/* 12 */
    216   1.3      matt 	"pic int 13 (pcmcia)",		/* 13 */
    217   1.3      matt 	"pic int 14 (gpio_a)",		/* 14 */
    218   1.3      matt 	"pic int 15 (irq15)",		/* 15 */
    219   1.3      matt 	"pic int 16 (bridge_tb)",	/* 16 */
    220   1.3      matt 	"pic int 17 (gmac0)",		/* 17 */
    221   1.3      matt 	"pic int 18 (gmac1)",		/* 18 */
    222   1.3      matt 	"pic int 19 (gmac2)",		/* 19 */
    223   1.3      matt 	"pic int 20 (gmac3)",		/* 20 */
    224   1.3      matt 	"pic int 21 (irq21)",		/* 21 */
    225   1.3      matt 	"pic int 22 (irq22)",		/* 22 */
    226   1.3      matt 	"pic int 23 (irq23)",		/* 23 */
    227   1.3      matt 	"pic int 24 (irq24)",		/* 24 */
    228   1.3      matt 	"pic int 25 (bridge_err)",	/* 25 */
    229   1.3      matt 	"pic int 26 (pcie_link0)",	/* 26 */
    230   1.3      matt 	"pic int 27 (pcie_link1)",	/* 27 */
    231   1.3      matt 	"pic int 28 (irq28)",		/* 28 */
    232   1.3      matt 	"pic int 29 (pcie_err)",	/* 29 */
    233   1.3      matt 	"pic int 30 (gpio_b)",		/* 30 */
    234   1.3      matt 	"pic int 31 (usb)",		/* 31 */
    235   1.2      matt };
    236   1.2      matt 
    237   1.2      matt /*
    238   1.3      matt  * rmixl_irtnames_xls4xx:
    239   1.3      matt  * - use for XLS4xx, XLS6xx
    240   1.2      matt  */
    241   1.3      matt static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
    242   1.3      matt 	"pic int 0 (watchdog)",		/*  0 */
    243   1.3      matt 	"pic int 1 (timer0)",		/*  1 */
    244   1.3      matt 	"pic int 2 (timer1)",		/*  2 */
    245   1.3      matt 	"pic int 3 (timer2)",		/*  3 */
    246   1.3      matt 	"pic int 4 (timer3)",		/*  4 */
    247   1.3      matt 	"pic int 5 (timer4)",		/*  5 */
    248   1.3      matt 	"pic int 6 (timer5)",		/*  6 */
    249   1.3      matt 	"pic int 7 (timer6)",		/*  7 */
    250   1.3      matt 	"pic int 8 (timer7)",		/*  8 */
    251   1.3      matt 	"pic int 9 (uart0)",		/*  9 */
    252   1.3      matt 	"pic int 10 (uart1)",		/* 10 */
    253   1.3      matt 	"pic int 11 (i2c0)",		/* 11 */
    254   1.3      matt 	"pic int 12 (i2c1)",		/* 12 */
    255   1.3      matt 	"pic int 13 (pcmcia)",		/* 13 */
    256   1.3      matt 	"pic int 14 (gpio_a)",		/* 14 */
    257   1.3      matt 	"pic int 15 (irq15)",		/* 15 */
    258   1.3      matt 	"pic int 16 (bridge_tb)",	/* 16 */
    259   1.3      matt 	"pic int 17 (gmac0)",		/* 17 */
    260   1.3      matt 	"pic int 18 (gmac1)",		/* 18 */
    261   1.3      matt 	"pic int 19 (gmac2)",		/* 19 */
    262   1.3      matt 	"pic int 20 (gmac3)",		/* 20 */
    263   1.3      matt 	"pic int 21 (irq21)",		/* 21 */
    264   1.3      matt 	"pic int 22 (irq22)",		/* 22 */
    265   1.3      matt 	"pic int 23 (irq23)",		/* 23 */
    266   1.3      matt 	"pic int 24 (irq24)",		/* 24 */
    267   1.3      matt 	"pic int 25 (bridge_err)",	/* 25 */
    268   1.3      matt 	"pic int 26 (pcie_link0)",	/* 26 */
    269   1.3      matt 	"pic int 27 (pcie_link1)",	/* 27 */
    270   1.3      matt 	"pic int 28 (pcie_link2)",	/* 28 */
    271   1.3      matt 	"pic int 29 (pcie_link3)",	/* 29 */
    272   1.3      matt 	"pic int 30 (gpio_b)",		/* 30 */
    273   1.3      matt 	"pic int 31 (usb)",		/* 31 */
    274   1.3      matt };
    275   1.2      matt 
    276   1.2      matt /*
    277   1.3      matt  * rmixl_vecnames_common:
    278   1.3      matt  * - use for unknown cpu implementation
    279   1.3      matt  * - covers all vectors, not just IRT intrs
    280   1.2      matt  */
    281   1.3      matt static const char * const rmixl_vecnames_common[NINTRVECS] = {
    282   1.3      matt 	"vec 0",		/*  0 */
    283   1.3      matt 	"vec 1",		/*  1 */
    284   1.3      matt 	"vec 2",		/*  2 */
    285   1.3      matt 	"vec 3",		/*  3 */
    286   1.3      matt 	"vec 4",		/*  4 */
    287   1.3      matt 	"vec 5",		/*  5 */
    288   1.3      matt 	"vec 6",		/*  6 */
    289   1.3      matt 	"vec 7",		/*  7 */
    290   1.4     cliff 	"vec 8 (ipi 0)",	/*  8 */
    291   1.4     cliff 	"vec 9 (ipi 1)",	/*  9 */
    292   1.4     cliff 	"vec 10 (ipi 2)",	/* 10 */
    293   1.4     cliff 	"vec 11 (ipi 3)",	/* 11 */
    294   1.4     cliff 	"vec 12 (ipi 4)",	/* 12 */
    295   1.4     cliff 	"vec 13 (ipi 5)",	/* 13 */
    296   1.4     cliff 	"vec 14 (ipi 6)",	/* 14 */
    297   1.4     cliff 	"vec 15 (fmn)",		/* 15 */
    298   1.4     cliff 	"vec 16",		/* 16 */
    299   1.3      matt 	"vec 17",		/* 17 */
    300   1.3      matt 	"vec 18",		/* 18 */
    301   1.3      matt 	"vec 19",		/* 19 */
    302   1.3      matt 	"vec 20",		/* 20 */
    303   1.3      matt 	"vec 21",		/* 21 */
    304   1.3      matt 	"vec 22",		/* 22 */
    305   1.3      matt 	"vec 23",		/* 23 */
    306   1.3      matt 	"vec 24",		/* 24 */
    307   1.3      matt 	"vec 25",		/* 25 */
    308   1.3      matt 	"vec 26",		/* 26 */
    309   1.3      matt 	"vec 27",		/* 27 */
    310   1.3      matt 	"vec 28",		/* 28 */
    311   1.3      matt 	"vec 29",		/* 29 */
    312   1.3      matt 	"vec 30",		/* 30 */
    313   1.3      matt 	"vec 31",		/* 31 */
    314   1.3      matt 	"vec 32",		/* 32 */
    315   1.3      matt 	"vec 33",		/* 33 */
    316   1.3      matt 	"vec 34",		/* 34 */
    317   1.3      matt 	"vec 35",		/* 35 */
    318   1.3      matt 	"vec 36",		/* 36 */
    319   1.3      matt 	"vec 37",		/* 37 */
    320   1.3      matt 	"vec 38",		/* 38 */
    321   1.3      matt 	"vec 39",		/* 39 */
    322   1.3      matt 	"vec 40",		/* 40 */
    323   1.3      matt 	"vec 41",		/* 41 */
    324   1.3      matt 	"vec 42",		/* 42 */
    325   1.3      matt 	"vec 43",		/* 43 */
    326   1.3      matt 	"vec 44",		/* 44 */
    327   1.3      matt 	"vec 45",		/* 45 */
    328   1.3      matt 	"vec 46",		/* 46 */
    329   1.3      matt 	"vec 47",		/* 47 */
    330   1.3      matt 	"vec 48",		/* 48 */
    331   1.3      matt 	"vec 49",		/* 49 */
    332   1.3      matt 	"vec 50",		/* 50 */
    333   1.3      matt 	"vec 51",		/* 51 */
    334   1.3      matt 	"vec 52",		/* 52 */
    335   1.3      matt 	"vec 53",		/* 53 */
    336   1.3      matt 	"vec 54",		/* 54 */
    337   1.3      matt 	"vec 55",		/* 55 */
    338   1.3      matt 	"vec 56",		/* 56 */
    339   1.3      matt 	"vec 57",		/* 57 */
    340   1.3      matt 	"vec 58",		/* 58 */
    341   1.3      matt 	"vec 59",		/* 59 */
    342   1.3      matt 	"vec 60",		/* 60 */
    343   1.3      matt 	"vec 61",		/* 61 */
     344   1.3      matt 	"vec 62",		/* 62 */
    345   1.3      matt 	"vec 63",		/* 63 */
    346   1.2      matt };
    347   1.2      matt 
    348   1.2      matt /*
    349   1.3      matt  * mask of CPUs attached
     350   1.3      matt  * once all CPUs are attached, this var is read-only and hence MP-safe
    351   1.2      matt  */
    352   1.3      matt static uint32_t cpu_present_mask;
    353   1.3      matt 
    354   1.3      matt kmutex_t rmixl_ipi_lock __cacheline_aligned;
    355   1.3      matt 				/* covers RMIXL_PIC_IPIBASE */
    356   1.3      matt kmutex_t rmixl_intr_lock __cacheline_aligned;
    357   1.3      matt 				/* covers rest of PIC, and rmixl_intrhand[] */
    358   1.3      matt rmixl_intrhand_t rmixl_intrhand[NINTRVECS];
    359   1.2      matt 
    360   1.2      matt #ifdef DIAGNOSTIC
    361   1.3      matt static int rmixl_pic_init_done;
    362   1.2      matt #endif
    363   1.2      matt 
    364   1.2      matt 
    365   1.3      matt static const char *rmixl_intr_string_xlr(int);
    366   1.3      matt static const char *rmixl_intr_string_xls(int);
    367   1.3      matt static uint32_t rmixl_irt_thread_mask(int);
    368   1.3      matt static void rmixl_irt_init(int);
    369   1.3      matt static void rmixl_irt_disestablish(int);
    370   1.3      matt static void rmixl_irt_establish(int, int, int,
    371   1.3      matt 		rmixl_intr_trigger_t, rmixl_intr_polarity_t);
    372   1.3      matt 
    373   1.3      matt #ifdef MULTIPROCESSOR
    374   1.3      matt static int rmixl_send_ipi(struct cpu_info *, int);
    375   1.3      matt static int rmixl_ipi_intr(void *);
    376   1.3      matt #endif
    377   1.3      matt 
    378   1.3      matt #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
    379   1.3      matt int  rmixl_intrhand_print_subr(int);
    380   1.3      matt int  rmixl_intrhand_print(void);
    381   1.3      matt int  rmixl_irt_print(void);
    382   1.3      matt void rmixl_ipl_eimr_map_print(void);
    383   1.3      matt #endif
    384   1.2      matt 
    385   1.2      matt 
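                         /*
                          * dclz: count leading zeros of a 64-bit value using the MIPS64
                          * DCLZ instruction; used by evbmips_iointr() to find the
                          * highest-numbered pending bit in the EIRR.
                          */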
    386   1.3      matt static inline u_int
    387   1.3      matt dclz(uint64_t val)
    388   1.2      matt {
    389   1.3      matt 	int nlz;
    390   1.2      matt 
    391   1.3      matt 	asm volatile("dclz %0, %1;"
    392   1.3      matt 		: "=r"(nlz) : "r"(val));
    393  1.14     skrll 
    394   1.3      matt 	return nlz;
    395   1.2      matt }
    396   1.2      matt 
    397   1.2      matt void
    398   1.2      matt evbmips_intr_init(void)
    399   1.2      matt {
    400   1.2      matt 	uint32_t r;
    401   1.2      matt 
    402   1.3      matt 	KASSERT(cpu_rmixlr(mips_options.mips_cpu)
    403   1.3      matt 	     || cpu_rmixls(mips_options.mips_cpu));
    404   1.3      matt 
    405   1.2      matt 
    406   1.2      matt #ifdef DIAGNOSTIC
    407   1.3      matt 	if (rmixl_pic_init_done != 0)
    408   1.3      matt 		panic("%s: rmixl_pic_init_done %d",
    409   1.3      matt 			__func__, rmixl_pic_init_done);
    410   1.2      matt #endif
    411   1.2      matt 
    412   1.3      matt 	mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
    413   1.3      matt 	mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
    414   1.3      matt 
    415   1.3      matt 	mutex_enter(&rmixl_intr_lock);
    416   1.2      matt 
    417   1.3      matt 	/*
    418   1.3      matt 	 * initialize (zero) all IRT Entries in the PIC
    419   1.3      matt 	 */
    420   1.3      matt 	for (u_int i = 0; i < NIRTS; i++) {
    421   1.3      matt 		rmixl_irt_init(i);
    422   1.2      matt 	}
    423   1.2      matt 
    424   1.2      matt 	/*
    425   1.2      matt 	 * disable watchdog NMI, timers
    426   1.2      matt 	 *
    427   1.2      matt 	 * XXX
    428   1.2      matt 	 *  WATCHDOG_ENB is preserved because clearing it causes
    429   1.2      matt 	 *  hang on the XLS616 (but not on the XLS408)
    430   1.2      matt 	 */
    431   1.2      matt 	r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
    432   1.2      matt 	r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
    433   1.2      matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);
    434   1.2      matt 
    435   1.3      matt #ifdef DIAGNOSTIC
    436   1.3      matt 	rmixl_pic_init_done = 1;
    437   1.3      matt #endif
    438   1.3      matt 	mutex_exit(&rmixl_intr_lock);
    439   1.3      matt 
    440   1.3      matt }
    441   1.3      matt 
    442   1.3      matt /*
     443   1.3      matt  * establish the vector for the mips3 count/compare clock interrupt;
     444   1.3      matt  * this ensures the vector gets enabled in the EIRR,
     445   1.3      matt  * even though cpu_intr() handles the interrupt itself.
     446   1.3      matt  * note: the 'mpsafe' arg here is a placeholder only.
    447   1.3      matt  */
    448   1.3      matt void
    449   1.3      matt rmixl_intr_init_clk(void)
    450   1.3      matt {
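                         	/* hard interrupt 5 (count/compare) corresponds to EIRR vector 7 */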
    451   1.3      matt 	const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;
    452   1.3      matt 
    453   1.3      matt 	mutex_enter(&rmixl_intr_lock);
    454   1.3      matt 
    455   1.3      matt 	void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
    456   1.3      matt 	if (ih == NULL)
    457   1.3      matt 		panic("%s: establish vec %d failed", __func__, vec);
    458   1.3      matt 
    459   1.3      matt 	mutex_exit(&rmixl_intr_lock);
    460   1.3      matt }
    461   1.3      matt 
    462   1.3      matt #ifdef MULTIPROCESSOR
    463   1.3      matt /*
    464   1.3      matt  * establish IPI interrupt and send function
    465   1.3      matt  */
    466   1.3      matt void
    467   1.3      matt rmixl_intr_init_ipi(void)
    468   1.3      matt {
    469   1.3      matt 	mutex_enter(&rmixl_intr_lock);
    470   1.3      matt 
    471   1.3      matt 	for (u_int ipi = 0; ipi < NIPIS; ipi++) {
    472   1.3      matt 		const u_int vec = RMIXL_INTRVEC_IPI + ipi;
    473   1.3      matt 		void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
    474   1.3      matt 			rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
    475   1.3      matt 		if (ih == NULL)
    476   1.3      matt 			panic("%s: establish ipi %d at vec %d failed",
    477   1.3      matt 				__func__, ipi, vec);
    478   1.3      matt 	}
    479   1.5      matt 
    480   1.3      matt 	mips_locoresw.lsw_send_ipi = rmixl_send_ipi;
    481   1.5      matt 
    482   1.3      matt 	mutex_exit(&rmixl_intr_lock);
    483   1.3      matt }
    484   1.3      matt #endif 	/* MULTIPROCESSOR */
    485   1.3      matt 
    486   1.3      matt /*
     487   1.3      matt  * initialize per-cpu interrupt state in the softc and
    488   1.3      matt  * accumulate per-cpu bits in 'cpu_present_mask'
    489   1.3      matt  */
    490   1.3      matt void
    491   1.3      matt rmixl_intr_init_cpu(struct cpu_info *ci)
    492   1.3      matt {
    493   1.3      matt 	struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;
    494   1.3      matt 
    495   1.3      matt 	KASSERT(sc != NULL);
    496   1.2      matt 
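                         	/* attach a per-cpu event counter for every interrupt vector */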
    497   1.3      matt 	for (int vec=0; vec < NINTRVECS; vec++)
    498   1.3      matt 		evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
    499   1.3      matt 			EVCNT_TYPE_INTR, NULL,
    500   1.3      matt 			device_xname(sc->sc_dev),
    501   1.3      matt 			rmixl_intr_string(vec));
    502   1.2      matt 
    503   1.3      matt 	KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
    504   1.3      matt 	atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
    505   1.2      matt }
    506   1.2      matt 
    507   1.3      matt /*
    508   1.3      matt  * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
    509   1.3      matt  */
    510   1.2      matt const char *
    511   1.3      matt rmixl_intr_string(int vec)
    512   1.3      matt {
    513   1.3      matt 	int irt;
    514   1.3      matt 
    515   1.3      matt 	if (vec < 0 || vec >= NINTRVECS)
    516   1.3      matt 		panic("%s: vec index %d out of range, max %d",
    517   1.3      matt 			__func__, vec, NINTRVECS - 1);
    518   1.3      matt 
    519   1.3      matt 	if (! RMIXL_VECTOR_IS_IRT(vec))
    520   1.3      matt 		return rmixl_vecnames_common[vec];
    521   1.3      matt 
    522   1.3      matt 	irt = RMIXL_VECTOR_IRT(vec);
    523   1.3      matt 	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
    524   1.3      matt 	case CIDFL_RMI_TYPE_XLR:
    525   1.3      matt 		return rmixl_intr_string_xlr(irt);
    526   1.3      matt 	case CIDFL_RMI_TYPE_XLS:
    527   1.3      matt 		return rmixl_intr_string_xls(irt);
    528   1.3      matt 	case CIDFL_RMI_TYPE_XLP:
    529   1.3      matt 		panic("%s: RMI XLP not yet supported", __func__);
    530   1.3      matt 	}
    531   1.3      matt 
    532   1.3      matt 	return "undefined";	/* appease gcc */
    533   1.3      matt }
    534   1.3      matt 
    535   1.3      matt static const char *
    536   1.3      matt rmixl_intr_string_xlr(int irt)
    537   1.3      matt {
    538   1.3      matt 	return rmixl_irtnames_xlrxxx[irt];
    539   1.3      matt }
    540   1.3      matt 
    541   1.3      matt static const char *
    542   1.3      matt rmixl_intr_string_xls(int irt)
    543   1.2      matt {
    544   1.2      matt 	const char *name;
    545   1.2      matt 
    546   1.3      matt 	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
    547   1.2      matt 	case MIPS_XLS104:
    548   1.2      matt 	case MIPS_XLS108:
    549   1.3      matt 	case MIPS_XLS404LITE:
    550   1.3      matt 	case MIPS_XLS408LITE:
    551   1.3      matt 		name = rmixl_irtnames_xls1xx[irt];
    552   1.3      matt 		break;
    553   1.2      matt 	case MIPS_XLS204:
    554   1.2      matt 	case MIPS_XLS208:
    555   1.3      matt 		name = rmixl_irtnames_xls2xx[irt];
    556   1.3      matt 		break;
    557   1.3      matt 	case MIPS_XLS404:
    558   1.3      matt 	case MIPS_XLS408:
    559   1.3      matt 	case MIPS_XLS416:
    560   1.3      matt 	case MIPS_XLS608:
    561   1.3      matt 	case MIPS_XLS616:
    562   1.3      matt 		name = rmixl_irtnames_xls4xx[irt];
    563   1.3      matt 		break;
    564   1.3      matt 	default:
    565   1.3      matt 		name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
    566   1.3      matt 		break;
    567   1.3      matt 	}
    568   1.3      matt 
    569   1.3      matt 	return name;
    570   1.3      matt }
    571   1.3      matt 
    572   1.3      matt /*
    573   1.3      matt  * rmixl_irt_thread_mask
    574   1.3      matt  *
     575   1.3      matt  *	given a bitmask of cpus, return an IRT thread mask
    576   1.3      matt  */
    577   1.3      matt static uint32_t
    578   1.3      matt rmixl_irt_thread_mask(int cpumask)
    579   1.3      matt {
    580   1.3      matt 	uint32_t irtc0;
    581   1.3      matt 
    582   1.3      matt #if defined(MULTIPROCESSOR)
    583   1.3      matt #ifndef NOTYET
    584   1.3      matt 	if (cpumask == -1)
    585   1.3      matt 		return 1;	/* XXX TMP FIXME */
    586   1.3      matt #endif
    587   1.3      matt 
    588   1.3      matt 	/*
    589   1.3      matt 	 * discount cpus not present
    590   1.3      matt 	 */
    591   1.3      matt 	cpumask &= cpu_present_mask;
    592  1.14     skrll 
    593   1.3      matt 	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
    594   1.3      matt 	case MIPS_XLS104:
    595   1.3      matt 	case MIPS_XLS204:
    596   1.3      matt 	case MIPS_XLS404:
    597   1.2      matt 	case MIPS_XLS404LITE:
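                         		/*
                          		 * these parts use IRT thread-mask bits 5:4 and 1:0 only;
                          		 * cpumask bits 3:2 are shifted up into bits 5:4 and
                          		 * bits 1:0 pass through unchanged.
                          		 */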
    598   1.3      matt 		irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
    599   1.3      matt 		irtc0 &= (__BITS(5,4) | __BITS(1,0));
    600   1.3      matt 		break;
    601   1.3      matt 	case MIPS_XLS108:
    602   1.3      matt 	case MIPS_XLS208:
    603   1.3      matt 	case MIPS_XLS408:
    604   1.2      matt 	case MIPS_XLS408LITE:
    605   1.3      matt 	case MIPS_XLS608:
    606   1.3      matt 		irtc0 = cpumask & __BITS(7,0);
    607   1.2      matt 		break;
    608   1.3      matt 	case MIPS_XLS416:
    609   1.3      matt 	case MIPS_XLS616:
    610   1.3      matt 		irtc0 = cpumask & __BITS(15,0);
    611   1.2      matt 		break;
    612   1.2      matt 	default:
    613   1.3      matt 		panic("%s: unknown cpu ID %#x\n", __func__,
    614   1.3      matt 			mips_options.mips_cpu_id);
    615   1.2      matt 	}
    616   1.3      matt #else
    617   1.3      matt 	irtc0 = 1;
    618   1.3      matt #endif	/* MULTIPROCESSOR */
    619   1.2      matt 
    620   1.3      matt 	return irtc0;
    621   1.2      matt }
    622   1.2      matt 
    623   1.2      matt /*
    624   1.3      matt  * rmixl_irt_init
    625   1.3      matt  * - initialize IRT Entry for given index
     626   1.2      matt  * - zero both words (entry invalid, all threads masked)
    627   1.2      matt  */
    628   1.2      matt static void
    629   1.3      matt rmixl_irt_init(int irt)
    630   1.2      matt {
    631   1.3      matt 	KASSERT(irt < NIRTS);
    632   1.3      matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0);	/* high word */
    633   1.3      matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0);	/* low  word */
    634   1.2      matt }
    635   1.2      matt 
    636   1.2      matt /*
    637   1.3      matt  * rmixl_irt_disestablish
    638   1.3      matt  * - invalidate IRT Entry for given index
    639   1.2      matt  */
    640   1.2      matt static void
    641   1.3      matt rmixl_irt_disestablish(int irt)
    642   1.2      matt {
    643   1.3      matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    644   1.3      matt 	DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
    645   1.3      matt 	rmixl_irt_init(irt);
    646   1.2      matt }
    647   1.2      matt 
    648   1.2      matt /*
    649   1.3      matt  * rmixl_irt_establish
    650   1.3      matt  * - construct an IRT Entry for irt and write to PIC
    651   1.2      matt  */
    652   1.2      matt static void
    653   1.3      matt rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
    654   1.3      matt 	rmixl_intr_polarity_t polarity)
    655   1.2      matt {
    656   1.2      matt 	uint32_t irtc1;
    657   1.3      matt 	uint32_t irtc0;
    658   1.3      matt 
    659   1.3      matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    660   1.3      matt 
    661   1.3      matt 	if (irt >= NIRTS)
    662   1.3      matt 		panic("%s: bad irt %d\n", __func__, irt);
    663   1.3      matt 
    664   1.3      matt 	if (! RMIXL_VECTOR_IS_IRT(vec))
    665   1.3      matt 		panic("%s: bad vec %d\n", __func__, vec);
    666   1.3      matt 
    667   1.3      matt 	switch (trigger) {
    668   1.3      matt 	case RMIXL_TRIG_EDGE:
    669   1.3      matt 	case RMIXL_TRIG_LEVEL:
    670   1.3      matt 		break;
    671   1.3      matt 	default:
    672   1.3      matt 		panic("%s: bad trigger %d\n", __func__, trigger);
    673   1.3      matt 	}
    674   1.3      matt 
    675   1.3      matt 	switch (polarity) {
    676   1.3      matt 	case RMIXL_POLR_RISING:
    677   1.3      matt 	case RMIXL_POLR_HIGH:
    678   1.3      matt 	case RMIXL_POLR_FALLING:
    679   1.3      matt 	case RMIXL_POLR_LOW:
    680   1.3      matt 		break;
    681   1.3      matt 	default:
    682   1.3      matt 		panic("%s: bad polarity %d\n", __func__, polarity);
    683   1.3      matt 	}
    684   1.3      matt 
    685   1.3      matt 	/*
    686   1.3      matt 	 * XXX IRT entries are not shared
    687   1.3      matt 	 */
    688   1.3      matt 	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
    689   1.3      matt 	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);
    690   1.3      matt 
    691   1.3      matt 	irtc0 = rmixl_irt_thread_mask(cpumask);
    692   1.2      matt 
    693   1.2      matt 	irtc1  = RMIXL_PIC_IRTENTRYC1_VALID;
    694   1.2      matt 	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */
    695   1.3      matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    696   1.2      matt 
    697   1.3      matt 	if (trigger == RMIXL_TRIG_LEVEL)
    698   1.2      matt 		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
    699   1.3      matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    700   1.2      matt 
    701   1.3      matt 	if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
    702   1.2      matt 		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
    703   1.3      matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    704   1.2      matt 
    705   1.3      matt 	irtc1 |= vec;			/* vector in EIRR */
    706   1.3      matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    707   1.2      matt 
    708   1.2      matt 	/*
    709   1.3      matt 	 * write IRT Entry to PIC
    710   1.2      matt 	 */
    711   1.3      matt 	DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n",
    712   1.3      matt 		__func__, vec, vec, irt, irtc0, irtc1));
    713   1.3      matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0);	/* low  word */
    714   1.3      matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1);	/* high word */
    715   1.2      matt }
    716   1.2      matt 
    717   1.2      matt void *
    718   1.3      matt rmixl_vec_establish(int vec, int cpumask, int ipl,
    719   1.3      matt 	int (*func)(void *), void *arg, bool mpsafe)
    720   1.2      matt {
    721   1.3      matt 	rmixl_intrhand_t *ih;
    722   1.3      matt 	uint64_t eimr_bit;
    723   1.2      matt 	int s;
    724   1.2      matt 
    725   1.3      matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    726   1.3      matt 
    727   1.5      matt 	DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n",
    728   1.3      matt 			__func__, vec, cpumask, ipl, func, arg, mpsafe));
    729   1.2      matt #ifdef DIAGNOSTIC
    730   1.3      matt 	if (rmixl_pic_init_done == 0)
    731   1.2      matt 		panic("%s: called before evbmips_intr_init", __func__);
    732   1.2      matt #endif
    733   1.2      matt 
    734   1.2      matt 	/*
    735   1.3      matt 	 * check args
    736   1.2      matt 	 */
    737   1.3      matt 	if (vec < 0 || vec >= NINTRVECS)
    738   1.3      matt 		panic("%s: vec %d out of range, max %d",
    739   1.3      matt 			__func__, vec, NINTRVECS - 1);
    740   1.2      matt 	if (ipl <= 0 || ipl >= _IPL_N)
    741   1.2      matt 		panic("%s: ipl %d out of range, min %d, max %d",
    742   1.2      matt 			__func__, ipl, 1, _IPL_N - 1);
    743   1.2      matt 
    744   1.3      matt 	s = splhigh();
    745   1.3      matt 
    746   1.3      matt 	ih = &rmixl_intrhand[vec];
    747   1.3      matt 	if (ih->ih_func != NULL) {
    748   1.3      matt #ifdef DIAGNOSTIC
    749   1.3      matt 		printf("%s: intrhand[%d] busy\n", __func__, vec);
    750   1.3      matt #endif
    751   1.3      matt 		splx(s);
    752   1.3      matt 		return NULL;
    753   1.2      matt 	}
    754   1.2      matt 
    755   1.3      matt 	ih->ih_arg = arg;
    756   1.3      matt 	ih->ih_mpsafe = mpsafe;
    757   1.3      matt 	ih->ih_vec = vec;
    758   1.3      matt 	ih->ih_ipl = ipl;
    759   1.3      matt 	ih->ih_cpumask = cpumask;
    760   1.3      matt 
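                         	/*
                          	 * mark this vector enabled in the EIMR image of every IPL
                          	 * below ih_ipl; it stays masked at ih_ipl and above.
                          	 */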
    761   1.3      matt 	eimr_bit = (uint64_t)1 << vec;
    762   1.3      matt 	for (int i=ih->ih_ipl; --i >= 0; ) {
    763   1.3      matt 		KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
    764   1.3      matt 		ipl_eimr_map[i] |= eimr_bit;
    765   1.2      matt 	}
    766   1.2      matt 
    767   1.3      matt 	ih->ih_func = func;	/* do this last */
    768   1.3      matt 
    769   1.3      matt 	splx(s);
    770   1.3      matt 
    771   1.3      matt 	return ih;
    772   1.3      matt }
    773   1.2      matt 
    774   1.3      matt /*
    775   1.3      matt  * rmixl_intr_establish
    776   1.3      matt  * - used to establish an IRT-based interrupt only
    777   1.3      matt  */
    778   1.3      matt void *
    779   1.3      matt rmixl_intr_establish(int irt, int cpumask, int ipl,
    780   1.3      matt 	rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
    781   1.3      matt 	int (*func)(void *), void *arg, bool mpsafe)
    782   1.3      matt {
    783   1.3      matt 	rmixl_intrhand_t *ih;
    784   1.3      matt 	int vec;
    785   1.2      matt 
    786   1.2      matt #ifdef DIAGNOSTIC
    787   1.3      matt 	if (rmixl_pic_init_done == 0)
    788   1.3      matt 		panic("%s: called before rmixl_pic_init_done", __func__);
    789   1.2      matt #endif
    790   1.2      matt 
    791   1.2      matt 	/*
    792   1.3      matt 	 * check args
    793   1.2      matt 	 */
    794   1.3      matt 	if (irt < 0 || irt >= NIRTS)
    795   1.3      matt 		panic("%s: irt %d out of range, max %d",
    796   1.3      matt 			__func__, irt, NIRTS - 1);
    797   1.3      matt 	if (ipl <= 0 || ipl >= _IPL_N)
    798   1.3      matt 		panic("%s: ipl %d out of range, min %d, max %d",
    799   1.3      matt 			__func__, ipl, 1, _IPL_N - 1);
    800   1.3      matt 
    801   1.3      matt 	vec = RMIXL_IRT_VECTOR(irt);
    802   1.2      matt 
    803   1.3      matt 	DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));
    804   1.2      matt 
    805   1.3      matt 	mutex_enter(&rmixl_intr_lock);
    806   1.2      matt 
    807   1.2      matt 	/*
    808   1.3      matt 	 * establish vector
    809   1.2      matt 	 */
    810   1.3      matt 	ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);
    811   1.2      matt 
    812   1.2      matt 	/*
    813   1.2      matt 	 * establish IRT Entry
    814   1.2      matt 	 */
    815   1.3      matt 	rmixl_irt_establish(irt, vec, cpumask, trigger, polarity);
    816   1.2      matt 
    817   1.3      matt 	mutex_exit(&rmixl_intr_lock);
    818   1.2      matt 
    819   1.2      matt 	return ih;
    820   1.2      matt }
    821   1.2      matt 
    822   1.2      matt void
    823   1.3      matt rmixl_vec_disestablish(void *cookie)
    824   1.3      matt {
    825   1.3      matt 	rmixl_intrhand_t *ih = cookie;
    826   1.3      matt 	uint64_t eimr_bit;
    827   1.3      matt 
    828   1.3      matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    829   1.3      matt 	KASSERT(ih->ih_vec < NINTRVECS);
    830   1.3      matt 	KASSERT(ih == &rmixl_intrhand[ih->ih_vec]);
    831   1.3      matt 
    832   1.3      matt 	ih->ih_func = NULL;	/* do this first */
    833   1.3      matt 
    834   1.3      matt 	eimr_bit = (uint64_t)1 << ih->ih_vec;
    835   1.3      matt 	for (int i=ih->ih_ipl; --i >= 0; ) {
    836   1.3      matt 		KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
    837   1.3      matt 		ipl_eimr_map[i] ^= eimr_bit;
    838   1.3      matt 	}
    839   1.3      matt }
    840   1.3      matt 
    841   1.3      matt void
    842   1.2      matt rmixl_intr_disestablish(void *cookie)
    843   1.2      matt {
    844   1.3      matt 	rmixl_intrhand_t *ih = cookie;
    845   1.5      matt 	const int vec = ih->ih_vec;
    846   1.3      matt 
    847   1.3      matt 	KASSERT(vec < NINTRVECS);
    848   1.3      matt 	KASSERT(ih == &rmixl_intrhand[vec]);
    849   1.2      matt 
    850   1.3      matt 	mutex_enter(&rmixl_intr_lock);
    851   1.2      matt 
    852   1.2      matt 	/*
    853   1.3      matt 	 * disable/invalidate the IRT Entry if needed
    854   1.2      matt 	 */
    855   1.3      matt 	if (RMIXL_VECTOR_IS_IRT(vec))
     856   1.3      matt 		rmixl_irt_disestablish(RMIXL_VECTOR_IRT(vec));
    857   1.2      matt 
    858   1.2      matt 	/*
     859   1.3      matt 	 * disassociate from the vector and release the handle
    860   1.2      matt 	 */
    861   1.3      matt 	rmixl_vec_disestablish(cookie);
    862   1.3      matt 
    863   1.3      matt 	mutex_exit(&rmixl_intr_lock);
    864   1.3      matt }
    865   1.3      matt 
    866   1.3      matt void
    867  1.12     skrll evbmips_iointr(int ipl, uint32_t pending, struct clockframe *cf)
    868   1.3      matt {
    869   1.3      matt 	struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc;
    870   1.2      matt 
    871   1.5      matt 	DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
    872  1.12     skrll 		__func__, cpu_number(), ipl, cf->pc, pending));
    873   1.3      matt 
    874   1.3      matt 	/*
     875   1.3      matt 	 * the 'pending' arg merely indicates there is something to do;
     876   1.3      matt 	 * the real pending status is obtained from the EIRR
    877   1.2      matt 	 */
    878   1.3      matt 	KASSERT(pending == MIPS_INT_MASK_1);
    879   1.2      matt 
    880   1.3      matt 	for (;;) {
    881   1.3      matt 		rmixl_intrhand_t *ih;
    882   1.3      matt 		uint64_t eirr;
    883   1.3      matt 		uint64_t eimr;
    884   1.3      matt 		uint64_t vecbit;
    885   1.3      matt 		int vec;
    886   1.3      matt 
    887   1.3      matt 		asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
    888   1.3      matt 		asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));
    889   1.3      matt 
    890   1.3      matt #ifdef IOINTR_DEBUG
    891   1.5      matt 		printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
    892   1.3      matt 			__func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
    893   1.3      matt #endif	/* IOINTR_DEBUG */
    894   1.3      matt 
    895   1.3      matt 		/*
    896   1.3      matt 		 * reduce eirr to
     897   1.3      matt 		 * - ints that are to be handled at exactly this ipl
     898   1.3      matt 		 * - exclude the count/compare clock and soft ints,
     899   1.3      matt 		 *   which are handled elsewhere
    900   1.3      matt 		 */
    901   1.3      matt 		eirr &= ipl_eimr_map[ipl-1];
    902   1.3      matt 		eirr &= ~ipl_eimr_map[ipl];
    903   1.3      matt 		eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
    904   1.3      matt 		if (eirr == 0)
    905   1.3      matt 			break;
    906   1.3      matt 
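                         		/* service the highest-numbered pending vector first */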
    907   1.3      matt 		vec = 63 - dclz(eirr);
    908   1.3      matt 		ih = &rmixl_intrhand[vec];
    909   1.3      matt 		vecbit = 1ULL << vec;
    910   1.3      matt 		KASSERT (ih->ih_ipl == ipl);
    911   1.3      matt 		KASSERT ((vecbit & eimr) == 0);
    912   1.3      matt 		KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);
    913   1.3      matt 
    914   1.3      matt 		/*
    915   1.4     cliff 		 * ack in EIRR, and in PIC if needed,
    916   1.4     cliff 		 * the irq we are about to handle
    917   1.3      matt 		 */
    918   1.4     cliff 		rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
    919   1.3      matt 		if (RMIXL_VECTOR_IS_IRT(vec))
    920   1.3      matt 			RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
    921   1.3      matt 				1 << RMIXL_VECTOR_IRT(vec));
    922   1.2      matt 
    923   1.3      matt 		if (ih->ih_func != NULL) {
    924   1.3      matt #ifdef MULTIPROCESSOR
    925   1.3      matt 			if (ih->ih_mpsafe) {
    926   1.3      matt 				(void)(*ih->ih_func)(ih->ih_arg);
    927   1.3      matt 			} else {
    928   1.3      matt 				KASSERTMSG(ipl == IPL_VM,
    929   1.8       jym 				    "%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
    930   1.3      matt 				    __func__, sc->sc_vec_evcnts[vec].ev_name,
    931   1.8       jym 				    ipl);
    932   1.3      matt 				KERNEL_LOCK(1, NULL);
    933   1.3      matt 				(void)(*ih->ih_func)(ih->ih_arg);
    934   1.3      matt 				KERNEL_UNLOCK_ONE(NULL);
    935   1.3      matt 			}
    936   1.3      matt #else
    937   1.3      matt 			(void)(*ih->ih_func)(ih->ih_arg);
    938   1.3      matt #endif /* MULTIPROCESSOR */
    939   1.3      matt 		}
    940   1.3      matt 		KASSERT(ipl == ih->ih_ipl);
    941   1.3      matt 		KASSERTMSG(curcpu()->ci_cpl >= ipl,
    942   1.8       jym 		    "%s: after %s: cpl (%d) < ipl %d",
    943   1.3      matt 		    __func__, sc->sc_vec_evcnts[vec].ev_name,
    944   1.8       jym 		    ipl, curcpu()->ci_cpl);
    945   1.3      matt 		sc->sc_vec_evcnts[vec].ev_count++;
    946   1.3      matt 	}
    947   1.2      matt }
    948   1.2      matt 
    949   1.3      matt #ifdef MULTIPROCESSOR
    950   1.3      matt static int
    951   1.3      matt rmixl_send_ipi(struct cpu_info *ci, int tag)
    952   1.2      matt {
    953   1.3      matt 	const cpuid_t cpuid = ci->ci_cpuid;
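                         	/* low 2 bits of cpuid select the thread, the rest the core */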
    954   1.3      matt 	uint32_t core = (uint32_t)(cpuid >> 2);
    955   1.3      matt 	uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
    956   1.3      matt 	uint64_t req = 1 << tag;
    957   1.2      matt 	uint32_t r;
    958   1.3      matt 
    959   1.9      matt 	if (!kcpuset_isset(cpus_running, cpu_index(ci)))
    960   1.3      matt 		return -1;
    961   1.3      matt 
    962   1.3      matt 	KASSERT((tag >= 0) && (tag < NIPIS));
    963   1.3      matt 
    964   1.3      matt 	r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT)
    965   1.3      matt 	  | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT)
    966   1.3      matt 	  | (RMIXL_INTRVEC_IPI + tag);
    967   1.3      matt 
    968   1.3      matt 	mutex_enter(&rmixl_ipi_lock);
    969  1.13  riastrad 	membar_release();
    970   1.3      matt 	atomic_or_64(&ci->ci_request_ipis, req);
    971   1.3      matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
    972   1.3      matt 	mutex_exit(&rmixl_ipi_lock);
    973   1.3      matt 
    974   1.3      matt 	return 0;
    975   1.2      matt }
    976   1.2      matt 
    977   1.3      matt static int
    978   1.3      matt rmixl_ipi_intr(void *arg)
    979   1.2      matt {
    980   1.3      matt 	struct cpu_info * const ci = curcpu();
    981  1.11  dholland 	const uint64_t ipi_mask = 1ULL << (uintptr_t)arg;
    982   1.2      matt 
    983   1.3      matt 	KASSERT(ci->ci_cpl >= IPL_SCHED);
    984   1.4     cliff 	KASSERT((uintptr_t)arg < NIPIS);
    985   1.2      matt 
    986   1.4     cliff 	/* if the request is clear, it was previously processed */
    987  1.13  riastrad 	if ((atomic_load_relaxed(&ci->ci_request_ipis) & ipi_mask) == 0)
    988   1.4     cliff 		return 0;
    989  1.13  riastrad 	membar_acquire();
    990   1.2      matt 
    991   1.3      matt 	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
    992   1.3      matt 	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
    993   1.2      matt 
    994   1.3      matt 	ipi_process(ci, ipi_mask);
    995   1.2      matt 
    996   1.3      matt 	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
    997   1.2      matt 
    998   1.3      matt 	return 1;
    999   1.3      matt }
   1000   1.3      matt #endif	/* MULTIPROCESSOR */
   1001   1.2      matt 
   1002   1.3      matt #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
   1003   1.3      matt int
   1004   1.3      matt rmixl_intrhand_print_subr(int vec)
   1005   1.3      matt {
   1006   1.3      matt 	rmixl_intrhand_t *ih = &rmixl_intrhand[vec];
   1007   1.3      matt 	printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n",
   1008   1.3      matt 		vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl,
   1009   1.3      matt 		ih->ih_cpumask);
   1010   1.3      matt 	return 0;
   1011   1.3      matt }
   1012   1.3      matt int
   1013   1.3      matt rmixl_intrhand_print(void)
   1014   1.3      matt {
   1015   1.3      matt 	for (int vec=0; vec < NINTRVECS ; vec++)
   1016   1.3      matt 		rmixl_intrhand_print_subr(vec);
   1017   1.3      matt 	return 0;
   1018   1.3      matt }
   1019   1.2      matt 
   1020   1.3      matt static inline void
   1021   1.3      matt rmixl_irt_entry_print(u_int irt)
   1022   1.3      matt {
   1023   1.3      matt 	uint32_t c0, c1;
   1024   1.2      matt 
    1025   1.3      matt 	if (irt >= NIRTS)
   1026   1.3      matt 		return;
   1027   1.3      matt 	c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
   1028   1.3      matt 	c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
   1029   1.3      matt 	printf("irt[%d]: %#x, %#x\n", irt, c0, c1);
   1030   1.2      matt }
   1031   1.2      matt 
   1032   1.2      matt int
   1033   1.3      matt rmixl_irt_print(void)
   1034   1.2      matt {
   1035   1.3      matt 	printf("%s:\n", __func__);
   1036   1.3      matt 	for (int irt=0; irt < NIRTS ; irt++)
   1037   1.3      matt 		rmixl_irt_entry_print(irt);
   1038   1.3      matt 	return 0;
   1039   1.3      matt }
   1040   1.2      matt 
   1041   1.3      matt void
   1042   1.3      matt rmixl_ipl_eimr_map_print(void)
   1043   1.3      matt {
   1044   1.3      matt 	printf("IPL_NONE=%d, mask %#"PRIx64"\n",
   1045   1.3      matt 		IPL_NONE, ipl_eimr_map[IPL_NONE]);
   1046   1.3      matt 	printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
   1047   1.3      matt 		IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
   1048   1.3      matt 	printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
   1049   1.3      matt 		IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
   1050   1.3      matt 	printf("IPL_VM=%d, mask %#"PRIx64"\n",
   1051   1.3      matt 		IPL_VM, ipl_eimr_map[IPL_VM]);
   1052   1.3      matt 	printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
   1053   1.3      matt 		IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
   1054   1.3      matt 	printf("IPL_DDB=%d, mask %#"PRIx64"\n",
   1055   1.3      matt 		IPL_DDB, ipl_eimr_map[IPL_DDB]);
   1056   1.3      matt 	printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
   1057   1.3      matt 		IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
   1058   1.2      matt }
   1059   1.3      matt 
   1060   1.2      matt #endif
   1061