/* rmixl_intr.c, revision 1.5 */
      1  1.5   matt /*	$NetBSD: rmixl_intr.c,v 1.5 2011/04/29 21:58:27 matt Exp $	*/
      2  1.2   matt 
      3  1.2   matt /*-
      4  1.2   matt  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
      5  1.2   matt  * All rights reserved.
      6  1.2   matt  *
      7  1.2   matt  * Redistribution and use in source and binary forms, with or
      8  1.2   matt  * without modification, are permitted provided that the following
      9  1.2   matt  * conditions are met:
     10  1.2   matt  * 1. Redistributions of source code must retain the above copyright
     11  1.2   matt  *    notice, this list of conditions and the following disclaimer.
     12  1.2   matt  * 2. Redistributions in binary form must reproduce the above
     13  1.2   matt  *    copyright notice, this list of conditions and the following
     14  1.2   matt  *    disclaimer in the documentation and/or other materials provided
     15  1.2   matt  *    with the distribution.
     16  1.2   matt  * 3. The names of the authors may not be used to endorse or promote
     17  1.2   matt  *    products derived from this software without specific prior
     18  1.2   matt  *    written permission.
     19  1.2   matt  *
     20  1.2   matt  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
     21  1.2   matt  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     22  1.2   matt  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
     23  1.2   matt  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
     24  1.2   matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     25  1.2   matt  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     26  1.2   matt  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
     27  1.2   matt  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     28  1.2   matt  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
     29  1.2   matt  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     30  1.2   matt  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
     31  1.2   matt  * OF SUCH DAMAGE.
     32  1.2   matt  */
     33  1.2   matt /*-
     34  1.2   matt  * Copyright (c) 2001 The NetBSD Foundation, Inc.
     35  1.2   matt  * All rights reserved.
     36  1.2   matt  *
     37  1.2   matt  * This code is derived from software contributed to The NetBSD Foundation
     38  1.2   matt  * by Jason R. Thorpe.
     39  1.2   matt  *
     40  1.2   matt  * Redistribution and use in source and binary forms, with or without
     41  1.2   matt  * modification, are permitted provided that the following conditions
     42  1.2   matt  * are met:
     43  1.2   matt  * 1. Redistributions of source code must retain the above copyright
     44  1.2   matt  *    notice, this list of conditions and the following disclaimer.
     45  1.2   matt  * 2. Redistributions in binary form must reproduce the above copyright
     46  1.2   matt  *    notice, this list of conditions and the following disclaimer in the
     47  1.2   matt  *    documentation and/or other materials provided with the distribution.
     48  1.2   matt  *
     49  1.2   matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50  1.2   matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51  1.2   matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52  1.2   matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53  1.2   matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54  1.2   matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55  1.2   matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56  1.2   matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57  1.2   matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58  1.2   matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59  1.2   matt  * POSSIBILITY OF SUCH DAMAGE.
     60  1.2   matt  */
     61  1.2   matt 
     62  1.2   matt /*
     63  1.2   matt  * Platform-specific interrupt support for the RMI XLP, XLR, XLS
     64  1.2   matt  */
     65  1.2   matt 
     66  1.2   matt #include <sys/cdefs.h>
     67  1.5   matt __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.5 2011/04/29 21:58:27 matt Exp $");
     68  1.2   matt 
     69  1.2   matt #include "opt_ddb.h"
     70  1.3   matt #include "opt_multiprocessor.h"
     71  1.3   matt #define	__INTR_PRIVATE
     72  1.2   matt 
     73  1.2   matt #include <sys/param.h>
     74  1.2   matt #include <sys/queue.h>
     75  1.2   matt #include <sys/malloc.h>
     76  1.2   matt #include <sys/systm.h>
     77  1.2   matt #include <sys/device.h>
     78  1.2   matt #include <sys/kernel.h>
     79  1.3   matt #include <sys/atomic.h>
     80  1.3   matt #include <sys/mutex.h>
     81  1.3   matt #include <sys/cpu.h>
     82  1.2   matt 
     83  1.2   matt #include <machine/bus.h>
     84  1.2   matt #include <machine/intr.h>
     85  1.2   matt 
     86  1.2   matt #include <mips/cpu.h>
     87  1.3   matt #include <mips/cpuset.h>
     88  1.2   matt #include <mips/locore.h>
     89  1.2   matt 
     90  1.2   matt #include <mips/rmi/rmixlreg.h>
     91  1.2   matt #include <mips/rmi/rmixlvar.h>
     92  1.2   matt 
     93  1.3   matt #include <mips/rmi/rmixl_cpuvar.h>
     94  1.3   matt #include <mips/rmi/rmixl_intr.h>
     95  1.3   matt 
     96  1.2   matt #include <dev/pci/pcireg.h>
     97  1.2   matt #include <dev/pci/pcivar.h>
     98  1.2   matt 
     99  1.3   matt //#define IOINTR_DEBUG	1
    100  1.2   matt #ifdef IOINTR_DEBUG
    101  1.2   matt int iointr_debug = IOINTR_DEBUG;
    102  1.2   matt # define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
    103  1.2   matt #else
    104  1.2   matt # define DPRINTF(x)
    105  1.2   matt #endif
    106  1.2   matt 
    107  1.2   matt #define RMIXL_PICREG_READ(off) \
    108  1.2   matt 	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
    109  1.2   matt #define RMIXL_PICREG_WRITE(off, val) \
    110  1.2   matt 	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))
    111  1.2   matt 
    112  1.2   matt /*
    113  1.3   matt  * do not clear these when acking EIRR
    114  1.3   matt  * (otherwise they get lost)
    115  1.2   matt  */
    116  1.3   matt #define RMIXL_EIRR_PRESERVE_MASK	\
    117  1.3   matt 		((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
    118  1.2   matt 
    119  1.2   matt /*
    120  1.3   matt  * IRT assignments depends on the RMI chip family
    121  1.3   matt  * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx)
    122  1.3   matt  * use the right display string table for the CPU that's running.
    123  1.3   matt  */
    124  1.2   matt 
    125  1.2   matt /*
    126  1.3   matt  * rmixl_irtnames_xlrxxx
    127  1.3   matt  * - use for XLRxxx
    128  1.2   matt  */
    129  1.3   matt static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
    130  1.3   matt 	"pic int 0 (watchdog)",		/*  0 */
    131  1.3   matt 	"pic int 1 (timer0)",		/*  1 */
    132  1.3   matt 	"pic int 2 (timer1)",		/*  2 */
    133  1.3   matt 	"pic int 3 (timer2)",		/*  3 */
    134  1.3   matt 	"pic int 4 (timer3)",		/*  4 */
    135  1.3   matt 	"pic int 5 (timer4)",		/*  5 */
    136  1.3   matt 	"pic int 6 (timer5)",		/*  6 */
    137  1.3   matt 	"pic int 7 (timer6)",		/*  7 */
    138  1.3   matt 	"pic int 8 (timer7)",		/*  8 */
    139  1.3   matt 	"pic int 9 (uart0)",		/*  9 */
    140  1.3   matt 	"pic int 10 (uart1)",		/* 10 */
    141  1.3   matt 	"pic int 11 (i2c0)",		/* 11 */
    142  1.3   matt 	"pic int 12 (i2c1)",		/* 12 */
    143  1.3   matt 	"pic int 13 (pcmcia)",		/* 13 */
    144  1.3   matt 	"pic int 14 (gpio)",		/* 14 */
    145  1.3   matt 	"pic int 15 (hyper)",		/* 15 */
    146  1.3   matt 	"pic int 16 (pcix)",		/* 16 */
    147  1.3   matt 	"pic int 17 (gmac0)",		/* 17 */
    148  1.3   matt 	"pic int 18 (gmac1)",		/* 18 */
    149  1.3   matt 	"pic int 19 (gmac2)",		/* 19 */
    150  1.3   matt 	"pic int 20 (gmac3)",		/* 20 */
    151  1.3   matt 	"pic int 21 (xgs0)",		/* 21 */
    152  1.3   matt 	"pic int 22 (xgs1)",		/* 22 */
    153  1.3   matt 	"pic int 23 (irq23)",		/* 23 */
    154  1.3   matt 	"pic int 24 (hyper_fatal)",	/* 24 */
    155  1.3   matt 	"pic int 25 (bridge_aerr)",	/* 25 */
    156  1.3   matt 	"pic int 26 (bridge_berr)",	/* 26 */
    157  1.3   matt 	"pic int 27 (bridge_tb)",	/* 27 */
    158  1.3   matt 	"pic int 28 (bridge_nmi)",	/* 28 */
    159  1.3   matt 	"pic int 29 (bridge_sram_derr)",/* 29 */
    160  1.3   matt 	"pic int 30 (gpio_fatal)",	/* 30 */
    161  1.3   matt 	"pic int 31 (reserved)",	/* 31 */
    162  1.2   matt };
    163  1.2   matt 
    164  1.2   matt /*
    165  1.3   matt  * rmixl_irtnames_xls2xx
    166  1.3   matt  * - use for XLS2xx
    167  1.2   matt  */
    168  1.3   matt static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
    169  1.3   matt 	"pic int 0 (watchdog)",		/*  0 */
    170  1.3   matt 	"pic int 1 (timer0)",		/*  1 */
    171  1.3   matt 	"pic int 2 (timer1)",		/*  2 */
    172  1.3   matt 	"pic int 3 (timer2)",		/*  3 */
    173  1.3   matt 	"pic int 4 (timer3)",		/*  4 */
    174  1.3   matt 	"pic int 5 (timer4)",		/*  5 */
    175  1.3   matt 	"pic int 6 (timer5)",		/*  6 */
    176  1.3   matt 	"pic int 7 (timer6)",		/*  7 */
    177  1.3   matt 	"pic int 8 (timer7)",		/*  8 */
    178  1.3   matt 	"pic int 9 (uart0)",		/*  9 */
    179  1.3   matt 	"pic int 10 (uart1)",		/* 10 */
    180  1.3   matt 	"pic int 11 (i2c0)",		/* 11 */
    181  1.3   matt 	"pic int 12 (i2c1)",		/* 12 */
    182  1.3   matt 	"pic int 13 (pcmcia)",		/* 13 */
    183  1.3   matt 	"pic int 14 (gpio_a)",		/* 14 */
    184  1.3   matt 	"pic int 15 (irq15)",		/* 15 */
    185  1.3   matt 	"pic int 16 (bridge_tb)",	/* 16 */
    186  1.3   matt 	"pic int 17 (gmac0)",		/* 17 */
    187  1.3   matt 	"pic int 18 (gmac1)",		/* 18 */
    188  1.3   matt 	"pic int 19 (gmac2)",		/* 19 */
    189  1.3   matt 	"pic int 20 (gmac3)",		/* 20 */
    190  1.3   matt 	"pic int 21 (irq21)",		/* 21 */
    191  1.3   matt 	"pic int 22 (irq22)",		/* 22 */
    192  1.3   matt 	"pic int 23 (pcie_link2)",	/* 23 */
    193  1.3   matt 	"pic int 24 (pcie_link3)",	/* 24 */
    194  1.3   matt 	"pic int 25 (bridge_err)",	/* 25 */
    195  1.3   matt 	"pic int 26 (pcie_link0)",	/* 26 */
    196  1.3   matt 	"pic int 27 (pcie_link1)",	/* 27 */
    197  1.3   matt 	"pic int 28 (irq28)",		/* 28 */
    198  1.3   matt 	"pic int 29 (pcie_err)",	/* 29 */
    199  1.3   matt 	"pic int 30 (gpio_b)",		/* 30 */
    200  1.3   matt 	"pic int 31 (usb)",		/* 31 */
    201  1.2   matt };
    202  1.2   matt 
    203  1.2   matt /*
    204  1.3   matt  * rmixl_irtnames_xls1xx
    205  1.3   matt  * - use for XLS1xx, XLS4xx-Lite
    206  1.2   matt  */
    207  1.3   matt static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
    208  1.3   matt 	"pic int 0 (watchdog)",		/*  0 */
    209  1.3   matt 	"pic int 1 (timer0)",		/*  1 */
    210  1.3   matt 	"pic int 2 (timer1)",		/*  2 */
    211  1.3   matt 	"pic int 3 (timer2)",		/*  3 */
    212  1.3   matt 	"pic int 4 (timer3)",		/*  4 */
    213  1.3   matt 	"pic int 5 (timer4)",		/*  5 */
    214  1.3   matt 	"pic int 6 (timer5)",		/*  6 */
    215  1.3   matt 	"pic int 7 (timer6)",		/*  7 */
    216  1.3   matt 	"pic int 8 (timer7)",		/*  8 */
    217  1.3   matt 	"pic int 9 (uart0)",		/*  9 */
    218  1.3   matt 	"pic int 10 (uart1)",		/* 10 */
    219  1.3   matt 	"pic int 11 (i2c0)",		/* 11 */
    220  1.3   matt 	"pic int 12 (i2c1)",		/* 12 */
    221  1.3   matt 	"pic int 13 (pcmcia)",		/* 13 */
    222  1.3   matt 	"pic int 14 (gpio_a)",		/* 14 */
    223  1.3   matt 	"pic int 15 (irq15)",		/* 15 */
    224  1.3   matt 	"pic int 16 (bridge_tb)",	/* 16 */
    225  1.3   matt 	"pic int 17 (gmac0)",		/* 17 */
    226  1.3   matt 	"pic int 18 (gmac1)",		/* 18 */
    227  1.3   matt 	"pic int 19 (gmac2)",		/* 19 */
    228  1.3   matt 	"pic int 20 (gmac3)",		/* 20 */
    229  1.3   matt 	"pic int 21 (irq21)",		/* 21 */
    230  1.3   matt 	"pic int 22 (irq22)",		/* 22 */
    231  1.3   matt 	"pic int 23 (irq23)",		/* 23 */
    232  1.3   matt 	"pic int 24 (irq24)",		/* 24 */
    233  1.3   matt 	"pic int 25 (bridge_err)",	/* 25 */
    234  1.3   matt 	"pic int 26 (pcie_link0)",	/* 26 */
    235  1.3   matt 	"pic int 27 (pcie_link1)",	/* 27 */
    236  1.3   matt 	"pic int 28 (irq28)",		/* 28 */
    237  1.3   matt 	"pic int 29 (pcie_err)",	/* 29 */
    238  1.3   matt 	"pic int 30 (gpio_b)",		/* 30 */
    239  1.3   matt 	"pic int 31 (usb)",		/* 31 */
    240  1.2   matt };
    241  1.2   matt 
    242  1.2   matt /*
    243  1.3   matt  * rmixl_irtnames_xls4xx:
    244  1.3   matt  * - use for XLS4xx, XLS6xx
    245  1.2   matt  */
    246  1.3   matt static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
    247  1.3   matt 	"pic int 0 (watchdog)",		/*  0 */
    248  1.3   matt 	"pic int 1 (timer0)",		/*  1 */
    249  1.3   matt 	"pic int 2 (timer1)",		/*  2 */
    250  1.3   matt 	"pic int 3 (timer2)",		/*  3 */
    251  1.3   matt 	"pic int 4 (timer3)",		/*  4 */
    252  1.3   matt 	"pic int 5 (timer4)",		/*  5 */
    253  1.3   matt 	"pic int 6 (timer5)",		/*  6 */
    254  1.3   matt 	"pic int 7 (timer6)",		/*  7 */
    255  1.3   matt 	"pic int 8 (timer7)",		/*  8 */
    256  1.3   matt 	"pic int 9 (uart0)",		/*  9 */
    257  1.3   matt 	"pic int 10 (uart1)",		/* 10 */
    258  1.3   matt 	"pic int 11 (i2c0)",		/* 11 */
    259  1.3   matt 	"pic int 12 (i2c1)",		/* 12 */
    260  1.3   matt 	"pic int 13 (pcmcia)",		/* 13 */
    261  1.3   matt 	"pic int 14 (gpio_a)",		/* 14 */
    262  1.3   matt 	"pic int 15 (irq15)",		/* 15 */
    263  1.3   matt 	"pic int 16 (bridge_tb)",	/* 16 */
    264  1.3   matt 	"pic int 17 (gmac0)",		/* 17 */
    265  1.3   matt 	"pic int 18 (gmac1)",		/* 18 */
    266  1.3   matt 	"pic int 19 (gmac2)",		/* 19 */
    267  1.3   matt 	"pic int 20 (gmac3)",		/* 20 */
    268  1.3   matt 	"pic int 21 (irq21)",		/* 21 */
    269  1.3   matt 	"pic int 22 (irq22)",		/* 22 */
    270  1.3   matt 	"pic int 23 (irq23)",		/* 23 */
    271  1.3   matt 	"pic int 24 (irq24)",		/* 24 */
    272  1.3   matt 	"pic int 25 (bridge_err)",	/* 25 */
    273  1.3   matt 	"pic int 26 (pcie_link0)",	/* 26 */
    274  1.3   matt 	"pic int 27 (pcie_link1)",	/* 27 */
    275  1.3   matt 	"pic int 28 (pcie_link2)",	/* 28 */
    276  1.3   matt 	"pic int 29 (pcie_link3)",	/* 29 */
    277  1.3   matt 	"pic int 30 (gpio_b)",		/* 30 */
    278  1.3   matt 	"pic int 31 (usb)",		/* 31 */
    279  1.3   matt };
    280  1.2   matt 
    281  1.2   matt /*
    282  1.3   matt  * rmixl_vecnames_common:
    283  1.3   matt  * - use for unknown cpu implementation
    284  1.3   matt  * - covers all vectors, not just IRT intrs
    285  1.2   matt  */
    286  1.3   matt static const char * const rmixl_vecnames_common[NINTRVECS] = {
    287  1.3   matt 	"vec 0",		/*  0 */
    288  1.3   matt 	"vec 1",		/*  1 */
    289  1.3   matt 	"vec 2",		/*  2 */
    290  1.3   matt 	"vec 3",		/*  3 */
    291  1.3   matt 	"vec 4",		/*  4 */
    292  1.3   matt 	"vec 5",		/*  5 */
    293  1.3   matt 	"vec 6",		/*  6 */
    294  1.3   matt 	"vec 7",		/*  7 */
    295  1.4  cliff 	"vec 8 (ipi 0)",	/*  8 */
    296  1.4  cliff 	"vec 9 (ipi 1)",	/*  9 */
    297  1.4  cliff 	"vec 10 (ipi 2)",	/* 10 */
    298  1.4  cliff 	"vec 11 (ipi 3)",	/* 11 */
    299  1.4  cliff 	"vec 12 (ipi 4)",	/* 12 */
    300  1.4  cliff 	"vec 13 (ipi 5)",	/* 13 */
    301  1.4  cliff 	"vec 14 (ipi 6)",	/* 14 */
    302  1.4  cliff 	"vec 15 (fmn)",		/* 15 */
    303  1.4  cliff 	"vec 16",		/* 16 */
    304  1.3   matt 	"vec 17",		/* 17 */
    305  1.3   matt 	"vec 18",		/* 18 */
    306  1.3   matt 	"vec 19",		/* 19 */
    307  1.3   matt 	"vec 20",		/* 20 */
    308  1.3   matt 	"vec 21",		/* 21 */
    309  1.3   matt 	"vec 22",		/* 22 */
    310  1.3   matt 	"vec 23",		/* 23 */
    311  1.3   matt 	"vec 24",		/* 24 */
    312  1.3   matt 	"vec 25",		/* 25 */
    313  1.3   matt 	"vec 26",		/* 26 */
    314  1.3   matt 	"vec 27",		/* 27 */
    315  1.3   matt 	"vec 28",		/* 28 */
    316  1.3   matt 	"vec 29",		/* 29 */
    317  1.3   matt 	"vec 30",		/* 30 */
    318  1.3   matt 	"vec 31",		/* 31 */
    319  1.3   matt 	"vec 32",		/* 32 */
    320  1.3   matt 	"vec 33",		/* 33 */
    321  1.3   matt 	"vec 34",		/* 34 */
    322  1.3   matt 	"vec 35",		/* 35 */
    323  1.3   matt 	"vec 36",		/* 36 */
    324  1.3   matt 	"vec 37",		/* 37 */
    325  1.3   matt 	"vec 38",		/* 38 */
    326  1.3   matt 	"vec 39",		/* 39 */
    327  1.3   matt 	"vec 40",		/* 40 */
    328  1.3   matt 	"vec 41",		/* 41 */
    329  1.3   matt 	"vec 42",		/* 42 */
    330  1.3   matt 	"vec 43",		/* 43 */
    331  1.3   matt 	"vec 44",		/* 44 */
    332  1.3   matt 	"vec 45",		/* 45 */
    333  1.3   matt 	"vec 46",		/* 46 */
    334  1.3   matt 	"vec 47",		/* 47 */
    335  1.3   matt 	"vec 48",		/* 48 */
    336  1.3   matt 	"vec 49",		/* 49 */
    337  1.3   matt 	"vec 50",		/* 50 */
    338  1.3   matt 	"vec 51",		/* 51 */
    339  1.3   matt 	"vec 52",		/* 52 */
    340  1.3   matt 	"vec 53",		/* 53 */
    341  1.3   matt 	"vec 54",		/* 54 */
    342  1.3   matt 	"vec 55",		/* 55 */
    343  1.3   matt 	"vec 56",		/* 56 */
    344  1.3   matt 	"vec 57",		/* 57 */
    345  1.3   matt 	"vec 58",		/* 58 */
    346  1.3   matt 	"vec 59",		/* 59 */
    347  1.3   matt 	"vec 60",		/* 60 */
    348  1.3   matt 	"vec 61",		/* 61 */
    349  1.3   matt 	"vec 62",		/* 63 */
    350  1.3   matt 	"vec 63",		/* 63 */
    351  1.2   matt };
    352  1.2   matt 
    353  1.2   matt /*
    354  1.3   matt  * mask of CPUs attached
    355  1.3   matt  * once they are attached, this var is read-only so mp safe
    356  1.2   matt  */
    357  1.3   matt static uint32_t cpu_present_mask;
    358  1.3   matt 
    359  1.3   matt kmutex_t rmixl_ipi_lock __cacheline_aligned;
    360  1.3   matt 				/* covers RMIXL_PIC_IPIBASE */
    361  1.3   matt kmutex_t rmixl_intr_lock __cacheline_aligned;
    362  1.3   matt 				/* covers rest of PIC, and rmixl_intrhand[] */
    363  1.3   matt rmixl_intrhand_t rmixl_intrhand[NINTRVECS];
    364  1.2   matt 
    365  1.2   matt #ifdef DIAGNOSTIC
    366  1.3   matt static int rmixl_pic_init_done;
    367  1.2   matt #endif
    368  1.2   matt 
    369  1.2   matt 
    370  1.3   matt static const char *rmixl_intr_string_xlr(int);
    371  1.3   matt static const char *rmixl_intr_string_xls(int);
    372  1.3   matt static uint32_t rmixl_irt_thread_mask(int);
    373  1.3   matt static void rmixl_irt_init(int);
    374  1.3   matt static void rmixl_irt_disestablish(int);
    375  1.3   matt static void rmixl_irt_establish(int, int, int,
    376  1.3   matt 		rmixl_intr_trigger_t, rmixl_intr_polarity_t);
    377  1.3   matt 
    378  1.3   matt #ifdef MULTIPROCESSOR
    379  1.3   matt static int rmixl_send_ipi(struct cpu_info *, int);
    380  1.3   matt static int rmixl_ipi_intr(void *);
    381  1.3   matt #endif
    382  1.3   matt 
    383  1.3   matt #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
    384  1.3   matt int  rmixl_intrhand_print_subr(int);
    385  1.3   matt int  rmixl_intrhand_print(void);
    386  1.3   matt int  rmixl_irt_print(void);
    387  1.3   matt void rmixl_ipl_eimr_map_print(void);
    388  1.3   matt #endif
    389  1.2   matt 
    390  1.2   matt 
    391  1.3   matt static inline u_int
    392  1.3   matt dclz(uint64_t val)
    393  1.2   matt {
    394  1.3   matt 	int nlz;
    395  1.2   matt 
    396  1.3   matt 	asm volatile("dclz %0, %1;"
    397  1.3   matt 		: "=r"(nlz) : "r"(val));
    398  1.3   matt 
    399  1.3   matt 	return nlz;
    400  1.2   matt }
    401  1.2   matt 
    402  1.2   matt void
    403  1.2   matt evbmips_intr_init(void)
    404  1.2   matt {
    405  1.2   matt 	uint32_t r;
    406  1.2   matt 
    407  1.3   matt 	KASSERT(cpu_rmixlr(mips_options.mips_cpu)
    408  1.3   matt 	     || cpu_rmixls(mips_options.mips_cpu));
    409  1.3   matt 
    410  1.2   matt 
    411  1.2   matt #ifdef DIAGNOSTIC
    412  1.3   matt 	if (rmixl_pic_init_done != 0)
    413  1.3   matt 		panic("%s: rmixl_pic_init_done %d",
    414  1.3   matt 			__func__, rmixl_pic_init_done);
    415  1.2   matt #endif
    416  1.2   matt 
    417  1.3   matt 	mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
    418  1.3   matt 	mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
    419  1.3   matt 
    420  1.3   matt 	mutex_enter(&rmixl_intr_lock);
    421  1.2   matt 
    422  1.3   matt 	/*
    423  1.3   matt 	 * initialize (zero) all IRT Entries in the PIC
    424  1.3   matt 	 */
    425  1.3   matt 	for (u_int i = 0; i < NIRTS; i++) {
    426  1.3   matt 		rmixl_irt_init(i);
    427  1.2   matt 	}
    428  1.2   matt 
    429  1.2   matt 	/*
    430  1.2   matt 	 * disable watchdog NMI, timers
    431  1.2   matt 	 *
    432  1.2   matt 	 * XXX
    433  1.2   matt 	 *  WATCHDOG_ENB is preserved because clearing it causes
    434  1.2   matt 	 *  hang on the XLS616 (but not on the XLS408)
    435  1.2   matt 	 */
    436  1.2   matt 	r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
    437  1.2   matt 	r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
    438  1.2   matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);
    439  1.2   matt 
    440  1.3   matt #ifdef DIAGNOSTIC
    441  1.3   matt 	rmixl_pic_init_done = 1;
    442  1.3   matt #endif
    443  1.3   matt 	mutex_exit(&rmixl_intr_lock);
    444  1.3   matt 
    445  1.3   matt }
    446  1.3   matt 
    447  1.3   matt /*
    448  1.3   matt  * establish vector for mips3 count/compare clock interrupt
    449  1.3   matt  * this ensures we enable in EIRR,
    450  1.3   matt  * even though cpu_intr() handles the interrupt
    451  1.3   matt  * note the 'mpsafe' arg here is a placeholder only
    452  1.3   matt  */
    453  1.3   matt void
    454  1.3   matt rmixl_intr_init_clk(void)
    455  1.3   matt {
    456  1.3   matt 	const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;
    457  1.3   matt 
    458  1.3   matt 	mutex_enter(&rmixl_intr_lock);
    459  1.3   matt 
    460  1.3   matt 	void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
    461  1.3   matt 	if (ih == NULL)
    462  1.3   matt 		panic("%s: establish vec %d failed", __func__, vec);
    463  1.3   matt 
    464  1.3   matt 	mutex_exit(&rmixl_intr_lock);
    465  1.3   matt }
    466  1.3   matt 
    467  1.3   matt #ifdef MULTIPROCESSOR
    468  1.3   matt /*
    469  1.3   matt  * establish IPI interrupt and send function
    470  1.3   matt  */
    471  1.3   matt void
    472  1.3   matt rmixl_intr_init_ipi(void)
    473  1.3   matt {
    474  1.3   matt 	mutex_enter(&rmixl_intr_lock);
    475  1.3   matt 
    476  1.3   matt 	for (u_int ipi = 0; ipi < NIPIS; ipi++) {
    477  1.3   matt 		const u_int vec = RMIXL_INTRVEC_IPI + ipi;
    478  1.3   matt 		void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
    479  1.3   matt 			rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
    480  1.3   matt 		if (ih == NULL)
    481  1.3   matt 			panic("%s: establish ipi %d at vec %d failed",
    482  1.3   matt 				__func__, ipi, vec);
    483  1.3   matt 	}
    484  1.5   matt 
    485  1.3   matt 	mips_locoresw.lsw_send_ipi = rmixl_send_ipi;
    486  1.5   matt 
    487  1.3   matt 	mutex_exit(&rmixl_intr_lock);
    488  1.3   matt }
    489  1.3   matt #endif 	/* MULTIPROCESSOR */
    490  1.3   matt 
    491  1.3   matt /*
    492  1.3   matt  * initialize per-cpu interrupt stuff in softc
    493  1.3   matt  * accumulate per-cpu bits in 'cpu_present_mask'
    494  1.3   matt  */
    495  1.3   matt void
    496  1.3   matt rmixl_intr_init_cpu(struct cpu_info *ci)
    497  1.3   matt {
    498  1.3   matt 	struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;
    499  1.3   matt 
    500  1.3   matt 	KASSERT(sc != NULL);
    501  1.2   matt 
    502  1.3   matt 	for (int vec=0; vec < NINTRVECS; vec++)
    503  1.3   matt 		evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
    504  1.3   matt 			EVCNT_TYPE_INTR, NULL,
    505  1.3   matt 			device_xname(sc->sc_dev),
    506  1.3   matt 			rmixl_intr_string(vec));
    507  1.2   matt 
    508  1.3   matt 	KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
    509  1.3   matt 	atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
    510  1.2   matt }
    511  1.2   matt 
    512  1.3   matt /*
    513  1.3   matt  * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
    514  1.3   matt  */
    515  1.2   matt const char *
    516  1.3   matt rmixl_intr_string(int vec)
    517  1.3   matt {
    518  1.3   matt 	int irt;
    519  1.3   matt 
    520  1.3   matt 	if (vec < 0 || vec >= NINTRVECS)
    521  1.3   matt 		panic("%s: vec index %d out of range, max %d",
    522  1.3   matt 			__func__, vec, NINTRVECS - 1);
    523  1.3   matt 
    524  1.3   matt 	if (! RMIXL_VECTOR_IS_IRT(vec))
    525  1.3   matt 		return rmixl_vecnames_common[vec];
    526  1.3   matt 
    527  1.3   matt 	irt = RMIXL_VECTOR_IRT(vec);
    528  1.3   matt 	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
    529  1.3   matt 	case CIDFL_RMI_TYPE_XLR:
    530  1.3   matt 		return rmixl_intr_string_xlr(irt);
    531  1.3   matt 	case CIDFL_RMI_TYPE_XLS:
    532  1.3   matt 		return rmixl_intr_string_xls(irt);
    533  1.3   matt 	case CIDFL_RMI_TYPE_XLP:
    534  1.3   matt 		panic("%s: RMI XLP not yet supported", __func__);
    535  1.3   matt 	}
    536  1.3   matt 
    537  1.3   matt 	return "undefined";	/* appease gcc */
    538  1.3   matt }
    539  1.3   matt 
    540  1.3   matt static const char *
    541  1.3   matt rmixl_intr_string_xlr(int irt)
    542  1.3   matt {
    543  1.3   matt 	return rmixl_irtnames_xlrxxx[irt];
    544  1.3   matt }
    545  1.3   matt 
    546  1.3   matt static const char *
    547  1.3   matt rmixl_intr_string_xls(int irt)
    548  1.2   matt {
    549  1.2   matt 	const char *name;
    550  1.2   matt 
    551  1.3   matt 	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
    552  1.2   matt 	case MIPS_XLS104:
    553  1.2   matt 	case MIPS_XLS108:
    554  1.3   matt 	case MIPS_XLS404LITE:
    555  1.3   matt 	case MIPS_XLS408LITE:
    556  1.3   matt 		name = rmixl_irtnames_xls1xx[irt];
    557  1.3   matt 		break;
    558  1.2   matt 	case MIPS_XLS204:
    559  1.2   matt 	case MIPS_XLS208:
    560  1.3   matt 		name = rmixl_irtnames_xls2xx[irt];
    561  1.3   matt 		break;
    562  1.3   matt 	case MIPS_XLS404:
    563  1.3   matt 	case MIPS_XLS408:
    564  1.3   matt 	case MIPS_XLS416:
    565  1.3   matt 	case MIPS_XLS608:
    566  1.3   matt 	case MIPS_XLS616:
    567  1.3   matt 		name = rmixl_irtnames_xls4xx[irt];
    568  1.3   matt 		break;
    569  1.3   matt 	default:
    570  1.3   matt 		name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
    571  1.3   matt 		break;
    572  1.3   matt 	}
    573  1.3   matt 
    574  1.3   matt 	return name;
    575  1.3   matt }
    576  1.3   matt 
    577  1.3   matt /*
    578  1.3   matt  * rmixl_irt_thread_mask
    579  1.3   matt  *
    580  1.3   matt  *	given a bitmask of cpus, return a, IRT thread mask
    581  1.3   matt  */
    582  1.3   matt static uint32_t
    583  1.3   matt rmixl_irt_thread_mask(int cpumask)
    584  1.3   matt {
    585  1.3   matt 	uint32_t irtc0;
    586  1.3   matt 
    587  1.3   matt #if defined(MULTIPROCESSOR)
    588  1.3   matt #ifndef NOTYET
    589  1.3   matt 	if (cpumask == -1)
    590  1.3   matt 		return 1;	/* XXX TMP FIXME */
    591  1.3   matt #endif
    592  1.3   matt 
    593  1.3   matt 	/*
    594  1.3   matt 	 * discount cpus not present
    595  1.3   matt 	 */
    596  1.3   matt 	cpumask &= cpu_present_mask;
    597  1.3   matt 
    598  1.3   matt 	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
    599  1.3   matt 	case MIPS_XLS104:
    600  1.3   matt 	case MIPS_XLS204:
    601  1.3   matt 	case MIPS_XLS404:
    602  1.2   matt 	case MIPS_XLS404LITE:
    603  1.3   matt 		irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
    604  1.3   matt 		irtc0 &= (__BITS(5,4) | __BITS(1,0));
    605  1.3   matt 		break;
    606  1.3   matt 	case MIPS_XLS108:
    607  1.3   matt 	case MIPS_XLS208:
    608  1.3   matt 	case MIPS_XLS408:
    609  1.2   matt 	case MIPS_XLS408LITE:
    610  1.3   matt 	case MIPS_XLS608:
    611  1.3   matt 		irtc0 = cpumask & __BITS(7,0);
    612  1.2   matt 		break;
    613  1.3   matt 	case MIPS_XLS416:
    614  1.3   matt 	case MIPS_XLS616:
    615  1.3   matt 		irtc0 = cpumask & __BITS(15,0);
    616  1.2   matt 		break;
    617  1.2   matt 	default:
    618  1.3   matt 		panic("%s: unknown cpu ID %#x\n", __func__,
    619  1.3   matt 			mips_options.mips_cpu_id);
    620  1.2   matt 	}
    621  1.3   matt #else
    622  1.3   matt 	irtc0 = 1;
    623  1.3   matt #endif	/* MULTIPROCESSOR */
    624  1.2   matt 
    625  1.3   matt 	return irtc0;
    626  1.2   matt }
    627  1.2   matt 
    628  1.2   matt /*
    629  1.3   matt  * rmixl_irt_init
    630  1.3   matt  * - initialize IRT Entry for given index
    631  1.2   matt  * - unmask Thread#0 in low word (assume we only have 1 thread)
    632  1.2   matt  */
    633  1.2   matt static void
    634  1.3   matt rmixl_irt_init(int irt)
    635  1.2   matt {
    636  1.3   matt 	KASSERT(irt < NIRTS);
    637  1.3   matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0);	/* high word */
    638  1.3   matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0);	/* low  word */
    639  1.2   matt }
    640  1.2   matt 
    641  1.2   matt /*
    642  1.3   matt  * rmixl_irt_disestablish
    643  1.3   matt  * - invalidate IRT Entry for given index
    644  1.2   matt  */
    645  1.2   matt static void
    646  1.3   matt rmixl_irt_disestablish(int irt)
    647  1.2   matt {
    648  1.3   matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    649  1.3   matt 	DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
    650  1.3   matt 	rmixl_irt_init(irt);
    651  1.2   matt }
    652  1.2   matt 
    653  1.2   matt /*
    654  1.3   matt  * rmixl_irt_establish
    655  1.3   matt  * - construct an IRT Entry for irt and write to PIC
    656  1.2   matt  */
    657  1.2   matt static void
    658  1.3   matt rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
    659  1.3   matt 	rmixl_intr_polarity_t polarity)
    660  1.2   matt {
    661  1.2   matt 	uint32_t irtc1;
    662  1.3   matt 	uint32_t irtc0;
    663  1.3   matt 
    664  1.3   matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    665  1.3   matt 
    666  1.3   matt 	if (irt >= NIRTS)
    667  1.3   matt 		panic("%s: bad irt %d\n", __func__, irt);
    668  1.3   matt 
    669  1.3   matt 	if (! RMIXL_VECTOR_IS_IRT(vec))
    670  1.3   matt 		panic("%s: bad vec %d\n", __func__, vec);
    671  1.3   matt 
    672  1.3   matt 	switch (trigger) {
    673  1.3   matt 	case RMIXL_TRIG_EDGE:
    674  1.3   matt 	case RMIXL_TRIG_LEVEL:
    675  1.3   matt 		break;
    676  1.3   matt 	default:
    677  1.3   matt 		panic("%s: bad trigger %d\n", __func__, trigger);
    678  1.3   matt 	}
    679  1.3   matt 
    680  1.3   matt 	switch (polarity) {
    681  1.3   matt 	case RMIXL_POLR_RISING:
    682  1.3   matt 	case RMIXL_POLR_HIGH:
    683  1.3   matt 	case RMIXL_POLR_FALLING:
    684  1.3   matt 	case RMIXL_POLR_LOW:
    685  1.3   matt 		break;
    686  1.3   matt 	default:
    687  1.3   matt 		panic("%s: bad polarity %d\n", __func__, polarity);
    688  1.3   matt 	}
    689  1.3   matt 
    690  1.3   matt 	/*
    691  1.3   matt 	 * XXX IRT entries are not shared
    692  1.3   matt 	 */
    693  1.3   matt 	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
    694  1.3   matt 	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);
    695  1.3   matt 
    696  1.3   matt 	irtc0 = rmixl_irt_thread_mask(cpumask);
    697  1.2   matt 
    698  1.2   matt 	irtc1  = RMIXL_PIC_IRTENTRYC1_VALID;
    699  1.2   matt 	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */
    700  1.3   matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    701  1.2   matt 
    702  1.3   matt 	if (trigger == RMIXL_TRIG_LEVEL)
    703  1.2   matt 		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
    704  1.3   matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    705  1.2   matt 
    706  1.3   matt 	if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
    707  1.2   matt 		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
    708  1.3   matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    709  1.2   matt 
    710  1.3   matt 	irtc1 |= vec;			/* vector in EIRR */
    711  1.3   matt 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
    712  1.2   matt 
    713  1.2   matt 	/*
    714  1.3   matt 	 * write IRT Entry to PIC
    715  1.2   matt 	 */
    716  1.3   matt 	DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n",
    717  1.3   matt 		__func__, vec, vec, irt, irtc0, irtc1));
    718  1.3   matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0);	/* low  word */
    719  1.3   matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1);	/* high word */
    720  1.2   matt }
    721  1.2   matt 
    722  1.2   matt void *
    723  1.3   matt rmixl_vec_establish(int vec, int cpumask, int ipl,
    724  1.3   matt 	int (*func)(void *), void *arg, bool mpsafe)
    725  1.2   matt {
    726  1.3   matt 	rmixl_intrhand_t *ih;
    727  1.3   matt 	uint64_t eimr_bit;
    728  1.2   matt 	int s;
    729  1.2   matt 
    730  1.3   matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    731  1.3   matt 
    732  1.5   matt 	DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n",
    733  1.3   matt 			__func__, vec, cpumask, ipl, func, arg, mpsafe));
    734  1.2   matt #ifdef DIAGNOSTIC
    735  1.3   matt 	if (rmixl_pic_init_done == 0)
    736  1.2   matt 		panic("%s: called before evbmips_intr_init", __func__);
    737  1.2   matt #endif
    738  1.2   matt 
    739  1.2   matt 	/*
    740  1.3   matt 	 * check args
    741  1.2   matt 	 */
    742  1.3   matt 	if (vec < 0 || vec >= NINTRVECS)
    743  1.3   matt 		panic("%s: vec %d out of range, max %d",
    744  1.3   matt 			__func__, vec, NINTRVECS - 1);
    745  1.2   matt 	if (ipl <= 0 || ipl >= _IPL_N)
    746  1.2   matt 		panic("%s: ipl %d out of range, min %d, max %d",
    747  1.2   matt 			__func__, ipl, 1, _IPL_N - 1);
    748  1.2   matt 
    749  1.3   matt 	s = splhigh();
    750  1.3   matt 
    751  1.3   matt 	ih = &rmixl_intrhand[vec];
    752  1.3   matt 	if (ih->ih_func != NULL) {
    753  1.3   matt #ifdef DIAGNOSTIC
    754  1.3   matt 		printf("%s: intrhand[%d] busy\n", __func__, vec);
    755  1.3   matt #endif
    756  1.3   matt 		splx(s);
    757  1.3   matt 		return NULL;
    758  1.2   matt 	}
    759  1.2   matt 
    760  1.3   matt 	ih->ih_arg = arg;
    761  1.3   matt 	ih->ih_mpsafe = mpsafe;
    762  1.3   matt 	ih->ih_vec = vec;
    763  1.3   matt 	ih->ih_ipl = ipl;
    764  1.3   matt 	ih->ih_cpumask = cpumask;
    765  1.3   matt 
    766  1.3   matt 	eimr_bit = (uint64_t)1 << vec;
    767  1.3   matt 	for (int i=ih->ih_ipl; --i >= 0; ) {
    768  1.3   matt 		KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
    769  1.3   matt 		ipl_eimr_map[i] |= eimr_bit;
    770  1.2   matt 	}
    771  1.2   matt 
    772  1.3   matt 	ih->ih_func = func;	/* do this last */
    773  1.3   matt 
    774  1.3   matt 	splx(s);
    775  1.3   matt 
    776  1.3   matt 	return ih;
    777  1.3   matt }
    778  1.2   matt 
    779  1.3   matt /*
    780  1.3   matt  * rmixl_intr_establish
    781  1.3   matt  * - used to establish an IRT-based interrupt only
    782  1.3   matt  */
    783  1.3   matt void *
    784  1.3   matt rmixl_intr_establish(int irt, int cpumask, int ipl,
    785  1.3   matt 	rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
    786  1.3   matt 	int (*func)(void *), void *arg, bool mpsafe)
    787  1.3   matt {
    788  1.3   matt 	rmixl_intrhand_t *ih;
    789  1.3   matt 	int vec;
    790  1.2   matt 
    791  1.2   matt #ifdef DIAGNOSTIC
    792  1.3   matt 	if (rmixl_pic_init_done == 0)
    793  1.3   matt 		panic("%s: called before rmixl_pic_init_done", __func__);
    794  1.2   matt #endif
    795  1.2   matt 
    796  1.2   matt 	/*
    797  1.3   matt 	 * check args
    798  1.2   matt 	 */
    799  1.3   matt 	if (irt < 0 || irt >= NIRTS)
    800  1.3   matt 		panic("%s: irt %d out of range, max %d",
    801  1.3   matt 			__func__, irt, NIRTS - 1);
    802  1.3   matt 	if (ipl <= 0 || ipl >= _IPL_N)
    803  1.3   matt 		panic("%s: ipl %d out of range, min %d, max %d",
    804  1.3   matt 			__func__, ipl, 1, _IPL_N - 1);
    805  1.3   matt 
    806  1.3   matt 	vec = RMIXL_IRT_VECTOR(irt);
    807  1.2   matt 
    808  1.3   matt 	DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));
    809  1.2   matt 
    810  1.3   matt 	mutex_enter(&rmixl_intr_lock);
    811  1.2   matt 
    812  1.2   matt 	/*
    813  1.3   matt 	 * establish vector
    814  1.2   matt 	 */
    815  1.3   matt 	ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);
    816  1.2   matt 
    817  1.2   matt 	/*
    818  1.2   matt 	 * establish IRT Entry
    819  1.2   matt 	 */
    820  1.3   matt 	rmixl_irt_establish(irt, vec, cpumask, trigger, polarity);
    821  1.2   matt 
    822  1.3   matt 	mutex_exit(&rmixl_intr_lock);
    823  1.2   matt 
    824  1.2   matt 	return ih;
    825  1.2   matt }
    826  1.2   matt 
    827  1.2   matt void
    828  1.3   matt rmixl_vec_disestablish(void *cookie)
    829  1.3   matt {
    830  1.3   matt 	rmixl_intrhand_t *ih = cookie;
    831  1.3   matt 	uint64_t eimr_bit;
    832  1.3   matt 
    833  1.3   matt 	KASSERT(mutex_owned(&rmixl_intr_lock));
    834  1.3   matt 	KASSERT(ih->ih_vec < NINTRVECS);
    835  1.3   matt 	KASSERT(ih == &rmixl_intrhand[ih->ih_vec]);
    836  1.3   matt 
    837  1.3   matt 	ih->ih_func = NULL;	/* do this first */
    838  1.3   matt 
    839  1.3   matt 	eimr_bit = (uint64_t)1 << ih->ih_vec;
    840  1.3   matt 	for (int i=ih->ih_ipl; --i >= 0; ) {
    841  1.3   matt 		KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
    842  1.3   matt 		ipl_eimr_map[i] ^= eimr_bit;
    843  1.3   matt 	}
    844  1.3   matt }
    845  1.3   matt 
    846  1.3   matt void
    847  1.2   matt rmixl_intr_disestablish(void *cookie)
    848  1.2   matt {
    849  1.3   matt 	rmixl_intrhand_t *ih = cookie;
    850  1.5   matt 	const int vec = ih->ih_vec;
    851  1.3   matt 
    852  1.3   matt 	KASSERT(vec < NINTRVECS);
    853  1.3   matt 	KASSERT(ih == &rmixl_intrhand[vec]);
    854  1.2   matt 
    855  1.3   matt 	mutex_enter(&rmixl_intr_lock);
    856  1.2   matt 
    857  1.2   matt 	/*
    858  1.3   matt 	 * disable/invalidate the IRT Entry if needed
    859  1.2   matt 	 */
    860  1.3   matt 	if (RMIXL_VECTOR_IS_IRT(vec))
    861  1.3   matt 		rmixl_irt_disestablish(vec);
    862  1.2   matt 
    863  1.2   matt 	/*
    864  1.3   matt 	 * disasociate from vector and free the handle
    865  1.2   matt 	 */
    866  1.3   matt 	rmixl_vec_disestablish(cookie);
    867  1.3   matt 
    868  1.3   matt 	mutex_exit(&rmixl_intr_lock);
    869  1.3   matt }
    870  1.3   matt 
/*
 * evbmips_iointr
 * - I/O interrupt dispatch loop: repeatedly reads the EIRR, reduces
 *   it to vectors enabled at exactly this ipl, and dispatches the
 *   highest pending vector until none remain
 */
void
evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending)
{
	struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc;

	DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
		__func__, cpu_number(), ipl, pc, pending));

	/*
	 * 'pending' arg is a summary that there is something to do
	 * the real pending status is obtained from EIRR
	 */
	KASSERT(pending == MIPS_INT_MASK_1);

	for (;;) {
		rmixl_intrhand_t *ih;
		uint64_t eirr;
		uint64_t eimr;
		uint64_t vecbit;
		int vec;

		/* read the extended interrupt request (EIRR) and mask
		 * (EIMR) registers: CP0 register 9, selects 6 and 7 */
		asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
		asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));

#ifdef IOINTR_DEBUG
		printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
			__func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
#endif	/* IOINTR_DEBUG */

		/*
		 * reduce eirr to
		 * - ints that are enabled at or below this ipl
		 * - exclude count/compare clock and soft ints
		 *   they are handled elsewhere
		 */
		eirr &= ipl_eimr_map[ipl-1];
		eirr &= ~ipl_eimr_map[ipl];
		eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
		if (eirr == 0)
			break;	/* nothing left to service at this ipl */

		/* dclz counts leading zeros, so this selects the
		 * highest-numbered pending vector first */
		vec = 63 - dclz(eirr);
		ih = &rmixl_intrhand[vec];
		vecbit = 1ULL << vec;
		KASSERT (ih->ih_ipl == ipl);
		KASSERT ((vecbit & eimr) == 0);
		KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);

		/*
		 * ack in EIRR, and in PIC if needed,
		 * the irq we are about to handle
		 */
		rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
		if (RMIXL_VECTOR_IS_IRT(vec))
			RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
				1 << RMIXL_VECTOR_IRT(vec));

		if (ih->ih_func != NULL) {
#ifdef MULTIPROCESSOR
			if (ih->ih_mpsafe) {
				/* MP-safe handler: no kernel lock needed */
				(void)(*ih->ih_func)(ih->ih_arg);
			} else {
				/* non-MP-safe handlers run under the
				 * big kernel lock, IPL_VM only */
				KASSERTMSG(ipl == IPL_VM,
				    ("%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
				    __func__, sc->sc_vec_evcnts[vec].ev_name,
				    ipl));
				KERNEL_LOCK(1, NULL);
				(void)(*ih->ih_func)(ih->ih_arg);
				KERNEL_UNLOCK_ONE(NULL);
			}
#else
			(void)(*ih->ih_func)(ih->ih_arg);
#endif /* MULTIPROCESSOR */
		}
		KASSERT(ipl == ih->ih_ipl);
		KASSERTMSG(curcpu()->ci_cpl >= ipl,
		    ("%s: after %s: cpl (%d) < ipl %d",
		    __func__, sc->sc_vec_evcnts[vec].ev_name,
		    ipl, curcpu()->ci_cpl));
		/* bump the per-vector event counter */
		sc->sc_vec_evcnts[vec].ev_count++;
	}
}
    953  1.2   matt 
    954  1.3   matt #ifdef MULTIPROCESSOR
    955  1.3   matt static int
    956  1.3   matt rmixl_send_ipi(struct cpu_info *ci, int tag)
    957  1.2   matt {
    958  1.3   matt 	const cpuid_t cpuid = ci->ci_cpuid;
    959  1.3   matt 	uint32_t core = (uint32_t)(cpuid >> 2);
    960  1.3   matt 	uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
    961  1.3   matt 	uint64_t req = 1 << tag;
    962  1.2   matt 	uint32_t r;
    963  1.3   matt 
    964  1.3   matt 	if (! CPUSET_HAS_P(cpus_running, cpu_index(ci)))
    965  1.3   matt 		return -1;
    966  1.3   matt 
    967  1.3   matt 	KASSERT((tag >= 0) && (tag < NIPIS));
    968  1.3   matt 
    969  1.3   matt 	r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT)
    970  1.3   matt 	  | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT)
    971  1.3   matt 	  | (RMIXL_INTRVEC_IPI + tag);
    972  1.3   matt 
    973  1.3   matt 	mutex_enter(&rmixl_ipi_lock);
    974  1.3   matt 	atomic_or_64(&ci->ci_request_ipis, req);
    975  1.3   matt 	RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
    976  1.3   matt 	mutex_exit(&rmixl_ipi_lock);
    977  1.3   matt 
    978  1.3   matt 	return 0;
    979  1.2   matt }
    980  1.2   matt 
    981  1.3   matt static int
    982  1.3   matt rmixl_ipi_intr(void *arg)
    983  1.2   matt {
    984  1.3   matt 	struct cpu_info * const ci = curcpu();
    985  1.4  cliff 	const uint64_t ipi_mask = 1 << (uintptr_t)arg;
    986  1.2   matt 
    987  1.3   matt 	KASSERT(ci->ci_cpl >= IPL_SCHED);
    988  1.4  cliff 	KASSERT((uintptr_t)arg < NIPIS);
    989  1.2   matt 
    990  1.4  cliff 	/* if the request is clear, it was previously processed */
    991  1.4  cliff 	if ((ci->ci_request_ipis & ipi_mask) == 0)
    992  1.4  cliff 		return 0;
    993  1.2   matt 
    994  1.3   matt 	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
    995  1.3   matt 	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
    996  1.2   matt 
    997  1.3   matt 	ipi_process(ci, ipi_mask);
    998  1.2   matt 
    999  1.3   matt 	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
   1000  1.2   matt 
   1001  1.3   matt 	return 1;
   1002  1.3   matt }
   1003  1.3   matt #endif	/* MULTIPROCESSOR */
   1004  1.2   matt 
   1005  1.3   matt #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
   1006  1.3   matt int
   1007  1.3   matt rmixl_intrhand_print_subr(int vec)
   1008  1.3   matt {
   1009  1.3   matt 	rmixl_intrhand_t *ih = &rmixl_intrhand[vec];
   1010  1.3   matt 	printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n",
   1011  1.3   matt 		vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl,
   1012  1.3   matt 		ih->ih_cpumask);
   1013  1.3   matt 	return 0;
   1014  1.3   matt }
   1015  1.3   matt int
   1016  1.3   matt rmixl_intrhand_print(void)
   1017  1.3   matt {
   1018  1.3   matt 	for (int vec=0; vec < NINTRVECS ; vec++)
   1019  1.3   matt 		rmixl_intrhand_print_subr(vec);
   1020  1.3   matt 	return 0;
   1021  1.3   matt }
   1022  1.2   matt 
   1023  1.3   matt static inline void
   1024  1.3   matt rmixl_irt_entry_print(u_int irt)
   1025  1.3   matt {
   1026  1.3   matt 	uint32_t c0, c1;
   1027  1.2   matt 
   1028  1.3   matt 	if ((irt < 0) || (irt > NIRTS))
   1029  1.3   matt 		return;
   1030  1.3   matt 	c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
   1031  1.3   matt 	c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
   1032  1.3   matt 	printf("irt[%d]: %#x, %#x\n", irt, c0, c1);
   1033  1.2   matt }
   1034  1.2   matt 
   1035  1.2   matt int
   1036  1.3   matt rmixl_irt_print(void)
   1037  1.2   matt {
   1038  1.3   matt 	printf("%s:\n", __func__);
   1039  1.3   matt 	for (int irt=0; irt < NIRTS ; irt++)
   1040  1.3   matt 		rmixl_irt_entry_print(irt);
   1041  1.3   matt 	return 0;
   1042  1.3   matt }
   1043  1.2   matt 
/*
 * rmixl_ipl_eimr_map_print
 * - debug helper: print the EIMR enable mask recorded for each
 *   interrupt priority level in ipl_eimr_map[]
 */
void
rmixl_ipl_eimr_map_print(void)
{
	printf("IPL_NONE=%d, mask %#"PRIx64"\n",
		IPL_NONE, ipl_eimr_map[IPL_NONE]);
	printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
		IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
	printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
		IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
	printf("IPL_VM=%d, mask %#"PRIx64"\n",
		IPL_VM, ipl_eimr_map[IPL_VM]);
	printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
		IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
	printf("IPL_DDB=%d, mask %#"PRIx64"\n",
		IPL_DDB, ipl_eimr_map[IPL_DDB]);
	printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
		IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
}
   1062  1.3   matt 
   1063  1.2   matt #endif
   1064