/* $NetBSD: sb1250_icu.c,v 1.4 2023/12/05 19:16:48 andvar Exp $ */

/*
 * Copyright 2000, 2001
 * Broadcom Corporation. All rights reserved.
 *
 * This software is furnished under license and may be used and copied only
 * in accordance with the following terms and conditions.  Subject to these
 * conditions, you may download, copy, install, use, modify and distribute
 * modified or unmodified copies of this software in source and/or binary
 * form.  No title or ownership is transferred hereby.
 *
 * 1) Any source code used, modified or distributed must reproduce and
 *    retain this copyright notice and list of conditions as they appear in
 *    the source file.
 *
 * 2) No right is granted to use any trade name, trademark, or logo of
 *    Broadcom Corporation.  The "Broadcom Corporation" name may not be
 *    used to endorse or promote products derived from this software
 *    without the prior written permission of Broadcom Corporation.
 *
 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
 *    WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
 *    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
 *    NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
 *    FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
 *    LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 *    OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sb1250_icu.c,v 1.4 2023/12/05 19:16:48 andvar Exp $");

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>

/* XXX for uvmexp */
#include <uvm/uvm_extern.h>

#include <mips/locore.h>

#include <evbmips/sbmips/cpuvar.h>
#include <evbmips/sbmips/systemsw.h>

#include <mips/sibyte/include/sb1250_regs.h>
#include <mips/sibyte/include/sb1250_int.h>
#include <mips/sibyte/include/sb1250_scd.h>

static const struct ipl_sr_map sb1250_ipl_sr_map = {
	.sr_bits = {
		[IPL_NONE]	= MIPS_INT_MASK_5,
		[IPL_SOFTCLOCK]	= MIPS_SOFT_INT_MASK_0 | MIPS_INT_MASK_5,
		[IPL_SOFTBIO]	= MIPS_SOFT_INT_MASK_0 | MIPS_INT_MASK_5,
		[IPL_SOFTNET]	= MIPS_SOFT_INT_MASK | MIPS_INT_MASK_5,
		[IPL_SOFTSERIAL] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_5,
		[IPL_VM]	= MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_5,
		[IPL_SCHED]	= MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
		[IPL_DDB]	= MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
				    | MIPS_INT_MASK_1 | MIPS_INT_MASK_4
				    | MIPS_INT_MASK_5,
		[IPL_HIGH]	= MIPS_INT_MASK,
	},
};

/* imr values corresponding to each pin */
static uint64_t ints_for_ipl[_IPL_N];

struct sb1250_ihand {
	void	(*ih_fun)(void *, uint32_t, vaddr_t);
	void	*ih_arg;
	int	ih_ipl;
};

static struct sb1250_ihand sb1250_ihands[K_INT_SOURCES];

#ifdef MULTIPROCESSOR
static void sb1250_ipi_intr(void *, uint32_t, vaddr_t);
#endif
#define	SB1250_I_MAP(x)		(R_IMR_INTERRUPT_MAP_BASE + (x) * 8)

#define	READ_REG(rp)		mips3_ld((register_t)(rp))
#define	WRITE_REG(rp, val)	mips3_sd((register_t)(rp), (val))

static void sb1250_cpu_intr(int, vaddr_t, uint32_t);
static void *sb1250_intr_establish(u_int, u_int,
	void (*fun)(void *, uint32_t, vaddr_t), void *);

static const char sb1250_intr_names[K_INT_SOURCES][16] = {
	[K_INT_WATCHDOG_TIMER_0] = "wdog0",
	[K_INT_WATCHDOG_TIMER_1] = "wdog1",
	[K_INT_TIMER_0] = "timer0",
	[K_INT_TIMER_1] = "timer1",
	[K_INT_TIMER_2] = "timer2",
	[K_INT_TIMER_3] = "timer3",
	[K_INT_SMB_0] = "smb0",
	[K_INT_SMB_1] = "smb1",
	[K_INT_UART_0] = "uart0",
	[K_INT_UART_1] = "uart1",
	[K_INT_SER_0] = "syncser0",
	[K_INT_SER_1] = "syncser1",
	[K_INT_PCMCIA] = "pcmcia",
	[K_INT_ADDR_TRAP] = "addrtrap",
	[K_INT_PERF_CNT] = "perfcnt",
	[K_INT_TRACE_FREEZE] = "tracefreeze",
	[K_INT_BAD_ECC] = "bad ECC",
	[K_INT_COR_ECC] = "corrected ECC",
	[K_INT_IO_BUS] = "iobus",
	[K_INT_MAC_0] = "mac0",
	[K_INT_MAC_1] = "mac1",
	[K_INT_MAC_2] = "mac2",
	[K_INT_DM_CH_0] = "dmover0",
	[K_INT_DM_CH_1] = "dmover1",
	[K_INT_DM_CH_2] = "dmover2",
	[K_INT_DM_CH_3] = "dmover3",
	[K_INT_MBOX_0] = "mbox0",
	[K_INT_MBOX_1] = "mbox1",
	[K_INT_MBOX_2] = "mbox2",
	[K_INT_MBOX_3] = "mbox3",
	[K_INT_CYCLE_CP0_INT] = "zbccp0",
	[K_INT_CYCLE_CP1_INT] = "zbccp1",
	[K_INT_GPIO_0] = "gpio0",
	[K_INT_GPIO_1] = "gpio1",
	[K_INT_GPIO_2] = "gpio2",
	[K_INT_GPIO_3] = "gpio3",
	[K_INT_GPIO_4] = "gpio4",
	[K_INT_GPIO_5] = "gpio5",
	[K_INT_GPIO_6] = "gpio6",
	[K_INT_GPIO_7] = "gpio7",
	[K_INT_GPIO_8] = "gpio8",
	[K_INT_GPIO_9] = "gpio9",
	[K_INT_GPIO_10] = "gpio10",
	[K_INT_GPIO_11] = "gpio11",
	[K_INT_GPIO_12] = "gpio12",
	[K_INT_GPIO_13] = "gpio13",
	[K_INT_GPIO_14] = "gpio14",
	[K_INT_GPIO_15] = "gpio15",
	[K_INT_LDT_FATAL] = "ldt fatal",
	[K_INT_LDT_NONFATAL] = "ldt nonfatal",
	[K_INT_LDT_SMI] = "ldt smi",
	[K_INT_LDT_NMI] = "ldt nmi",
	[K_INT_LDT_INIT] = "ldt init",
	[K_INT_LDT_STARTUP] = "ldt startup",
	[K_INT_LDT_EXT] = "ldt ext",
	[K_INT_PCI_ERROR] = "pci error",
	[K_INT_PCI_INTA] = "pci inta",
	[K_INT_PCI_INTB] = "pci intb",
	[K_INT_PCI_INTC] = "pci intc",
	[K_INT_PCI_INTD] = "pci intd",
	[K_INT_SPARE_2] = "spare2",
	[K_INT_MAC_0_CH1] = "mac0 ch1",
	[K_INT_MAC_1_CH1] = "mac1 ch1",
	[K_INT_MAC_2_CH1] = "mac2 ch1",
};

#ifdef MULTIPROCESSOR
static void
sb1250_lsw_cpu_init(struct cpu_info *ci)
{
	struct cpu_softc * const cpu = ci->ci_softc;

	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_INTERRUPT_MASK, cpu->sb1cpu_imr_all);
}

static int
sb1250_lsw_send_ipi(struct cpu_info *ci, int tag)
{
	struct cpu_softc * const cpu = ci->ci_softc;
	const uint64_t mbox_mask = 1LLU << tag;

	if (kcpuset_isset(cpus_running, cpu_index(ci)))
		WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_MAILBOX_SET_CPU, mbox_mask);

	return 0;
}

static void
sb1250_ipi_intr(void *arg, uint32_t status, vaddr_t pc)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t mbox_mask;

	ci->ci_data.cpu_nintr++;

	mbox_mask = READ_REG(cpu->sb1cpu_imr_base + R_IMR_MAILBOX_CPU);
	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_MAILBOX_CLR_CPU, mbox_mask);

	ipi_process(ci, mbox_mask);
}
#endif /* MULTIPROCESSOR */

void
sb1250_cpu_init(struct cpu_softc *cpu)
{
	const char * const xname = device_xname(cpu->sb1cpu_dev);
	struct evcnt * evcnts = cpu->sb1cpu_intr_evcnts;

	cpu->sb1cpu_imr_base =
	    MIPS_PHYS_TO_KSEG1(A_IMR_MAPPER(cpu->sb1cpu_ci->ci_cpuid));
#ifdef MULTIPROCESSOR
	cpu->sb1cpu_imr_all =
	    ~(M_INT_MBOX_0|M_INT_MBOX_1|M_INT_MBOX_2|M_INT_MBOX_3
	      |M_INT_WATCHDOG_TIMER_0|M_INT_WATCHDOG_TIMER_1);
#else
	cpu->sb1cpu_imr_all = ~(M_INT_WATCHDOG_TIMER_0|M_INT_WATCHDOG_TIMER_1);
#endif

	for (u_int i = 0; i < K_INT_SOURCES; i++, evcnts++) {
		WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(i), K_INT_MAP_I0);
		evcnt_attach_dynamic(evcnts, EVCNT_TYPE_INTR, NULL,
		    xname, sb1250_intr_names[i]);
	}
#if 0
	WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(K_INT_WATCHDOG_TIMER_0), K_INT_MAP_NMI);
	WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(K_INT_WATCHDOG_TIMER_1), K_INT_MAP_NMI);
#endif

	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_INTERRUPT_MASK, cpu->sb1cpu_imr_all);
#ifdef MULTIPROCESSOR
	if (sb1250_ihands[K_INT_MBOX_0].ih_fun == NULL) {
		/*
		 * For now, deliver all IPIs at IPL_SCHED.  Eventually
		 * some will be at IPL_VM.
		 */
		for (int irq = K_INT_MBOX_0; irq <= K_INT_MBOX_3; irq++)
			sb1250_intr_establish(irq, IPL_SCHED,
			    sb1250_ipi_intr, NULL);
	}
#endif /* MULTIPROCESSOR */
}

void
sb1250_ipl_map_init(void)
{
	ipl_sr_map = sb1250_ipl_sr_map;
}

void
sb1250_icu_init(void)
{
	const uint64_t imr_all = 0xffffffffffffffffULL;

	KASSERT(memcmp((const void *)&ipl_sr_map, (const void *)&sb1250_ipl_sr_map, sizeof(ipl_sr_map)) == 0);

	/* zero out the list of used interrupts/lines */
	memset(ints_for_ipl, 0, sizeof ints_for_ipl);
	memset(sb1250_ihands, 0, sizeof sb1250_ihands);

	systemsw.s_cpu_intr = sb1250_cpu_intr;
	systemsw.s_intr_establish = sb1250_intr_establish;

#ifdef MULTIPROCESSOR
	/*
	 * Bits 27:24 (11:8 of G_SYS_PART) encode the number of CPUs present.
	 */
	u_int sys_part = G_SYS_PART(READ_REG(MIPS_PHYS_TO_KSEG1(A_SCD_SYSTEM_REVISION)));
	const u_int cpus = (sys_part >> 8) & 0xf;

	/*
	 * Mask off every interrupt source on the secondary CPUs.
	 */
	vaddr_t imr = MIPS_PHYS_TO_KSEG1(A_IMR_CPU0_BASE + R_IMR_INTERRUPT_MASK);
	for (u_int i = 1; imr += IMR_REGISTER_SPACING, i < cpus; i++) {
		WRITE_REG(imr, imr_all);
	}
#endif /* MULTIPROCESSOR */
	WRITE_REG(MIPS_PHYS_TO_KSEG1(A_IMR_CPU0_BASE + R_IMR_INTERRUPT_MASK),
	    imr_all);

#ifdef MULTIPROCESSOR
	mips_locoresw.lsw_send_ipi = sb1250_lsw_send_ipi;
	mips_locoresw.lsw_cpu_init = sb1250_lsw_cpu_init;
#endif /* MULTIPROCESSOR */
}

static void
sb1250_cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	const vaddr_t imr_base = cpu->sb1cpu_imr_base;
	struct evcnt * const evcnts = cpu->sb1cpu_intr_evcnts;
	uint32_t pending;
	int ipl;

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		splx(ipl);

		/* XXX do something if 5? */
		if (pending & MIPS_INT_MASK_5) {
			uint32_t cycles = mips3_cp0_count_read();
			mips3_cp0_compare_write(cycles - 1);
			/* just leave the bugger disabled */
		}

		/* Dispatch the pending sources for this IPL, highest bit first. */
		uint64_t sstatus = ints_for_ipl[ipl];
		sstatus &= READ_REG(imr_base + R_IMR_INTERRUPT_SOURCE_STATUS);
		while (sstatus != 0) {
#ifndef __mips_o32
			u_int n;
			__asm("dclz %0,%1" : "=r"(n) : "r"(sstatus));
#else
			/* no 64-bit CLZ on o32; synthesize from two 32-bit counts */
			u_int n = (sstatus >> 32)
			    ? 0 + __builtin_clz(sstatus >> 32)
			    : 32 + __builtin_clz((uint32_t)sstatus);
#endif
			u_int j = 63 - n;
			KASSERT(sstatus & (1ULL << j));
			sstatus ^= (1ULL << j);
			struct sb1250_ihand *ihp = &sb1250_ihands[j];
			KASSERT(ihp->ih_fun);
			(*ihp->ih_fun)(ihp->ih_arg, status, pc);
			evcnts[j].ev_count++;
		}
		(void) splhigh();
	}
}

static void *
sb1250_intr_establish(u_int num, u_int ipl,
	void (*fun)(void *, uint32_t, vaddr_t), void *arg)
{
	struct cpu_softc * const cpu = curcpu()->ci_softc;
	struct sb1250_ihand * const ih = &sb1250_ihands[num];
	const int s = splhigh();

	/*
	 * XXX simonb
	 * The swarm wedges hard on first serial interrupt when
	 * we try to map IPL_SERIAL at a higher priority than
	 * other device interrupts.  For now, just force all
	 * devices to interrupt at IPL_VM.
	 */
	ipl = IPL_VM;	/* XXX */

	if (num >= K_INT_SOURCES)
		panic("%s: invalid interrupt number (0x%x)", __func__, num);
	if (ipl >= _IPL_N || ipl < IPL_VM)
		panic("%s: invalid ipl %d", __func__, ipl);
	if (ih->ih_fun != NULL)
		panic("%s: cannot share sb1250 interrupts", __func__);

	ints_for_ipl[ipl] |= (1ULL << num);
	cpu->sb1cpu_imr_all &= ~(1ULL << num);

	ih->ih_fun = fun;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;

	if (num <= K_INT_WATCHDOG_TIMER_1)
		WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(num), K_INT_MAP_I4);
	else if (ipl > IPL_VM)
		WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(num), K_INT_MAP_I1);

	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_INTERRUPT_MASK, cpu->sb1cpu_imr_all);

	splx(s);

	return ih;
}