/*	$NetBSD: intr.c,v 1.127 2021/01/24 07:36:54 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)intr.c	8.3 (Berkeley) 11/11/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.127 2021/01/24 07:36:54 mrg Exp $");

#include "opt_multiprocessor.h"
#include "opt_sparc_arch.h"
#include "sx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/ctlreg.h>
#include <machine/instr.h>
#include <machine/trap.h>
#include <machine/promlib.h>
#include <machine/locore.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cpuvar.h>

#if defined(MULTIPROCESSOR) && defined(DDB)
#include <machine/db_machdep.h>
#endif

#if NSX > 0
#include <sys/bus.h>
#include <sparc/dev/sxvar.h>
#endif

#if defined(MULTIPROCESSOR)
static int intr_biglock_wrapper(void *);

void *xcall_cookie;
#endif

void	strayintr(struct clockframe *);
#ifdef DIAGNOSTIC
void	bogusintr(struct clockframe *);
#endif

/*
 * Stray interrupt handler.  Clear it if possible.
 * If not, and if more than 10 stray interrupts arrive within 10 seconds,
 * panic.
 * XXXSMP: We are holding the kernel lock at entry & exit.
 */
void
strayintr(struct clockframe *fp)
{
	static int straytime, nstray;
	char bits[64];
	int timesince;

#if defined(MULTIPROCESSOR)
	/*
	 * XXX
	 *
	 * Don't whine about zs interrupts on MP.  We sometimes get
	 * stray interrupts when polled kernel output on cpu>0 eats
	 * the interrupt and cpu0 sees it.
	 */
#define ZS_INTR_IPL	12
	if (fp->ipl == ZS_INTR_IPL)
		return;
#endif

	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
	printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);

	timesince = time_uptime - straytime;
	if (timesince <= 10) {
		if (++nstray > 10)
			panic("crazy interrupts");
	} else {
		straytime = time_uptime;
		nstray = 1;
	}
}


#ifdef DIAGNOSTIC
/*
 * Bogus interrupt for which neither hard nor soft interrupt bit in
 * the IPR was set.
 */
void
bogusintr(struct clockframe *fp)
{
	char bits[64];

#if defined(MULTIPROCESSOR)
	/*
	 * XXX as above.
	 */
	if (fp->ipl == ZS_INTR_IPL)
		return;
#endif

	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
	printf("cpu%d: bogus interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
}
#endif /* DIAGNOSTIC */

/*
 * Get the module ID of the current interrupt target.
 * (The ITR register holds a CPU number; sun4m module IDs start at 8,
 * hence the "+ 8" here and the CPU_MID2CPUNO() conversion in setitr().)
 */
u_int
getitr(void)
{
#if defined(MULTIPROCESSOR)
	u_int v;

	if (!CPU_ISSUN4M || sparc_ncpus <= 1)
		return (0);

	v = *((u_int *)ICR_ITR);
	return (v + 8);
#else
	return (0);
#endif
}

/*
 * Set interrupt target.
 * Return previous value.
 */
u_int
setitr(u_int mid)
{
#if defined(MULTIPROCESSOR)
	u_int v;

	if (!CPU_ISSUN4M || sparc_ncpus <= 1)
		return (0);

	v = *((u_int *)ICR_ITR);
	*((u_int *)ICR_ITR) = CPU_MID2CPUNO(mid);
	return (v + 8);
#else
	return (0);
#endif
}

#if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
void	nmi_hard(void);
void	nmi_soft(struct trapframe *);

int	(*memerr_handler)(void);
int	(*sbuserr_handler)(void);
int	(*vmeerr_handler)(void);
int	(*moduleerr_handler)(void);

#if defined(MULTIPROCESSOR)
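/* Rendezvous counter: the number of CPUs that have entered nmi_hard(). */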
static volatile u_int	nmi_hard_wait = 0;
int			drop_into_rom_on_fatal = 1;
#endif

void
nmi_hard(void)
{
	/*
	 * A level 15 hard interrupt.
	 */
	int fatal = 0;
	uint32_t si;
	char bits[64];
	u_int afsr, afva;

	/* Tally */
	cpuinfo.ci_intrcnt[15].ev_count++;
	cpuinfo.ci_data.cpu_nintr++;

	afsr = afva = 0;
	if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) {
		snprintb(bits, sizeof(bits), AFSR_BITS, afsr);
		printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n",
			cpuinfo.mid, bits,
			(afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva);
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Increase nmi_hard_wait.  If we aren't the master, loop while this
	 * variable is non-zero.  If we are the master, loop while this
	 * variable is less than the number of cpus.
	 */
	atomic_inc_uint(&nmi_hard_wait);

	if (cpuinfo.master == 0) {
		while (nmi_hard_wait)
			;
		return;
	} else {
		int n = 100000;

		while (nmi_hard_wait < sparc_ncpus) {
			DELAY(1);
			if (n-- > 0)
				continue;
			printf("nmi_hard: SMP botch.\n");
			break;
		}
	}
#endif

	/*
	 * Examine pending system interrupts.
	 */
	si = *((uint32_t *)ICR_SI_PEND);
	snprintb(bits, sizeof(bits), SINTR_BITS, si);
	printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(), bits);

#if NSX > 0
	sx_dump();
#endif

	if ((si & SINTR_M) != 0) {
		/* ECC memory error */
		if (memerr_handler != NULL)
			fatal |= (*memerr_handler)();
	}
	if ((si & SINTR_I) != 0) {
		/* MBus/SBus async error */
		if (sbuserr_handler != NULL)
			fatal |= (*sbuserr_handler)();
	}
	if ((si & SINTR_V) != 0) {
		/* VME async error */
		if (vmeerr_handler != NULL)
			fatal |= (*vmeerr_handler)();
	}
	if ((si & SINTR_ME) != 0) {
		/* Module async error */
		if (moduleerr_handler != NULL)
			fatal |= (*moduleerr_handler)();
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Tell everyone else we've finished dealing with the hard NMI.
	 */
	nmi_hard_wait = 0;
	if (fatal && drop_into_rom_on_fatal) {
		prom_abort();
		return;
	}
#endif

	if (fatal)
		panic("nmi");
}

/*
 * Non-maskable soft interrupt level 15 handler
 */
void
nmi_soft(struct trapframe *tf)
{

	/* Tally */
	cpuinfo.ci_sintrcnt[15].ev_count++;
	cpuinfo.ci_data.cpu_nintr++;

	if (cpuinfo.mailbox) {
		/* Check PROM messages */
		uint8_t msg = *(uint8_t *)cpuinfo.mailbox;
		switch (msg) {
		case OPENPROM_MBX_STOP:
		case OPENPROM_MBX_WD:
			/* In case there's an xcall in progress (unlikely) */
			spl0();
#ifdef MULTIPROCESSOR
			cpu_ready_mask &= ~(1 << cpu_number());
#endif
			prom_cpustop(0);
			break;
		case OPENPROM_MBX_ABORT:
		case OPENPROM_MBX_BPT:
			prom_cpuidle(0);
			/*
			 * We emerge here after someone does a
			 * prom_resumecpu(ournode).
			 */
			return;
		default:
			break;
		}
	}

#if defined(MULTIPROCESSOR)
	switch (cpuinfo.msg_lev15.tag) {
	case XPMSG15_PAUSECPU:
		/* XXX - assumes DDB is the only user of mp_pause_cpu() */
		cpuinfo.flags |= CPUFLG_PAUSED;
#if defined(DDB)
		/* trap(T_DBPAUSE) */
		__asm("ta 0x8b");
#else
		while (cpuinfo.flags & CPUFLG_PAUSED)
			/* spin */;
#endif /* DDB */
	}
	cpuinfo.msg_lev15.tag = 0;
#endif /* MULTIPROCESSOR */
}

#if defined(MULTIPROCESSOR)
/*
 * Respond to an xcall() request from another CPU.
 *
 * This is also called directly from xcall() if we notice an
 * incoming message while we're waiting to grab the xpmsg_lock.
 * We pass the address of xcallintr() itself to indicate that
 * this is not a real interrupt.
 */
void
xcallintr(void *v)
{

	kpreempt_disable();

	/* Tally */
	if (v != xcallintr)
		cpuinfo.ci_sintrcnt[13].ev_count++;

	/*
	 * This happens when the remote CPU was slow to respond, so the
	 * caller gave up waiting and has already released the mutex.
	 */
	if (mutex_owned(&xpmsg_mutex) == 0) {
		cpuinfo.ci_xpmsg_mutex_not_held.ev_count++;
#ifdef DEBUG
		printf("%s: cpu%d mutex not held\n", __func__, cpu_number());
#endif
		cpuinfo.msg.complete = 1;
		kpreempt_enable();
		return;
	}

	if (cpuinfo.msg.complete != 0) {
		cpuinfo.ci_xpmsg_bogus.ev_count++;
#ifdef DEBUG
		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
		printf("%s: bogus message %08x %08x %08x %08x\n", __func__,
		    cpuinfo.msg.tag, (uint32_t)p->func, p->arg0, p->arg1);
#endif
		kpreempt_enable();
		return;
	}

	/* notyet - cpuinfo.msg.received = 1; */
	switch (cpuinfo.msg.tag) {
	case XPMSG_FUNC:
	    {
		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;

		if (p->func)
			(*p->func)(p->arg0, p->arg1, p->arg2);
		break;
	    }
	}
	cpuinfo.msg.tag = 0;
	cpuinfo.msg.complete = 1;

	kpreempt_enable();
}
#endif /* MULTIPROCESSOR */
#endif /* SUN4M || SUN4D */


#ifdef MSIIEP
/*
 * This is kept separate so as not to further obscure the SUN4M case
 * with even more ifdefs.  There's no common functionality anyway.
 */

#include <sparc/sparc/msiiepreg.h>

void	nmi_hard_msiiep(void);
void	nmi_soft_msiiep(void);


void
nmi_hard_msiiep(void)
{
	uint32_t si;
	char bits[128];
	int fatal = 0;

	si = mspcic_read_4(pcic_sys_ipr);
	snprintb(bits, sizeof(bits), MSIIEP_SYS_IPR_BITS, si);
	printf("NMI: system interrupts: %s\n", bits);


	if (si & MSIIEP_SYS_IPR_MEM_FAULT) {
		uint32_t afsr, afar, mfsr, mfar;

		afar = *(volatile uint32_t *)MSIIEP_AFAR;
		afsr = *(volatile uint32_t *)MSIIEP_AFSR;

		mfar = *(volatile uint32_t *)MSIIEP_MFAR;
		mfsr = *(volatile uint32_t *)MSIIEP_MFSR;

		if (afsr & MSIIEP_AFSR_ERR) {
			snprintb(bits, sizeof(bits), MSIIEP_AFSR_BITS, afsr);
			printf("async fault: afsr=%s; afar=%08x\n", bits, afar);
		}

		if (mfsr & MSIIEP_MFSR_ERR) {
			snprintb(bits, sizeof(bits), MSIIEP_MFSR_BITS, mfsr);
			printf("mem fault: mfsr=%s; mfar=%08x\n", bits, mfar);
		}

		fatal = 0;
	}

	if (si & MSIIEP_SYS_IPR_SERR) {	/* XXX */
		printf("serr#\n");
		fatal = 0;
	}

	if (si & MSIIEP_SYS_IPR_DMA_ERR) {
		printf("dma: %08x\n",
		       mspcic_read_stream_4(pcic_iotlb_err_addr));
		fatal = 0;
	}

	if (si & MSIIEP_SYS_IPR_PIO_ERR) {
		printf("pio: addr=%08x, cmd=%x stat=%04x\n",
		       mspcic_read_stream_4(pcic_pio_err_addr),
		       mspcic_read_stream_1(pcic_pio_err_cmd),
		       mspcic_read_stream_2(pcic_stat));
		fatal = 0;
	}

	if (fatal)
		panic("nmi");

	/* Clear the NMI if it was PCIC related */
	mspcic_write_1(pcic_sys_ipr_clr, MSIIEP_SYS_IPR_CLR_ALL);
}


void
nmi_soft_msiiep(void)
{

	panic("soft nmi");
}

#endif /* MSIIEP */


/*
 * Level 15 interrupts are special, and not vectored here.
 * Only `prewired' interrupts appear here; boot-time configured devices
 * are attached via intr_establish() below.
 */
struct intrhand *intrhand[15] = {
	NULL,			/*  0 = error */
	NULL,			/*  1 = software level 1 + Sbus */
	NULL,			/*  2 = Sbus level 2 (4m: Sbus L1) */
	NULL,			/*  3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/
	NULL,			/*  4 = software level 4 (tty softint) (scsi) */
	NULL,			/*  5 = Ethernet + Sbus level 4 (4m: Sbus L3) */
	NULL,			/*  6 = software level 6 (not used) (4m: enet)*/
	NULL,			/*  7 = video + Sbus level 5 */
	NULL,			/*  8 = Sbus level 6 */
	NULL,			/*  9 = Sbus level 7 */
	NULL,			/* 10 = counter 0 = clock */
	NULL,			/* 11 = floppy */
	NULL,			/* 12 = zs hardware interrupt */
	NULL,			/* 13 = audio chip */
	NULL,			/* 14 = counter 1 = profiling timer */
};

/*
 * Soft interrupts use a separate set of handler chains.
 * This is necessary since soft interrupt handlers do not return a value
 * and therefore cannot be mixed with hardware interrupt handlers on a
 * shared handler chain.
 */
struct intrhand *sintrhand[15] = { NULL };

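/*
 * Append a handler at the tail of an interrupt handler chain.
 */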
static void
ih_insert(struct intrhand **head, struct intrhand *ih)
{
	struct intrhand **p, *q;
	/*
	 * Walking to the tail makes building a long chain O(N^2), but
	 * chains are never long and we do want to preserve order.
	 */
	for (p = head; (q = *p) != NULL; p = &q->ih_next)
		continue;
	*p = ih;
	ih->ih_next = NULL;
}

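/*
 * Unlink a handler from its chain; panic if it is not on the chain.
 */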
static void
ih_remove(struct intrhand **head, struct intrhand *ih)
{
	struct intrhand **p, *q;

	for (p = head; (q = *p) != NULL; p = &q->ih_next)
		if (q == ih)
			break;
	if (q == NULL)
		panic("intr_remove: intrhand %p fun %p arg %p",
			ih, ih->ih_fun, ih->ih_arg);

	*p = q->ih_next;
	q->ih_next = NULL;
}

static int fastvec;		/* marks fast vectors (see below) */

#ifdef DIAGNOSTIC
static void
check_tv(int level)
{
	struct trapvec *tv;
	int displ;

	/* double check for legal hardware interrupt */
	tv = &trapbase[T_L1INT - 1 + level];
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
	if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
	    tv->tv_instr[1] != I_BA(0, displ) ||
	    tv->tv_instr[2] != I_RDPSR(I_L0))
		panic("intr_establish(%d)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
		    level,
		    tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
		    I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
}
#endif

/*
 * Wire a fast trap vector.  Only one such fast trap is legal for any
 * interrupt, and it must be a hardware interrupt.
 */
static void
inst_fasttrap(int level, void (*vec)(void))
{
	struct trapvec *tv;
	u_long hi22, lo10;
	int s;

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Can't wire to softintr slots */
		if (level == 1 || level == 4 || level == 6)
			return;
	}

#ifdef DIAGNOSTIC
	check_tv(level);
#endif

	tv = &trapbase[T_L1INT - 1 + level];
	hi22 = ((u_long)vec) >> 10;
	lo10 = ((u_long)vec) & 0x3ff;
	s = splhigh();

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_SETHI(I_L3, hi22);	/* sethi %hi(vec),%l3 */
	tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */
	tv->tv_instr[2] = I_RDPSR(I_L0);	/* mov %psr, %l0 */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	fastvec |= 1 << level;
	splx(s);
}

/*
 * Uninstall a fast trap handler.
 */
static void
uninst_fasttrap(int level)
{
	struct trapvec *tv;
	int displ;	/* suspenders, belt, and buttons too */
	int s;

	tv = &trapbase[T_L1INT - 1 + level];
	s = splhigh();
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_MOVi(I_L3, level);
	tv->tv_instr[1] = I_BA(0, displ);
	tv->tv_instr[2] = I_RDPSR(I_L0);
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	fastvec &= ~(1 << level);
	splx(s);
}

/*
 * Attach an interrupt handler to the vector chain for the given level.
 * This is not possible if it has been taken away as a fast vector.
 */
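/*
 * A typical call, sketched with hypothetical names ("sc", "myhardintr",
 * "pri" and "ipl" stand in for a driver's own softc, handler and levels):
 *
 *	sc->sc_ih.ih_fun = myhardintr;
 *	sc->sc_ih.ih_arg = sc;
 *	intr_establish(pri, ipl, &sc->sc_ih, NULL, false);
 */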
void
intr_establish(int level, int classipl,
	       struct intrhand *ih, void (*vec)(void),
	       bool maybe_mpsafe)
{
	int s = splhigh();
#ifdef MULTIPROCESSOR
	bool mpsafe;
#endif /* MULTIPROCESSOR */
	if (classipl == 0)
		classipl = level;

#ifdef MULTIPROCESSOR
	mpsafe = (classipl != IPL_VM) || maybe_mpsafe;
#endif

#ifdef DIAGNOSTIC
	if (CPU_ISSUN4C) {
		/*
		 * Check reserved softintr slots on SUN4C only.
		 * No check for SUN4, as 4/300's have
		 * esp0 at level 4 and le0 at level 6.
		 */
		if (level == 1 || level == 4 || level == 6)
			panic("intr_establish: reserved softintr level");
	}
#endif

	/*
	 * If a `fast vector' is currently tied to this level, we must
	 * first undo that.
	 */
	if (fastvec & (1 << level)) {
		printf("intr_establish: untie fast vector at level %d\n",
		    level);
		uninst_fasttrap(level);
	} else if (vec != NULL &&
		   intrhand[level] == NULL && sintrhand[level] == NULL) {
		inst_fasttrap(level, vec);
	}

	/* A requested IPL cannot exceed its device class level */
	if (classipl < level)
		panic("intr_establish: class lvl (%d) < pil (%d)\n",
			classipl, level);

	/* pre-shift to PIL field in %psr */
	ih->ih_classipl = (classipl << 8) & PSR_PIL;

#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		ih->ih_realfun = ih->ih_fun;
		ih->ih_realarg = ih->ih_arg;
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	ih_insert(&intrhand[level], ih);
	splx(s);
}

void
intr_disestablish(int level, struct intrhand *ih)
{

	ih_remove(&intrhand[level], ih);
}

/*
 * This is a softintr cookie.  NB that sic_pilreq MUST be the
 * first element in the struct, because the softintr_schedule()
 * macro in intr.h casts cookies to int * to get it.  On a
 * sun4m, sic_pilreq is an actual processor interrupt level that
 * is passed to raise(), and on a sun4 or sun4c sic_pilreq is a
 * bit to set in the interrupt enable register with ienab_bis().
 */
struct softintr_cookie {
	int sic_pilreq;		/* CPU-specific bits; MUST be first! */
	int sic_pil;		/* Actual machine PIL that is used */
	struct intrhand sic_hand;
};

/*
 * softintr_init(): initialise the MI softintr system.
 */
void
sparc_softintr_init(void)
{

#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D))
	/* Establish a standard soft interrupt handler for cross calls */
	xcall_cookie = sparc_softintr_establish(13, xcallintr, NULL);
#endif
}

/*
 * softintr_establish(): MI interface.  establish a func(arg) as a
 * software interrupt.
 */
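/*
 * Sketch of a hypothetical caller ("myhandler" and "sc" are placeholders;
 * the returned cookie is later handed to the softintr_schedule() macro
 * from intr.h, see the softintr cookie comment above):
 *
 *	void *sih = sparc_softintr_establish(level, myhandler, sc);
 *	...
 *	softintr_schedule(sih);
 */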
void *
sparc_softintr_establish(int level, void (*fun)(void *), void *arg)
{
	struct softintr_cookie *sic;
	struct intrhand *ih;
	int pilreq;
	int pil;
#ifdef MULTIPROCESSOR
	bool mpsafe = (level != IPL_VM);
#endif /* MULTIPROCESSOR */

	/*
	 * On a sun4m, the processor interrupt level is stored
	 * in the softintr cookie to be passed to raise().
	 *
	 * On a sun4 or sun4c the appropriate bit to set
	 * in the interrupt enable register is stored in
	 * the softintr cookie to be passed to ienab_bis().
	 */
	pil = pilreq = level;
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Select the most suitable of three available softint levels */
		if (level >= 1 && level < 4) {
			pil = 1;
			pilreq = IE_L1;
		} else if (level >= 4 && level < 6) {
			pil = 4;
			pilreq = IE_L4;
		} else {
			pil = 6;
			pilreq = IE_L6;
		}
	}

	sic = kmem_alloc(sizeof(*sic), KM_SLEEP);
	sic->sic_pil = pil;
	sic->sic_pilreq = pilreq;
	ih = &sic->sic_hand;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		ih->ih_realfun = (int (*)(void *))fun;
		ih->ih_realarg = arg;
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	} else
#endif /* MULTIPROCESSOR */
	{
		ih->ih_fun = (int (*)(void *))fun;
		ih->ih_arg = arg;
	}

	/*
	 * Always run the handler at the requested level, which might
	 * be higher than the hardware can provide.
	 *
	 * pre-shift to PIL field in %psr
	 */
	ih->ih_classipl = (level << 8) & PSR_PIL;

	if (fastvec & (1 << pil)) {
		printf("softintr_establish: untie fast vector at level %d\n",
		    pil);
		uninst_fasttrap(level);
	}

	ih_insert(&sintrhand[pil], ih);
	return (void *)sic;
}

/*
 * softintr_disestablish(): MI interface.  disestablish the specified
 * software interrupt.
 */
void
sparc_softintr_disestablish(void *cookie)
{
	struct softintr_cookie *sic = cookie;

	ih_remove(&sintrhand[sic->sic_pil], &sic->sic_hand);
	kmem_free(sic, sizeof(*sic));
}

#if 0
void
sparc_softintr_schedule(void *cookie)
{
	struct softintr_cookie *sic = cookie;
	if (CPU_ISSUN4M || CPU_ISSUN4D) {
#if defined(SUN4M) || defined(SUN4D)
		raise(0, sic->sic_pilreq);
#endif
	} else {
#if defined(SUN4) || defined(SUN4C)
		ienab_bis(sic->sic_pilreq);
#endif
	}
}
#endif

#ifdef MULTIPROCESSOR

/*
 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
 */

static int
intr_biglock_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int ret;

	KERNEL_LOCK(1, NULL);

	ret = (*ih->ih_realfun)(ih->ih_realarg);

	KERNEL_UNLOCK_ONE(NULL);

	return ret;
}
#endif /* MULTIPROCESSOR */

bool
cpu_intr_p(void)
{

	/*
	 * cpuinfo is the same VA on every CPU.  Even if preempted it will
	 * give the correct answer.
	 */
	return cpuinfo.ci_idepth != 0;
}
    895