/* Home | History | Annotate | Line # | Download | only in sparc64 */
      1 /*	$NetBSD: locore.s,v 1.438 2025/11/04 20:51:49 palle Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2006-2010 Matthew R. Green
      5  * Copyright (c) 1996-2002 Eduardo Horvath
      6  * Copyright (c) 1996 Paul Kranenburg
      7  * Copyright (c) 1996
      8  * 	The President and Fellows of Harvard College.
      9  *	All rights reserved.
     10  * Copyright (c) 1992, 1993
     11  *	The Regents of the University of California.
     12  *	All rights reserved.
     13  *
     14  * This software was developed by the Computer Systems Engineering group
     15  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     16  * contributed to Berkeley.
     17  *
     18  * All advertising materials mentioning features or use of this software
     19  * must display the following acknowledgement:
     20  *	This product includes software developed by the University of
     21  *	California, Lawrence Berkeley Laboratory.
     22  *	This product includes software developed by Harvard University.
     23  *
     24  * Redistribution and use in source and binary forms, with or without
     25  * modification, are permitted provided that the following conditions
     26  * are met:
     27  * 1. Redistributions of source code must retain the above copyright
     28  *    notice, this list of conditions and the following disclaimer.
     29  * 2. Redistributions in binary form must reproduce the above copyright
     30  *    notice, this list of conditions and the following disclaimer in the
     31  *    documentation and/or other materials provided with the
     32  *    distribution.
     33  * 3. All advertising materials mentioning features or use of this
     34  *    software must display the following acknowledgement:
     35  *	This product includes software developed by the University of
     36  *	California, Berkeley and its contributors.
     37  *	This product includes software developed by Harvard University.
     38  *	This product includes software developed by Paul Kranenburg.
     39  * 4. Neither the name of the University nor the names of its
     40  *    contributors may be used to endorse or promote products derived
     41  *    from this software without specific prior written permission.
     42  *
     43  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
     44  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     45  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
     46  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR
     47  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
     51  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
     52  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
     53  * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
     54  * DAMAGE.
     55  *
     56  *	@(#)locore.s	8.4 (Berkeley) 12/10/93
     57  */
     58 
     59 #undef	PARANOID		/* Extremely expensive consistency checks */
     60 #undef	NO_VCACHE		/* Map w/D$ disabled */
     61 #undef	TRAPSTATS		/* Count traps */
     62 #undef	TRAPS_USE_IG		/* Use Interrupt Globals for all traps */
     63 #define	HWREF			/* Track ref/mod bits in trap handlers */
     64 #undef	DCACHE_BUG		/* Flush D$ around ASI_PHYS accesses */
     65 #undef	NO_TSB			/* Don't use TSB */
     66 #define	BB_ERRATA_1		/* writes to TICK_CMPR may fail */
     67 #undef	TLB_FLUSH_LOWVA		/* also flush 32-bit entries from the MMU */
     68 
     69 #include "opt_ddb.h"
     70 #include "opt_kgdb.h"
     71 #include "opt_multiprocessor.h"
     72 #include "opt_compat_netbsd.h"
     73 #include "opt_compat_netbsd32.h"
     74 #include "opt_lockdebug.h"
     75 
     76 #include "assym.h"
     77 #include <machine/param.h>
     78 #include <machine/types.h>
     79 #include <sparc64/sparc64/intreg.h>
     80 #include <sparc64/sparc64/timerreg.h>
     81 #include <machine/ctlreg.h>
     82 #include <machine/psl.h>
     83 #include <machine/signal.h>
     84 #include <machine/trap.h>
     85 #include <machine/frame.h>
     86 #include <machine/pmap.h>
     87 #include <machine/intr.h>
     88 #include <machine/asm.h>
     89 #include <machine/locore.h>
     90 #ifdef SUN4V
     91 #include <machine/hypervisor.h>
     92 #endif
     93 #include <sys/syscall.h>
     94 
     95 #define BLOCK_SIZE SPARC64_BLOCK_SIZE
     96 #define BLOCK_ALIGN SPARC64_BLOCK_ALIGN
     97 
     98 #ifdef SUN4V
     99 #define SUN4V_N_REG_WINDOWS    8  /* As per UA2005 spec */
    100 #define SUN4V_NWINDOWS           (SUN4V_N_REG_WINDOWS-1) /* This is an index number, so subtract one */
    101 #endif
    102 
    103 #include "ksyms.h"
    104 
    105 	/* Misc. macros */
    106 
     107 	.macro	GET_MAXCWP reg
	/*
	 * Return the platform's maximum CWP value in \reg: the number
	 * of register windows minus one.  sun4v uses the fixed UA2005
	 * window count (SUN4V_NWINDOWS); sun4u reads the CWP field of
	 * the %ver register.  Clobbers \reg and %icc.
	 */
     108 #ifdef SUN4V
     109 	sethi	%hi(cputyp), \reg
     110 	ld	[\reg + %lo(cputyp)], \reg
     111 	cmp	\reg, CPU_SUN4V
     112 	bne,pt	%icc, 2f
     113 	 nop
     114 	/* sun4v */
     115 	ba	3f
     116 	 mov	SUN4V_NWINDOWS, \reg
     117 2:
     118 #endif
     119 	/* sun4u */
     120 	rdpr	%ver, \reg
     121 	and	\reg, CWP, \reg
     122 3:
     123 	.endm
    124 
     125 	.macro	SET_MMU_CONTEXTID_SUN4U ctxid,ctx
	/* sun4u: the MMU context registers are accessed through ASI_DMMU */
     126 	stxa	\ctxid, [\ctx] ASI_DMMU;
     127 	.endm
     128 
     129 #ifdef SUN4V
     130 	.macro	SET_MMU_CONTEXTID_SUN4V ctxid,ctx
	/* sun4v: the MMU context registers are accessed through ASI_MMU_CONTEXTID */
     131 	stxa	\ctxid, [\ctx] ASI_MMU_CONTEXTID;
     132 	.endm
     133 #endif
     134 
     135 	.macro	SET_MMU_CONTEXTID ctxid,ctx,scratch
	/*
	 * Store context id \ctxid into MMU context register \ctx,
	 * dispatching on cputyp at run time when SUN4V support is
	 * compiled in.  Clobbers \scratch and %icc.
	 */
     136 #ifdef SUN4V
     137 	sethi	%hi(cputyp), \scratch
     138 	ld	[\scratch + %lo(cputyp)], \scratch
     139 	cmp	\scratch, CPU_SUN4V
     140 	bne,pt	%icc, 2f
     141 	 nop
     142 	/* sun4v */
     143 	SET_MMU_CONTEXTID_SUN4V \ctxid,\ctx
     144 	ba	3f
     145 	 nop
     146 2:
     147 #endif
     148 	/* sun4u */
     149 	SET_MMU_CONTEXTID_SUN4U \ctxid,\ctx
     150 3:
     151 	.endm
    152 
     153 	.macro	GET_MMU_CONTEXTID_SUN4U ctxid,ctx
	/* sun4u: read MMU context register \ctx into \ctxid via ASI_DMMU */
     154 	ldxa	[\ctx] ASI_DMMU, \ctxid
     155 	.endm
     156 
     157 #ifdef SUN4V
     158 	.macro	GET_MMU_CONTEXTID_SUN4V ctxid,ctx
	/* sun4v: read MMU context register \ctx into \ctxid via ASI_MMU_CONTEXTID */
     159 	ldxa	[\ctx] ASI_MMU_CONTEXTID, \ctxid
     160 	.endm
     161 #endif
     162 
     163 	.macro	GET_MMU_CONTEXTID ctxid,ctx,scratch
	/*
	 * Read MMU context register \ctx into \ctxid, dispatching on
	 * cputyp at run time when SUN4V support is compiled in.
	 * Clobbers \scratch and %icc.
	 */
     164 #ifdef SUN4V
     165 	sethi	%hi(cputyp), \scratch
     166 	ld	[\scratch + %lo(cputyp)], \scratch
     167 	cmp	\scratch, CPU_SUN4V
     168 	bne,pt	%icc, 2f
     169 	 nop
     170 	/* sun4v */
     171 	GET_MMU_CONTEXTID_SUN4V \ctxid,\ctx
     172 	ba	3f
     173 	 nop
     174 2:
     175 #endif
     176 	/* sun4u */
     177 	GET_MMU_CONTEXTID_SUN4U \ctxid,\ctx
     178 3:
     179 	.endm
    180 
     181 #ifdef SUN4V
     182 	.macro	NORMAL_GLOBALS_SUN4V
	/* sun4v: select the normal global register set by writing %gl = 0 */
     183 	 wrpr	%g0, 0, %gl				! Set globals to level 0
     184 	.endm
     185 #endif
     186 	.macro	NORMAL_GLOBALS_SUN4U
	/* sun4u: select the normal globals by writing %pstate with AG clear */
     187 	wrpr	%g0, PSTATE_KERN, %pstate		! Alternate Globals (AG) bit set to zero
     188 	.endm
     189 
     190 #ifdef SUN4V
     191 	.macro	ALTERNATE_GLOBALS_SUN4V
	/* sun4v: select the alternate global register set by writing %gl = 1 */
     192 	 wrpr	%g0, 1, %gl				! Set globals to level 1
     193 	.endm
     194 #endif
     195 	.macro	ALTERNATE_GLOBALS_SUN4U
	/* sun4u: select the alternate globals by writing %pstate with AG set */
     196 	 wrpr    %g0, PSTATE_KERN|PSTATE_AG, %pstate	! Alternate Globals (AG) bit set to one
     197 	.endm
     198 
     199 	.macro	ENABLE_INTERRUPTS scratch
	/* Set PSTATE_IE in %pstate; \scratch is clobbered */
     200 	rdpr	 %pstate, \scratch
     201 	or	\scratch, PSTATE_IE, \scratch	! Interrupt Enable (IE) bit set to one
     202 	wrpr	%g0, \scratch, %pstate
     203 	.endm
     204 
     205 	.macro	DISABLE_INTERRUPTS scratch
	/* Clear PSTATE_IE in %pstate; \scratch is clobbered */
     206 	rdpr	 %pstate, \scratch
     207 	and	\scratch, ~PSTATE_IE, \scratch	! Interrupt Enable (IE) bit set to zero
     208 	wrpr	%g0, \scratch, %pstate
     209 	.endm
    210 
    211 
     212 #ifdef SUN4V
     213 	/* Misc. sun4v macros */
     214 
     215 	.macro	GET_MMFSA reg
	/* Load this CPU's MMU fault status area pointer (ci_mmufsa) into \reg */
     216 	sethi	%hi(CPUINFO_VA + CI_MMUFSA), \reg
     217 	LDPTR	[\reg + %lo(CPUINFO_VA + CI_MMUFSA)], \reg
     218 	.endm
     219 
     220 	.macro	GET_CTXBUSY reg
	/* Load this CPU's ctxbusy pointer (ci_ctxbusy) into \reg */
     221 	sethi	%hi(CPUINFO_VA + CI_CTXBUSY), \reg
     222 	LDPTR	[\reg + %lo(CPUINFO_VA + CI_CTXBUSY)], \reg
     223 	.endm
     224 
     225 	.macro	GET_TSB_DMMU reg
	/* Load this CPU's DMMU TSB pointer (ci_tsb_dmmu) into \reg */
     226 	sethi	%hi(CPUINFO_VA + CI_TSB_DMMU), \reg
     227 	LDPTR	[\reg + %lo(CPUINFO_VA + CI_TSB_DMMU)], \reg
     228 	.endm
     229 
     230 	.macro sun4v_tl1_uspill_normal
	/* TL=1 user spill: branch out of the table; pad to a full 128-byte trap slot */
     231 	ba,a,pt	%xcc, spill_normal_to_user_stack
     232 	 nop
     233 	.align 128
     234 	.endm
     235 
     236 	.macro sun4v_tl1_uspill_other
	/* TL=1 user spill (other): branch out of the table; pad to a full 128-byte trap slot */
     237 	ba,a,pt	%xcc, pcbspill_other
     238 	 nop
     239 	.align 128
     240 	.endm
     241 
     242 #endif
    243 
    244 #if 1
    245 /*
    246  * Try to issue an elf note to ask the Solaris
    247  * bootloader to align the kernel properly.
    248  */
    249 	.section	.note
    250 	.word	0x0d
    251 	.word	4		! Dunno why
    252 	.word	1
    253 0:	.asciz	"SUNW Solaris"
    254 1:
    255 	.align	4
    256 	.word	0x0400000
    257 #endif
    258 
    259 	.register	%g2,#scratch
    260 	.register	%g3,#scratch
    261 
    262 
    263 	.data
    264 	.globl	_C_LABEL(data_start)
    265 _C_LABEL(data_start):					! Start of data segment
    266 
    267 #ifdef KGDB
    268 /*
    269  * Another item that must be aligned, easiest to put it here.
    270  */
    271 KGDB_STACK_SIZE = 2048
    272 	.globl	_C_LABEL(kgdb_stack)
    273 _C_LABEL(kgdb_stack):
    274 	.space	KGDB_STACK_SIZE		! hope this is enough
    275 #endif
    276 
    277 #ifdef NOTDEF_DEBUG
    278 /*
    279  * This stack is used when we detect kernel stack corruption.
    280  */
    281 	.space	USPACE
    282 	.align	16
    283 panicstack:
    284 #endif
    285 
    286 /*
    287  * romp is the prom entry pointer
    288  * romtba is the prom trap table base address
    289  */
    290 	.globl	romp
    291 romp:	POINTER	0
    292 	.globl	romtba
    293 romtba:	POINTER	0
    294 
    295 	.globl	cputyp
    296 cputyp:	.word	CPU_SUN4U ! Default to sun4u
    297 
    298 	_ALIGN
    299 	.text
    300 
    301 /*
    302  * The v9 trap frame is stored in the special trap registers.  The
    303  * register window is only modified on window overflow, underflow,
    304  * and clean window traps, where it points to the register window
    305  * needing service.  Traps have space for 8 instructions, except for
    306  * the window overflow, underflow, and clean window traps which are
    307  * 32 instructions long, large enough to in-line.
    308  *
    309  * The spitfire CPU (Ultra I) has 4 different sets of global registers.
    310  * (blah blah...)
    311  *
    312  * I used to generate these numbers by address arithmetic, but gas's
    313  * expression evaluator has about as much sense as your average slug
    314  * (oddly enough, the code looks about as slimy too).  Thus, all the
    315  * trap numbers are given as arguments to the trap macros.  This means
    316  * there is one line per trap.  Sigh.
    317  *
    318  * Hardware interrupt vectors can be `linked'---the linkage is to regular
    319  * C code---or rewired to fast in-window handlers.  The latter are good
    320  * for unbuffered hardware like the Zilog serial chip and the AMD audio
    321  * chip, where many interrupts can be handled trivially with pseudo-DMA
    322  * or similar.  Only one `fast' interrupt can be used per level, however,
    323  * and direct and `fast' interrupts are incompatible.  Routines in intr.c
    324  * handle setting these, with optional paranoia.
    325  */
    326 
    327 /*
    328  *	TA8 -- trap align for 8 instruction traps
    329  *	TA32 -- trap align for 32 instruction traps
    330  */
    331 #define TA8	.align 32
    332 #define TA32	.align 128
    333 
    334 /*
    335  * v9 trap macros:
    336  *
    337  *	We have a problem with v9 traps; we have no registers to put the
    338  *	trap type into.  But we do have a %tt register which already has
    339  *	that information.  Trap types in these macros are all dummys.
    340  */
    341 	/* regular vectored traps */
    342 
    343 #define	VTRAP(type, label) \
    344 	ba,a,pt	%icc,label; nop; NOTREACHED; TA8
    345 
    346 	/* hardware interrupts (can be linked or made `fast') */
    347 #define	HARDINT4U(lev) \
    348 	VTRAP(lev, _C_LABEL(sparc_interrupt))
    349 #ifdef SUN4V
    350 #define HARDINT4V(lev) HARDINT4U(lev)
    351 #endif
    352 
    353 	/* software interrupts (may not be made direct, sorry---but you
    354 	   should not be using them trivially anyway) */
    355 #define	SOFTINT4U(lev, bit) \
    356 	HARDINT4U(lev)
    357 
    358 	/* traps that just call trap() */
    359 #define	TRAP(type)	VTRAP(type, slowtrap)
    360 
    361 	/* architecturally undefined traps (cause panic) */
    362 #ifndef DEBUG
    363 #define	UTRAP(type)	sir; VTRAP(type, slowtrap)
    364 #else
    365 #define	UTRAP(type)	VTRAP(type, slowtrap)
    366 #endif
    367 
    368 	/* software undefined traps (may be replaced) */
    369 #define	STRAP(type)	VTRAP(type, slowtrap)
    370 
    371 /* breakpoint acts differently under kgdb */
    372 #ifdef KGDB
    373 #define	BPT		VTRAP(T_BREAKPOINT, bpt)
    374 #define	BPT_KGDB_EXEC	VTRAP(T_KGDB_EXEC, bpt)
    375 #else
    376 #define	BPT		TRAP(T_BREAKPOINT)
    377 #define	BPT_KGDB_EXEC	TRAP(T_KGDB_EXEC)
    378 #endif
    379 
    380 #define	SYSCALL		VTRAP(0x100, syscall_setup)
    381 #ifdef notyet
    382 #define	ZS_INTERRUPT	ba,a,pt %icc, zshard; nop; TA8
    383 #else
    384 #define	ZS_INTERRUPT4U	HARDINT4U(12)
    385 #endif
    386 
    387 
    388 /*
    389  * Macro to clear %tt so we don't get confused with old traps.
    390  */
    391 #ifdef DEBUG
    392 #define CLRTT	wrpr	%g0,0x1ff,%tt
    393 #else
    394 #define CLRTT
    395 #endif
    396 
    397 
    398 /*
    399  * Some macros to load and store a register window
    400  */
    401 
     402 	.macro	SPILL storer,base,size,asi
	/*
	 * Store the 16 window registers %l0-%l7 and %i0-%i7 using the
	 * \storer instruction, at \base + n*\size, in address space \asi.
	 */
     403 
     404 	.irpc n,01234567
     405 		\storer %l\n, [\base + (\n * \size)] \asi
     406 	.endr
     407 	.irpc n,01234567
     408 		\storer %i\n, [\base + ((8+\n) * \size)] \asi
     409 	.endr
     410 
     411 	.endm
     412 
     413 
     414 	.macro FILL loader, base, size, asi
	/*
	 * Load the 16 window registers %l0-%l7 and %i0-%i7 using the
	 * \loader instruction, from \base + n*\size, in address space \asi.
	 */
     415 
     416 	.irpc n,01234567
     417 		\loader [\base + (\n * \size)] \asi, %l\n
     418 	.endr
     419 
     420 	.irpc n,01234567
     421 		\loader [\base + ((8+\n) * \size)] \asi, %i\n
     422 	.endr
     423 
     424 	.endm
    425 
    426 /*
    427  * Here are some oft repeated traps as macros.
    428  */
    429 
    430 	/* spill a 64-bit register window */
    431 #define SPILL64(label,as) \
    432 label:	\
    433 	wr	%g0, as, %asi; \
    434 	stxa	%l0, [%sp+BIAS+0x00]%asi; \
    435 	stxa	%l1, [%sp+BIAS+0x08]%asi; \
    436 	stxa	%l2, [%sp+BIAS+0x10]%asi; \
    437 	stxa	%l3, [%sp+BIAS+0x18]%asi; \
    438 	stxa	%l4, [%sp+BIAS+0x20]%asi; \
    439 	stxa	%l5, [%sp+BIAS+0x28]%asi; \
    440 	stxa	%l6, [%sp+BIAS+0x30]%asi; \
    441 	\
    442 	stxa	%l7, [%sp+BIAS+0x38]%asi; \
    443 	stxa	%i0, [%sp+BIAS+0x40]%asi; \
    444 	stxa	%i1, [%sp+BIAS+0x48]%asi; \
    445 	stxa	%i2, [%sp+BIAS+0x50]%asi; \
    446 	stxa	%i3, [%sp+BIAS+0x58]%asi; \
    447 	stxa	%i4, [%sp+BIAS+0x60]%asi; \
    448 	stxa	%i5, [%sp+BIAS+0x68]%asi; \
    449 	stxa	%i6, [%sp+BIAS+0x70]%asi; \
    450 	\
    451 	stxa	%i7, [%sp+BIAS+0x78]%asi; \
    452 	saved; \
    453 	CLRTT; \
    454 	retry; \
    455 	NOTREACHED; \
    456 	TA32
    457 
    458 	/* spill a 32-bit register window */
    459 #define SPILL32(label,as) \
    460 label:	\
    461 	wr	%g0, as, %asi; \
    462 	srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
    463 	stwa	%l0, [%sp+0x00]%asi; \
    464 	stwa	%l1, [%sp+0x04]%asi; \
    465 	stwa	%l2, [%sp+0x08]%asi; \
    466 	stwa	%l3, [%sp+0x0c]%asi; \
    467 	stwa	%l4, [%sp+0x10]%asi; \
    468 	stwa	%l5, [%sp+0x14]%asi; \
    469 	\
    470 	stwa	%l6, [%sp+0x18]%asi; \
    471 	stwa	%l7, [%sp+0x1c]%asi; \
    472 	stwa	%i0, [%sp+0x20]%asi; \
    473 	stwa	%i1, [%sp+0x24]%asi; \
    474 	stwa	%i2, [%sp+0x28]%asi; \
    475 	stwa	%i3, [%sp+0x2c]%asi; \
    476 	stwa	%i4, [%sp+0x30]%asi; \
    477 	stwa	%i5, [%sp+0x34]%asi; \
    478 	\
    479 	stwa	%i6, [%sp+0x38]%asi; \
    480 	stwa	%i7, [%sp+0x3c]%asi; \
    481 	saved; \
    482 	CLRTT; \
    483 	retry; \
    484 	NOTREACHED; \
    485 	TA32
    486 
    487 	/* Spill either 32-bit or 64-bit register window. */
    488 #define SPILLBOTH(label64,label32,as) \
    489 	andcc	%sp, 1, %g0; \
    490 	bnz,pt	%xcc, label64+4;	/* Is it a v9 or v8 stack? */ \
    491 	 wr	%g0, as, %asi; \
    492 	ba,pt	%xcc, label32+8; \
    493 	 srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
    494 	NOTREACHED; \
    495 	TA32
    496 
    497 	/* fill a 64-bit register window */
    498 #define FILL64(label,as) \
    499 label: \
    500 	wr	%g0, as, %asi; \
    501 	ldxa	[%sp+BIAS+0x00]%asi, %l0; \
    502 	ldxa	[%sp+BIAS+0x08]%asi, %l1; \
    503 	ldxa	[%sp+BIAS+0x10]%asi, %l2; \
    504 	ldxa	[%sp+BIAS+0x18]%asi, %l3; \
    505 	ldxa	[%sp+BIAS+0x20]%asi, %l4; \
    506 	ldxa	[%sp+BIAS+0x28]%asi, %l5; \
    507 	ldxa	[%sp+BIAS+0x30]%asi, %l6; \
    508 	\
    509 	ldxa	[%sp+BIAS+0x38]%asi, %l7; \
    510 	ldxa	[%sp+BIAS+0x40]%asi, %i0; \
    511 	ldxa	[%sp+BIAS+0x48]%asi, %i1; \
    512 	ldxa	[%sp+BIAS+0x50]%asi, %i2; \
    513 	ldxa	[%sp+BIAS+0x58]%asi, %i3; \
    514 	ldxa	[%sp+BIAS+0x60]%asi, %i4; \
    515 	ldxa	[%sp+BIAS+0x68]%asi, %i5; \
    516 	ldxa	[%sp+BIAS+0x70]%asi, %i6; \
    517 	\
    518 	ldxa	[%sp+BIAS+0x78]%asi, %i7; \
    519 	restored; \
    520 	CLRTT; \
    521 	retry; \
    522 	NOTREACHED; \
    523 	TA32
    524 
    525 	/* fill a 32-bit register window */
    526 #define FILL32(label,as) \
    527 label:	\
    528 	wr	%g0, as, %asi; \
    529 	srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
    530 	lda	[%sp+0x00]%asi, %l0; \
    531 	lda	[%sp+0x04]%asi, %l1; \
    532 	lda	[%sp+0x08]%asi, %l2; \
    533 	lda	[%sp+0x0c]%asi, %l3; \
    534 	lda	[%sp+0x10]%asi, %l4; \
    535 	lda	[%sp+0x14]%asi, %l5; \
    536 	\
    537 	lda	[%sp+0x18]%asi, %l6; \
    538 	lda	[%sp+0x1c]%asi, %l7; \
    539 	lda	[%sp+0x20]%asi, %i0; \
    540 	lda	[%sp+0x24]%asi, %i1; \
    541 	lda	[%sp+0x28]%asi, %i2; \
    542 	lda	[%sp+0x2c]%asi, %i3; \
    543 	lda	[%sp+0x30]%asi, %i4; \
    544 	lda	[%sp+0x34]%asi, %i5; \
    545 	\
    546 	lda	[%sp+0x38]%asi, %i6; \
    547 	lda	[%sp+0x3c]%asi, %i7; \
    548 	restored; \
    549 	CLRTT; \
    550 	retry; \
    551 	NOTREACHED; \
    552 	TA32
    553 
    554 	/* fill either 32-bit or 64-bit register window. */
    555 #define FILLBOTH(label64,label32,as) \
    556 	andcc	%sp, 1, %i0; \
    557 	bnz	(label64)+4; /* See if it's a v9 stack or v8 */ \
    558 	 wr	%g0, as, %asi; \
    559 	ba	(label32)+8; \
    560 	 srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
    561 	NOTREACHED; \
    562 	TA32
    563 
    564 	/* handle clean window trap when trap level = 0 */
     565 	.macro CLEANWIN0
	/*
	 * Handle a clean window trap at TL=0: increment %cleanwin and
	 * clear the window's %l and %o registers so no stale nucleus
	 * data leaks into the new user window.  Expanded in-line in the
	 * trap table; must not fault.
	 */
     566 	rdpr %cleanwin, %o7
     567 	inc %o7				!	This handler is in-lined and cannot fault
     568 #ifdef DEBUG
     569 	set	0xbadcafe, %l0		! DEBUG -- compiler should not rely on zero-ed registers.
     570 #else
     571 	clr	%l0
     572 #endif
     573 	wrpr %g0, %o7, %cleanwin	!       Nucleus (trap&IRQ) code does not need clean windows
     574 
     575 	mov %l0,%l1; mov %l0,%l2	!	Clear out %l0-%l8 and %o0-%o8 and inc %cleanwin and done
     576 	mov %l0,%l3; mov %l0,%l4
     577 #if 0
     578 #ifdef DIAGNOSTIC
	/* Disabled (#if 0) stack redzone check kept for reference */
     579 	!!
     580 	!! Check the sp redzone
     581 	!!
     582 	!! Since we can't spill the current window, we'll just keep
     583 	!! track of the frame pointer.  Problems occur when the routine
     584 	!! allocates and uses stack storage.
     585 	!!
     586 !	rdpr	%wstate, %l5	! User stack?
     587 !	cmp	%l5, WSTATE_KERN
     588 !	bne,pt	%icc, 7f
     589 	 sethi	%hi(CPCB), %l5
     590 	LDPTR	[%l5 + %lo(CPCB)], %l5	! If pcb < fp < pcb+sizeof(pcb)
     591 	inc	PCB_SIZE, %l5		! then we have a stack overflow
     592 	btst	%fp, 1			! 64-bit stack?
     593 	sub	%fp, %l5, %l7
     594 	bnz,a,pt	%icc, 1f
     595 	 inc	BIAS, %l7		! Remove BIAS
     596 1:
     597 	cmp	%l7, PCB_SIZE
     598 	blu	%xcc, cleanwin_overflow
     599 #endif
     600 #endif
     601 	mov %l0, %l5
     602 	mov %l0, %l6; mov %l0, %l7; mov %l0, %o0; mov %l0, %o1
     603 
     604 	mov %l0, %o2; mov %l0, %o3; mov %l0, %o4; mov %l0, %o5;
     605 	mov %l0, %o6; mov %l0, %o7
     606 	CLRTT
     607 	retry; nop; NOTREACHED; TA32
     608 	.endm
    609 
    610 	/* handle clean window trap when trap level = 1 */
     611 	.macro CLEANWIN1
	/*
	 * Handle a clean window trap at TL=1: increment %cleanwin and
	 * clear the window's %l and %o registers.  Expanded in-line in
	 * the trap table; must not fault.
	 */
     612 	clr	%l0
     613 #ifdef DEBUG
     614 	set	0xbadbeef, %l0		! DEBUG
     615 #endif
     616 	mov %l0, %l1; mov %l0, %l2
     617 	rdpr %cleanwin, %o7		!	This handler is in-lined and cannot fault
     618 	inc %o7; mov %l0, %l3		!       Nucleus (trap&IRQ) code does not need clean windows
     619 	wrpr %g0, %o7, %cleanwin	!	Clear out %l0-%l8 and %o0-%o8 and inc %cleanwin and done
     620 #ifdef NOT_DEBUG
	/* Disabled (NOT_DEBUG) stack redzone check kept for reference */
     621 	!!
     622 	!! Check the sp redzone
     623 	!!
     624 	rdpr	%wstate, t1
     625 	cmp	t1, WSTATE_KERN
     626 	bne,pt	icc, 7f
     627 	 sethi	%hi(_C_LABEL(redzone)), t1
     628 	ldx	[t1 + %lo(_C_LABEL(redzone))], t2
     629 	cmp	%sp, t2			! if sp >= t2, not in red zone
     630 	blu	panic_red		! and can continue normally
     631 7:
     632 #endif
     633 	mov %l0, %l4; mov %l0, %l5; mov %l0, %l6; mov %l0, %l7
     634 	mov %l0, %o0; mov %l0, %o1; mov %l0, %o2; mov %l0, %o3
     635 
     636 	mov %l0, %o4; mov %l0, %o5; mov %l0, %o6; mov %l0, %o7
     637 	CLRTT
     638 	retry; nop; TA32
     639 	.endm
    640 
    641 	.globl	start, _C_LABEL(kernel_text)
    642 	_C_LABEL(kernel_text) = kernel_start		! for kvm_mkdb(8)
    643 kernel_start:
    644 	/* Traps from TL=0 -- traps from user mode */
    645 #ifdef __STDC__
    646 #define TABLE(name)	user_ ## name
    647 #else
    648 #define	TABLE(name)	user_/**/name
    649 #endif
    650 	.globl	_C_LABEL(trapbase)
    651 _C_LABEL(trapbase):
    652 	b dostart; nop; TA8	! 000 = reserved -- Use it to boot
    653 	/* We should not get the next 5 traps */
    654 	UTRAP(0x001)		! 001 = POR Reset -- ROM should get this
    655 	UTRAP(0x002)		! 002 = WDR -- ROM should get this
    656 	UTRAP(0x003)		! 003 = XIR -- ROM should get this
    657 	UTRAP(0x004)		! 004 = SIR -- ROM should get this
    658 	UTRAP(0x005)		! 005 = RED state exception
    659 	UTRAP(0x006); UTRAP(0x007)
    660 	VTRAP(T_INST_EXCEPT, textfault)	! 008 = instr. access except
    661 	VTRAP(T_TEXTFAULT, textfault)	! 009 = instr access MMU miss
    662 	VTRAP(T_INST_ERROR, textfault)	! 00a = instr. access err
    663 	UTRAP(0x00b); UTRAP(0x00c); UTRAP(0x00d); UTRAP(0x00e); UTRAP(0x00f)
    664 	TRAP(T_ILLINST)			! 010 = illegal instruction
    665 	TRAP(T_PRIVINST)		! 011 = privileged instruction
    666 	UTRAP(0x012)			! 012 = unimplemented LDD
    667 	UTRAP(0x013)			! 013 = unimplemented STD
    668 	UTRAP(0x014); UTRAP(0x015); UTRAP(0x016); UTRAP(0x017); UTRAP(0x018)
    669 	UTRAP(0x019); UTRAP(0x01a); UTRAP(0x01b); UTRAP(0x01c); UTRAP(0x01d)
    670 	UTRAP(0x01e); UTRAP(0x01f)
    671 	TRAP(T_FPDISABLED)		! 020 = fp instr, but EF bit off in psr
    672 	TRAP(T_FP_IEEE_754)		! 021 = ieee 754 exception
    673 	TRAP(T_FP_OTHER)		! 022 = other fp exception
    674 	TRAP(T_TAGOF)			! 023 = tag overflow
    675 	CLEANWIN0			! 024-027 = clean window trap
    676 	TRAP(T_DIV0)			! 028 = divide by zero
    677 	UTRAP(0x029)			! 029 = internal processor error
    678 	UTRAP(0x02a); UTRAP(0x02b); UTRAP(0x02c); UTRAP(0x02d); UTRAP(0x02e); UTRAP(0x02f)
    679 	VTRAP(T_DATAFAULT, winfault)	! 030 = data fetch fault
    680 	UTRAP(0x031)			! 031 = data MMU miss -- no MMU
    681 	VTRAP(T_DATA_ERROR, winfault)	! 032 = data access error
    682 	VTRAP(T_DATA_PROT, winfault)	! 033 = data protection fault
    683 	TRAP(T_ALIGN)			! 034 = address alignment error -- we could fix it inline...
    684 	TRAP(T_LDDF_ALIGN)		! 035 = LDDF address alignment error -- we could fix it inline...
    685 	TRAP(T_STDF_ALIGN)		! 036 = STDF address alignment error -- we could fix it inline...
    686 	TRAP(T_PRIVACT)			! 037 = privileged action
    687 	UTRAP(0x038); UTRAP(0x039); UTRAP(0x03a); UTRAP(0x03b); UTRAP(0x03c);
    688 	UTRAP(0x03d); UTRAP(0x03e); UTRAP(0x03f);
    689 	VTRAP(T_ASYNC_ERROR, winfault)	! 040 = data fetch fault
    690 	SOFTINT4U(1, IE_L1)		! 041 = level 1 interrupt
    691 	HARDINT4U(2)			! 042 = level 2 interrupt
    692 	HARDINT4U(3)			! 043 = level 3 interrupt
    693 	SOFTINT4U(4, IE_L4)		! 044 = level 4 interrupt
    694 	HARDINT4U(5)			! 045 = level 5 interrupt
    695 	SOFTINT4U(6, IE_L6)		! 046 = level 6 interrupt
    696 	HARDINT4U(7)			! 047 = level 7 interrupt
    697 	HARDINT4U(8)			! 048 = level 8 interrupt
    698 	HARDINT4U(9)			! 049 = level 9 interrupt
    699 	HARDINT4U(10)			! 04a = level 10 interrupt
    700 	HARDINT4U(11)			! 04b = level 11 interrupt
    701 	ZS_INTERRUPT4U			! 04c = level 12 (zs) interrupt
    702 	HARDINT4U(13)			! 04d = level 13 interrupt
    703 	HARDINT4U(14)			! 04e = level 14 interrupt
    704 	HARDINT4U(15)			! 04f = nonmaskable interrupt
    705 	UTRAP(0x050); UTRAP(0x051); UTRAP(0x052); UTRAP(0x053); UTRAP(0x054); UTRAP(0x055)
    706 	UTRAP(0x056); UTRAP(0x057); UTRAP(0x058); UTRAP(0x059); UTRAP(0x05a); UTRAP(0x05b)
    707 	UTRAP(0x05c); UTRAP(0x05d); UTRAP(0x05e); UTRAP(0x05f)
    708 	VTRAP(0x060, interrupt_vector); ! 060 = interrupt vector
    709 	TRAP(T_PA_WATCHPT)		! 061 = physical address data watchpoint
    710 	TRAP(T_VA_WATCHPT)		! 062 = virtual address data watchpoint
    711 	TRAP(T_ECCERR)			! 063 = corrected ECC error
     712 ufast_IMMU_miss:			! 064 = fast instr access MMU miss
	/*
	 * Fast ITLB miss handler: probe the 8K TSB; on a valid, matching
	 * tag, load the TTE data into the IMMU and retry, otherwise
	 * branch to the slow path (instr_miss).  Uses only %g1, %g2,
	 * %g4 and %g5.
	 */
     713 	ldxa	[%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
     714 #ifdef NO_TSB
     715 	ba,a	%icc, instr_miss
     716 #endif
     717 	ldxa	[%g0] ASI_IMMU, %g1	! Load IMMU tag target register
     718 	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag:data into %g4:%g5
     719 	brgez,pn %g5, instr_miss	! Entry invalid?  Punt
     720 	 cmp	%g1, %g4		! Compare TLB tags
     721 	bne,pn %xcc, instr_miss		! Got right tag?
     722 	 nop
     723 	CLRTT
     724 	stxa	%g5, [%g0] ASI_IMMU_DATA_IN ! Enter new mapping
     725 	retry				! Try new mapping
	/* not reached -- retry above leaves the handler */
     726 1:
     727 	sir
     728 	TA32
     729 ufast_DMMU_miss:			! 068 = fast data access MMU miss
	/*
	 * Fast DTLB miss handler: probe the 8K TSB; on a valid, matching
	 * tag, load the TTE data into the DMMU and retry, otherwise
	 * branch to the slow path (data_miss).  Uses only %g1, %g2,
	 * %g4 and %g5.
	 */
     730 	ldxa	[%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
     731 #ifdef NO_TSB
     732 	ba,a	%icc, data_miss
     733 #endif
     734 	ldxa	[%g0] ASI_DMMU, %g1	! Load DMMU tag target register
     735 	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag and data into %g4 and %g5
     736 	brgez,pn %g5, data_miss		! Entry invalid?  Punt
     737 	 cmp	%g1, %g4		! Compare TLB tags
     738 	bnz,pn	%xcc, data_miss		! Got right tag?
     739 	 nop
     740 	CLRTT
     741 #ifdef TRAPSTATS
	/* TRAPSTATS: count user DTLB TSB hits in udhit */
     742 	sethi	%hi(_C_LABEL(udhit)), %g1
     743 	lduw	[%g1+%lo(_C_LABEL(udhit))], %g2
     744 	inc	%g2
     745 	stw	%g2, [%g1+%lo(_C_LABEL(udhit))]
     746 #endif
     747 	stxa	%g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
     748 	retry				! Try new mapping
	/* not reached -- retry above leaves the handler */
     749 1:
     750 	sir
     751 	TA32
     752 ufast_DMMU_protection:			! 06c = fast data access MMU protection
	/*
	 * Fast DTLB protection fault: hand off to dmmu_write_fault when
	 * HWREF (ref/mod tracking in trap handlers) is enabled, else to
	 * the generic winfault path.
	 */
     753 #ifdef TRAPSTATS
	/* TRAPSTATS: count user DTLB protection faults in udprot */
     754 	sethi	%hi(_C_LABEL(udprot)), %g1
     755 	lduw	[%g1+%lo(_C_LABEL(udprot))], %g2
     756 	inc	%g2
     757 	stw	%g2, [%g1+%lo(_C_LABEL(udprot))]
     758 #endif
     759 #ifdef HWREF
     760 	ba,a,pt	%xcc, dmmu_write_fault
     761 #else
     762 	ba,a,pt	%xcc, winfault
     763 #endif
     764 	nop
     765 	TA32
    766 	TRAP(0x070)			! 0x070 fast_ECC_error
    767 					! Implementation dependent traps
    768 	UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076)
    769 	UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c)
    770 	UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f)
    771 TABLE(uspill):
    772 	SPILL64(uspill8,ASI_AIUS)	! 0x080 spill_0_normal -- used to save user windows in user mode
    773 	SPILL32(uspill4,ASI_AIUS)	! 0x084 spill_1_normal
    774 	SPILLBOTH(uspill8,uspill4,ASI_AIUS)	 ! 0x088 spill_2_normal
    775 	UTRAP(0x08c); TA32		! 0x08c spill_3_normal
    776 TABLE(kspill):
    777 	SPILL64(kspill8,ASI_N)		! 0x090 spill_4_normal -- used to save supervisor windows
    778 	SPILL32(kspill4,ASI_N)		! 0x094 spill_5_normal
    779 	SPILLBOTH(kspill8,kspill4,ASI_N) ! 0x098 spill_6_normal
    780 	UTRAP(0x09c); TA32		! 0x09c spill_7_normal
    781 TABLE(uspillk):
    782 	SPILL64(uspillk8,ASI_AIUS)	! 0x0a0 spill_0_other -- used to save user windows in supervisor mode
    783 	SPILL32(uspillk4,ASI_AIUS)	! 0x0a4 spill_1_other
    784 	SPILLBOTH(uspillk8,uspillk4,ASI_AIUS) ! 0x0a8 spill_2_other
    785 	UTRAP(0x0ac); TA32		! 0x0ac spill_3_other
    786 	UTRAP(0x0b0); TA32		! 0x0b0 spill_4_other
    787 	UTRAP(0x0b4); TA32		! 0x0b4 spill_5_other
    788 	UTRAP(0x0b8); TA32		! 0x0b8 spill_6_other
    789 	UTRAP(0x0bc); TA32		! 0x0bc spill_7_other
    790 TABLE(ufill):
    791 	FILL64(ufill8,ASI_AIUS)		! 0x0c0 fill_0_normal -- used to fill windows when running user mode
    792 	FILL32(ufill4,ASI_AIUS)		! 0x0c4 fill_1_normal
    793 	FILLBOTH(ufill8,ufill4,ASI_AIUS) ! 0x0c8 fill_2_normal
    794 	UTRAP(0x0cc); TA32		! 0x0cc fill_3_normal
    795 TABLE(kfill):
    796 	FILL64(kfill8,ASI_N)		! 0x0d0 fill_4_normal -- used to fill windows when running supervisor mode
    797 	FILL32(kfill4,ASI_N)		! 0x0d4 fill_5_normal
    798 	FILLBOTH(kfill8,kfill4,ASI_N)	! 0x0d8 fill_6_normal
    799 	UTRAP(0x0dc); TA32		! 0x0dc fill_7_normal
    800 TABLE(ufillk):
    801 	FILL64(ufillk8,ASI_AIUS)	! 0x0e0 fill_0_other
    802 	FILL32(ufillk4,ASI_AIUS)	! 0x0e4 fill_1_other
    803 	FILLBOTH(ufillk8,ufillk4,ASI_AIUS) ! 0x0e8 fill_2_other
    804 	UTRAP(0x0ec); TA32		! 0x0ec fill_3_other
    805 	UTRAP(0x0f0); TA32		! 0x0f0 fill_4_other
    806 	UTRAP(0x0f4); TA32		! 0x0f4 fill_5_other
    807 	UTRAP(0x0f8); TA32		! 0x0f8 fill_6_other
    808 	UTRAP(0x0fc); TA32		! 0x0fc fill_7_other
    809 TABLE(syscall):
    810 	SYSCALL				! 0x100 = sun syscall
    811 	BPT				! 0x101 = pseudo breakpoint instruction
    812 	STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107)
    813 	SYSCALL				! 0x108 = svr4 syscall
    814 	SYSCALL				! 0x109 = bsd syscall
    815 	BPT_KGDB_EXEC			! 0x10a = enter kernel gdb on kernel startup
    816 	STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f);
    817 	STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117)
    818 	STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f)
    819 	STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127)
    820 	STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f)
    821 	STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137)
    822 	STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f)
    823 	SYSCALL				! 0x140 SVID syscall (Solaris 2.7)
    824 	SYSCALL				! 0x141 SPARC International syscall
    825 	SYSCALL				! 0x142	OS Vendor syscall
    826 	SYSCALL				! 0x143 HW OEM syscall
    827 	STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147)
    828 	STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f)
    829 	STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157)
    830 	STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f)
    831 	STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167)
    832 	STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f)
    833 	STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177)
    834 	STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f)
    835 	! Traps beyond 0x17f are reserved
    836 	UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187)
    837 	UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f)
    838 	UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197)
    839 	UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f)
    840 	UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7)
    841 	UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af)
    842 	UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7)
    843 	UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf)
    844 	UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7)
    845 	UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf)
    846 	UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7)
    847 	UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df)
    848 	UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7)
    849 	UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef)
    850 	UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7)
    851 	UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff)
    852 
	/* Traps from TL>0 -- traps from supervisor mode */
	/*
	 * Redefine TABLE() so that labels inside the nucleus (TL>0) trap
	 * table below get a "nucleus_" prefix, keeping them distinct from
	 * the identically-named labels in the TL=0 table above.
	 */
#undef TABLE
#ifdef __STDC__
#define	TABLE(name)	nucleus_ ## name
#else
#define	TABLE(name)	nucleus_/**/name
#endif
trapbase_priv:
	! Nucleus (TL>0) trap table.  Each UTRAP/TRAP/VTRAP entry expands to
	! one 32-byte (8-instruction) trap vector; entry order fixes the trap
	! type each vector services, so entries must never be reordered.
	UTRAP(0x000)			! 000 = reserved -- Use it to boot
	/* We should not get the next 5 traps */
	UTRAP(0x001)			! 001 = POR Reset -- ROM should get this
	UTRAP(0x002)			! 002 = WDR Watchdog -- ROM should get this
	UTRAP(0x003)			! 003 = XIR -- ROM should get this
	UTRAP(0x004)			! 004 = SIR -- ROM should get this
	UTRAP(0x005)			! 005 = RED state exception
	UTRAP(0x006); UTRAP(0x007)
ktextfault:
	VTRAP(T_INST_EXCEPT, textfault)	! 008 = instr. access except
	VTRAP(T_TEXTFAULT, textfault)	! 009 = instr access MMU miss -- no MMU
	VTRAP(T_INST_ERROR, textfault)	! 00a = instr. access err
	UTRAP(0x00b); UTRAP(0x00c); UTRAP(0x00d); UTRAP(0x00e); UTRAP(0x00f)
	TRAP(T_ILLINST)			! 010 = illegal instruction
	TRAP(T_PRIVINST)		! 011 = privileged instruction
	UTRAP(0x012)			! 012 = unimplemented LDD
	UTRAP(0x013)			! 013 = unimplemented STD
	UTRAP(0x014); UTRAP(0x015); UTRAP(0x016); UTRAP(0x017); UTRAP(0x018)
	UTRAP(0x019); UTRAP(0x01a); UTRAP(0x01b); UTRAP(0x01c); UTRAP(0x01d)
	UTRAP(0x01e); UTRAP(0x01f)
	TRAP(T_FPDISABLED)		! 020 = fp instr, but EF bit off in psr
	TRAP(T_FP_IEEE_754)		! 021 = ieee 754 exception
	TRAP(T_FP_OTHER)		! 022 = other fp exception
	TRAP(T_TAGOF)			! 023 = tag overflow
	CLEANWIN1			! 024-027 = clean window trap
	TRAP(T_DIV0)			! 028 = divide by zero
	UTRAP(0x029)			! 029 = internal processor error
	UTRAP(0x02a); UTRAP(0x02b); UTRAP(0x02c); UTRAP(0x02d); UTRAP(0x02e); UTRAP(0x02f)
kdatafault:
	! Data-access faults taken at TL>0; most funnel into winfault, which
	! sorts out faults taken while handling register-window spill/fill.
	VTRAP(T_DATAFAULT, winfault)	! 030 = data fetch fault
	UTRAP(0x031)			! 031 = data MMU miss -- no MMU
	VTRAP(T_DATA_ERROR, winfault)	! 032 = data fetch fault
	VTRAP(T_DATA_PROT, winfault)	! 033 = data fetch fault
	VTRAP(T_ALIGN, checkalign)	! 034 = address alignment error -- we could fix it inline...
	TRAP(T_LDDF_ALIGN)		! 035 = LDDF address alignment error -- we could fix it inline...
	TRAP(T_STDF_ALIGN)		! 036 = STDF address alignment error -- we could fix it inline...
	TRAP(T_PRIVACT)			! 037 = privileged action
	UTRAP(0x038); UTRAP(0x039); UTRAP(0x03a); UTRAP(0x03b); UTRAP(0x03c);
	UTRAP(0x03d); UTRAP(0x03e); UTRAP(0x03f);
	VTRAP(T_ASYNC_ERROR, winfault)	! 040 = data fetch fault
	SOFTINT4U(1, IE_L1)		! 041 = level 1 interrupt
	HARDINT4U(2)			! 042 = level 2 interrupt
	HARDINT4U(3)			! 043 = level 3 interrupt
	SOFTINT4U(4, IE_L4)		! 044 = level 4 interrupt
	HARDINT4U(5)			! 045 = level 5 interrupt
	SOFTINT4U(6, IE_L6)		! 046 = level 6 interrupt
	HARDINT4U(7)			! 047 = level 7 interrupt
	HARDINT4U(8)			! 048 = level 8 interrupt
	HARDINT4U(9)			! 049 = level 9 interrupt
	HARDINT4U(10)			! 04a = level 10 interrupt
	HARDINT4U(11)			! 04b = level 11 interrupt
	ZS_INTERRUPT4U			! 04c = level 12 (zs) interrupt
	HARDINT4U(13)			! 04d = level 13 interrupt
	HARDINT4U(14)			! 04e = level 14 interrupt
	HARDINT4U(15)			! 04f = nonmaskable interrupt
	UTRAP(0x050); UTRAP(0x051); UTRAP(0x052); UTRAP(0x053); UTRAP(0x054); UTRAP(0x055)
	UTRAP(0x056); UTRAP(0x057); UTRAP(0x058); UTRAP(0x059); UTRAP(0x05a); UTRAP(0x05b)
	UTRAP(0x05c); UTRAP(0x05d); UTRAP(0x05e); UTRAP(0x05f)
	VTRAP(0x060, interrupt_vector); ! 060 = interrupt vector
	TRAP(T_PA_WATCHPT)		! 061 = physical address data watchpoint
	TRAP(T_VA_WATCHPT)		! 062 = virtual address data watchpoint
	TRAP(T_ECCERR)			! 063 = corrected ECC error
kfast_IMMU_miss:			! 064 = fast instr access MMU miss
	! Fast-path TSB lookup: if the 8K TSB holds a valid entry with a
	! matching tag, load it straight into the ITLB and retry; otherwise
	! punt to the software handler (instr_miss).
	ldxa	[%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
#ifdef NO_TSB
	ba,a	%icc, instr_miss	! No TSB configured: always take the
					! slow path (",a" annuls the ldxa below)
#endif
	ldxa	[%g0] ASI_IMMU, %g1	! Load IMMU tag target register
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag:data into %g4:%g5
	brgez,pn %g5, instr_miss	! Entry invalid?  Punt
	 cmp	%g1, %g4		! Compare TLB tags
	bne,pn %xcc, instr_miss		! Got right tag?
	 nop
	CLRTT
	stxa	%g5, [%g0] ASI_IMMU_DATA_IN ! Enter new mapping
	retry				! Try new mapping
1:
	! NOTE(review): nothing above falls through past the retry; this
	! looks like a guard -- "sir" forces a reset if control ever gets
	! here, and TA32 pads out the remaining vector slots.  Confirm.
	sir
	TA32
kfast_DMMU_miss:			! 068 = fast data access MMU miss
	! Fast-path TSB lookup for data accesses; mirrors kfast_IMMU_miss
	! above but targets the DMMU and falls back to data_miss.
	ldxa	[%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
#ifdef NO_TSB
	ba,a	%icc, data_miss		! No TSB configured: always take the
					! slow path (",a" annuls the ldxa below)
#endif
	ldxa	[%g0] ASI_DMMU, %g1	! Load DMMU tag target register
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag and data into %g4 and %g5
	brgez,pn %g5, data_miss		! Entry invalid?  Punt
	 cmp	%g1, %g4		! Compare TLB tags
	bnz,pn	%xcc, data_miss		! Got right tag?
	 nop
	CLRTT
#ifdef TRAPSTATS
	! Count kernel DMMU fast-path hits (debug statistics only).
	sethi	%hi(_C_LABEL(kdhit)), %g1
	lduw	[%g1+%lo(_C_LABEL(kdhit))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(kdhit))]
#endif
	stxa	%g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
	retry				! Try new mapping
1:
	! NOTE(review): apparently unreachable guard, as in kfast_IMMU_miss.
	sir
	TA32
kfast_DMMU_protection:			! 06c = fast data access MMU protection
	! Write-protection fault at TL>0.  With HWREF this goes to
	! dmmu_write_fault (presumably software ref/mod-bit handling --
	! handler is outside this view); otherwise to the generic winfault.
#ifdef TRAPSTATS
	! Count kernel DMMU protection faults (debug statistics only).
	sethi	%hi(_C_LABEL(kdprot)), %g1
	lduw	[%g1+%lo(_C_LABEL(kdprot))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(kdprot))]
#endif
#ifdef HWREF
	ba,a,pt	%xcc, dmmu_write_fault
#else
	ba,a,pt	%xcc, winfault
#endif
	nop
	TA32
	TRAP(0x070)			! 0x070 fast_ECC_error
					! Implementation dependent traps
	UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076)
	UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c)
	UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f)
TABLE(uspill):
	! Window spill/fill vectors: each occupies a 128-byte (4-slot)
	! vector, hence the TA32 padding after the single-slot UTRAPs.
	! SPILL*/FILL* select 64- vs 32-bit window format; the ASI argument
	! selects user (ASI_AIUS) vs nucleus (ASI_N) address space.
	SPILL64(1,ASI_AIUS)		! 0x080 spill_0_normal -- used to save user windows
	SPILL32(2,ASI_AIUS)		! 0x084 spill_1_normal
	SPILLBOTH(1b,2b,ASI_AIUS)	! 0x088 spill_2_normal
	UTRAP(0x08c); TA32		! 0x08c spill_3_normal
TABLE(kspill):
	SPILL64(1,ASI_N)		! 0x090 spill_4_normal -- used to save supervisor windows
	SPILL32(2,ASI_N)		! 0x094 spill_5_normal
	SPILLBOTH(1b,2b,ASI_N)		! 0x098 spill_6_normal
	UTRAP(0x09c); TA32		! 0x09c spill_7_normal
TABLE(uspillk):
	SPILL64(1,ASI_AIUS)		! 0x0a0 spill_0_other -- used to save user windows in nucleus mode
	SPILL32(2,ASI_AIUS)		! 0x0a4 spill_1_other
	SPILLBOTH(1b,2b,ASI_AIUS)	! 0x0a8 spill_2_other
	UTRAP(0x0ac); TA32		! 0x0ac spill_3_other
	UTRAP(0x0b0); TA32		! 0x0b0 spill_4_other
	UTRAP(0x0b4); TA32		! 0x0b4 spill_5_other
	UTRAP(0x0b8); TA32		! 0x0b8 spill_6_other
	UTRAP(0x0bc); TA32		! 0x0bc spill_7_other
TABLE(ufill):
	FILL64(nufill8,ASI_AIUS)	! 0x0c0 fill_0_normal -- used to fill windows when running nucleus mode from user
	FILL32(nufill4,ASI_AIUS)	! 0x0c4 fill_1_normal
	FILLBOTH(nufill8,nufill4,ASI_AIUS) ! 0x0c8 fill_2_normal
	UTRAP(0x0cc); TA32		! 0x0cc fill_3_normal
TABLE(sfill):
	FILL64(sfill8,ASI_N)		! 0x0d0 fill_4_normal -- used to fill windows when running nucleus mode from supervisor
	FILL32(sfill4,ASI_N)		! 0x0d4 fill_5_normal
	FILLBOTH(sfill8,sfill4,ASI_N)	! 0x0d8 fill_6_normal
	UTRAP(0x0dc); TA32		! 0x0dc fill_7_normal
TABLE(kfill):
	FILL64(nkfill8,ASI_AIUS)	! 0x0e0 fill_0_other -- used to fill user windows when running nucleus mode -- will we ever use this?
	FILL32(nkfill4,ASI_AIUS)	! 0x0e4 fill_1_other
	FILLBOTH(nkfill8,nkfill4,ASI_AIUS)! 0x0e8 fill_2_other
	UTRAP(0x0ec); TA32		! 0x0ec fill_3_other
	UTRAP(0x0f0); TA32		! 0x0f0 fill_4_other
	UTRAP(0x0f4); TA32		! 0x0f4 fill_5_other
	UTRAP(0x0f8); TA32		! 0x0f8 fill_6_other
	UTRAP(0x0fc); TA32		! 0x0fc fill_7_other
TABLE(syscall):
	! Software traps (tcc instructions) taken at TL>0.  Note that unlike
	! the TL=0 table, 0x140-0x143 are STRAPs here, not SYSCALLs.
	SYSCALL				! 0x100 = sun syscall
	BPT				! 0x101 = pseudo breakpoint instruction
	STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107)
	SYSCALL				! 0x108 = svr4 syscall
	SYSCALL				! 0x109 = bsd syscall
	BPT_KGDB_EXEC			! 0x10a = enter kernel gdb on kernel startup
	STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f);
	STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117)
	STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f)
	STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127)
	STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f)
	STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137)
	STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f)
	STRAP(0x140); STRAP(0x141); STRAP(0x142); STRAP(0x143); STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147)
	STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f)
	STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157)
	STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f)
	STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167)
	STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f)
	STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177)
	STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f)
	! Traps beyond 0x17f are reserved
	UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187)
	UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f)
	UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197)
	UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f)
	UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7)
	UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af)
	UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7)
	UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf)
	UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7)
	UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf)
	UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7)
	UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df)
	UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7)
	UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef)
	UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7)
	UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff)
   1059 
   1060 #ifdef SUN4V
   1061 
/* Macros for sun4v traps */

	! Emit \count generic trap entries that branch to the common slow
	! handler.  ".align 32" pads each entry to the 32-byte vector size.
	.macro	sun4v_trap_entry count
	.rept	\count
	ba	slowtrap
	 nop
	.align	32
	.endr
	.endm

	! Emit \count entries that must never be taken; "sir" forces a
	! software-initiated reset if one fires.
	.macro	sun4v_trap_entry_fail count
	.rept	\count
	sir
	.align	32
	.endr
	.endm

	! Same as above, but for spill/fill vectors, which are 128 bytes
	! (four slots) each.
	.macro	sun4v_trap_entry_spill_fill_fail count
	.rept	\count
	sir
	.align	128
	.endr
	.endm
   1085 
   1086 /* The actual trap base for sun4v */
   1087 	.align	0x8000
   1088 	.globl	_C_LABEL(trapbase_sun4v)
   1089 _C_LABEL(trapbase_sun4v):
   1090 	!
   1091 	! trap level 0
   1092 	!
   1093 	sun4v_trap_entry 8					! 0x000-0x007
   1094 	VTRAP(T_INST_EXCEPT, sun4v_tl0_itsb_miss)		! 0x008 - inst except
   1095 	VTRAP(T_TEXTFAULT, sun4v_tl0_itsb_miss)			! 0x009 - inst MMU miss
   1096 	sun4v_trap_entry 26					! 0x00a-0x023
   1097 	CLEANWIN0						! 0x24-0x27 = clean window
   1098 	sun4v_trap_entry 9					! 0x028-0x030
   1099 	VTRAP(T_DATA_MMU_MISS, sun4v_dtsb_miss)			! 0x031 = data MMU miss
   1100 	sun4v_trap_entry 2					! 0x032-0x033
   1101 	TRAP(T_ALIGN)						! 0x034 = address alignment error
   1102 	sun4v_trap_entry 12					! 0x035-0x040
   1103 	HARDINT4V(1)						! 0x041 = level 1 interrupt
   1104 	HARDINT4V(2)						! 0x042 = level 2 interrupt
   1105 	HARDINT4V(3)						! 0x043 = level 3 interrupt
   1106 	HARDINT4V(4)						! 0x044 = level 4 interrupt
   1107 	HARDINT4V(5)						! 0x045 = level 5 interrupt
   1108 	HARDINT4V(6)						! 0x046 = level 6 interrupt
   1109 	HARDINT4V(7)						! 0x047 = level 7 interrupt
   1110 	HARDINT4V(8)						! 0x048 = level 8 interrupt
   1111 	HARDINT4V(9)						! 0x049 = level 9 interrupt
   1112 	HARDINT4V(10)						! 0x04a = level 10 interrupt
   1113 	HARDINT4V(11)						! 0x04b = level 11 interrupt
   1114 	HARDINT4V(12)						! 0x04c = level 12 interrupt
   1115 	HARDINT4V(13)						! 0x04d = level 13 interrupt
   1116 	HARDINT4V(14)						! 0x04e = level 14 interrupt
   1117 	HARDINT4V(15)						! 0x04f = level 15 interrupt
   1118 	sun4v_trap_entry 28					! 0x050-0x06b
   1119 	VTRAP(T_FDMMU_PROT, sun4v_tl0_dtsb_prot)		! 0x06c
   1120 	sun4v_trap_entry 15					! 0x06d-0x07b
   1121 	VTRAP(T_CPU_MONDO, sun4v_cpu_mondo)			! 0x07c = cpu mondo
   1122 	VTRAP(T_DEV_MONDO, sun4v_dev_mondo)			! 0x07d = dev mondo
   1123 	sun4v_trap_entry 2					! 0x07e-0x07f
   1124 	SPILL64(uspill8_sun4vt0,ASI_AIUS)			! 0x080 spill_0_normal -- used to save user windows in user mode
   1125 	SPILL32(uspill4_sun4vt0,ASI_AIUS)			! 0x084 spill_1_normal
   1126 	SPILLBOTH(uspill8_sun4vt0,uspill4_sun4vt0,ASI_AIUS)	! 0x088 spill_2_normal
   1127 	sun4v_trap_entry_spill_fill_fail 1			! 0x08c spill_3_normal
   1128 	SPILL64(kspill8_sun4vt0,ASI_N)				! 0x090 spill_4_normal  -- used to save supervisor windows
   1129 	SPILL32(kspill4_sun4vt0,ASI_N)				! 0x094 spill_5_normal
   1130 	SPILLBOTH(kspill8_sun4vt0,kspill4_sun4vt0,ASI_N)	! 0x098 spill_6_normal
   1131 	sun4v_trap_entry_spill_fill_fail 1			! 0x09c spill_7_normal
   1132 	SPILL64(uspillk8_sun4vt0,ASI_AIUS)			! 0x0a0 spill_0_other -- used to save user windows in supervisor mode
   1133 	SPILL32(uspillk4_sun4vt0,ASI_AIUS)			! 0x0a4 spill_1_other
   1134 	SPILLBOTH(uspillk8_sun4vt0,uspillk4_sun4vt0,ASI_AIUS)	! 0x0a8 spill_2_other
   1135 	sun4v_trap_entry_spill_fill_fail 1			! 0x0ac spill_3_other
   1136 	sun4v_trap_entry_spill_fill_fail 1			! 0x0b0 spill_4_other
   1137 	sun4v_trap_entry_spill_fill_fail 1			! 0x0b4 spill_5_other
   1138 	sun4v_trap_entry_spill_fill_fail 1			! 0x0b8 spill_6_other
   1139 	sun4v_trap_entry_spill_fill_fail 1			! 0x0bc spill_7_other
   1140 	FILL64(ufill8_sun4vt0,ASI_AIUS)				! 0x0c0 fill_0_normal -- used to fill windows when running user mode
   1141 	FILL32(ufill4_sun4vt0,ASI_AIUS)				! 0x0c4 fill_1_normal
   1142 	FILLBOTH(ufill8_sun4vt0,ufill4_sun4vt0,ASI_AIUS)	! 0x0c8 fill_2_normal
   1143 	sun4v_trap_entry_spill_fill_fail 1			! 0x0cc fill_3_normal
   1144 	FILL64(kfill8_sun4vt0,ASI_N)				! 0x0d0 fill_4_normal  -- used to fill windows when running supervisor mode
   1145 	FILL32(kfill4_sun4vt0,ASI_N)				! 0x0d4 fill_5_normal
   1146 	FILLBOTH(kfill8_sun4vt0,kfill4_sun4vt0,ASI_N)		! 0x0d8 fill_6_normal
   1147 	sun4v_trap_entry_spill_fill_fail 1			! 0x0dc fill_7_normal
   1148 	FILL64(ufillk8_sun4vt0,ASI_AIUS)			! 0x0e0 fill_0_other
   1149 	FILL32(ufillk4_sun4vt0,ASI_AIUS)			! 0x0e4 fill_1_other
   1150 	FILLBOTH(ufillk8_sun4vt0,ufillk4_sun4vt0,ASI_AIUS)	! 0x0e8 fill_2_other
   1151 	sun4v_trap_entry_spill_fill_fail 1			! 0x0ec fill_3_other
   1152 	sun4v_trap_entry_spill_fill_fail 1			! 0x0f0 fill_4_other
   1153 	sun4v_trap_entry_spill_fill_fail 1			! 0x0f4 fill_5_other
   1154 	sun4v_trap_entry_spill_fill_fail 1			! 0x0f8 fill_6_other
   1155 	sun4v_trap_entry_spill_fill_fail 1			! 0x0fc fill_7_other
   1156 	SYSCALL							! 0x100 = syscall
   1157 	BPT							! 0x101 = pseudo breakpoint instruction
   1158 	sun4v_trap_entry 254					! 0x102-0x1ff
   1159 	!
   1160 	! trap level 1
   1161 	!
   1162 	sun4v_trap_entry 36					! 0x000-0x023
   1163 	CLEANWIN1						! 0x24-0x27 = clean window
   1164 	sun4v_trap_entry 8					! 0x028-0x02F
   1165 	VTRAP(T_DATAFAULT, sun4v_tl1_ptbl_miss)			! 0x030 = ???
   1166 	VTRAP(T_DATA_MMU_MISS, sun4v_tl1_dtsb_miss)		! 0x031 = data MMU miss
   1167 	VTRAP(T_DATA_ERROR, sun4v_tl1_ptbl_miss)		! 0x032 = ???
   1168 	VTRAP(T_DATA_PROT, sun4v_tl1_ptbl_miss)			! 0x033 = ???
   1169 	sun4v_trap_entry 56					! 0x034-0x06b
   1170 	VTRAP(T_FDMMU_PROT, sun4v_tl1_dtsb_prot)		! 0x06c
   1171 	sun4v_trap_entry 19					! 0x06d-0x07f
   1172 	sun4v_tl1_uspill_normal					! 0x080 spill_0_normal -- save user windows
   1173 	sun4v_tl1_uspill_normal					! 0x084 spill_1_normal
   1174 	sun4v_tl1_uspill_normal					! 0x088 spill_2_normal
   1175 	sun4v_trap_entry_spill_fill_fail 1			! 0x08c spill_3_normal
   1176 	SPILL64(kspill8_sun4vt1,ASI_N)				! 0x090 spill_4_normal -- save supervisor windows
   1177 	SPILL32(kspill4_sun4vt1,ASI_N)				! 0x094 spill_5_normal
   1178 	SPILLBOTH(kspill8_sun4vt1,kspill4_sun4vt1,ASI_N)	! 0x098 spill_6_normal
   1179 	sun4v_trap_entry_spill_fill_fail 1			! 0x09c spill_7_normal
   1180 	sun4v_tl1_uspill_other					! 0x0a0 spill_0_other -- save user windows in nucleus mode
   1181 	sun4v_tl1_uspill_other					! 0x0a4 spill_1_other
   1182 	sun4v_tl1_uspill_other					! 0x0a8 spill_2_other
   1183 	sun4v_trap_entry_spill_fill_fail 1			! 0x0ac spill_3_other
   1184 	sun4v_trap_entry_spill_fill_fail 1			! 0x0b0 spill_4_other
   1185 	sun4v_trap_entry_spill_fill_fail 1			! 0x0b4 spill_5_other
   1186 	sun4v_trap_entry_spill_fill_fail 1			! 0x0b8 spill_6_other
   1187 	sun4v_trap_entry_spill_fill_fail 1			! 0x0bc spill_7_other
   1188 	FILL64(ufill8_sun4vt1,ASI_AIUS)				! 0x0c0 fill_0_normal -- fill windows when running nucleus mode from user
   1189 	FILL32(ufill4_sun4vt1,ASI_AIUS)				! 0x0c4 fill_1_normal
   1190 	FILLBOTH(ufill8_sun4vt1,ufill4_sun4vt1,ASI_AIUS)	! 0x0c8 fill_2_normal
   1191 	sun4v_trap_entry_spill_fill_fail 1			! 0x0cc fill_3_normal
   1192 	FILL64(kfill8_sun4vt1,ASI_N)				! 0x0d0 fill_4_normal -- fill windows when running nucleus mode from supervisor
   1193 	FILL32(kfill4_sun4vt1,ASI_N)				! 0x0d4 fill_5_normal
   1194 	FILLBOTH(kfill8_sun4vt1,kfill4_sun4vt1,ASI_N)		! 0x0d8 fill_6_normal
   1195 	sun4v_trap_entry_spill_fill_fail 1			! 0x0dc fill_7_normal
   1196 	FILL64(ufillk8_sun4vt1,ASI_AIUS)			! 0x0e0 fill_0_other -- fill user windows when running nucleus mode -- will we ever use this?
   1197 	FILL32(ufillk4_sun4vt1,ASI_AIUS)			! 0x0e4 fill_1_other
   1198 	FILLBOTH(ufillk8_sun4vt1,ufillk4_sun4vt1,ASI_AIUS)	! 0x0e8 fill_2_other
   1199 	sun4v_trap_entry_spill_fill_fail 1			! 0x0ec fill_3_other
   1200 	sun4v_trap_entry_spill_fill_fail 1			! 0x0f0 fill_4_other
   1201 	sun4v_trap_entry_spill_fill_fail 1			! 0x0f4 fill_5_other
   1202 	sun4v_trap_entry_spill_fill_fail 1			! 0x0f8 fill_6_other
   1203 	sun4v_trap_entry_spill_fill_fail 1			! 0x0fc fill_7_other
   1204 	sun4v_trap_entry_fail 256				! 0x100-0x1ff
   1205 
   1206 #endif
   1207 
#if 0
/*
 * If the cleanwin trap handler detects an overflow we come here.
 * We need to fix up the window registers, switch to the interrupt
 * stack, and then trap to the debugger.
 *
 * NOTE(review): this block is compiled out ("#if 0") and kept for
 * reference only; it is not part of the running kernel.
 */
cleanwin_overflow:
	!! We've already incremented %cleanwin
	!! So restore %cwp
	rdpr	%cwp, %l0
	dec	%l0
	wrpr	%l0, %g0, %cwp
	set	EINTSTACK-STKB-CC64FSZ, %l0
	save	%l0, 0, %sp

	ta	1		! Enter debugger
	sethi	%hi(1f), %o0
	call	_C_LABEL(panic)
	 or	%o0, %lo(1f), %o0
	restore
	retry
	.data
1:
	.asciz	"Kernel stack overflow!"
	_ALIGN
	.text
#endif
   1235 
   1236 #ifdef NOTDEF_DEBUG
   1237 /*
   1238  * A hardware red zone is impossible.  We simulate one in software by
   1239  * keeping a `red zone' pointer; if %sp becomes less than this, we panic.
   1240  * This is expensive and is only enabled when debugging.
   1241  */
#define	REDSIZE	(PCB_SIZE)	/* Mark used portion of pcb structure out of bounds */
#define	REDSTACK 2048		/* size of `panic: stack overflow' region */
	.data
	_ALIGN
	! NOTE(review): _C_LABEL(XXX) below is a placeholder symbol; this
	! NOTDEF_DEBUG-only code does not assemble as-is.
redzone:
	.xword	_C_LABEL(XXX) + REDSIZE
redstack:
	.space	REDSTACK
eredstack:
Lpanic_red:
	.asciz	"kernel stack overflow"
	_ALIGN
	.text

	/* set stack pointer redzone to base+minstack; alters base */
#define	SET_SP_REDZONE(base, tmp) \
	add	base, REDSIZE, base; \
	sethi	%hi(_C_LABEL(redzone)), tmp; \
	stx	base, [tmp + %lo(_C_LABEL(redzone))]

	/* variant with a constant */
#define	SET_SP_REDZONE_CONST(const, tmp1, tmp2) \
	set	(const) + REDSIZE, tmp1; \
	sethi	%hi(_C_LABEL(redzone)), tmp2; \
	stx	tmp1, [tmp2 + %lo(_C_LABEL(redzone))]

	/* check stack pointer against redzone (uses two temps) */
	/* Branches to panic_red when %sp has dropped below the red zone;
	 * stacks above KERNBASE are skipped by the first test. */
#define	CHECK_SP_REDZONE(t1, t2) \
	sethi	KERNBASE, t1;	\
	cmp	%sp, t1;	\
	blu,pt	%xcc, 7f;	\
	 sethi	%hi(_C_LABEL(redzone)), t1; \
	ldx	[t1 + %lo(_C_LABEL(redzone))], t2; \
	cmp	%sp, t2;	/* if sp >= t2, not in red zone */ \
	blu	panic_red; nop;	/* and can continue normally */ \
7:
   1278 
   1279 panic_red:
   1280 	/* move to panic stack */
   1281 	stx	%g0, [t1 + %lo(_C_LABEL(redzone))];
   1282 	set	eredstack - BIAS, %sp;
   1283 	/* prevent panic() from lowering ipl */
   1284 	sethi	%hi(_C_LABEL(panicstr)), t2;
   1285 	set	Lpanic_red, t2;
   1286 	st	t2, [t1 + %lo(_C_LABEL(panicstr))];
   1287 	wrpr	g0, 15, %pil		/* t1 = splhigh() */
   1288 	save	%sp, -CCF64SZ, %sp;	/* preserve current window */
   1289 	sethi	%hi(Lpanic_red), %o0;
   1290 	call	_C_LABEL(panic);
   1291 	 or %o0, %lo(Lpanic_red), %o0;
   1292 
   1293 
   1294 #else
   1295 
   1296 #define	SET_SP_REDZONE(base, tmp)
   1297 #define	SET_SP_REDZONE_CONST(const, t1, t2)
   1298 #define	CHECK_SP_REDZONE(t1, t2)
   1299 #endif
   1300 
#define TRACESIZ	0x01000
	/*
	 * Trap-trace ring buffer.  trap_trace_dis gates tracing (non-zero
	 * disables it), trap_trace_ptr is the current write position, and
	 * the buffer proper runs from trap_trace to trap_trace_end.
	 */
	.globl	_C_LABEL(trap_trace)
	.globl	_C_LABEL(trap_trace_ptr)
	.globl	_C_LABEL(trap_trace_end)
	.globl	_C_LABEL(trap_trace_dis)
	.data
_C_LABEL(trap_trace_dis):
	.word	1, 1		! Starts disabled.  DDB turns it on.
_C_LABEL(trap_trace_ptr):
	.word	0, 0, 0, 0
_C_LABEL(trap_trace):
	.space	TRACESIZ
_C_LABEL(trap_trace_end):
	.space	0x20		! safety margin
   1315 
   1316 
   1317 /*
   1318  * v9 machines do not have a trap window.
   1319  *
   1320  * When we take a trap the trap state is pushed on to the stack of trap
   1321  * registers, interrupts are disabled, then we switch to an alternate set
   1322  * of global registers.
   1323  *
   1324  * The trap handling code needs to allocate a trap frame on the kernel, or
   1325  * for interrupts, the interrupt stack, save the out registers to the trap
   1326  * frame, then switch to the normal globals and save them to the trap frame
   1327  * too.
   1328  *
 * XXX it would be good to save the interrupt stack frame to the kernel
 * stack so we wouldn't have to copy it later if we needed to handle an AST.
   1331  *
   1332  * Since kernel stacks are all on one page and the interrupt stack is entirely
   1333  * within the locked TLB, we can use physical addressing to save out our
   1334  * trap frame so we don't trap during the TRAP_SETUP() operation.  There
   1335  * is unfortunately no supportable method for issuing a non-trapping save.
   1336  *
   1337  * However, if we use physical addresses to save our trapframe, we will need
   1338  * to clear out the data cache before continuing much further.
   1339  *
   1340  * In short, what we need to do is:
   1341  *
   1342  *	all preliminary processing is done using the alternate globals
   1343  *
   1344  *	When we allocate our trap windows we must give up our globals because
   1345  *	their state may have changed during the save operation
   1346  *
   1347  *	we need to save our normal globals as soon as we have a stack
   1348  *
   1349  * Finally, we may now call C code.
   1350  *
   1351  * This macro will destroy %g5-%g7.  %g0-%g4 remain unchanged.
   1352  *
   1353  * In order to properly handle nested traps without lossage, alternate
   1354  * global %g6 is used as a kernel stack pointer.  It is set to the last
   1355  * allocated stack pointer (trapframe) and the old value is stored in
   1356  * tf_kstack.  It is restored when returning from a trap.  It is cleared
   1357  * on entering user mode.
   1358  */
   1359 
   1360  /*
   1361   * Other misc. design criteria:
   1362   *
   1363   * When taking an address fault, fault info is in the sfsr, sfar,
   1364   * TLB_TAG_ACCESS registers.  If we take another address fault
   1365   * while trying to handle the first fault then that information,
   1366   * the only information that tells us what address we trapped on,
   1367   * can potentially be lost.  This trap can be caused when allocating
   1368   * a register window with which to handle the trap because the save
   1369   * may try to store or restore a register window that corresponds
   1370   * to part of the stack that is not mapped.  Preventing this trap,
   1371   * while possible, is much too complicated to do in a trap handler,
   1372   * and then we will need to do just as much work to restore the processor
   1373   * window state.
   1374   *
   1375   * Possible solutions to the problem:
   1376   *
   1377   * Since we have separate AG, MG, and IG, we could have all traps
   1378   * above level-1 preserve AG and use other registers.  This causes
   1379   * a problem for the return from trap code which is coded to use
   1380   * alternate globals only.
   1381   *
   1382   * We could store the trapframe and trap address info to the stack
   1383   * using physical addresses.  Then we need to read it back using
   1384   * physical addressing, or flush the D$.
   1385   *
   1386   * We could identify certain registers to hold address fault info.
   1387   * this means that these registers need to be preserved across all
  * fault handling.  But since we only have 7 usable globals, that
   1389   * really puts a cramp in our style.
   1390   *
   1391   * Finally, there is the issue of returning from kernel mode to user
   1392   * mode.  If we need to issue a restore of a user window in kernel
   1393   * mode, we need the window control registers in a user mode setup.
   1394   * If the trap handlers notice the register windows are in user mode,
   1395   * they will allocate a trapframe at the bottom of the kernel stack,
   1396   * overwriting the frame we were trying to return to.  This means that
   1397   * we must complete the restoration of all registers *before* switching
   1398   * to a user-mode window configuration.
   1399   *
   1400   * Essentially we need to be able to write re-entrant code w/no stack.
   1401   */
	.data
	! printf-style format strings, presumably used by debug variants of
	! TRAP_SETUP/INTR_SETUP -- the consumers are outside this view.
trap_setup_msg:
	.asciz	"TRAP_SETUP: tt=%x osp=%x nsp=%x tl=%x tpc=%x\n"
	_ALIGN
intr_setup_msg:
	.asciz	"INTR_SETUP: tt=%x osp=%x nsp=%x tl=%x tpc=%x\n"
	_ALIGN
	.text
   1410 
#ifdef DEBUG
	/* Only save a snapshot of locals and ins in DEBUG kernels */
	/*
	 * Store %l0-%l7 and %i0-%i7 into the trapframe addressed by %g6,
	 * plus %g1 into tf_fault.  Assumes %g6 points at the trapframe
	 * under construction (see the TRAP_SETUP comments above) and that
	 * %g1 holds fault info at this point -- TODO confirm against the
	 * TRAP_SETUP callers.  Expands to nothing in non-DEBUG kernels.
	 */
#define	SAVE_LOCALS_INS	\
	/* Save local registers to trap frame */ \
	stx	%l0, [%g6 + CC64FSZ + STKB + TF_L + (0*8)]; \
	stx	%l1, [%g6 + CC64FSZ + STKB + TF_L + (1*8)]; \
	stx	%l2, [%g6 + CC64FSZ + STKB + TF_L + (2*8)]; \
	stx	%l3, [%g6 + CC64FSZ + STKB + TF_L + (3*8)]; \
	stx	%l4, [%g6 + CC64FSZ + STKB + TF_L + (4*8)]; \
	stx	%l5, [%g6 + CC64FSZ + STKB + TF_L + (5*8)]; \
	stx	%l6, [%g6 + CC64FSZ + STKB + TF_L + (6*8)]; \
	stx	%l7, [%g6 + CC64FSZ + STKB + TF_L + (7*8)]; \
\
	/* Save in registers to trap frame */ \
	stx	%i0, [%g6 + CC64FSZ + STKB + TF_I + (0*8)]; \
	stx	%i1, [%g6 + CC64FSZ + STKB + TF_I + (1*8)]; \
	stx	%i2, [%g6 + CC64FSZ + STKB + TF_I + (2*8)]; \
	stx	%i3, [%g6 + CC64FSZ + STKB + TF_I + (3*8)]; \
	stx	%i4, [%g6 + CC64FSZ + STKB + TF_I + (4*8)]; \
	stx	%i5, [%g6 + CC64FSZ + STKB + TF_I + (5*8)]; \
	stx	%i6, [%g6 + CC64FSZ + STKB + TF_I + (6*8)]; \
	stx	%i7, [%g6 + CC64FSZ + STKB + TF_I + (7*8)]; \
\
	stx	%g1, [%g6 + CC64FSZ + STKB + TF_FAULT];
#else
#define	SAVE_LOCALS_INS
#endif
   1438 
   1439 #ifdef _LP64
   1440 #define	FIXUP_TRAP_STACK \
   1441 	btst	1, %g6;						/* Fixup 64-bit stack if necessary */ \
   1442 	bnz,pt	%icc, 1f; \
   1443 	 add	%g6, %g5, %g6;					/* Allocate a stack frame */ \
   1444 	inc	-BIAS, %g6; \
   1445 1:
   1446 #else
   1447 #define	FIXUP_TRAP_STACK \
   1448 	srl	%g6, 0, %g6;					/* truncate at 32-bits */ \
   1449 	btst	1, %g6;						/* Fixup 64-bit stack if necessary */ \
   1450 	add	%g6, %g5, %g6;					/* Allocate a stack frame */ \
   1451 	add	%g6, BIAS, %g5; \
   1452 	movne	%icc, %g5, %g6;
   1453 #endif
   1454 
   1455 #ifdef _LP64
   1456 #define	TRAP_SETUP(stackspace) \
   1457 	sethi	%hi(CPCB), %g6; \
   1458 	sethi	%hi((stackspace)), %g5; \
   1459 	LDPTR	[%g6 + %lo(CPCB)], %g6; \
   1460 	sethi	%hi(USPACE), %g7;				/* Always multiple of page size */ \
   1461 	or	%g5, %lo((stackspace)), %g5; \
   1462 	add	%g6, %g7, %g6; \
   1463 	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
   1464 	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
   1465 	\
   1466 	sub	%g7, WSTATE_KERN, %g7;				/* Compare & leave in register */ \
   1467 	movrz	%g7, %sp, %g6;					/* Select old (kernel) stack or base of kernel stack */ \
   1468 	FIXUP_TRAP_STACK \
   1469 	SAVE_LOCALS_INS	\
   1470 	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
   1471 	stx	%i0, [%sp + CC64FSZ + BIAS + TF_O + (0*8)];		/* Save out registers to trap frame */ \
   1472 	stx	%i1, [%sp + CC64FSZ + BIAS + TF_O + (1*8)]; \
   1473 	stx	%i2, [%sp + CC64FSZ + BIAS + TF_O + (2*8)]; \
   1474 	stx	%i3, [%sp + CC64FSZ + BIAS + TF_O + (3*8)]; \
   1475 	stx	%i4, [%sp + CC64FSZ + BIAS + TF_O + (4*8)]; \
   1476 	stx	%i5, [%sp + CC64FSZ + BIAS + TF_O + (5*8)]; \
   1477 \
   1478 	stx	%i6, [%sp + CC64FSZ + BIAS + TF_O + (6*8)]; \
   1479 	brz,pt	%g7, 1f;					/* If we were in kernel mode start saving globals */ \
   1480 	 stx	%i7, [%sp + CC64FSZ + BIAS + TF_O + (7*8)]; \
   1481 	mov	CTX_PRIMARY, %g7; \
   1482 	/* came from user mode -- switch to kernel mode stack */ \
   1483 	rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
   1484 	wrpr	%g0, 0, %canrestore; \
   1485 	wrpr	%g0, %g5, %otherwin; \
   1486 	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
   1487 \
   1488 	SET_MMU_CONTEXTID %g0, %g7,%g5; 			/* Switch MMU to kernel primary context */ \
   1489 	sethi	%hi(KERNBASE), %g5; \
   1490 	flush	%g5;						/* Some convenient address that won't trap */ \
   1491 1:
   1492 
   1493 /*
   1494  * Interrupt setup is almost exactly like trap setup, but we need to
   1495  * go to the interrupt stack if (a) we came from user mode or (b) we
   1496  * came from kernel mode on the kernel stack.
   1497  *
   1498  * We don't guarantee any registers are preserved during this operation.
   1499  * So we can be more efficient.
   1500  */
   1501 #define	INTR_SETUP(stackspace) \
   1502 	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
   1503 	\
   1504 	sethi	%hi(EINTSTACK-BIAS), %g6; \
   1505 	sethi	%hi(EINTSTACK-INTSTACK), %g4; \
   1506 	\
   1507 	or	%g6, %lo(EINTSTACK-BIAS), %g6;			/* Base of interrupt stack */ \
   1508 	dec	%g4;						/* Make it into a mask */ \
   1509 	\
   1510 	sub	%g6, %sp, %g1;					/* Offset from interrupt stack */ \
   1511 	sethi	%hi((stackspace)), %g5; \
   1512 	\
   1513 	or	%g5, %lo((stackspace)), %g5; \
   1514 \
   1515 	andn	%g1, %g4, %g4;					/* Are we out of the interrupt stack range? */ \
   1516 	xor	%g7, WSTATE_KERN, %g3;				/* Are we on the user stack ? */ \
   1517 	\
   1518 	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
   1519 	orcc	%g3, %g4, %g0;					/* Definitely not off the interrupt stack */ \
   1520 	\
   1521 	sethi	%hi(CPUINFO_VA + CI_EINTSTACK), %g4; \
   1522 	bz,a,pt	%xcc, 1f; \
   1523 	 mov	%sp, %g6; \
   1524 	\
   1525 	ldx	[%g4 + %lo(CPUINFO_VA + CI_EINTSTACK)], %g4; \
   1526 	movrnz	%g4, %g4, %g6;					/* Use saved intr stack if exists */ \
   1527 	\
   1528 1:	add	%g6, %g5, %g5;					/* Allocate a stack frame */ \
   1529 	btst	1, %g6; \
   1530 	bnz,pt	%icc, 1f; \
   1531 \
   1532 	 mov	%g5, %g6; \
   1533 	\
   1534 	add	%g5, -BIAS, %g6; \
   1535 	\
   1536 1:	SAVE_LOCALS_INS	\
   1537 	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
   1538 	stx	%i0, [%sp + CC64FSZ + BIAS + TF_O + (0*8)];		/* Save out registers to trap frame */ \
   1539 	stx	%i1, [%sp + CC64FSZ + BIAS + TF_O + (1*8)]; \
   1540 	stx	%i2, [%sp + CC64FSZ + BIAS + TF_O + (2*8)]; \
   1541 	stx	%i3, [%sp + CC64FSZ + BIAS + TF_O + (3*8)]; \
   1542 	stx	%i4, [%sp + CC64FSZ + BIAS + TF_O + (4*8)]; \
   1543 \
   1544 	stx	%i5, [%sp + CC64FSZ + BIAS + TF_O + (5*8)]; \
   1545 	stx	%i6, [%sp + CC64FSZ + BIAS + TF_O + (6*8)]; \
   1546 	stx	%i6, [%sp + CC64FSZ + BIAS + TF_G + (0*8)];		/* Save fp in clockframe->cf_fp */ \
   1547 	brz,pt	%g3, 1f;					/* If we were in kernel mode start saving globals */ \
   1548 	 stx	%i7, [%sp + CC64FSZ + BIAS + TF_O + (7*8)]; \
   1549 	/* came from user mode -- switch to kernel mode stack */ \
   1550 	 rdpr	%otherwin, %g5;					/* Has this already been done? */ \
   1551 	\
   1552 	brnz,pn	%g5, 1f;					/* Don't set this twice */ \
   1553 	\
   1554 	 rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
   1555 \
   1556 	wrpr	%g0, 0, %canrestore; \
   1557 	\
   1558 	wrpr	%g0, %g5, %otherwin; \
   1559 	\
   1560 	mov	CTX_PRIMARY, %g7; \
   1561 	\
   1562 	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
   1563 	\
   1564 	SET_MMU_CONTEXTID %g0, %g7, %g5;			/* Switch MMU to kernel primary context */ \
   1565 	\
   1566 	sethi	%hi(KERNBASE), %g5; \
   1567 	flush	%g5;						/* Some convenient address that won't trap */ \
   1568 1:
   1569 
   1570 #else /* _LP64 */
   1571 
   1572 #define	TRAP_SETUP(stackspace) \
   1573 	sethi	%hi(CPCB), %g6; \
   1574 	sethi	%hi((stackspace)), %g5; \
   1575 	LDPTR	[%g6 + %lo(CPCB)], %g6; \
   1576 	sethi	%hi(USPACE), %g7; \
   1577 	or	%g5, %lo((stackspace)), %g5; \
   1578 	add	%g6, %g7, %g6; \
   1579 	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
   1580 	\
   1581 	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
   1582 	subcc	%g7, WSTATE_KERN, %g7;				/* Compare & leave in register */ \
   1583 	movz	%icc, %sp, %g6;					/* Select old (kernel) stack or base of kernel stack */ \
   1584 	FIXUP_TRAP_STACK \
   1585 	SAVE_LOCALS_INS \
   1586 	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
   1587 	stx	%i0, [%sp + CC64FSZ + STKB + TF_O + (0*8)];		/* Save out registers to trap frame */ \
   1588 	stx	%i1, [%sp + CC64FSZ + STKB + TF_O + (1*8)]; \
   1589 	stx	%i2, [%sp + CC64FSZ + STKB + TF_O + (2*8)]; \
   1590 	stx	%i3, [%sp + CC64FSZ + STKB + TF_O + (3*8)]; \
   1591 	stx	%i4, [%sp + CC64FSZ + STKB + TF_O + (4*8)]; \
   1592 	stx	%i5, [%sp + CC64FSZ + STKB + TF_O + (5*8)]; \
   1593 	\
   1594 	stx	%i6, [%sp + CC64FSZ + STKB + TF_O + (6*8)]; \
   1595 	brz,pn	%g7, 1f;					/* If we were in kernel mode start saving globals */ \
   1596 	 stx	%i7, [%sp + CC64FSZ + STKB + TF_O + (7*8)]; \
   1597 	mov	CTX_PRIMARY, %g7; \
   1598 	/* came from user mode -- switch to kernel mode stack */ \
   1599 	rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
   1600 	wrpr	%g0, 0, %canrestore; \
   1601 	wrpr	%g0, %g5, %otherwin; \
   1602 	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
   1603 	\
   1604 	SET_MMU_CONTEXTID %g0, %g7, %g5;			/* Switch MMU to kernel primary context */ \
   1605 	sethi	%hi(KERNBASE), %g5; \
   1606 	flush	%g5;						/* Some convenient address that won't trap */ \
   1607 1:
   1608 
   1609 /*
   1610  * Interrupt setup is almost exactly like trap setup, but we need to
   1611  * go to the interrupt stack if (a) we came from user mode or (b) we
   1612  * came from kernel mode on the kernel stack.
   1613  *
   1614  * We don't guarantee any registers are preserved during this operation.
   1615  */
   1616 #define	INTR_SETUP(stackspace) \
   1617 	sethi	%hi(EINTSTACK), %g1; \
   1618 	sethi	%hi((stackspace)), %g5; \
   1619 	btst	1, %sp; \
   1620 	add	%sp, BIAS, %g6; \
   1621 	movz	%icc, %sp, %g6; \
   1622 	or	%g1, %lo(EINTSTACK), %g1; \
   1623 	srl	%g6, 0, %g6;					/* truncate at 32-bits */ \
   1624 	set	(EINTSTACK-INTSTACK), %g7; \
   1625 	or	%g5, %lo((stackspace)), %g5; \
   1626 	sub	%g1, %g6, %g2;					/* Determine if we need to switch to intr stack or not */ \
   1627 	dec	%g7;						/* Make it into a mask */ \
   1628 	sethi	%hi(CPUINFO_VA + CI_EINTSTACK), %g3; \
   1629 	andncc	%g2, %g7, %g0;					/* XXXXXXXXXX This assumes kernel addresses are unique from user addresses */ \
   1630 	LDPTR	[%g3 + %lo(CPUINFO_VA + CI_EINTSTACK)], %g3; \
   1631 	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
   1632 	movrnz	%g3, %g3, %g1;					/* Use saved intr stack if exists */ \
   1633 	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
   1634 	movnz	%xcc, %g1, %g6;					/* Stay on interrupt stack? */ \
   1635 	cmp	%g7, WSTATE_KERN;				/* User or kernel sp? */ \
   1636 	movnz	%icc, %g1, %g6;					/* Stay on interrupt stack? */ \
   1637 	add	%g6, %g5, %g6;					/* Allocate a stack frame */ \
   1638 	\
   1639 	SAVE_LOCALS_INS \
   1640 	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
   1641 	stx	%i0, [%sp + CC64FSZ + STKB + TF_O + (0*8)];		/* Save out registers to trap frame */ \
   1642 	stx	%i1, [%sp + CC64FSZ + STKB + TF_O + (1*8)]; \
   1643 	stx	%i2, [%sp + CC64FSZ + STKB + TF_O + (2*8)]; \
   1644 	stx	%i3, [%sp + CC64FSZ + STKB + TF_O + (3*8)]; \
   1645 	stx	%i4, [%sp + CC64FSZ + STKB + TF_O + (4*8)]; \
   1646 	stx	%i5, [%sp + CC64FSZ + STKB + TF_O + (5*8)]; \
   1647 	stx	%i6, [%sp + CC64FSZ + STKB + TF_O + (6*8)]; \
   1648 	stx	%i6, [%sp + CC64FSZ + STKB + TF_G + (0*8)];		/* Save fp in clockframe->cf_fp */ \
   1649 	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
   1650 	stx	%i7, [%sp + CC64FSZ + STKB + TF_O + (7*8)]; \
   1651 	cmp	%g7, WSTATE_KERN;				/* Compare & leave in register */ \
   1652 	be,pn	%icc, 1f;					/* If we were in kernel mode start saving globals */ \
   1653 	/* came from user mode -- switch to kernel mode stack */ \
   1654 	 rdpr	%otherwin, %g5;					/* Has this already been done? */ \
   1655 	tst	%g5; tnz %xcc, 1; nop; /* DEBUG -- this should _NEVER_ happen */ \
   1656 	brnz,pn	%g5, 1f;					/* Don't set this twice */ \
   1657 	 rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
   1658 	wrpr	%g0, 0, %canrestore; \
   1659 	mov	CTX_PRIMARY, %g7; \
   1660 	wrpr	%g0, %g5, %otherwin; \
   1661 	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
   1662 	SET_MMU_CONTEXTID %g0, %g7, %g5;			/* Switch MMU to kernel primary context */ \
   1663 	sethi	%hi(KERNBASE), %g5; \
   1664 	flush	%g5;						/* Some convenient address that won't trap */ \
   1665 1:
   1666 #endif /* _LP64 */
   1667 
   1668 #ifdef DEBUG
   1669 
   1670 	/* Look up kpte to test algorithm */
   1671 	.globl	asmptechk
   1672 asmptechk:
   1673 	mov	%o0, %g4	! pmap->pm_segs
   1674 	mov	%o1, %g3	! Addr to lookup -- mind the context
   1675 
   1676 	srax	%g3, HOLESHIFT, %g5			! Check for valid address
   1677 	brz,pt	%g5, 0f					! Should be zero or -1
   1678 	 inc	%g5					! Make -1 -> 0
   1679 	brnz,pn	%g5, 1f					! Error!
   1680 0:
   1681 	 srlx	%g3, STSHIFT, %g5
   1682 	and	%g5, STMASK, %g5
   1683 	sll	%g5, 3, %g5
   1684 	add	%g4, %g5, %g4
   1685 	DLFLUSH(%g4,%g5)
   1686 	ldxa	[%g4] ASI_PHYS_CACHED, %g4		! Remember -- UNSIGNED
   1687 	DLFLUSH2(%g5)
   1688 	brz,pn	%g4, 1f					! NULL entry? check somewhere else
   1689 
   1690 	 srlx	%g3, PDSHIFT, %g5
   1691 	and	%g5, PDMASK, %g5
   1692 	sll	%g5, 3, %g5
   1693 	add	%g4, %g5, %g4
   1694 	DLFLUSH(%g4,%g5)
   1695 	ldxa	[%g4] ASI_PHYS_CACHED, %g4		! Remember -- UNSIGNED
   1696 	DLFLUSH2(%g5)
   1697 	brz,pn	%g4, 1f					! NULL entry? check somewhere else
   1698 
   1699 	 srlx	%g3, PTSHIFT, %g5			! Convert to ptab offset
   1700 	and	%g5, PTMASK, %g5
   1701 	sll	%g5, 3, %g5
   1702 	add	%g4, %g5, %g4
   1703 	DLFLUSH(%g4,%g5)
   1704 	ldxa	[%g4] ASI_PHYS_CACHED, %g6
   1705 	DLFLUSH2(%g5)
   1706 	brgez,pn %g6, 1f				! Entry invalid?  Punt
   1707 	 srlx	%g6, 32, %o0
   1708 	retl
   1709 	 srl	%g6, 0, %o1
   1710 1:
   1711 	mov	%g0, %o1
   1712 	retl
   1713 	 mov	%g0, %o0
   1714 
   1715 	.data
   1716 2:
   1717 	.asciz	"asmptechk: %x %x %x %x:%x\n"
   1718 	_ALIGN
   1719 	.text
   1720 #endif
   1721 
   1722 /*
   1723  * This is the MMU protection handler.  It's too big to fit
   1724  * in the trap table so I moved it here.  It's relatively simple.
   1725  * It looks up the page mapping in the page table associated with
   1726  * the trapping context.  It checks to see if the S/W writable bit
   1727  * is set.  If so, it sets the H/W write bit, marks the tte modified,
   1728  * and enters the mapping into the MMU.  Otherwise it does a regular
   1729  * data fault.
   1730  */
   1731 	ICACHE_ALIGN
   1732 dmmu_write_fault:
   1733 	mov	TLB_TAG_ACCESS, %g3
   1734 	sethi	%hi(0x1fff), %g6			! 8K context mask
   1735 	ldxa	[%g3] ASI_DMMU, %g3			! Get fault addr from Tag Target
   1736 	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g4
   1737 	or	%g6, %lo(0x1fff), %g6
   1738 	LDPTR	[%g4 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g4
   1739 	srax	%g3, HOLESHIFT, %g5			! Check for valid address
   1740 	and	%g3, %g6, %g6				! Isolate context
   1741 
   1742 	inc	%g5					! (0 or -1) -> (1 or 0)
   1743 	sllx	%g6, 3, %g6				! Make it into an offset into ctxbusy
   1744 	ldx	[%g4+%g6], %g4				! Load up our page table.
   1745 	srlx	%g3, STSHIFT, %g6
   1746 	cmp	%g5, 1
   1747 	bgu,pn %xcc, winfix				! Error!
   1748 	 srlx	%g3, PDSHIFT, %g5
   1749 	and	%g6, STMASK, %g6
   1750 	sll	%g6, 3, %g6
   1751 
   1752 	and	%g5, PDMASK, %g5
   1753 	sll	%g5, 3, %g5
   1754 	add	%g6, %g4, %g4
   1755 	DLFLUSH(%g4,%g6)
   1756 	ldxa	[%g4] ASI_PHYS_CACHED, %g4
   1757 	DLFLUSH2(%g6)
   1758 	srlx	%g3, PTSHIFT, %g6			! Convert to ptab offset
   1759 	and	%g6, PTMASK, %g6
   1760 	add	%g5, %g4, %g5
   1761 	brz,pn	%g4, winfix				! NULL entry? check somewhere else
   1762 	 nop
   1763 
   1764 	ldxa	[%g5] ASI_PHYS_CACHED, %g4
   1765 	sll	%g6, 3, %g6
   1766 	brz,pn	%g4, winfix				! NULL entry? check somewhere else
   1767 	 add	%g6, %g4, %g6
   1768 1:
   1769 	ldxa	[%g6] ASI_PHYS_CACHED, %g4
   1770 	brgez,pn %g4, winfix				! Entry invalid?  Punt
   1771 	 or	%g4, SUN4U_TTE_MODIFY|SUN4U_TTE_ACCESS|SUN4U_TTE_W, %g7	! Update the modified bit
   1772 
   1773 	btst	SUN4U_TTE_REAL_W|SUN4U_TTE_W, %g4			! Is it a ref fault?
   1774 	bz,pn	%xcc, winfix				! No -- really fault
   1775 #ifdef DEBUG
   1776 	/* Make sure we don't try to replace a kernel translation */
   1777 	/* This should not be necessary */
   1778 	sllx	%g3, 64-13, %g2				! Isolate context bits
   1779 	sethi	%hi(KERNBASE), %g5			! Don't need %lo
   1780 	brnz,pt	%g2, 0f					! Ignore context != 0
   1781 	 set	0x0800000, %g2				! 8MB
   1782 	sub	%g3, %g5, %g5
   1783 	cmp	%g5, %g2
   1784 	tlu	%xcc, 1; nop
   1785 	blu,pn	%xcc, winfix				! Next insn in delay slot is unimportant
   1786 0:
   1787 #endif
   1788 	/* Need to check for and handle large pages. */
   1789 	 srlx	%g4, 61, %g5				! Isolate the size bits
   1790 	ldxa	[%g0] ASI_DMMU_8KPTR, %g2		! Load DMMU 8K TSB pointer
   1791 	andcc	%g5, 0x3, %g5				! 8K?
   1792 	bnz,pn	%icc, winfix				! We punt to the pmap code since we can't handle policy
   1793 	 ldxa	[%g0] ASI_DMMU, %g1			! Load DMMU tag target register
   1794 	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and write it out
   1795 	membar	#StoreLoad
   1796 	cmp	%g4, %g7
   1797 	bne,pn	%xcc, 1b
   1798 	 or	%g4, SUN4U_TTE_MODIFY|SUN4U_TTE_ACCESS|SUN4U_TTE_W, %g4	! Update the modified bit
   1799 	stx	%g1, [%g2]				! Update TSB entry tag
   1800 	mov	SFSR, %g7
   1801 	stx	%g4, [%g2+8]				! Update TSB entry data
   1802 	nop
   1803 
   1804 #ifdef TRAPSTATS
   1805 	sethi	%hi(_C_LABEL(protfix)), %g1
   1806 	lduw	[%g1+%lo(_C_LABEL(protfix))], %g2
   1807 	inc	%g2
   1808 	stw	%g2, [%g1+%lo(_C_LABEL(protfix))]
   1809 #endif
   1810 	mov	DEMAP_PAGE_SECONDARY, %g1		! Secondary flush
   1811 	mov	DEMAP_PAGE_NUCLEUS, %g5			! Nucleus flush
   1812 	stxa	%g0, [%g7] ASI_DMMU			! clear out the fault
   1813 	sllx	%g3, (64-13), %g7			! Need to demap old entry first
   1814 	andn	%g3, 0xfff, %g6
   1815 	movrz	%g7, %g5, %g1				! Pick one
   1816 	or	%g6, %g1, %g6
   1817 	membar	#Sync
   1818 	stxa	%g6, [%g6] ASI_DMMU_DEMAP		! Do the demap
   1819 	membar	#Sync
   1820 
   1821 	stxa	%g4, [%g0] ASI_DMMU_DATA_IN		! Enter new mapping
   1822 	membar	#Sync
   1823 	retry
   1824 
   1825 /*
   1826  * Each memory data access fault from a fast access miss handler comes here.
   1827  * We will quickly check if this is an original prom mapping before going
   1828  * to the generic fault handler
   1829  *
   1830  * We will assume that %pil is not lost so we won't bother to save it
   1831  * unless we're in an interrupt handler.
   1832  *
   1833  * On entry:
   1834  *	We are on one of the alternate set of globals
   1835  *	%g1 = MMU tag target
   1836  *	%g2 = 8Kptr
   1837  *	%g3 = TLB TAG ACCESS
   1838  *
   1839  * On return:
   1840  *
   1841  */
   1842 	ICACHE_ALIGN
   1843 data_miss:
   1844 #ifdef TRAPSTATS
   1845 	set	_C_LABEL(kdmiss), %g3
   1846 	set	_C_LABEL(udmiss), %g4
   1847 	rdpr	%tl, %g6
   1848 	dec	%g6
   1849 	movrz	%g6, %g4, %g3
   1850 	lduw	[%g3], %g4
   1851 	inc	%g4
   1852 	stw	%g4, [%g3]
   1853 #endif
   1854 	mov	TLB_TAG_ACCESS, %g3			! Get real fault page
   1855 	sethi	%hi(0x1fff), %g6			! 8K context mask
   1856 	ldxa	[%g3] ASI_DMMU, %g3			! from tag access register
   1857 	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g4
   1858 	or	%g6, %lo(0x1fff), %g6
   1859 	LDPTR	[%g4 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g4
   1860 	srax	%g3, HOLESHIFT, %g5			! Check for valid address
   1861 	and	%g3, %g6, %g6				! Isolate context
   1862 
   1863 	inc	%g5					! (0 or -1) -> (1 or 0)
   1864 	sllx	%g6, 3, %g6				! Make it into an offset into ctxbusy
   1865 	ldx	[%g4+%g6], %g4				! Load up our page table.
   1866 #ifdef DEBUG
   1867 	/* Make sure we don't try to replace a kernel translation */
   1868 	/* This should not be necessary */
   1869 	brnz,pt	%g6, 1f			! If user context continue miss
   1870 	sethi	%hi(KERNBASE), %g7			! Don't need %lo
   1871 	set	0x0800000, %g6				! 8MB
   1872 	sub	%g3, %g7, %g7
   1873 	cmp	%g7, %g6
   1874 	tlu	%xcc, 1; nop
   1875 1:
   1876 #endif
   1877 	srlx	%g3, STSHIFT, %g6
   1878 	cmp	%g5, 1
   1879 	bgu,pn %xcc, winfix				! Error!
   1880 	 srlx	%g3, PDSHIFT, %g5
   1881 	and	%g6, STMASK, %g6
   1882 
   1883 	sll	%g6, 3, %g6
   1884 	and	%g5, PDMASK, %g5
   1885 	sll	%g5, 3, %g5
   1886 	add	%g6, %g4, %g4
   1887 	ldxa	[%g4] ASI_PHYS_CACHED, %g4
   1888 	srlx	%g3, PTSHIFT, %g6			! Convert to ptab offset
   1889 	and	%g6, PTMASK, %g6
   1890 	add	%g5, %g4, %g5
   1891 	brz,pn	%g4, data_nfo				! NULL entry? check somewhere else
   1892 
   1893 	 nop
   1894 	ldxa	[%g5] ASI_PHYS_CACHED, %g4
   1895 	sll	%g6, 3, %g6
   1896 	brz,pn	%g4, data_nfo				! NULL entry? check somewhere else
   1897 	 add	%g6, %g4, %g6
   1898 
   1899 1:
   1900 	ldxa	[%g6] ASI_PHYS_CACHED, %g4
   1901 	brgez,pn %g4, data_nfo				! Entry invalid?  Punt
   1902 	 or	%g4, SUN4U_TTE_ACCESS, %g7			! Update the access bit
   1903 
   1904 	btst	SUN4U_TTE_ACCESS, %g4				! Need to update access git?
   1905 	bne,pt	%xcc, 1f
   1906 	 nop
   1907 	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and write it out
   1908 	cmp	%g4, %g7
   1909 	bne,pn	%xcc, 1b
   1910 	 or	%g4, SUN4U_TTE_ACCESS, %g4			! Update the access bit
   1911 
   1912 1:
   1913 	stx	%g1, [%g2]				! Update TSB entry tag
   1914 	stx	%g4, [%g2+8]				! Update TSB entry data
   1915 	stxa	%g4, [%g0] ASI_DMMU_DATA_IN		! Enter new mapping
   1916 	membar	#Sync
   1917 	CLRTT
   1918 	retry
   1919 	NOTREACHED
   1920 /*
   1921  * We had a data miss but did not find a mapping.  Insert
   1922  * a NFO mapping to satisfy speculative loads and return.
   1923  * If this had been a real load, it will re-execute and
   1924  * result in a data fault or protection fault rather than
   1925  * a TLB miss.  We insert an 8K TTE with the valid and NFO
   1926  * bits set.  All others should zero.  The TTE looks like this:
   1927  *
   1928  *	0x9000000000000000
   1929  *
   1930  */
   1931 data_nfo:
   1932 	sethi	%hi(0x90000000), %g4			! V(0x8)|NFO(0x1)
   1933 	sllx	%g4, 32, %g4
   1934 	stxa	%g4, [%g0] ASI_DMMU_DATA_IN		! Enter new mapping
   1935 	membar	#Sync
   1936 	CLRTT
   1937 	retry
   1938 
   1939 /*
   1940  * Handler for making the trap window shiny clean.
   1941  *
   1942  * If the store that trapped was to a kernel address, panic.
   1943  *
   1944  * If the store that trapped was to a user address, stick it in the PCB.
   1945  * Since we don't want to force user code to use the standard register
   1946  * convention if we don't have to, we will not assume that %fp points to
   1947  * anything valid.
   1948  *
   1949  * On entry:
   1950  *	We are on one of the alternate set of globals
   1951  *	%g1 = %tl - 1, tstate[tl-1], scratch	- local
   1952  *	%g2 = %tl				- local
   1953  *	%g3 = MMU tag access			- in
   1954  *	%g4 = %cwp				- local
   1955  *	%g5 = scratch				- local
   1956  *	%g6 = cpcb				- local
   1957  *	%g7 = scratch				- local
   1958  *
   1959  * On return:
   1960  *
   1961  * NB:	 remove most of this from main codepath & cleanup I$
   1962  */
   1963 winfault:
   1964 	mov	TLB_TAG_ACCESS, %g3	! Get real fault page from tag access register
   1965 	ldxa	[%g3] ASI_DMMU, %g3	! And put it into the non-MMU alternate regs
   1966 winfix:
   1967 	rdpr	%tl, %g2
   1968 	subcc	%g2, 1, %g1
   1969 	ble,pt	%icc, datafault		! Don't go below trap level 1
   1970 	 sethi	%hi(CPCB), %g6		! get current pcb
   1971 
   1972 
   1973 	wrpr	%g1, 0, %tl		! Pop a trap level
   1974 	rdpr	%tt, %g7		! Read type of prev. trap
   1975 	rdpr	%tstate, %g4		! Try to restore prev %cwp if we were executing a restore
   1976 	andn	%g7, 0x3f, %g5		!   window fill traps are all 0b 0000 11xx xxxx
   1977 
   1978 #if 1
   1979 	cmp	%g7, 0x30		! If we took a datafault just before this trap
   1980 	bne,pt	%icc, winfixfill	! our stack's probably bad so we need to switch somewhere else
   1981 	 nop
   1982 
   1983 	!!
   1984 	!! Double data fault -- bad stack?
   1985 	!!
   1986 	wrpr	%g2, %tl		! Restore trap level.
   1987 	sir				! Just issue a reset and don't try to recover.
   1988 	mov	%fp, %l6		! Save the frame pointer
   1989 	set	EINTSTACK+USPACE+CC64FSZ-STKB, %fp ! Set the frame pointer to the middle of the idle stack
   1990 	add	%fp, -CC64FSZ, %sp	! Create a stackframe
   1991 	wrpr	%g0, 15, %pil		! Disable interrupts, too
   1992 	wrpr	%g0, %g0, %canrestore	! Our stack is hozed and our PCB
   1993 	wrpr	%g0, 7, %cansave	!  probably is too, so blow away
   1994 	ba	slowtrap		!  all our register windows.
   1995 	 wrpr	%g0, 0x101, %tt
   1996 #endif
   1997 
   1998 winfixfill:
   1999 	cmp	%g5, 0x0c0		!   so we mask lower bits & compare to 0b 0000 1100 0000
   2000 	bne,pt	%icc, winfixspill	! Dump our trap frame -- we will retry the fill when the page is loaded
   2001 	 cmp	%g5, 0x080		!   window spill traps are all 0b 0000 10xx xxxx
   2002 
   2003 	!!
   2004 	!! This was a fill
   2005 	!!
   2006 #ifdef TRAPSTATS
   2007 	set	_C_LABEL(wfill), %g1
   2008 	lduw	[%g1], %g5
   2009 	inc	%g5
   2010 	stw	%g5, [%g1]
   2011 #endif
   2012 	btst	TSTATE_PRIV, %g4	! User mode?
   2013 	and	%g4, CWP, %g5		! %g4 = %cwp of trap
   2014 	wrpr	%g7, 0, %tt
   2015 	bz,a,pt	%icc, datafault		! We were in user mode -- normal fault
   2016 	 wrpr	%g5, %cwp		! Restore cwp from before fill trap -- regs should now be consistent
   2017 
   2018 	/*
   2019 	 * We're in a pickle here.  We were trying to return to user mode
   2020 	 * and the restore of the user window failed, so now we have one valid
   2021 	 * kernel window and a user window state.  If we do a TRAP_SETUP() now,
   2022 	 * our kernel window will be considered a user window and cause a
   2023 	 * fault when we try to save it later due to an invalid user address.
   2024 	 * If we return to where we faulted, our window state will not be valid
   2025 	 * and we will fault trying to enter user with our primary context of zero.
   2026 	 *
   2027 	 * What we'll do is arrange to have us return to return_from_trap so we will
   2028 	 * start the whole business over again.  But first, switch to a kernel window
   2029 	 * setup.  Let's see, canrestore and otherwin are zero.  Set WSTATE_KERN and
   2030 	 * make sure we're in kernel context and we're done.
   2031 	 */
   2032 
   2033 #ifdef TRAPSTATS
   2034 	set	_C_LABEL(kwfill), %g4
   2035 	lduw	[%g4], %g7
   2036 	inc	%g7
   2037 	stw	%g7, [%g4]
   2038 #endif
   2039 #if 0 /* Need to switch over to new stuff to fix WDR bug */
   2040 	wrpr	%g5, %cwp				! Restore cwp from before fill trap -- regs should now be consistent
   2041 	wrpr	%g2, %g0, %tl				! Restore trap level -- we need to reuse it
   2042 	set	return_from_trap, %g4			! XXX - need to set %g1 to tstate
   2043 	set	CTX_PRIMARY, %g7
   2044 	wrpr	%g4, 0, %tpc
   2045 	stxa	%g0, [%g7] ASI_DMMU
   2046 	inc	4, %g4
   2047 	membar	#Sync
   2048 	flush	%g4					! Isn't this convenient?
   2049 	wrpr	%g0, WSTATE_KERN, %wstate
   2050 	wrpr	%g0, 0, %canrestore			! These should be zero but
   2051 	wrpr	%g0, 0, %otherwin			! clear them just in case
   2052 	rdpr	%ver, %g5
   2053 	and	%g5, CWP, %g5
   2054 	wrpr	%g0, 0, %cleanwin
   2055 	dec	1, %g5					! NWINDOWS-1-1
   2056 	wrpr	%g5, 0, %cansave			! Invalidate all windows
   2057 !	flushw						! DEBUG
   2058 	ba,pt	%icc, datafault
   2059 	 wrpr	%g4, 0, %tnpc
   2060 #else
   2061 	wrpr	%g2, %g0, %tl				! Restore trap level
   2062 	cmp	%g2, 3
   2063 	tne	%icc, 1
   2064 	rdpr	%tt, %g5
   2065 	wrpr	%g0, 1, %tl				! Revert to TL==1 XXX what if this wasn't in rft_user? Oh well.
   2066 	wrpr	%g5, %g0, %tt				! Set trap type correctly
   2067 /*
   2068  * Here we need to implement the beginning of datafault.
   2069  * TRAP_SETUP expects to come from either kernel mode or
   2070  * user mode with at least one valid register window.  It
   2071  * will allocate a trap frame, save the out registers, and
   2072  * fix the window registers to think we have one user
   2073  * register window.
   2074  *
   2075  * However, under these circumstances we don't have any
   2076  * valid register windows, so we need to clean up the window
   2077  * registers to prevent garbage from being saved to either
   2078  * the user stack or the PCB before calling the datafault
   2079  * handler.
   2080  *
   2081  * We could simply jump to datafault if we could somehow
   2082  * make the handler issue a `saved' instruction immediately
   2083  * after creating the trapframe.
   2084  *
   2085  * The following is duplicated from datafault:
   2086  */
   2087 #ifdef TRAPS_USE_IG
   2088 	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! We need to save volatile stuff to interrupt globals
   2089 #else
   2090 	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! We need to save volatile stuff to alternate globals
   2091 #endif
   2092 	wr	%g0, ASI_DMMU, %asi			! We need to re-load trap info
   2093 	ldxa	[%g0 + TLB_TAG_ACCESS] %asi, %g1	! Get fault address from tag access register
   2094 	ldxa	[SFAR] %asi, %g2			! sync virt addr; must be read first
   2095 	ldxa	[SFSR] %asi, %g3			! get sync fault status register
   2096 	stxa	%g0, [SFSR] %asi			! Clear out fault now
   2097 
   2098 	TRAP_SETUP(-CC64FSZ-TF_SIZE)
   2099 	saved						! Blow away that one register window we didn't ever use.
   2100 	ba,a,pt	%icc, Ldatafault_internal		! Now we should return directly to user mode
   2101 	 nop
   2102 #endif
   2103 winfixspill:
   2104 	bne,a,pt	%xcc, datafault			! Was not a spill -- handle it normally
   2105 	 wrpr	%g2, 0, %tl				! Restore trap level for now XXXX
   2106 
   2107 	!!
   2108 	!! This was a spill
   2109 	!!
   2110 #if 1
   2111 	btst	TSTATE_PRIV, %g4	! From user mode?
   2112 	wrpr	%g2, 0, %tl		! We need to load the fault type so we can
   2113 	rdpr	%tt, %g5		! overwrite the lower trap and get it to the fault handler
   2114 	wrpr	%g1, 0, %tl
   2115 	wrpr	%g5, 0, %tt		! Copy over trap type for the fault handler
   2116 	and	%g4, CWP, %g5		! find %cwp from trap
   2117 	be,a,pt	%xcc, datafault		! Let's do a regular datafault.  When we try a save in datafault we'll
   2118 	 wrpr	%g5, 0, %cwp		!  return here and write out all dirty windows.
   2119 #endif
   2120 	wrpr	%g2, 0, %tl				! Restore trap level for now XXXX
   2121 	LDPTR	[%g6 + %lo(CPCB)], %g6	! This is in the locked TLB and should not fault
   2122 #ifdef TRAPSTATS
   2123 	set	_C_LABEL(wspill), %g7
   2124 	lduw	[%g7], %g5
   2125 	inc	%g5
   2126 	stw	%g5, [%g7]
   2127 #endif
   2128 
   2129 	/*
   2130 	 * Traverse kernel map to find paddr of cpcb and only us ASI_PHYS_CACHED to
   2131 	 * prevent any faults while saving the windows.  BTW if it isn't mapped, we
   2132 	 * will trap and hopefully panic.
   2133 	 */
   2134 
   2135 !	ba	0f					! DEBUG -- don't use phys addresses
   2136 	 wr	%g0, ASI_NUCLEUS, %asi			! In case of problems finding PA
   2137 	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g1
   2138 	LDPTR	[%g1 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g1	! Load start of ctxbusy
   2139 #ifdef DEBUG
   2140 	srax	%g6, HOLESHIFT, %g7			! Check for valid address
   2141 	brz,pt	%g7, 1f					! Should be zero or -1
   2142 	 addcc	%g7, 1, %g7					! Make -1 -> 0
   2143 	tnz	%xcc, 1					! Invalid address??? How did this happen?
   2144 1:
   2145 #endif
   2146 	srlx	%g6, STSHIFT, %g7
   2147 	ldx	[%g1], %g1				! Load pointer to kernel_pmap
   2148 	and	%g7, STMASK, %g7
   2149 	sll	%g7, 3, %g7
   2150 	add	%g7, %g1, %g1
   2151 	DLFLUSH(%g1,%g7)
   2152 	ldxa	[%g1] ASI_PHYS_CACHED, %g1		! Load pointer to directory
   2153 	DLFLUSH2(%g7)
   2154 
   2155 	srlx	%g6, PDSHIFT, %g7			! Do page directory
   2156 	and	%g7, PDMASK, %g7
   2157 	sll	%g7, 3, %g7
   2158 	brz,pn	%g1, 0f
   2159 	 add	%g7, %g1, %g1
   2160 	DLFLUSH(%g1,%g7)
   2161 	ldxa	[%g1] ASI_PHYS_CACHED, %g1
   2162 	DLFLUSH2(%g7)
   2163 
   2164 	srlx	%g6, PTSHIFT, %g7			! Convert to ptab offset
   2165 	and	%g7, PTMASK, %g7
   2166 	brz	%g1, 0f
   2167 	 sll	%g7, 3, %g7
   2168 	add	%g1, %g7, %g7
   2169 	DLFLUSH(%g7,%g1)
   2170 	ldxa	[%g7] ASI_PHYS_CACHED, %g7		! This one is not
   2171 	DLFLUSH2(%g1)
   2172 	brgez	%g7, 0f
   2173 	 srlx	%g7, PGSHIFT, %g7			! Isolate PA part
   2174 	sll	%g6, 32-PGSHIFT, %g6			! And offset
   2175 	sllx	%g7, PGSHIFT+23, %g7			! There are 23 bits to the left of the PA in the TTE
   2176 	srl	%g6, 32-PGSHIFT, %g6
   2177 	srax	%g7, 23, %g7
   2178 	or	%g7, %g6, %g6				! Then combine them to form PA
   2179 
   2180 	wr	%g0, ASI_PHYS_CACHED, %asi		! Use ASI_PHYS_CACHED to prevent possible page faults
   2181 0:
   2182 	/*
   2183 	 * Now save all user windows to cpcb.
   2184 	 */
   2185 #ifdef NOTDEF_DEBUG
   2186 	add	%g6, PCB_NSAVED, %g7
   2187 	DLFLUSH(%g7,%g5)
   2188 	lduba	[%g6 + PCB_NSAVED] %asi, %g7		! make sure that pcb_nsaved
   2189 	DLFLUSH2(%g5)
   2190 	brz,pt	%g7, 1f					! is zero, else
   2191 	 nop
   2192 	wrpr	%g0, 4, %tl
   2193 	sir						! Force a watchdog
   2194 1:
   2195 #endif
   2196 	rdpr	%otherwin, %g7
   2197 	brnz,pt	%g7, 1f
   2198 	 rdpr	%canrestore, %g5
   2199 	rdpr	%cansave, %g1
   2200 	add	%g5, 1, %g7				! add the %cwp window to the list to save
   2201 !	movrnz	%g1, %g5, %g7				! If we're issuing a save
   2202 !	mov	%g5, %g7				! DEBUG
   2203 	wrpr	%g0, 0, %canrestore
   2204 	wrpr	%g7, 0, %otherwin			! Still in user mode -- need to switch to kernel mode
   2205 1:
   2206 	mov	%g7, %g1
   2207 	add	%g6, PCB_NSAVED, %g7
   2208 	DLFLUSH(%g7,%g5)
   2209 	lduba	[%g6 + PCB_NSAVED] %asi, %g7		! Start incrementing pcb_nsaved
   2210 	DLFLUSH2(%g5)
   2211 
   2212 #ifdef DEBUG
   2213 	wrpr	%g0, 5, %tl
   2214 #endif
   2215 	mov	%g6, %g5
   2216 	brz,pt	%g7, winfixsave				! If it's in use, panic
   2217 	 saved						! frob window registers
   2218 
   2219 	/* PANIC */
   2220 !	sir						! Force a watchdog
   2221 #ifdef DEBUG
   2222 	wrpr	%g2, 0, %tl
   2223 #endif
   2224 	mov	%g7, %o2
   2225 	rdpr	%ver, %o1
   2226 	sethi	%hi(2f), %o0
   2227 	and	%o1, CWP, %o1
   2228 	wrpr	%g0, %o1, %cleanwin
   2229 	dec	1, %o1
   2230 	wrpr	%g0, %o1, %cansave			! kludge away any more window problems
   2231 	wrpr	%g0, 0, %canrestore
   2232 	wrpr	%g0, 0, %otherwin
   2233 	or	%lo(2f), %o0, %o0
   2234 	wrpr	%g0, WSTATE_KERN, %wstate
   2235 	set	PANICSTACK-CC64FSZ-STKB, %sp
   2236 	ta	1; nop					! This helps out traptrace.
   2237 	call	_C_LABEL(panic)				! This needs to be fixed properly but we should panic here
   2238 	 mov	%g1, %o1
   2239 	NOTREACHED
   2240 	.data
   2241 2:
   2242 	.asciz	"winfault: double invalid window at %p, nsaved=%d"
   2243 	_ALIGN
   2244 	.text
   2245 3:
   2246 	saved
   2247 	save
   2248 winfixsave:
   2249 	stxa	%l0, [%g5 + PCB_RW + ( 0*8)] %asi	! Save the window in the pcb, we can schedule other stuff in here
   2250 	stxa	%l1, [%g5 + PCB_RW + ( 1*8)] %asi
   2251 	stxa	%l2, [%g5 + PCB_RW + ( 2*8)] %asi
   2252 	stxa	%l3, [%g5 + PCB_RW + ( 3*8)] %asi
   2253 	stxa	%l4, [%g5 + PCB_RW + ( 4*8)] %asi
   2254 	stxa	%l5, [%g5 + PCB_RW + ( 5*8)] %asi
   2255 	stxa	%l6, [%g5 + PCB_RW + ( 6*8)] %asi
   2256 	stxa	%l7, [%g5 + PCB_RW + ( 7*8)] %asi
   2257 
   2258 	stxa	%i0, [%g5 + PCB_RW + ( 8*8)] %asi
   2259 	stxa	%i1, [%g5 + PCB_RW + ( 9*8)] %asi
   2260 	stxa	%i2, [%g5 + PCB_RW + (10*8)] %asi
   2261 	stxa	%i3, [%g5 + PCB_RW + (11*8)] %asi
   2262 	stxa	%i4, [%g5 + PCB_RW + (12*8)] %asi
   2263 	stxa	%i5, [%g5 + PCB_RW + (13*8)] %asi
   2264 	stxa	%i6, [%g5 + PCB_RW + (14*8)] %asi
   2265 	stxa	%i7, [%g5 + PCB_RW + (15*8)] %asi
   2266 
   2267 !	rdpr	%otherwin, %g1	! Check to see if we's done
   2268 	dec	%g1
   2269 	wrpr	%g0, 7, %cleanwin			! BUGBUG -- we should not hardcode this, but I have no spare globals
   2270 	inc	16*8, %g5				! Move to next window
   2271 	inc	%g7					! inc pcb_nsaved
   2272 	brnz,pt	%g1, 3b
   2273 	 stxa	%o6, [%g5 + PCB_RW + (14*8)] %asi	! Save %sp so we can write these all out
   2274 
   2275 	/* fix up pcb fields */
   2276 	stba	%g7, [%g6 + PCB_NSAVED] %asi		! cpcb->pcb_nsaved = n
   2277 #if 0
   2278 	mov	%g7, %g5				! fixup window registers
   2279 5:
   2280 	dec	%g5
   2281 	brgz,a,pt	%g5, 5b
   2282 	 restore
   2283 #ifdef NOT_DEBUG
   2284 	rdpr	%wstate, %g5				! DEBUG
   2285 	wrpr	%g0, WSTATE_KERN, %wstate		! DEBUG
   2286 	wrpr	%g0, 4, %tl
   2287 	rdpr	%cansave, %g7
   2288 	rdpr	%canrestore, %g6
   2289 	flushw						! DEBUG
   2290 	wrpr	%g2, 0, %tl
   2291 	wrpr	%g5, 0, %wstate				! DEBUG
   2292 #endif
   2293 #else
   2294 	/*
   2295 	 * We just issued a bunch of saves, so %cansave is now 0,
   2296 	 * probably (if we were doing a flushw then we may have
   2297 	 * come in with only partially full register windows and
   2298 	 * it may not be 0).
   2299 	 *
   2300 	 * %g7 contains the count of the windows we just finished
   2301 	 * saving.
   2302 	 *
   2303 	 * What we need to do now is move some of the windows from
   2304 	 * %canrestore to %cansave.  What we should do is take
   2305 	 * min(%canrestore, %g7) and move that over to %cansave.
   2306 	 *
   2307 	 * %g7 is the number of windows we flushed, so we should
   2308 	 * use that as a base.  Clear out %otherwin, set %cansave
   2309 	 * to min(%g7, NWINDOWS - 2), set %cleanwin to %canrestore
   2310 	 * + %cansave and the rest follows:
   2311 	 *
   2312 	 * %otherwin = 0
   2313 	 * %cansave = NWINDOWS - 2 - %canrestore
   2314 	 */
   2315 	wrpr	%g0, 0, %otherwin
   2316 	rdpr	%canrestore, %g1
   2317 	sub	%g1, %g7, %g1				! Calculate %canrestore - %g7
   2318 	movrlz	%g1, %g0, %g1				! Clamp at zero
   2319 	wrpr	%g1, 0, %canrestore			! This is the new canrestore
   2320 	rdpr	%ver, %g5
   2321 	and	%g5, CWP, %g5				! NWINDOWS-1
   2322 	dec	%g5					! NWINDOWS-2
   2323 	wrpr	%g5, 0, %cleanwin			! Set cleanwin to max, since we're in-kernel
   2324 	sub	%g5, %g1, %g5				! NWINDOWS-2-%canrestore
   2325 	wrpr	%g5, 0, %cansave
   2326 #ifdef NOT_DEBUG
   2327 	rdpr	%wstate, %g5				! DEBUG
   2328 	wrpr	%g0, WSTATE_KERN, %wstate		! DEBUG
   2329 	wrpr	%g0, 4, %tl
   2330 	flushw						! DEBUG
   2331 	wrpr	%g2, 0, %tl
   2332 	wrpr	%g5, 0, %wstate				! DEBUG
   2333 #endif
   2334 #endif
   2335 
   2336 #ifdef NOTDEF_DEBUG
   2337 	set	panicstack-CC64FSZ, %g1
   2338 	save	%g1, 0, %sp
   2339 	GLOBTOLOC
   2340 	rdpr	%wstate, %l0
   2341 	wrpr	%g0, WSTATE_KERN, %wstate
   2342 	set	8f, %o0
   2343 	mov	%g7, %o1
   2344 	call	printf
   2345 	 mov	%g5, %o2
   2346 	wrpr	%l0, 0, %wstate
   2347 	LOCTOGLOB
   2348 	restore
   2349 	.data
   2350 8:
   2351 	.asciz	"winfix: spill fixup\n"
   2352 	_ALIGN
   2353 	.text
   2354 #endif
   2355 !	rdpr	%tl, %g2				! DEBUG DEBUG -- did we trap somewhere?
   2356 	sub	%g2, 1, %g1
   2357 	rdpr	%tt, %g2
   2358 	wrpr	%g1, 0, %tl				! We will not attempt to re-execute the spill, so dump our trap frame permanently
   2359 	wrpr	%g2, 0, %tt				! Move trap type from fault frame here, overwriting spill
   2360 
   2361 	/* Did we save a user or kernel window ? */
   2362 !	srax	%g3, 48, %g5				! User or kernel store? (TAG TARGET)
   2363 	sllx	%g3, (64-13), %g5			! User or kernel store? (TAG ACCESS)
   2364 	sethi	%hi(dcache_size), %g7
   2365 	ld	[%g7 + %lo(dcache_size)], %g7
   2366 	sethi	%hi(dcache_line_size), %g6
   2367 	ld	[%g6 + %lo(dcache_line_size)], %g6
   2368 	brnz,pt	%g5, 1f					! User fault -- save windows to pcb
   2369 	 sub	%g7, %g6, %g7
   2370 
   2371 	and	%g4, CWP, %g4				! %g4 = %cwp of trap
   2372 	wrpr	%g4, 0, %cwp				! Kernel fault -- restore %cwp and force and trap to debugger
   2373 	!!
   2374 	!! Here we managed to fault trying to access a kernel window
   2375 	!! This is a bug.  Switch to the interrupt stack if we aren't
   2376 	!! there already and then trap into the debugger or panic.
   2377 	!!
   2378 	sethi	%hi(EINTSTACK-BIAS), %g6
   2379 	btst	1, %sp
   2380 	bnz,pt	%icc, 0f
   2381 	 mov	%sp, %g1
   2382 	add	%sp, -BIAS, %g1
   2383 0:
   2384 	or	%g6, %lo(EINTSTACK-BIAS), %g6
   2385 	set	(EINTSTACK-INTSTACK), %g7	! XXXXXXXXXX This assumes kernel addresses are unique from user addresses
   2386 	sub	%g6, %g1, %g2				! Determine if we need to switch to intr stack or not
   2387 	dec	%g7					! Make it into a mask
   2388 	andncc	%g2, %g7, %g0				! XXXXXXXXXX This assumes kernel addresses are unique from user addresses */ \
   2389 	movz	%xcc, %g1, %g6				! Stay on interrupt stack?
   2390 	add	%g6, -CCFSZ, %g6			! Allocate a stack frame
   2391 	mov	%sp, %l6				! XXXXX Save old stack pointer
   2392 	mov	%g6, %sp
   2393 	ta	1; nop					! Enter debugger
   2394 	NOTREACHED
   2395 1:
   2396 #if 1
   2397 	/* Now we need to blast away the D$ to make sure we're in sync */
   2398 	stxa	%g0, [%g7] ASI_DCACHE_TAG
   2399 	brnz,pt	%g7, 1b
   2400 	 sub	%g7, %g6, %g7
   2401 #endif
   2402 
   2403 #ifdef NOTDEF_DEBUG
   2404 	set	panicstack-CC64FSZ, %g5
   2405 	save	%g5, 0, %sp
   2406 	GLOBTOLOC
   2407 	rdpr	%wstate, %l0
   2408 	wrpr	%g0, WSTATE_KERN, %wstate
   2409 	set	8f, %o0
   2410 	call	printf
   2411 	 mov	%fp, %o1
   2412 	wrpr	%l0, 0, %wstate
   2413 	LOCTOGLOB
   2414 	restore
   2415 	.data
   2416 8:
   2417 	.asciz	"winfix: kernel spill retry\n"
   2418 	_ALIGN
   2419 	.text
   2420 #endif
   2421 #ifdef TRAPSTATS
   2422 	set	_C_LABEL(wspillskip), %g4
   2423 	lduw	[%g4], %g5
   2424 	inc	%g5
   2425 	stw	%g5, [%g4]
   2426 #endif
   2427 	/*
   2428 	 * If we had WSTATE_KERN then we had at least one valid kernel window.
   2429 	 * We should re-execute the trapping save.
   2430 	 */
   2431 	rdpr	%wstate, %g3
   2432 	mov	%g3, %g3
   2433 	cmp	%g3, WSTATE_KERN
   2434 	bne,pt	%icc, 1f
   2435 	 nop
   2436 	retry						! Now we can complete the save
   2437 1:
   2438 	/*
   2439 	 * Since we had a WSTATE_USER, we had no valid kernel windows.  This should
   2440 	 * only happen inside TRAP_SETUP or INTR_SETUP. Emulate
   2441 	 * the instruction, clean up the register windows, then done.
   2442 	 */
   2443 	rdpr	%cwp, %g1
   2444 	inc	%g1
   2445 	rdpr	%tstate, %g2
   2446 	wrpr	%g1, %cwp
   2447 	andn	%g2, CWP, %g2
   2448 	wrpr	%g1, %g2, %tstate
   2449 #ifdef TRAPS_USE_IG
   2450 	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
   2451 #else
   2452 	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate
   2453 #endif
   2454 	mov	%g6, %sp
   2455 	done
   2456 
   2457 /*
   2458  * Each memory data access fault, from user or kernel mode,
   2459  * comes here.
   2460  *
   2461  * We will assume that %pil is not lost so we won't bother to save it
   2462  * unless we're in an interrupt handler.
   2463  *
   2464  * On entry:
   2465  *	We are on one of the alternate set of globals
   2466  *	%g1 = MMU tag target
   2467  *	%g2 = %tl
   2468  *
   2469  * On return:
   2470  *
   2471  */
!
! datafault: synchronous data access fault entry (sun4u), entered on
! the alternate (or, with TRAPS_USE_IG, interrupt) globals with
! %g1 = MMU tag target and %g2 = %tl.  Re-reads and clears the D-MMU
! sync fault registers, builds a trapframe, saves the normal globals,
! then dispatches to data_access_fault() or data_access_error() in C
! and returns through return_from_trap.
!
datafault:
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! We need to save volatile stuff to interrupt globals
#else
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! We need to save volatile stuff to alternate globals
#endif
	wr	%g0, ASI_DMMU, %asi			! We need to re-load trap info
	ldxa	[%g0 + TLB_TAG_ACCESS] %asi, %g1	! Get fault address from tag access register
	ldxa	[SFAR] %asi, %g2			! sync virt addr; must be read first
	ldxa	[SFSR] %asi, %g3			! get sync fault status register
	stxa	%g0, [SFSR] %asi			! Clear out fault now

	TRAP_SETUP(-CC64FSZ-TF_SIZE)			! Build a trapframe on the kernel stack
Ldatafault_internal:
	INCR64(CPUINFO_VA+CI_NFAULT)			! cnt.v_faults++ (clobbers %o0,%o1)
!	ldx	[%sp + CC64FSZ + STKB + TF_FAULT], %g1	! DEBUG make sure this has not changed
	mov	%g1, %o0				! Move these to the out regs so we can save the globals
	mov	%g2, %o4
	mov	%g3, %o5

	ldxa	[%g0] ASI_AFAR, %o2			! get async fault address
	ldxa	[%g0] ASI_AFSR, %o3			! get async fault status
	mov	-1, %g7
	stxa	%g7, [%g0] ASI_AFSR			! And clear this out, too

	wrpr	%g0, PSTATE_KERN, %pstate		! Get back to normal globals

	! Save the normal globals into the trapframe, interleaving
	! privileged-register reads to fill the store latencies.
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + (1*8)]	! save g1
	rdpr	%tt, %o1					! find out what trap brought us here
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + (2*8)]	! save g2
	rdpr	%tstate, %g1
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + (3*8)]	! (sneak g3 in here)
	rdpr	%tpc, %g2
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + (4*8)]	! sneak in g4
	rdpr	%tnpc, %g3
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + (5*8)]	! sneak in g5
	mov	%g2, %o7					! Make the fault address look like the return address
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + (6*8)]	! sneak in g6
	rd	%y, %g5						! save y
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + (7*8)]	! sneak in g7

	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]		! tf.tf_tt = trap type
	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]		! set tf.tf_psr, tf.tf_pc
	stx	%g2, [%sp + CC64FSZ + STKB + TF_PC]		! set tf.tf_npc
	stx	%g3, [%sp + CC64FSZ + STKB + TF_NPC]

	rdpr	%pil, %g4
	stb	%g4, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g4, [%sp + CC64FSZ + STKB + TF_OLDPIL]

#if 1
	! Pop one trap level, clamping at 0 so we never wrap.
	rdpr	%tl, %g7
	dec	%g7
	movrlz	%g7, %g0, %g7
	wrpr	%g0, %g7, %tl		! Revert to kernel mode
#else
	wrpr	%g0, 0, %tl		! Revert to kernel mode
#endif
	/* Finish stackframe, call C trap handler */
	flushw						! Get this clean so we won't take any more user faults
#ifdef NOTDEF_DEBUG
	set	CPCB, %o7
	LDPTR	[%o7], %o7
	ldub	[%o7 + PCB_NSAVED], %o7
	brz,pt	%o7, 2f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	1f, %o0
	call printf
	 mov	%i7, %o1
	ta	1; nop
	 restore
	.data
1:	.asciz	"datafault: nsaved = %d\n"
	_ALIGN
	.text
2:
#endif
	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4

	/*
	 * Right now the registers have the following values:
	 *
	 *	%o0 -- MMU_TAG_ACCESS
	 *	%o1 -- TT
	 *	%o2 -- afar
	 *	%o3 -- afsr
	 *	%o4 -- sfar
	 *	%o5 -- sfsr
	 */

	cmp	%o1, T_DATA_ERROR
	st	%g5, [%sp + CC64FSZ + STKB + TF_Y]
	wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Restore default ASI
	be,pn	%icc, data_error
	 wrpr	%g0, PSTATE_INTR, %pstate	! reenable interrupts (delay slot; runs either way)

	mov	%o0, %o3			! (argument: trap address)
	mov	%g2, %o2			! (argument: trap pc)
	call	_C_LABEL(data_access_fault)	! data_access_fault(&tf, type,
						!	pc, addr, sfva, sfsr)
	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)

! Common return path for both data fault flavors.
data_recover:
#ifdef TRAPSTATS
	set	_C_LABEL(uintrcnt), %g1
	stw	%g0, [%g1]
	set	_C_LABEL(iveccnt), %g1
	stw	%g0, [%g1]
#endif
	wrpr	%g0, PSTATE_KERN, %pstate		! disable interrupts
	b	return_from_trap			! go return
	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1		! Load this for return_from_trap
	NOTREACHED

! Hardware-detected data error (T_DATA_ERROR): hand off to the C
! error handler, then rejoin the common recovery path.
data_error:
	call	_C_LABEL(data_access_error)	! data_access_error(&tf, type,
						!	afva, afsr, sfva, sfsr)
	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
	ba	data_recover
	 nop
	NOTREACHED
   2596 
   2597 /*
   2598  * Each memory instruction access fault from a fast access handler comes here.
   2599  * We will quickly check if this is an original prom mapping before going
   2600  * to the generic fault handler
   2601  *
   2602  * We will assume that %pil is not lost so we won't bother to save it
   2603  * unless we're in an interrupt handler.
   2604  *
   2605  * On entry:
   2606  *	We are on one of the alternate set of globals
   2607  *	%g1 = MMU tag target
   2608  *	%g2 = TSB entry ptr
   2609  *	%g3 = TLB Tag Access
   2610  *
   2611  * On return:
   2612  *
   2613  */
   2614 
	ICACHE_ALIGN
!
! instr_miss: fast ITLB miss handler (sun4u).  On entry (MMU globals):
! %g1 = MMU tag target, %g2 = TSB entry ptr, %g3 = TLB tag access.
! Walks the 3-level page table through ASI_PHYS_CACHED, verifies the
! mapping is valid and executable, sets the TTE accessed bit with a
! cas loop if needed, installs the mapping in the ITLB and updates the
! TSB entry.  Any failure falls through to textfault.
!
instr_miss:
#ifdef TRAPSTATS
	set	_C_LABEL(ktmiss), %g3
	set	_C_LABEL(utmiss), %g4
	rdpr	%tl, %g6
	dec	%g6
	movrz	%g6, %g4, %g3
	lduw	[%g3], %g4
	inc	%g4
	stw	%g4, [%g3]
#endif
	mov	TLB_TAG_ACCESS, %g3			! Get real fault page
	sethi	%hi(0x1fff), %g7			! 8K context mask
	ldxa	[%g3] ASI_IMMU, %g3			! from tag access register
	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g4
	or	%g7, %lo(0x1fff), %g7
	LDPTR	[%g4 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g4
	srax	%g3, HOLESHIFT, %g5			! Check for valid address
	and	%g3, %g7, %g6				! Isolate context
	sllx	%g6, 3, %g6				! Make it into an offset into ctxbusy
	inc	%g5					! (0 or -1) -> (1 or 0)
	ldx	[%g4+%g6], %g4				! Load up our page table.
#ifdef DEBUG
	/* Make sure we don't try to replace a kernel translation */
	/* This should not be necessary */
	brnz,pt	%g6, 1f					! If user context continue miss
	sethi	%hi(KERNBASE), %g7			! Don't need %lo
	set	0x0800000, %g6				! 8MB
	sub	%g3, %g7, %g7
	cmp	%g7, %g6
	tlu	%xcc, 1; nop
1:
#endif
	! Segment-level lookup; %g5 > 1 means the address was in the VA hole.
	srlx	%g3, STSHIFT, %g6
	cmp	%g5, 1
	bgu,pn %xcc, textfault				! Error!
	 srlx	%g3, PDSHIFT, %g5
	and	%g6, STMASK, %g6
	sll	%g6, 3, %g6
	and	%g5, PDMASK, %g5
	nop

	sll	%g5, 3, %g5
	add	%g6, %g4, %g4
	ldxa	[%g4] ASI_PHYS_CACHED, %g4		! Load page directory pointer
	srlx	%g3, PTSHIFT, %g6			! Convert to ptab offset
	and	%g6, PTMASK, %g6
	add	%g5, %g4, %g5
	brz,pn	%g4, textfault				! NULL entry? check somewhere else
	 nop

	ldxa	[%g5] ASI_PHYS_CACHED, %g4		! Load page table pointer
	sll	%g6, 3, %g6
	brz,pn	%g4, textfault				! NULL entry? check somewhere else
	 add	%g6, %g4, %g6
1:
	ldxa	[%g6] ASI_PHYS_CACHED, %g4		! Fetch the TTE
	brgez,pn %g4, textfault				! Invalid (valid bit clear)? fault
	 nop

	/* Check if it's an executable mapping. */
	andcc	%g4, SUN4U_TTE_EXEC, %g0
	bz,pn	%xcc, textfault
	 nop

	or	%g4, SUN4U_TTE_ACCESS, %g7			! Update accessed bit
	btst	SUN4U_TTE_ACCESS, %g4				! Need to update access bit?
	bne,pt	%xcc, 1f
	 nop
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and store it
	cmp	%g4, %g7				! cas lost the race? reload and retry
	bne,pn	%xcc, 1b
	 or	%g4, SUN4U_TTE_ACCESS, %g4			! Update accessed bit
1:
	stx	%g1, [%g2]				! Update TSB entry tag
	stx	%g4, [%g2+8]				! Update TSB entry data
	stxa	%g4, [%g0] ASI_IMMU_DATA_IN		! Enter new mapping
	membar	#Sync
	CLRTT
	retry
	NOTREACHED
	!!
	!!  Check our prom mappings -- temporary
	!!
   2700 
   2701 /*
   2702  * Each memory text access fault, from user or kernel mode,
   2703  * comes here.
   2704  *
   2705  * We will assume that %pil is not lost so we won't bother to save it
   2706  * unless we're in an interrupt handler.
   2707  *
   2708  * On entry:
   2709  *	We are on one of the alternate set of globals
   2710  *	%g1 = MMU tag target
   2711  *	%g2 = %tl
   2712  *	%g3 = %tl - 1
   2713  *
   2714  * On return:
   2715  *
   2716  */
   2717 
!
! textfault: synchronous instruction access fault entry (sun4u),
! entered on the alternate (or interrupt) globals with %g1 = MMU tag
! target, %g2 = %tl, %g3 = %tl - 1.  Reads and clears the I-MMU sync
! fault status, builds a trapframe, then calls text_access_fault() or
! text_access_error() in C and returns via return_from_trap.
!
textfault:
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! We need to save volatile stuff to interrupt globals
#else
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! We need to save volatile stuff to alternate globals
#endif
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + TLB_TAG_ACCESS] %asi, %g1	! Get fault address from tag access register
	ldxa	[SFSR] %asi, %g3			! get sync fault status register
	membar	#LoadStore
	stxa	%g0, [SFSR] %asi			! Clear out old info

	TRAP_SETUP(-CC64FSZ-TF_SIZE)			! Build a trapframe on the kernel stack
	INCR64(CPUINFO_VA+CI_NFAULT)			! cnt.v_faults++ (clobbers %o0,%o1)

	mov	%g3, %o3				! (argument: sfsr)

	wrpr	%g0, PSTATE_KERN, %pstate		! Switch to normal globals
	ldxa	[%g0] ASI_AFSR, %o4			! get async fault status
	ldxa	[%g0] ASI_AFAR, %o5			! get async fault address
	mov	-1, %o0
	stxa	%o0, [%g0] ASI_AFSR			! Clear this out
	! Save the normal globals, interleaving privileged-register reads.
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + (1*8)]	! save g1
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + (2*8)]	! save g2
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + (3*8)]	! (sneak g3 in here)
	rdpr	%tt, %o1					! Find out what caused this trap
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + (4*8)]	! sneak in g4
	rdpr	%tstate, %g1
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + (5*8)]	! sneak in g5
	rdpr	%tpc, %o2					! sync virt addr; must be read first
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + (6*8)]	! sneak in g6
	rdpr	%tnpc, %g3
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + (7*8)]	! sneak in g7
	rd	%y, %g5						! save y

	/* Finish stackframe, call C trap handler */
	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]		! set tf.tf_psr, tf.tf_pc
	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]		! debug

	stx	%o2, [%sp + CC64FSZ + STKB + TF_PC]
	stx	%g3, [%sp + CC64FSZ + STKB + TF_NPC]		! set tf.tf_npc

	rdpr	%pil, %g4
	stb	%g4, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g4, [%sp + CC64FSZ + STKB + TF_OLDPIL]

	! Pop one trap level, clamping at 0 so we never wrap.
	rdpr	%tl, %g7
	dec	%g7
	movrlz	%g7, %g0, %g7
	wrpr	%g0, %g7, %tl		! Revert to kernel mode

	wr	%g0, ASI_PRIMARY_NOFAULT, %asi		! Restore default ASI
	flushw						! Get rid of any user windows so we don't deadlock

	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4

	/* Use trap type to see what handler to call */
	cmp	%o1, T_INST_ERROR
	be,pn	%xcc, text_error
	 st	%g5, [%sp + CC64FSZ + STKB + TF_Y]		! set tf.tf_y (delay slot; runs either way)

	wrpr	%g0, PSTATE_INTR, %pstate	! reenable interrupts
	call	_C_LABEL(text_access_fault)	! text_access_fault(&tf, type, pc, sfsr)
	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
! Common return path for both text fault flavors.
text_recover:
	wrpr	%g0, PSTATE_KERN, %pstate	! disable interrupts
	b	return_from_trap		! go return
	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1	! Load this for return_from_trap
	NOTREACHED

! Hardware-detected instruction error (T_INST_ERROR).
text_error:
	wrpr	%g0, PSTATE_INTR, %pstate	! reenable interrupts
	call	_C_LABEL(text_access_error)	! text_access_error(&tf, type, sfva [pc], sfsr,
						!		afva, afsr);
	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
	ba	text_recover
	 nop
	NOTREACHED
   2798 
   2799 #ifdef SUN4V
   2800 
   2801 /*
   2802  * Traps for sun4v.
   2803  */
   2804 
!
! sun4v_dtsb_miss: fast TL=0 data TSB miss handler (sun4v).  Fetches
! the fault address and context from the MMU fault status area, walks
! the page table by physical address, sets the TTE access bit with a
! cas loop if needed, and installs the entry into the per-cpu data
! TSB.  Hole addresses, NULL table entries and invalid TTEs punt to
! sun4v_datatrap for full C-level handling.
!
sun4v_dtsb_miss:
	GET_MMFSA %g1				! MMU Fault status area
	add	%g1, 0x48, %g3
	LDPTRA	[%g3] ASI_PHYS_CACHED, %g3	! Data fault address
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context

	GET_CTXBUSY %g4
	sllx	%g6, 3, %g6			! Make it into an offset into ctxbusy
	LDPTR	[%g4 + %g6], %g4		! Load up our page table.

	srax	%g3, HOLESHIFT, %g5		! Check for valid address
	brz,pt	%g5, 0f				! Should be zero or -1
	 inc	%g5				! Make -1 -> 0
	brnz,pn	%g5, sun4v_datatrap		! Error! In hole!
0:
	srlx	%g3, STSHIFT, %g6
	and	%g6, STMASK, %g6		! Index into pm_segs
	sll	%g6, 3, %g6
	add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page directory pointer
	srlx	%g3, PDSHIFT, %g6
	and	%g6, PDMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_datatrap		! NULL entry? check somewhere else
	 add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page table pointer

	srlx	%g3, PTSHIFT, %g6		! Convert to ptab offset
	and	%g6, PTMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_datatrap		! NULL entry? check somewhere else
	 add	%g4, %g6, %g6
1:
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g4	! Fetch TTE
	brgez,pn %g4, sun4v_datatrap		! Entry invalid?  Punt
	 or	%g4, SUN4V_TLB_ACCESS, %g7	! Update the access bit

	btst	SUN4V_TLB_ACCESS, %g4		! Need to update access bit?
	bne,pt	%xcc, 2f
	 nop
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7	! and write it out
	cmp	%g4, %g7			! cas lost the race? reload and retry
	bne,pn	%xcc, 1b
	 or	%g4, SUN4V_TLB_ACCESS, %g4	! Update the access bit
2:
	GET_TSB_DMMU %g2

	/* Construct TSB tag word. */
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context
	mov	%g3, %g1			! Data fault address
	srlx	%g1, 22, %g1			! 63..22 of virt addr
	sllx	%g6, 48, %g6			! context_id in 63..48
	or	%g1, %g6, %g1			! construct TTE tag
	srlx	%g3, PTSHIFT, %g3
	sethi	%hi(_C_LABEL(tsbsize)), %g5
	mov	512, %g6
	ld	[%g5 + %lo(_C_LABEL(tsbsize))], %g5
	sllx	%g6, %g5, %g5			! %g5 = 512 << tsbsize = TSBENTS
	sub	%g5, 1, %g5			! TSBENTS -> offset
	and	%g3, %g5, %g3			! mask out TTE index
	sllx	%g3, 4, %g3			! TTE size is 16 bytes
	add	%g2, %g3, %g2			! location of TTE in ci_tsb_dmmu

	membar	#StoreStore			! data must be visible before the tag

	STPTR	%g4, [%g2 + 8]			! store TTE data
	STPTR	%g1, [%g2]			! store TTE tag

	retry
	NOTREACHED
   2877 
!
! sun4v_tl1_dtsb_miss: data TSB miss taken at TL>0 (sun4v).  Same page
! table walk and TSB update as sun4v_dtsb_miss, but all failure cases
! punt to sun4v_tl1_ptbl_miss instead of sun4v_datatrap since we are
! already inside another trap handler.
!
sun4v_tl1_dtsb_miss:
	GET_MMFSA %g1				! MMU Fault status area
	add	%g1, 0x48, %g3
	LDPTRA	[%g3] ASI_PHYS_CACHED, %g3	! Data fault address
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context

	GET_CTXBUSY %g4
	sllx	%g6, 3, %g6			! Make it into an offset into ctxbusy
	LDPTR	[%g4 + %g6], %g4		! Load up our page table.

	srax	%g3, HOLESHIFT, %g5		! Check for valid address
	brz,pt	%g5, 0f				! Should be zero or -1
	 inc	%g5				! Make -1 -> 0
	brnz,pn	%g5, sun4v_tl1_ptbl_miss	! Error! In hole!
0:
	srlx	%g3, STSHIFT, %g6
	and	%g6, STMASK, %g6		! Index into pm_segs
	sll	%g6, 3, %g6
	add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page directory pointer
	srlx	%g3, PDSHIFT, %g6
	and	%g6, PDMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_tl1_ptbl_miss	! NULL entry? check somewhere else
	 add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page table pointer

	srlx	%g3, PTSHIFT, %g6		! Convert to ptab offset
	and	%g6, PTMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_tl1_ptbl_miss	! NULL entry? check somewhere else
	 add	%g4, %g6, %g6
1:
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g4	! Fetch TTE
	brgez,pn %g4, sun4v_tl1_ptbl_miss	! Entry invalid?  Punt
	 or	%g4, SUN4V_TLB_ACCESS, %g7	! Update the access bit

	btst	SUN4V_TLB_ACCESS, %g4		! Need to update access bit?
	bne,pt	%xcc, 2f
	 nop
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7	! and write it out
	cmp	%g4, %g7			! cas lost the race? reload and retry
	bne,pn	%xcc, 1b
	 or	%g4, SUN4V_TLB_ACCESS, %g4	! Update the access bit
2:
	GET_TSB_DMMU %g2

	/* Construct TSB tag word. */
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context
	mov	%g3, %g1			! Data fault address
	srlx	%g1, 22, %g1			! 63..22 of virt addr
	sllx	%g6, 48, %g6			! context_id in 63..48
	or	%g1, %g6, %g1			! construct TTE tag
	srlx	%g3, PTSHIFT, %g3
	sethi	%hi(_C_LABEL(tsbsize)), %g5
	mov	512, %g6
	ld	[%g5 + %lo(_C_LABEL(tsbsize))], %g5
	sllx	%g6, %g5, %g5			! %g5 = 512 << tsbsize = TSBENTS
	sub	%g5, 1, %g5			! TSBENTS -> offset
	and	%g3, %g5, %g3			! mask out TTE index
	sllx	%g3, 4, %g3			! TTE size is 16 bytes
	add	%g2, %g3, %g2			! location of TTE in ci_tsb_dmmu

	membar	#StoreStore			! data must be visible before the tag

	STPTR	%g4, [%g2 + 8]			! store TTE data
	STPTR	%g1, [%g2]			! store TTE tag

	retry
	NOTREACHED
   2950 
!
! sun4v_datatrap: slow-path sun4v data fault handler.  Fetches the
! fault address and context from the MMU fault status area, builds a
! trapframe and calls data_access_fault() in C.  For protection
! faults (T_FDMMU_PROT) a synthetic SFSR with SFSR_W set is passed so
! the C code treats it as a write fault.
!
sun4v_datatrap:
	GET_MMFSA %g3				! MMU Fault status area
	add	%g3, 0x48, %g1
	LDPTRA	[%g1] ASI_PHYS_CACHED, %g1	! Data fault address
	add	%g3, 0x50, %g2
	LDPTRA	[%g2] ASI_PHYS_CACHED, %g2	! Data fault context

	TRAP_SETUP(-CC64FSZ-TF_SIZE)		! Build a trapframe on the kernel stack
	or	%g1, %g2, %o3			! (argument: addr | ctx)
	mov	%g1, %o4			! (argument: sfva)

	rdpr	%tt, %g4
	rdpr	%tstate, %g1
	rdpr	%tpc, %g2
	rdpr	%tnpc, %g3

	stx	%g1, [%sp + CC64FSZ + BIAS + TF_TSTATE]
	mov	%g4, %o1		! (type)
	stx	%g2, [%sp + CC64FSZ + BIAS + TF_PC]
	rd	%y, %g5
	stx	%g3, [%sp + CC64FSZ + BIAS + TF_NPC]
	st	%g5, [%sp + CC64FSZ + BIAS + TF_Y]
	mov	%g2, %o2		! (pc)
	sth	%o1, [%sp + CC64FSZ + BIAS + TF_TT]! debug

	cmp	%o1, T_FDMMU_PROT
	bne,pn	%icc, 1f
	 mov	SFSR_FV, %o5		! (delay slot, always executed: %o5 = SFSR_FV)
	or	%o5, SFSR_W, %o5	! protection fault: mark it as a write

1:
	NORMAL_GLOBALS_SUN4V

	! Save the normal globals into the trapframe.
	stx	%g1, [%sp + CC64FSZ + BIAS + TF_G + (1*8)]
	stx	%g2, [%sp + CC64FSZ + BIAS + TF_G + (2*8)]
	add	%sp, CC64FSZ + BIAS, %o0		! (&tf)
	stx	%g3, [%sp + CC64FSZ + BIAS + TF_G + (3*8)]
	stx	%g4, [%sp + CC64FSZ + BIAS + TF_G + (4*8)]
	stx	%g5, [%sp + CC64FSZ + BIAS + TF_G + (5*8)]
	rdpr	%pil, %g5
	stx	%g6, [%sp + CC64FSZ + BIAS + TF_G + (6*8)]
	stx	%g7, [%sp + CC64FSZ + BIAS + TF_G + (7*8)]
	stb	%g5, [%sp + CC64FSZ + BIAS + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + BIAS + TF_OLDPIL]

	/*
	 * Phew, ready to enable traps and call C code.
	 */
	wrpr	%g0, 0, %tl

	wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Restore default ASI
	wrpr	%g0, PSTATE_INTR, %pstate	! traps on again
	call	_C_LABEL(data_access_fault)	! data_access_fault(tf, type, ...)
	 nop

	ba,a,pt	%icc, return_from_trap
	 nop
	NOTREACHED
   3009 
!
! sun4v_tl0_dtsb_prot: TL=0 fast data protection fault (sun4v).
! If the TTE allows writes (SUN4V_TLB_REAL_W or SUN4V_TLB_W), set the
! modified/accessed/writable bits via a cas loop, update the TSB
! entry, demap the stale (read-only) TLB entry through the hypervisor
! and retry.  Otherwise this is a real protection violation: punt to
! sun4v_datatrap.
!
sun4v_tl0_dtsb_prot:
	GET_MMFSA %g1				! MMU Fault status area
	add	%g1, 0x48, %g3
	LDPTRA	[%g3] ASI_PHYS_CACHED, %g3	! Data fault address
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context

	GET_CTXBUSY %g4
	sllx	%g6, 3, %g6			! Make it into an offset into ctxbusy
	LDPTR	[%g4 + %g6], %g4		! Load up our page table.

	srax	%g3, HOLESHIFT, %g5		! Check for valid address
	brz,pt	%g5, 0f				! Should be zero or -1
	 inc	%g5				! Make -1 -> 0
	brnz,pn	%g5, sun4v_datatrap		! Error! In hole!
0:
	srlx	%g3, STSHIFT, %g6
	and	%g6, STMASK, %g6		! Index into pm_segs
	sll	%g6, 3, %g6
	add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page directory pointer

	srlx	%g3, PDSHIFT, %g6
	and	%g6, PDMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_datatrap		! NULL entry? check somewhere else
	 add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page table pointer

	srlx	%g3, PTSHIFT, %g6		! Convert to ptab offset
	and	%g6, PTMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_datatrap		! NULL entry? check somewhere else
	 add	%g4, %g6, %g6
1:
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g4	! Fetch TTE
	brgez,pn %g4, sun4v_datatrap		! Entry invalid?  Punt
	 or	%g4, SUN4V_TLB_MODIFY|SUN4V_TLB_ACCESS|SUN4V_TLB_W, %g7 ! Update the modified bit

#	btst	SUN4V_TLB_REAL_W|SUN4V_TLB_W, %g4	! Is it a ref fault?
	! Build SUN4V_TLB_REAL_W|SUN4V_TLB_W by hand (the constant does
	! not fit in a simm13 immediate) and test for write permission.
	mov	1, %g2
	sllx	%g2, 61, %g2			! %g2 is now SUN4V_TLB_REAL_W
	or	%g2, SUN4V_TLB_W, %g2
	btst	%g2, %g4
	bz,pn	%xcc, sun4v_datatrap			! No -- really fault
	 nop
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and write it out
	cmp	%g4, %g7			! cas lost the race? reload and retry
	bne,pn	%xcc, 1b
	 or	%g4, SUN4V_TLB_MODIFY|SUN4V_TLB_ACCESS|SUN4V_TLB_W, %g4 ! Update the modified bit
2:
	GET_TSB_DMMU %g2

	mov	%g1, %g7			! save MMFSA

	/* Construct TSB tag word. */
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context
	mov	%g3, %g1			! Data fault address
	srlx	%g1, 22, %g1			! 63..22 of virt addr
	sllx	%g6, 48, %g6			! context_id in 63..48
	or	%g1, %g6, %g1			! construct TTE tag

	srlx	%g3, PTSHIFT, %g3
	sethi	%hi(_C_LABEL(tsbsize)), %g5
	mov	512, %g6
	ld	[%g5 + %lo(_C_LABEL(tsbsize))], %g5
	sllx	%g6, %g5, %g5			! %g5 = 512 << tsbsize = TSBENTS
	sub	%g5, 1, %g5			! TSBENTS -> offset
	and	%g3, %g5, %g3			! mask out TTE index
	sllx	%g3, 4, %g3			! TTE size is 16 bytes
	add	%g2, %g3, %g2			! location of TTE in ci_tsb_dmmu

	membar	#StoreStore			! data must be visible before the tag

	STPTR	%g4, [%g2 + 8]		! store TTE data
	STPTR	%g1, [%g2]		! store TTE tag

	! Demap the stale TLB entry via the hypervisor; preserve
	! %o0-%o2 around the hypercall in %g1-%g3.
	mov	%o0, %g1
	mov	%o1, %g2
	mov	%o2, %g3

	add	%g7, 0x48, %o0
	ldxa	[%o0] ASI_PHYS_CACHED, %o0	! Data fault address
	add	%g7, 0x50, %o1
	ldxa	[%o1] ASI_PHYS_CACHED, %o1	! Data fault context
	mov	MAP_DTLB, %o2
	ta	ST_MMU_UNMAP_ADDR

	mov	%g1, %o0
	mov	%g2, %o1
	mov	%g3, %o2

	retry
	NOTREACHED
   3105 
/*
 * sun4v_tl0_itsb_miss: TL=0 instruction TSB miss (sun4v).
 *
 * Walk the software page table for the faulting instruction address.
 * If the TTE is valid and executable, set the ACCESS bit if needed
 * (atomically, with CAS retry) and install the TTE in the TSB, then
 * retry the access.  Otherwise punt to sun4v_texttrap.
 *
 * Entered on trap globals; %g1-%g7 are scratch.
 */
sun4v_tl0_itsb_miss:
	GET_MMFSA %g1				! MMU Fault status area
	add	%g1, 0x8, %g3
	LDPTRA	[%g3] ASI_PHYS_CACHED, %g3	! Instruction fault address
	add	%g1, 0x10, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Instruction fault context

	GET_CTXBUSY %g4
	sllx	%g6, 3, %g6			! Make it into an offset into ctxbusy
	LDPTR	[%g4 + %g6], %g4		! Load up our page table.

	srax	%g3, HOLESHIFT, %g5		! Check for valid address
	brz,pt	%g5, 0f				! Should be zero or -1
	 inc	%g5				! Make -1 -> 0
	brnz,pn	%g5, sun4v_texttrap		! Error! In hole!
0:
	srlx	%g3, STSHIFT, %g6
	and	%g6, STMASK, %g6		! Index into pm_segs
	sll	%g6, 3, %g6
	add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page directory pointer

	srlx	%g3, PDSHIFT, %g6
	and	%g6, PDMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_texttrap		! NULL entry? check somewhere else
	 add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page table pointer

	srlx	%g3, PTSHIFT, %g6		! Convert to ptab offset
	and	%g6, PTMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_texttrap		! NULL entry? check somewhere else
	 add	%g4, %g6, %g6
1:
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g4	! Fetch TTE
	brgez,pn %g4, sun4v_texttrap		! Entry invalid?  Punt
	 or	%g4, SUN4V_TLB_ACCESS, %g7	! Update the access bit

	btst	SUN4V_TLB_EXEC, %g4		! Page executable at all?
	bz,pn	%xcc, sun4v_texttrap		! No -- take the slow path
	 nop
	btst	SUN4V_TLB_ACCESS, %g4		! Access bit already set?
	bne,pt	%xcc, 2f			! Yes -- skip the CAS
	 nop
	! CAS loop: retry from the TTE fetch if another CPU changed it under us
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7	! and write it out
	cmp	%g4, %g7
	bne,pn	%xcc, 1b
	 or	%g4, SUN4V_TLB_ACCESS, %g4	! Update the access bit
2:
	GET_TSB_DMMU %g2

	mov	%g1, %g7
	/* Construct TSB tag word. */
	add	%g1, 0x10, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Instruction fault context
	mov	%g3, %g1			! Instruction fault address
	srlx	%g1, 22, %g1			! 63..22 of virt addr
	sllx	%g6, 48, %g6			! context_id in 63..48
	or	%g1, %g6, %g1			! construct TTE tag

	srlx	%g3, PTSHIFT, %g3
	sethi	%hi(_C_LABEL(tsbsize)), %g5
	mov	512, %g6
	ld	[%g5 + %lo(_C_LABEL(tsbsize))], %g5
	sllx	%g6, %g5, %g5			! %g5 = 512 << tsbsize = TSBENTS
	sub	%g5, 1, %g5			! TSBENTS -> offset
	and	%g3, %g5, %g3			! mask out TTE index
	sllx	%g3, 4, %g3			! TTE size is 16 bytes
	add	%g2, %g3, %g2			! location of TTE in ci_tsb_dmmu (FIXME ci_tsb_immu?)

	membar	#StoreStore
	STPTR	%g4, [%g2 + 8]			! store TTE data
	stx	%g1, [%g2]			! store TTE tag

	retry
	NOTREACHED
   3183 
/*
 * sun4v_texttrap: slow path for sun4v instruction access faults.
 *
 * Builds a full trapframe on the kernel stack, switches to normal
 * globals, drops to TL=0 and calls text_access_fault(tf, type, addr).
 * %o2 is the fault address with the fault context OR'ed in.
 */
sun4v_texttrap:
	GET_MMFSA %g3				! MMU Fault status area
	add	%g3, 0x08, %g1
	LDPTRA	[%g1] ASI_PHYS_CACHED, %g1	! Instruction fault address
	add	%g3, 0x10, %g2
	LDPTRA	[%g2] ASI_PHYS_CACHED, %g2	! Instruction fault context

	TRAP_SETUP(-CC64FSZ-TF_SIZE)

	or	%g1, %g2, %o2			! (addr | context) argument
	clr	%o3

	rdpr	%tt, %g4
	rdpr	%tstate, %g1
	rdpr	%tpc, %g2
	rdpr	%tnpc, %g3

	stx	%g1, [%sp + CC64FSZ + BIAS + TF_TSTATE]
	mov	%g4, %o1		! (type)
	stx	%g2, [%sp + CC64FSZ + BIAS + TF_PC]
	rd	%y, %g5
	stx	%g3, [%sp + CC64FSZ + BIAS + TF_NPC]
	st	%g5, [%sp + CC64FSZ + BIAS + TF_Y]
	sth	%o1, [%sp + CC64FSZ + BIAS + TF_TT]! debug

	! Get back to normal globals
	wrpr	%g0, PSTATE_KERN, %pstate
	NORMAL_GLOBALS_SUN4V

	! Save the (now normal) globals into the trapframe
	stx	%g1, [%sp + CC64FSZ + BIAS + TF_G + (1*8)]
	stx	%g2, [%sp + CC64FSZ + BIAS + TF_G + (2*8)]
	add	%sp, CC64FSZ + BIAS, %o0		! (&tf)
	stx	%g3, [%sp + CC64FSZ + BIAS + TF_G + (3*8)]
	stx	%g4, [%sp + CC64FSZ + BIAS + TF_G + (4*8)]
	stx	%g5, [%sp + CC64FSZ + BIAS + TF_G + (5*8)]
	rdpr	%pil, %g5
	stx	%g6, [%sp + CC64FSZ + BIAS + TF_G + (6*8)]
	stx	%g7, [%sp + CC64FSZ + BIAS + TF_G + (7*8)]
	stb	%g5, [%sp + CC64FSZ + BIAS + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + BIAS + TF_OLDPIL]

	/*
	 * Phew, ready to enable traps and call C code.
	 */
	wrpr	%g0, 0, %tl

	wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Restore default ASI
	wrpr	%g0, PSTATE_INTR, %pstate	! traps on again
	call	_C_LABEL(text_access_fault)	! text_access_fault(tf, type, ...)
	 nop

	ba,a,pt	%icc, return_from_trap
	 nop
	NOTREACHED
   3238 
/*
 * sun4v_tl1_dtsb_prot: TL=1 data TSB protection fault (sun4v).
 *
 * Same algorithm as sun4v_tl0_dtsb_prot (walk page table, set
 * MODIFY/ACCESS/W with CAS, install TTE, demap, retry) but taken at
 * trap level 1, so all failure paths punt to sun4v_tl1_ptbl_miss
 * instead of sun4v_datatrap.
 */
sun4v_tl1_dtsb_prot:
	GET_MMFSA %g1				! MMU Fault status area
	add	%g1, 0x48, %g3
	LDPTRA	[%g3] ASI_PHYS_CACHED, %g3	! Data fault address
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context

	GET_CTXBUSY %g4
	sllx	%g6, 3, %g6			! Make it into an offset into ctxbusy
	LDPTR	[%g4 + %g6], %g4		! Load up our page table.

	srax	%g3, HOLESHIFT, %g5		! Check for valid address
	brz,pt	%g5, 0f				! Should be zero or -1
	 inc	%g5				! Make -1 -> 0
	brnz,pn	%g5, sun4v_tl1_ptbl_miss	! Error! In hole!
0:
	srlx	%g3, STSHIFT, %g6
	and	%g6, STMASK, %g6		! Index into pm_segs
	sll	%g6, 3, %g6
	add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page directory pointer

	srlx	%g3, PDSHIFT, %g6
	and	%g6, PDMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_tl1_ptbl_miss	! NULL entry? check somewhere else
	 add	%g4, %g6, %g4
	LDPTRA	[%g4] ASI_PHYS_CACHED, %g4	! Load page table pointer

	srlx	%g3, PTSHIFT, %g6		! Convert to ptab offset
	and	%g6, PTMASK, %g6
	sll	%g6, 3, %g6
	brz,pn	%g4, sun4v_tl1_ptbl_miss	! NULL entry? check somewhere else
	 add	%g4, %g6, %g6
1:
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g4	! Fetch TTE
	brgez,pn %g4, sun4v_tl1_ptbl_miss	! Entry invalid?  Punt
	 or	%g4, SUN4V_TLB_MODIFY|SUN4V_TLB_ACCESS|SUN4V_TLB_W, %g7 ! Update the modified bit

#	btst	SUN4V_TLB_REAL_W|SUN4V_TLB_W, %g4	! Is it a ref fault?
	mov	1, %g2
	sllx	%g2, 61, %g2			! %g2 is now SUN4V_TLB_REAL_W
	or	%g2, SUN4V_TLB_W, %g2
	btst	%g2, %g4			! Writable at all?
	bz,pn	%xcc, sun4v_tl1_ptbl_miss		! No -- really fault
	 nop
	! CAS loop: retry from the TTE fetch if another CPU changed it under us
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and write it out
	cmp	%g4, %g7
	bne,pn	%xcc, 1b
	 or	%g4, SUN4V_TLB_MODIFY|SUN4V_TLB_ACCESS|SUN4V_TLB_W, %g4 ! Update the modified bit
2:
	GET_TSB_DMMU %g2

	mov	%g1, %g7			! save MMFSA

	/* Construct TSB tag word. */
	add	%g1, 0x50, %g6
	LDPTRA	[%g6] ASI_PHYS_CACHED, %g6	! Data fault context
	mov	%g3, %g1			! Data fault address
	srlx	%g1, 22, %g1			! 63..22 of virt addr
	sllx	%g6, 48, %g6			! context_id in 63..48
	or	%g1, %g6, %g1			! construct TTE tag

	srlx	%g3, PTSHIFT, %g3
	sethi	%hi(_C_LABEL(tsbsize)), %g5
	mov	512, %g6
	ld	[%g5 + %lo(_C_LABEL(tsbsize))], %g5
	sllx	%g6, %g5, %g5			! %g5 = 512 << tsbsize = TSBENTS
	sub	%g5, 1, %g5			! TSBENTS -> offset
	and	%g3, %g5, %g3			! mask out TTE index
	sllx	%g3, 4, %g3			! TTE size is 16 bytes
	add	%g2, %g3, %g2			! location of TTE in ci_tsb_dmmu

	membar	#StoreStore

	STPTR	%g4, [%g2 + 8]		! store TTE data
	STPTR	%g1, [%g2]		! store TTE tag

	! Demap the stale translation; preserve %o0-%o2 in %g1-%g3
	! across the hypervisor trap.
	mov	%o0, %g1
	mov	%o1, %g2
	mov	%o2, %g3

	add	%g7, 0x48, %o0
	ldxa	[%o0] ASI_PHYS_CACHED, %o0	! Data fault address
	add	%g7, 0x50, %o1
	ldxa	[%o1] ASI_PHYS_CACHED, %o1	! Data fault context
	mov	MAP_DTLB, %o2
	ta	ST_MMU_UNMAP_ADDR

	mov	%g1, %o0
	mov	%g2, %o1
	mov	%g3, %o2

	retry
	NOTREACHED
   3334 
/*
 * sun4v_tl1_ptbl_miss: a page-table walk at TL=1 failed.
 *
 * Two cases:
 *  - The faulting %tpc lies inside the rft_user_fault_start/_end
 *    window (a FILL during return-to-user): rebuild the trap state so
 *    the fault is handled at TL=1 via sun4v_datatrap.
 *  - Otherwise, inspect the trap type one level down and dispatch to
 *    the appropriate spill/fill recovery (flush_normals, flush_others,
 *    ufill_trap), or drop into the debugger if it is none of those.
 */
sun4v_tl1_ptbl_miss:
	rdpr	%tpc, %g1

	set	rft_user_fault_start, %g2
	cmp	%g1, %g2
	blu,pt	%xcc, 1f
	 set	rft_user_fault_end, %g2
	cmp	%g1, %g2
	bgeu,pt	%xcc, 1f
	 nop

	/* We had a miss inside rtf_user_fault_start/rtf_user_fault_end block (FILL) */

	/* Fixup %cwp. */
	rdpr	%cwp, %g1
	inc	%g1
	wrpr	%g1, %cwp

	! Re-stage the trap at TL=1 with a kernel tstate so that
	! sun4v_datatrap runs as if taken directly from the kernel.
	rdpr	%tt, %g1
	wrpr	1, %tl
	wrpr	%g1, %tt
	rdpr	%cwp, %g1
	set	TSTATE_KERN, %g2
	wrpr	%g1, %g2, %tstate
	set	return_from_trap, %g1
	wrpr	%g1, %tpc
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	wrpr	%g0, 1, %gl

	ba,pt %xcc, sun4v_datatrap
	 wrpr	WSTATE_KERN, %wstate

1:
	rdpr	%tstate, %g3
	rdpr	%tt, %g4

	! Peek at the trap type one level down (%tt at %tl-1), then
	! restore our own %tstate/%tt which the %tl dance clobbered.
	rdpr	%tl, %g1
	dec	%g1
	wrpr	%g1, %tl
	rdpr	%tt, %g2
	inc	%g1
	wrpr	%g1, %tl

	wrpr	%g0, %g3, %tstate
	wrpr	%g0, %g4, %tt

	! Dispatch on the trap class: 0x080 = spill normal,
	! 0x0a0 = spill other, 0x0c0 = fill.
	andn	%g2, 0x00f, %g3
	cmp	%g3, 0x080
	be,pn	%icc, flush_normals
	 nop
	cmp	%g3, 0x0a0
	be,pn	%icc, flush_others
	 nop
	cmp	%g3, 0x0c0
	be,pn	%icc, ufill_trap
	 nop

	Debugger()
	NOTREACHED
   3395 
/*
 * flush_others: redirect the lower trap level to pcbspill_others by
 * rewriting its %tnpc, then return there with "done".
 */
flush_others:
	set	pcbspill_others, %g1
	wrpr	%g1, %tnpc
	done
	NOTREACHED
   3401 
flush_normals:
ufill_trap:

	/*
	 * Rearrange our trap state such that it appears as if we got
	 * this trap directly from user mode.  Then process it at TL = 1.
	 * We'll take the spill/fill trap again once we return to user mode.
	 */
	rdpr	%tt, %g1
	rdpr	%tstate, %g3
	wrpr	%g0, 1, %tl
	wrpr	%g0, %g1, %tt
	rdpr	%tstate, %g2
	wrpr	%g0, 2, %tl
	! Keep the TL=1 CWP but everything else from the TL=2 tstate
	and	%g2, TSTATE_CWP, %g2
	andn	%g3, TSTATE_CWP, %g3
	wrpr	%g2, %g3, %tstate
	set	sun4v_datatrap, %g4
	wrpr	%g0, %g4, %tnpc
	done
   3422 
   3423 /*
   3424  * Spill user windows into the PCB.
   3425  */
/*
 * Entry stubs for the PCB spill path: record which spill trap class
 * brought us here in %tt, then fall into (or branch to) pcbspill.
 */
pcbspill_normals:
	ba,pt	%xcc, pcbspill
	 wrpr	0x80, %tt		! normal-window spill trap class

pcbspill_others:
	wrpr	0xa0, %tt		! other-window spill trap class
					! falls through into pcbspill
   3432 
/*
 * pcbspill: spill user register windows into the PCB.
 *
 * Translates the current PCB's virtual address to a physical address
 * by hand-walking the kernel pmap (ctx 0), then stores each otherwin
 * window into pcb_rw via ASI_PHYS_CACHED so the stores cannot fault.
 * Updates pcb_nsaved when done and retries the spilling instruction.
 */
pcbspill:
	set	CPUINFO_VA, %g6
	ldx	[%g6 + CI_CPCB], %g6		! %g6 = current pcb (VA)

	GET_CTXBUSY %g1

	ldx	[%g1], %g1				! kernel pmap is ctx 0

	! Hand-walk the kernel page table to turn the pcb VA into a PA
	srlx	%g6, STSHIFT, %g7
	and	%g7, STMASK, %g7
	sll	%g7, 3, %g7				! byte offset into ctxbusy
	add	%g7, %g1, %g1
	ldxa	[%g1] ASI_PHYS_CACHED, %g1		! Load pointer to directory

	srlx	%g6, PDSHIFT, %g7			! Do page directory
	and	%g7, PDMASK, %g7
	sll	%g7, 3, %g7
	brz,pn	%g1, pcbspill_fail
	 add	%g7, %g1, %g1
	ldxa	[%g1] ASI_PHYS_CACHED, %g1
	srlx	%g6, PTSHIFT, %g7			! Convert to ptab offset
	and	%g7, PTMASK, %g7
	brz	%g1, pcbspill_fail
	 sll	%g7, 3, %g7
	add	%g1, %g7, %g7
	ldxa	[%g7] ASI_PHYS_CACHED, %g7		! Load the TTE itself
	brgez	%g7, pcbspill_fail			! Invalid TTE? Give up
	 srlx	%g7, PGSHIFT, %g7			! Isolate PA part
	sll	%g6, 32-PGSHIFT, %g6			! And offset
	sllx	%g7, PGSHIFT+8, %g7			! There are 8 bits to the left of the PA in the TTE
	srl	%g6, 32-PGSHIFT, %g6
	srax	%g7, 8, %g7
	or	%g7, %g6, %g6				! Then combine them to form PA

	wr	%g0, ASI_PHYS_CACHED, %asi		! Use ASI_PHYS_CACHED to prevent possible page faults

	lduba	[%g6 + PCB_NSAVED] %asi, %g7		! Fetch current nsaved from the pcb
	sllx	%g7, 7, %g5				! 8+8 registers each 8 bytes = 128 bytes (2^7)
	add	%g6, %g5, %g5				! Offset into pcb_rw
1:
	SPILL	stxa, %g5 + PCB_RW, 8, %asi		! Store the locals and ins

	add	%g5, 16*8, %g5				! Next location for saved register windows

	stxa	%o6, [%g5 + PCB_RW + (14*8)] %asi	! Save %sp so we can write these all out

	saved						! Increments %cansave and decrements %canrestore
							! or %otherwin

	rdpr	%cwp, %g1				! shift register window forward
	inc	%g1
	wrpr	%g1, %cwp
	inc	%g7					! increment number of saved register windows

	rdpr	%otherwin, %g1				! Check to see if done spill'ing otherwin
	brnz,pt	%g1, 1b
	 nop

	stba	%g7, [%g6 + PCB_NSAVED] %asi		! write back updated count

	retry
	NOTREACHED

pcbspill_fail:
	! Could not translate the PCB address -- nothing sane to do.
	Debugger()
	NOTREACHED
   3499 
   3500 
/*
 * pcbspill_other: spill "other" windows into the PCB.
 *
 * NOTE(review): this is byte-for-byte the same algorithm as pcbspill
 * above (VA->PA walk of the pcb, then SPILL loop over %otherwin);
 * candidate for merging -- confirm both entry points' callers first.
 */
pcbspill_other:
	set	CPUINFO_VA, %g6
	ldx	[%g6 + CI_CPCB], %g6		! %g6 = current pcb (VA)

	GET_CTXBUSY %g1

	ldx	[%g1], %g1				! kernel pmap is ctx 0

	! Hand-walk the kernel page table to turn the pcb VA into a PA
	srlx	%g6, STSHIFT, %g7
	and	%g7, STMASK, %g7
	sll	%g7, 3, %g7				! byte offset into ctxbusy
	add	%g7, %g1, %g1
	ldxa	[%g1] ASI_PHYS_CACHED, %g1		! Load pointer to directory

	srlx	%g6, PDSHIFT, %g7			! Do page directory
	and	%g7, PDMASK, %g7
	sll	%g7, 3, %g7
	brz,pn	%g1, pcbspill_other_fail
	 add	%g7, %g1, %g1
	ldxa	[%g1] ASI_PHYS_CACHED, %g1
	srlx	%g6, PTSHIFT, %g7			! Convert to ptab offset
	and	%g7, PTMASK, %g7
	brz	%g1, pcbspill_other_fail
	 sll	%g7, 3, %g7
	add	%g1, %g7, %g7
	ldxa	[%g7] ASI_PHYS_CACHED, %g7		! Load the TTE itself
	brgez	%g7, pcbspill_other_fail		! Invalid TTE? Give up
	 srlx	%g7, PGSHIFT, %g7			! Isolate PA part
	sll	%g6, 32-PGSHIFT, %g6			! And offset
	sllx	%g7, PGSHIFT+8, %g7			! There are 8 bits to the left of the PA in the TTE
	srl	%g6, 32-PGSHIFT, %g6
	srax	%g7, 8, %g7
	or	%g7, %g6, %g6				! Then combine them to form PA

	wr	%g0, ASI_PHYS_CACHED, %asi		! Use ASI_PHYS_CACHED to prevent possible page faults

	lduba	[%g6 + PCB_NSAVED] %asi, %g7		! Fetch current nsaved from the pcb
	sllx	%g7, 7, %g5				! 8+8 registers each 8 bytes = 128 bytes (2^7)
	add	%g6, %g5, %g5				! Offset into pcb_rw
1:
	SPILL	stxa, %g5 + PCB_RW, 8, %asi		! Store the locals and ins

	add	%g5, 16*8, %g5				! Next location for saved register windows

	stxa	%o6, [%g5 + PCB_RW + (14*8)] %asi	! Save %sp so we can write these all out

	saved						! Increments %cansave and decrements %canrestore
							! or %otherwin

	rdpr	%cwp, %g1				! shift register window forward
	inc	%g1
	wrpr	%g1, %cwp


	inc	%g7					! increment number of saved register windows

	rdpr	%otherwin, %g1				! Check to see if done spill'ing otherwin
	brnz,pt	%g1, 1b
	 nop

	stba	%g7, [%g6 + PCB_NSAVED] %asi		! write back updated count

	retry
	NOTREACHED

pcbspill_other_fail:
	! Could not translate the PCB address -- nothing sane to do.
	Debugger()
	NOTREACHED
   3569 
   3570 
/*
 * spill_normal_to_user_stack: spill the current window directly onto
 * the user stack.
 *
 * Translates the (biased) %sp to a physical address by walking the
 * page table of the current (secondary) context, then performs the
 * SPILL through ASI_PHYS_CACHED so the stores cannot recurse into
 * another fault.  On any translation failure, reset via "sir".
 */
spill_normal_to_user_stack:
	mov	%sp, %g6						! calculate virtual address of destination stack
	add	%g6, BIAS, %g6

	mov	CTX_SECONDARY, %g2				! Is this context ok or should it be CTX_PRIMARY? XXX
	GET_MMU_CONTEXTID %g3, %g2, %g1
	sllx	%g3, 3, %g3					! Make it into an offset into ctxbusy (see below)

	GET_CTXBUSY %g1
	ldx	[%g1 + %g3], %g1				! Fetch pmap for current context id

	! Start of code to extract PA
	srlx	%g6, STSHIFT, %g7
	and	%g7, STMASK, %g7
	sll	%g7, 3, %g7						! byte offset into ctxbusy
	add	%g7, %g1, %g1
	ldxa	[%g1] ASI_PHYS_CACHED, %g1	! Load pointer to directory
	srlx	%g6, PDSHIFT, %g7			! Do page directory
	and	%g7, PDMASK, %g7
	sll	%g7, 3, %g7
	brz,pn	%g1, spill_normal_to_user_stack_fail
	 add	%g7, %g1, %g1

	ldxa	[%g1] ASI_PHYS_CACHED, %g1
	srlx	%g6, PTSHIFT, %g7			! Convert to ptab offset
	and	%g7, PTMASK, %g7
	brz	%g1, spill_normal_to_user_stack_fail
	 sll	%g7, 3, %g7

	add	%g1, %g7, %g7
	ldxa	[%g7] ASI_PHYS_CACHED, %g7	! Load the TTE itself
	brgez	%g7, spill_normal_to_user_stack_fail	! Invalid TTE? Give up
	 srlx	%g7, PGSHIFT, %g7			! Isolate PA part

	sll	%g6, 32-PGSHIFT, %g6			! And offset
	sllx	%g7, PGSHIFT+8, %g7			! There are 8 bits to the left of the PA in the TTE
	srl	%g6, 32-PGSHIFT, %g6
	srax	%g7, 8, %g7
	or	%g7, %g6, %g6					! Then combine them to form PA
	! End of code to extract PA

	wr	%g0, ASI_PHYS_CACHED, %asi		! Use ASI_PHYS_CACHED to prevent possible page faults
	SPILL	stxa, %g6, 8, %asi			! Store the locals and ins
	saved

	retry
	NOTREACHED

spill_normal_to_user_stack_fail:
	sir					! reset -- no way to recover here
	 nop
   3622 
   3623 /*
   3624  * End of traps for sun4v.
   3625  */
   3626 
   3627 #endif
   3628 
   3629 /*
   3630  * We're here because we took an alignment fault in NUCLEUS context.
   3631  * This could be a kernel bug or it could be due to saving a user
   3632  * window to an invalid stack pointer.
   3633  *
   3634  * If the latter is the case, we could try to emulate unaligned accesses,
   3635  * but we really don't know where to store the registers since we can't
   3636  * determine if there's a stack bias.  Or we could store all the regs
   3637  * into the PCB and punt, until the user program uses up all the CPU's
   3638  * register windows and we run out of places to store them.  So for
   3639  * simplicity we'll just blow them away and enter the trap code which
   3640  * will generate a bus error.  Debugging the problem will be a bit
   3641  * complicated since lots of register windows will be lost, but what
   3642  * can we do?
   3643  */
/*
 * checkalign: alignment fault taken in NUCLEUS context (see block
 * comment above).  If the previous trap level was a window spill
 * (type 0b0000 10xx xxxx), blow away the user's windows and convert
 * this into a T_ALIGN trap handled by slowtrap; otherwise just go to
 * slowtrap directly.
 */
checkalign:
	rdpr	%tl, %g2
	subcc	%g2, 1, %g1
	bneg,pn	%icc, slowtrap		! Huh?
	 sethi	%hi(CPCB), %g6		! get current pcb

	wrpr	%g1, 0, %tl		! drop a level to examine the prior trap
	rdpr	%tt, %g7
	rdpr	%tstate, %g4
	andn	%g7, 0x3f, %g5
	cmp	%g5, 0x080		!   window spill traps are all 0b 0000 10xx xxxx
	bne,a,pn	%icc, slowtrap
	 wrpr	%g1, 0, %tl		! Revert TL  XXX wrpr in a delay slot...

#ifdef DEBUG
	cmp	%g7, 0x34		! If we took a datafault just before this trap
	bne,pt	%icc, checkalignspill	! our stack's probably bad so we need to switch somewhere else
	 nop

	!!
	!! Double data fault -- bad stack?
	!!
	wrpr	%g2, %tl		! Restore trap level.
	sir				! Just issue a reset and don't try to recover.
	! NOTE(review): the remaining instructions in this DEBUG block are
	! unreachable after the sir above -- apparently kept as a disabled
	! alternative recovery path; confirm before removing.
	mov	%fp, %l6		! Save the frame pointer
	set	EINTSTACK+USPACE+CC64FSZ-STKB, %fp ! Set the frame pointer to the middle of the idle stack
	add	%fp, -CC64FSZ, %sp	! Create a stackframe
	wrpr	%g0, 15, %pil		! Disable interrupts, too
	wrpr	%g0, %g0, %canrestore	! Our stack is hozed and our PCB
	wrpr	%g0, 7, %cansave	!  probably is too, so blow away
	ba	slowtrap		!  all our register windows.
	 wrpr	%g0, 0x101, %tt
#endif
checkalignspill:
	/*
         * %g1 -- current tl
	 * %g2 -- original tl
	 * %g4 -- tstate
         * %g7 -- tt
	 */

	and	%g4, CWP, %g5
	wrpr	%g5, %cwp		! Go back to the original register win

	/*
	 * Remember:
	 *
	 * %otherwin = 0
	 * %cansave = NWINDOWS - 2 - %canrestore
	 */

	rdpr	%otherwin, %g6
	rdpr	%canrestore, %g3
	rdpr	%ver, %g5
	sub	%g3, %g6, %g3		! Calculate %canrestore - %otherwin
	and	%g5, CWP, %g5		! NWINDOWS-1
	movrlz	%g3, %g0, %g3		! Clamp at zero
	wrpr	%g0, 0, %otherwin
	wrpr	%g3, 0, %canrestore	! This is the new canrestore
	dec	%g5			! NWINDOWS-2
	wrpr	%g5, 0, %cleanwin	! Set cleanwin to max, since we're in-kernel
	sub	%g5, %g3, %g5		! NWINDOWS-2-%canrestore
	wrpr	%g5, 0, %cansave

	wrpr	%g0, T_ALIGN, %tt	! This was an alignment fault
	/*
	 * Now we need to determine if this was a userland store or not.
	 * Userland stores occur in anything other than the kernel spill
	 * handlers (trap type 09x).
	 */
	and	%g7, 0xff0, %g5
	cmp	%g5, 0x90
	bz,pn	%icc, slowtrap
	 nop
	bclr	TSTATE_PRIV, %g4	! pretend the trap came from userland
	wrpr	%g4, 0, %tstate
	ba,a,pt	%icc, slowtrap
	 nop
   3722 
   3723 /*
   3724  * slowtrap() builds a trap frame and calls trap().
   3725  * This is called `slowtrap' because it *is*....
   3726  * We have to build a full frame for ptrace(), for instance.
   3727  *
   3728  * Registers:
   3729  *
   3730  */
/*
 * slowtrap: build a full trapframe and call trap(tf, type, pc, pstate)
 * (see block comment above).  Lslowtrap_reenter is entered by softtrap
 * with %g1-%g4 already holding tstate/tpc/tnpc/tt.
 */
slowtrap:
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
#ifdef DIAGNOSTIC
	/* Make sure kernel stack is aligned */
	btst	0x03, %sp		! 32-bit stack OK?
	 and	%sp, 0x07, %g4		! 64-bit stack OK?
	bz,pt	%icc, 1f
	cmp	%g4, 0x1		! Must end in 0b001
	be,pt	%icc, 1f
	 rdpr	%wstate, %g7
	cmp	%g7, WSTATE_KERN
	bnz,pt	%icc, 1f		! User stack -- we'll blow it away
	 nop
	set	PANICSTACK-CC64FSZ-STKB, %sp	! bad kernel stack: use panic stack
1:
#endif
	rdpr	%tt, %g4
	rdpr	%tstate, %g1
	rdpr	%tpc, %g2
	rdpr	%tnpc, %g3

	TRAP_SETUP(-CC64FSZ-TF_SIZE)
Lslowtrap_reenter:
	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
	mov	%g4, %o1		! (type)
	stx	%g2, [%sp + CC64FSZ + STKB + TF_PC]
	rd	%y, %g5
	stx	%g3, [%sp + CC64FSZ + STKB + TF_NPC]
	mov	%g1, %o3		! (pstate)
	st	%g5, [%sp + CC64FSZ + STKB + TF_Y]
	mov	%g2, %o2		! (pc)
	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]! debug

	! Get back to normal globals -- pick the sun4u or sun4v flavor
#ifdef SUN4V
	sethi	%hi(cputyp), %g5
	ld	[%g5 + %lo(cputyp)], %g5
	cmp	%g5, CPU_SUN4V
	bne,pt	%icc, 1f
	 nop
	NORMAL_GLOBALS_SUN4V
	ba	2f
	 nop
1:
#endif
	NORMAL_GLOBALS_SUN4U
2:
	! Save the (now normal) globals into the trapframe
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + (1*8)]
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + (2*8)]
	add	%sp, CC64FSZ + STKB, %o0		! (&tf)
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + (3*8)]
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + (4*8)]
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + (5*8)]
	rdpr	%pil, %g5
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + (6*8)]
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + (7*8)]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]
	/*
	 * Phew, ready to enable traps and call C code.
	 */
	rdpr	%tl, %g1
	dec	%g1
	movrlz	%g1, %g0, %g1		! clamp at 0 in case we entered at TL=0
	wrpr	%g0, %g1, %tl		! Revert to kernel mode
	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4

	wr	%g0, ASI_PRIMARY_NOFAULT, %asi		! Restore default ASI
	wrpr	%g0, PSTATE_INTR, %pstate	! traps on again
	call	_C_LABEL(trap)			! trap(tf, type, pc, pstate)
	 nop

	b	return_from_trap
	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1	! Load this for return_from_trap
	NOTREACHED
   3810 #if 1
   3811 /*
   3812  * This code is no longer needed.
   3813  */
   3814 /*
   3815  * Do a `software' trap by re-entering the trap code, possibly first
   3816  * switching from interrupt stack to kernel stack.  This is used for
   3817  * scheduling and signal ASTs (which generally occur from softclock or
   3818  * tty or net interrupts).
   3819  *
   3820  * We enter with the trap type in %g1.  All we have to do is jump to
   3821  * Lslowtrap_reenter above, but maybe after switching stacks....
   3822  *
   3823  * We should be running alternate globals.  The normal globals and
   3824  * out registers were just loaded from the old trap frame.
   3825  *
   3826  *	Input Params:
   3827  *	%g1 = tstate
   3828  *	%g2 = tpc
   3829  *	%g3 = tnpc
   3830  *	%g4 = tt == T_AST
   3831  */
/*
 * softtrap: re-enter the trap code for a software trap (see block
 * comment above; %g1-%g4 carry tstate/tpc/tnpc/tt).
 *
 * If %sp is already outside the interrupt stack we can re-enter
 * directly; otherwise switch to the kernel stack (from the PCB),
 * copying the out registers (and under DEBUG the ins and locals)
 * into a fresh trapframe there first.
 */
softtrap:
	sethi	%hi(EINTSTACK-STKB), %g5
	sethi	%hi(EINTSTACK-INTSTACK), %g7
	or	%g5, %lo(EINTSTACK-STKB), %g5
	dec	%g7
	sub	%g5, %sp, %g5
	sethi	%hi(CPCB), %g6
	andncc	%g5, %g7, %g0		! on interrupt stack?
	bnz,pt	%xcc, Lslowtrap_reenter	! no -- current stack is fine
	 LDPTR	[%g6 + %lo(CPCB)], %g7
	set	USPACE-CC64FSZ-TF_SIZE-STKB, %g5
	add	%g7, %g5, %g6		! %g6 = new %sp on kernel stack
	SET_SP_REDZONE(%g7, %g5)
#ifdef DEBUG
	stx	%g1, [%g6 + CC64FSZ + STKB + TF_FAULT]		! Generate a new trapframe
#endif
	stx	%i0, [%g6 + CC64FSZ + STKB + TF_O + (0*8)]	!	but don't bother with
	stx	%i1, [%g6 + CC64FSZ + STKB + TF_O + (1*8)]	!	locals and ins
	stx	%i2, [%g6 + CC64FSZ + STKB + TF_O + (2*8)]
	stx	%i3, [%g6 + CC64FSZ + STKB + TF_O + (3*8)]
	stx	%i4, [%g6 + CC64FSZ + STKB + TF_O + (4*8)]
	stx	%i5, [%g6 + CC64FSZ + STKB + TF_O + (5*8)]
	stx	%i6, [%g6 + CC64FSZ + STKB + TF_O + (6*8)]
	stx	%i7, [%g6 + CC64FSZ + STKB + TF_O + (7*8)]
#ifdef DEBUG
	ldx	[%sp + CC64FSZ + STKB + TF_I + (0*8)], %l0	! Copy over the rest of the regs
	ldx	[%sp + CC64FSZ + STKB + TF_I + (1*8)], %l1	! But just dirty the locals
	ldx	[%sp + CC64FSZ + STKB + TF_I + (2*8)], %l2
	ldx	[%sp + CC64FSZ + STKB + TF_I + (3*8)], %l3
	ldx	[%sp + CC64FSZ + STKB + TF_I + (4*8)], %l4
	ldx	[%sp + CC64FSZ + STKB + TF_I + (5*8)], %l5
	ldx	[%sp + CC64FSZ + STKB + TF_I + (6*8)], %l6
	ldx	[%sp + CC64FSZ + STKB + TF_I + (7*8)], %l7
	stx	%l0, [%g6 + CC64FSZ + STKB + TF_I + (0*8)]
	stx	%l1, [%g6 + CC64FSZ + STKB + TF_I + (1*8)]
	stx	%l2, [%g6 + CC64FSZ + STKB + TF_I + (2*8)]
	stx	%l3, [%g6 + CC64FSZ + STKB + TF_I + (3*8)]
	stx	%l4, [%g6 + CC64FSZ + STKB + TF_I + (4*8)]
	stx	%l5, [%g6 + CC64FSZ + STKB + TF_I + (5*8)]
	stx	%l6, [%g6 + CC64FSZ + STKB + TF_I + (6*8)]
	stx	%l7, [%g6 + CC64FSZ + STKB + TF_I + (7*8)]
	ldx	[%sp + CC64FSZ + STKB + TF_L + (0*8)], %l0
	ldx	[%sp + CC64FSZ + STKB + TF_L + (1*8)], %l1
	ldx	[%sp + CC64FSZ + STKB + TF_L + (2*8)], %l2
	ldx	[%sp + CC64FSZ + STKB + TF_L + (3*8)], %l3
	ldx	[%sp + CC64FSZ + STKB + TF_L + (4*8)], %l4
	ldx	[%sp + CC64FSZ + STKB + TF_L + (5*8)], %l5
	ldx	[%sp + CC64FSZ + STKB + TF_L + (6*8)], %l6
	ldx	[%sp + CC64FSZ + STKB + TF_L + (7*8)], %l7
	stx	%l0, [%g6 + CC64FSZ + STKB + TF_L + (0*8)]
	stx	%l1, [%g6 + CC64FSZ + STKB + TF_L + (1*8)]
	stx	%l2, [%g6 + CC64FSZ + STKB + TF_L + (2*8)]
	stx	%l3, [%g6 + CC64FSZ + STKB + TF_L + (3*8)]
	stx	%l4, [%g6 + CC64FSZ + STKB + TF_L + (4*8)]
	stx	%l5, [%g6 + CC64FSZ + STKB + TF_L + (5*8)]
	stx	%l6, [%g6 + CC64FSZ + STKB + TF_L + (6*8)]
	stx	%l7, [%g6 + CC64FSZ + STKB + TF_L + (7*8)]
#endif
	ba,pt	%xcc, Lslowtrap_reenter
	 mov	%g6, %sp		! switch to the kernel stack
   3892 #endif
   3893 
   3894 #if 0
   3895 /*
   3896  * breakpoint:	capture as much info as possible and then call DDB
   3897  * or trap, as the case may be.
   3898  *
   3899  * First, we switch to interrupt globals, and blow away %g7.  Then
   3900  * switch down one stackframe -- just fiddle w/cwp, don't save or
   3901  * we'll trap.  Then slowly save all the globals into our static
   3902  * register buffer.  etc. etc.
   3903  */
   3904 
/*
 * breakpoint (DISABLED -- inside #if 0): snapshot all four global
 * register sets (IG/MG/AG/G) plus window state into ddb_regs for DDB.
 * NOTE(review): contains a bare text line ("XXX ddb_regs is now ...")
 * that would not assemble if this block were ever re-enabled.
 */
breakpoint:
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! Get IG to use
	rdpr	%cwp, %g7
	inc	1, %g7					! Equivalent of save
	wrpr	%g7, 0, %cwp				! Now we have some unused locals to fiddle with
XXX ddb_regs is now ddb-regp and is a pointer not a symbol.
	set	_C_LABEL(ddb_regs), %l0
	stx	%g1, [%l0+DBR_IG+(1*8)]			! Save IGs
	stx	%g2, [%l0+DBR_IG+(2*8)]
	stx	%g3, [%l0+DBR_IG+(3*8)]
	stx	%g4, [%l0+DBR_IG+(4*8)]
	stx	%g5, [%l0+DBR_IG+(5*8)]
	stx	%g6, [%l0+DBR_IG+(6*8)]
	stx	%g7, [%l0+DBR_IG+(7*8)]
	wrpr	%g0, PSTATE_KERN|PSTATE_MG, %pstate	! Get MG to use
	stx	%g1, [%l0+DBR_MG+(1*8)]			! Save MGs
	stx	%g2, [%l0+DBR_MG+(2*8)]
	stx	%g3, [%l0+DBR_MG+(3*8)]
	stx	%g4, [%l0+DBR_MG+(4*8)]
	stx	%g5, [%l0+DBR_MG+(5*8)]
	stx	%g6, [%l0+DBR_MG+(6*8)]
	stx	%g7, [%l0+DBR_MG+(7*8)]
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! Get AG to use
	stx	%g1, [%l0+DBR_AG+(1*8)]			! Save AGs
	stx	%g2, [%l0+DBR_AG+(2*8)]
	stx	%g3, [%l0+DBR_AG+(3*8)]
	stx	%g4, [%l0+DBR_AG+(4*8)]
	stx	%g5, [%l0+DBR_AG+(5*8)]
	stx	%g6, [%l0+DBR_AG+(6*8)]
	stx	%g7, [%l0+DBR_AG+(7*8)]
	wrpr	%g0, PSTATE_KERN, %pstate	! Get G to use
	stx	%g1, [%l0+DBR_G+(1*8)]			! Save Gs
	stx	%g2, [%l0+DBR_G+(2*8)]
	stx	%g3, [%l0+DBR_G+(3*8)]
	stx	%g4, [%l0+DBR_G+(4*8)]
	stx	%g5, [%l0+DBR_G+(5*8)]
	stx	%g6, [%l0+DBR_G+(6*8)]
	stx	%g7, [%l0+DBR_G+(7*8)]
	rdpr	%canrestore, %l1
	stb	%l1, [%l0+DBR_CANRESTORE]
	rdpr	%cansave, %l2
	stb	%l2, [%l0+DBR_CANSAVE]
	rdpr	%cleanwin, %l3
	stb	%l3, [%l0+DBR_CLEANWIN]
	rdpr	%wstate, %l4
	stb	%l4, [%l0+DBR_WSTATE]
	rd	%y, %l5
	stw	%l5, [%l0+DBR_Y]
	rdpr	%tl, %l6
	stb	%l6, [%l0+DBR_TL]
	dec	1, %g7
   3956 #endif
   3957 
   3958 /*
   3959  * I will not touch any of the DDB or KGDB stuff until I know what's going
   3960  * on with the symbol table.  This is all still v7/v8 code and needs to be fixed.
   3961  */
   3962 #ifdef KGDB
   3963 /*
   3964  * bpt is entered on all breakpoint traps.
   3965  * If this is a kernel breakpoint, we do not want to call trap().
   3966  * Among other reasons, this way we can set breakpoints in trap().
   3967  */
/*
 * bpt: kernel breakpoint trap entry for KGDB.
 * NOTE(review): this is still 32-bit v7/v8 SPARC code (%psr/%wim, 32-bit
 * trap frame) as the comment above states; it will not work unmodified
 * on sparc64 and is only assembled under `#ifdef KGDB'.
 *
 * If the trap came from user mode, punt to the regular slowtrap path.
 * Otherwise build a 32-bit style trap frame on the stack and hand it
 * to kgdb_trap_glue(); if that returns, restore state and re-enter the
 * normal trap path.
 */
bpt:
	set	TSTATE_PRIV, %l4
	andcc	%l4, %l0, %g0		! breakpoint from kernel?
	bz	slowtrap		! no, go do regular trap
	 nop

	/*
	 * Build a trap frame for kgdb_trap_glue to copy.
	 * Enable traps but set ipl high so that we will not
	 * see interrupts from within breakpoints.
	 */
	save	%sp, -CCFSZ-TF_SIZE, %sp		! allocate a trap frame
	TRAP_SETUP(-CCFSZ-TF_SIZE)
	or	%l0, PSR_PIL, %l4	! splhigh()
	wr	%l4, 0, %psr		! the manual claims that this
	wr	%l4, PSR_ET, %psr	! song and dance is necessary
	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
	mov	%l3, %o0		! trap type arg for kgdb_trap_glue
	rd	%y, %l3
	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
	rd	%wim, %l3
	st	%l3, [%sp + CCFSZ + 16]	! tf.tf_wim (a kgdb-only r/o field)
	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
	std	%g2, [%sp + CCFSZ + 24]	! etc
	std	%g4, [%sp + CCFSZ + 32]
	std	%g6, [%sp + CCFSZ + 40]
	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_in[0..1]
	std	%i2, [%sp + CCFSZ + 56]	! etc
	std	%i4, [%sp + CCFSZ + 64]
	std	%i6, [%sp + CCFSZ + 72]

	/*
	 * Now call kgdb_trap_glue(); if it returns, call trap().
	 */
	mov	%o0, %l3		! gotta save trap type
	call	_C_LABEL(kgdb_trap_glue)		! kgdb_trap_glue(type, &trapframe)
	 add	%sp, CCFSZ, %o1		! (&trapframe)

	/*
	 * Use slowtrap to call trap---but first erase our tracks
	 * (put the registers back the way they were).
	 */
	mov	%l3, %o0		! slowtrap will need trap type
	ld	[%sp + CCFSZ + 12], %l3
	wr	%l3, 0, %y
	ld	[%sp + CCFSZ + 20], %g1
	ldd	[%sp + CCFSZ + 24], %g2
	ldd	[%sp + CCFSZ + 32], %g4
	b	Lslowtrap_reenter
	 ldd	[%sp + CCFSZ + 40], %g6
   4018 
   4019 /*
   4020  * Enter kernel breakpoint.  Write all the windows (not including the
   4021  * current window) into the stack, so that backtrace works.  Copy the
   4022  * supplied trap frame to the kgdb stack and switch stacks.
   4023  *
   4024  * kgdb_trap_glue(type, tf0)
   4025  *	int type;
   4026  *	struct trapframe *tf0;
   4027  */
ENTRY_NOPROFILE(kgdb_trap_glue)
	! NOTE(review): still v7/v8-era code (see comment above the KGDB
	! section); only assembled under `#ifdef KGDB'.
	save	%sp, -CCFSZ, %sp

	flushw				! flush all windows
	mov	%sp, %l4		! %l4 = current %sp

	/* copy trapframe to top of kgdb stack */
	set	_C_LABEL(kgdb_stack) + KGDB_STACK_SIZE - 80, %l0
					! %l0 = tfcopy -> end_of_kgdb_stack
	mov	80, %l1			! %l1 = bytes remaining to copy
1:	ldd	[%i1], %l2		! copy 8 bytes per iteration
	inc	8, %i1
	deccc	8, %l1
	std	%l2, [%l0]
	bg	1b
	 inc	8, %l0

#ifdef NOTDEF_DEBUG
	/* save old red zone and then turn it off */
	sethi	%hi(_C_LABEL(redzone)), %l7
	ld	[%l7 + %lo(_C_LABEL(redzone))], %l6
	st	%g0, [%l7 + %lo(_C_LABEL(redzone))]
#endif
	/* switch to kgdb stack */
	add	%l0, -CCFSZ-TF_SIZE, %sp

	/* if (kgdb_trap(type, tfcopy)) kgdb_rett(tfcopy); */
	mov	%i0, %o0
	call	_C_LABEL(kgdb_trap)
	add	%l0, -80, %o1		! &tfcopy (start of the copied frame)
	tst	%o0
	bnz,a	kgdb_rett		! handled: return via kgdb_rett
	 add	%l0, -80, %g1		! (annulled-slot: %g1 = &tfcopy for kgdb_rett)

	/*
	 * kgdb_trap() did not handle the trap at all so the stack is
	 * still intact.  A simple `restore' will put everything back,
	 * after we reset the stack pointer.
	 */
	mov	%l4, %sp
#ifdef NOTDEF_DEBUG
	st	%l6, [%l7 + %lo(_C_LABEL(redzone))]	! restore red zone
#endif
	ret
	 restore
   4073 
   4074 /*
   4075  * Return from kgdb trap.  This is sort of special.
   4076  *
   4077  * We know that kgdb_trap_glue wrote the window above it, so that we will
   4078  * be able to (and are sure to have to) load it up.  We also know that we
   4079  * came from kernel land and can assume that the %fp (%i6) we load here
   4080  * is proper.  We must also be sure not to lower ipl (it is at splhigh())
   4081  * until we have traps disabled, due to the SPARC taking traps at the
   4082  * new ipl before noticing that PSR_ET has been turned off.  We are on
   4083  * the kgdb stack, so this could be disastrous.
   4084  *
   4085  * Note that the trapframe argument in %g1 points into the current stack
   4086  * frame (current window).  We abandon this window when we move %g1->tf_psr
   4087  * into %psr, but we will not have loaded the new %sp yet, so again traps
   4088  * must be disabled.
   4089  */
kgdb_rett:
	! NOTE(review): v7/v8 %psr/%wim code; only built under KGDB.
	! On entry %g1 points at the trapframe copy on the kgdb stack
	! (see kgdb_trap_glue).  Traps must stay disabled until the new
	! %psr/%sp pair is fully installed -- see the block comment above.
	rd	%psr, %g4		! turn off traps
	wr	%g4, PSR_ET, %psr
	/* use the three-instruction delay to do something useful */
	ld	[%g1], %g2		! pick up new %psr
	ld	[%g1 + 12], %g3		! set %y
	wr	%g3, 0, %y
#ifdef NOTDEF_DEBUG
	st	%l6, [%l7 + %lo(_C_LABEL(redzone))] ! and restore red zone
#endif
	wr	%g0, 0, %wim		! enable window changes
	nop; nop; nop
	/* now safe to set the new psr (changes CWP, leaves traps disabled) */
	wr	%g2, 0, %psr		! set rett psr (including cond codes)
	/* 3 instruction delay before we can use the new window */
/*1*/	ldd	[%g1 + 24], %g2		! set new %g2, %g3
/*2*/	ldd	[%g1 + 32], %g4		! set new %g4, %g5
/*3*/	ldd	[%g1 + 40], %g6		! set new %g6, %g7

	/* now we can use the new window */
	mov	%g1, %l4		! stash trapframe ptr before %g1 is reloaded
	ld	[%l4 + 4], %l1		! get new pc
	ld	[%l4 + 8], %l2		! get new npc
	ld	[%l4 + 20], %g1		! set new %g1

	/* set up returnee's out registers, including its %sp */
	ldd	[%l4 + 48], %i0
	ldd	[%l4 + 56], %i2
	ldd	[%l4 + 64], %i4
	ldd	[%l4 + 72], %i6

	/* load returnee's window, making the window above it be invalid */
	restore
	restore	%g0, 1, %l1		! move to inval window and set %l1 = 1
	rd	%psr, %l0
	srl	%l1, %l0, %l1
	wr	%l1, 0, %wim		! %wim = 1 << (%psr & 31)
	sethi	%hi(CPCB), %l1
	LDPTR	[%l1 + %lo(CPCB)], %l1
	and	%l0, 31, %l0		! CWP = %psr & 31;
!	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = CWP;
	save	%g0, %g0, %g0		! back to window to reload
!	LOADWIN(%sp)
	save	%g0, %g0, %g0		! back to trap window
	/* note, we have not altered condition codes; safe to just rett */
	RETT
   4136 #endif
   4137 
   4138 /*
   4139  * syscall_setup() builds a trap frame and calls syscall().
   4140  * sun_syscall is same but delivers sun system call number
   4141  * XXX	should not have to save&reload ALL the registers just for
   4142  *	ptrace...
   4143  */
syscall_setup:
	! Build a trapframe and dispatch to the process's syscall entry
	! (p->p_md.md_syscall), then fall into return_from_syscall.
	! On entry the trap registers (%tpc/%tnpc/%tstate, syscall code
	! in %g1) still describe the faulting user context.
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	TRAP_SETUP(-CC64FSZ-TF_SIZE)

#ifdef DEBUG
	rdpr	%tt, %o1	! debug
	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]! debug
#endif

	! Get back to normal globals
#ifdef SUN4V
	sethi	%hi(cputyp), %g5
	ld	[%g5 + %lo(cputyp)], %g5
	cmp	%g5, CPU_SUN4V
	bne,pt	%icc, 1f
	 nop
	NORMAL_GLOBALS_SUN4V
	ba	2f
	 nop
1:
#endif
	NORMAL_GLOBALS_SUN4U
2:

	! Save the (now-normal) globals into the trapframe, interleaved
	! with reads of the trap registers we are about to lose.
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
	mov	%g1, %o1			! code
	rdpr	%tpc, %o2			! (pc)
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
	rdpr	%tstate, %g1
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
	rdpr	%tnpc, %o3
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
	rd	%y, %o4
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
	wrpr	%g0, 0, %tl			! return to tl=0
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]
	add	%sp, CC64FSZ + STKB, %o0	! (&tf)

	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
	stx	%o2, [%sp + CC64FSZ + STKB + TF_PC]
	stx	%o3, [%sp + CC64FSZ + STKB + TF_NPC]
	st	%o4, [%sp + CC64FSZ + STKB + TF_Y]

	rdpr	%pil, %g5
	stb	%g5, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]

	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4
	wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Restore default ASI

	! Call (*curlwp->l_proc->p_md.md_syscall)(&tf, code, pc)
	sethi	%hi(CURLWP), %l1
	LDPTR	[%l1 + %lo(CURLWP)], %l1
	LDPTR	[%l1 + L_PROC], %l1		! now %l1 points to p
	LDPTR	[%l1 + P_MD_SYSCALL], %l1
	call	%l1
	 wrpr	%g0, PSTATE_INTR, %pstate	! turn on interrupts

	/* see `lwp_trampoline' for the reason for this label */
return_from_syscall:
	wrpr	%g0, PSTATE_KERN, %pstate	! Disable interrupts
	wrpr	%g0, 0, %tl			! Return to tl==0
	b	return_from_trap
	 nop
	NOTREACHED
   4213 
   4214 /*
   4215  * interrupt_vector:
   4216  *
   4217  * Spitfire chips never get level interrupts directly from H/W.
   4218  * Instead, all interrupts come in as interrupt_vector traps.
   4219  * The interrupt number or handler address is an 11 bit number
   4220  * encoded in the first interrupt data word.  Additional words
   4221  * are application specific and used primarily for cross-calls.
   4222  *
   4223  * The interrupt vector handler then needs to identify the
   4224  * interrupt source from the interrupt number and arrange to
   4225  * invoke the interrupt handler.  This can either be done directly
   4226  * from here, or a softint at a particular level can be issued.
   4227  *
   4228  * To call an interrupt directly and not overflow the trap stack,
   4229  * the trap registers should be saved on the stack, registers
   4230  * cleaned, trap-level decremented, the handler called, and then
   4231  * the process must be reversed.
   4232  *
   4233  * To simplify life all we do here is issue an appropriate softint.
   4234  *
   4235  * Note:	It is impossible to identify or change a device's
   4236  *		interrupt number until it is probed.  That's the
   4237  *		purpose for all the funny interrupt acknowledge
   4238  *		code.
   4239  *
   4240  */
   4241 
   4242 /*
   4243  * Vectored interrupts:
   4244  *
   4245  * When an interrupt comes in, interrupt_vector uses the interrupt
   4246  * vector number to lookup the appropriate intrhand from the intrlev
   4247  * array.  It then looks up the interrupt level from the intrhand
   4248  * structure.  It uses the level to index the intrpending array,
   4249  * which is 8 slots for each possible interrupt level (so we can
   4250  * shift instead of multiply for address calculation).  It hunts for
   4251  * any available slot at that level.  Available slots are NULL.
   4252  *
   4253  * Then interrupt_vector uses the interrupt level in the intrhand
   4254  * to issue a softint of the appropriate level.  The softint handler
   4255  * figures out what level interrupt it's handling and pulls the first
   4256  * intrhand pointer out of the intrpending array for that interrupt
   4257  * level, puts a NULL in its place, clears the interrupt generator,
   4258  * and invokes the interrupt handler.
   4259  */
   4260 
   4261 /* intrpending array is now in per-CPU structure. */
   4262 
#ifdef DEBUG
/* Bit flags for the intrdebug mask below (set from ddb to enable output). */
#define INTRDEBUG_VECTOR	0x1
#define INTRDEBUG_LEVEL		0x2
#define INTRDEBUG_FUNC		0x4
#define INTRDEBUG_SPUR		0x8
	.data
	.globl	_C_LABEL(intrdebug)
_C_LABEL(intrdebug):	.word 0x0
/*
 * Note: we use the local label `97' to branch forward to, to skip
 * actual debugging code following a `intrdebug' bit test.
 */
#endif
	.text
/*
 * interrupt_vector: entry point for interrupt_vector traps (see the
 * big comment above).  Reads the dispatch word from the interrupt
 * receive data register; on MP kernels a value >= KERNBASE is treated
 * as a cross-call handler address and jumped to directly, otherwise
 * it is masked to an interrupt number and dispatched via intrlev[].
 */
interrupt_vector:
#ifdef TRAPSTATS
	set	_C_LABEL(kiveccnt), %g1
	set	_C_LABEL(iveccnt), %g2
	rdpr	%tl, %g3
	dec	%g3
	movrz	%g3, %g2, %g1		! pick kernel vs. user counter by %tl
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
#endif
	ldxa	[%g0] ASI_IRSR, %g1
	mov	IRDR_0H, %g7
	ldxa	[%g7] ASI_IRDR, %g7	! Get interrupt number
	membar	#Sync

	btst	IRSR_BUSY, %g1
	bz,pn	%icc, 3f		! spurious interrupt
#ifdef MULTIPROCESSOR
	 sethi	%hi(KERNBASE), %g1

	cmp	%g7, %g1
	bl,a,pt	%xcc, Lsoftint_regular	! >= KERNBASE is a fast cross-call
	 and	%g7, (MAXINTNUM-1), %g7	! XXX make sun4us work

	mov	IRDR_1H, %g2
	ldxa	[%g2] ASI_IRDR, %g2	! Get IPI handler argument 1
	mov	IRDR_2H, %g3
	ldxa	[%g3] ASI_IRDR, %g3	! Get IPI handler argument 2

	stxa	%g0, [%g0] ASI_IRSR	! Ack IRQ
	membar	#Sync			! Should not be needed due to retry

	jmpl	%g7, %g0		! jump to the IPI handler directly
	 nop
#else
	and	%g7, (MAXINTNUM-1), %g7	! XXX make sun4us work
#endif
   4315 
/*
 * Regular (non-IPI) vectored interrupt: look up the intrhand via
 * intrlev[], enqueue it on this CPU's intrpending list for its IPL,
 * and raise the matching softint.  setup_sparcintr is also entered
 * directly from sparc_interrupt for %tick/%stick softints.
 */
Lsoftint_regular:
	stxa	%g0, [%g0] ASI_IRSR	! Ack IRQ
	membar	#Sync			! Should not be needed due to retry
	sethi	%hi(_C_LABEL(intrlev)), %g3
	sllx	%g7, PTRSHFT, %g5	! Calculate entry number
	or	%g3, %lo(_C_LABEL(intrlev)), %g3
	LDPTR	[%g3 + %g5], %g5	! We have a pointer to the handler
	brz,pn	%g5, 3f			! NULL means it isn't registered yet.  Skip it.
	 nop

	! increment per-ivec counter
	ldx	[%g5 + IH_CNT], %g1
	inc	%g1
	stx	%g1, [%g5 + IH_CNT]

setup_sparcintr:
	LDPTR	[%g5+IH_PEND], %g6	! Read pending flag
	brnz,pn	%g6, ret_from_intr_vector ! Skip it if it's running
	 ldub	[%g5+IH_PIL], %g6	! Read interrupt level
	sethi	%hi(CPUINFO_VA+CI_INTRPENDING), %g1
	sll	%g6, PTRSHFT, %g3	! Find start of table for this IPL
	or	%g1, %lo(CPUINFO_VA+CI_INTRPENDING), %g1
	add	%g1, %g3, %g1
1:
	! Lock-free LIFO push of this intrhand onto the per-IPL list.
	LDPTR	[%g1], %g3		! Load list head
	STPTR	%g3, [%g5+IH_PEND]	! Link our intrhand node in
	mov	%g5, %g7
	CASPTRA	[%g1] ASI_N, %g3, %g7
	cmp	%g7, %g3		! Did it work?
	bne,pn	CCCR, 1b		! No, try again
	 .empty
2:
#ifdef NOT_DEBUG
	set	_C_LABEL(intrdebug), %g7
	ld	[%g7], %g7
	btst	INTRDEBUG_VECTOR, %g7
	bz,pt	%icc, 97f
	 nop

	cmp	%g6, 0xa		! ignore clock interrupts?
	bz,pt	%icc, 97f
	 nop

	STACKFRAME(-CC64FSZ)		! Get a clean register window
	LOAD_ASCIZ(%o0,\
	    "interrupt_vector: number %lx softint mask %lx pil %lu slot %p\n")
	mov	%g2, %o1
	rdpr	%pil, %o3
	mov	%g1, %o4
	GLOBTOLOC
	clr	%g4
	call	prom_printf
	 mov	%g6, %o2
	LOCTOGLOB
	restore
97:
#endif
	mov	1, %g7
	sll	%g7, %g6, %g6		! 1 << ipl
	wr	%g6, 0, SET_SOFTINT	! Invoke a softint

	.global ret_from_intr_vector
ret_from_intr_vector:
	retry
	NOTREACHED

3:
	! Spurious / unregistered vector: log it on the panic stack.
#ifdef NOT_DEBUG	/* always do this */
	set	_C_LABEL(intrdebug), %g6
	ld	[%g6], %g6
	btst	INTRDEBUG_SPUR, %g6
	bz,pt	%icc, 97f
	 nop
#endif
#if 1
	set	PANICSTACK-STKB, %g1	! Use panic stack temporarily
	save	%g1, -CC64FSZ, %sp	! Get a clean register window
	LOAD_ASCIZ(%o0, "interrupt_vector: spurious vector %lx at pil %d\n")
	mov	%g7, %o1
	GLOBTOLOC
	clr	%g4
	call	prom_printf
	 rdpr	%pil, %o2
	LOCTOGLOB
	restore
97:
#endif
	ba,a	ret_from_intr_vector
	 nop				! XXX spitfire bug?
   4405 
/*
 * sun4v CPU mondo (cross-call) queue handler: dequeue one 64-byte
 * element (func, arg1, arg2) from the CPU mondo queue, advance the
 * head index, and jump to the function with the args in %g3/%g5.
 * Currently disabled: the leading `sir' forces a reset on entry.
 */
sun4v_cpu_mondo:
! XXX Rework this when a UP kernel works - crash for now
	sir
	mov	0x3c0, %g1			 ! CPU Mondo Queue Head
	ldxa	[%g1] ASI_QUEUE, %g2		 ! fetch index value for head
	set	CPUINFO_VA, %g3
	ldx	[%g3 + CI_PADDR], %g3
	add	%g3, CI_CPUMQ, %g3
	ldxa	[%g3] ASI_PHYS_CACHED, %g3	 ! fetch head element
	ldxa	[%g3 + %g2] ASI_PHYS_CACHED, %g4 ! fetch func
	add	%g2, 8, %g5
	ldxa	[%g3 + %g5] ASI_PHYS_CACHED, %g5 ! fetch arg1
	add	%g2, 16, %g6
	ldxa	[%g3 + %g6] ASI_PHYS_CACHED, %g6 ! fetch arg2
	add	%g2, 64, %g2			 ! point to next element in queue
	and	%g2, 0x7ff, %g2			 ! modulo queue size 2048 (32*64)
	stxa	%g2, [%g1] ASI_QUEUE		 ! update head index
	membar	#Sync

	! Shuffle into the registers the handler expects: func in %g2,
	! args in %g3 and %g5.
	mov	%g4, %g2
	mov	%g5, %g3
	mov	%g6, %g5
	jmpl	%g2, %g0
	 nop			! No store here!
	retry
	NOTREACHED
   4432 
/*
 * sun4v device mondo queue handler: pop one entry off the dev mondo
 * queue and dispatch it.  Entries below MAXINTNUM are legacy sysino
 * numbers looked up through intrlev[]; larger values are cookies used
 * directly as intrhand pointers.  Valid handles go to setup_sparcintr,
 * invalid ones to the spurious-interrupt logger (label 3 above).
 */
sun4v_dev_mondo:
	mov	0x3d0, %g1			! Dev Mondo Queue Head
	ldxa	[%g1] ASI_QUEUE, %g2		! fetch index value
	mov	0x3d8, %g1			! Dev Mondo Queue Tail
	ldxa	[%g1] ASI_QUEUE, %g4		! fetch index value
	cmp	%g2, %g4			! head = queue?
	bne,pt 	%xcc, 2f			! unusually not the case
	 nop
	retry					! unlikely, ignore interrupt
2:
	set	CPUINFO_VA, %g3			 ! fetch cpuinfo pa
	ldx	[%g3 + CI_PADDR], %g3		 ! fetch intstack pa
	set	CPUINFO_VA-INTSTACK, %g4	 ! offset to cpuinfo
	add	%g4, %g3, %g3			 ! %g3 is now cpuifo
	add	%g3, CI_DEVMQ, %g3		 ! calc offset to devmq
	ldxa	[%g3] ASI_PHYS_CACHED, %g3	 ! fetch address of devmq
	ldxa	[%g3 + %g2] ASI_PHYS_CACHED, %g5 !
	add	%g2, 64, %g2			 ! each element is 64 bytes
	and	%g2, 0x7ff, %g2			 ! assume 32 elements
	mov	0x3d0, %g1			 ! Dev Mondo Queue Head
	stxa	%g2, [%g1] ASI_QUEUE		 ! adjust head index value
	membar	#Sync

	cmp	%g5, MAXINTNUM			! Handle both sun4v legacy (sysino) and cookies.
	bgeu,pn	%xcc, 1f			! See UltraSPARC Virtual Machine Specification
	 nop					! version 3 chapter 6 (Interrupt model)

	sethi	%hi(_C_LABEL(intrlev)), %g3
	sllx	%g5, PTRSHFT, %g5	! Calculate entry number
	or	%g3, %lo(_C_LABEL(intrlev)), %g3
	LDPTR	[%g3 + %g5], %g5	! We have a pointer to the handler
1:
	brnz,pt	%g5, setup_sparcintr	! branch if valid handle
	 nop

	ba,a	3b			! log if invalid handle
	 nop
   4470 
   4471 /*
   4472  * Ultra1 and Ultra2 CPUs use soft interrupts for everything.  What we do
   4473  * on a soft interrupt, is we should check which bits in SOFTINT(%asr22)
   4474  * are set, handle those interrupts, then clear them by setting the
   4475  * appropriate bits in CLEAR_SOFTINT(%asr21).
   4476  *
   4477  * We have an array of 8 interrupt vector slots for each of 15 interrupt
   4478  * levels.  If a vectored interrupt can be dispatched, the dispatch
   4479  * routine will place a pointer to an intrhand structure in one of
   4480  * the slots.  The interrupt handler will go through the list to look
   4481  * for an interrupt to dispatch.  If it finds one it will pull it off
   4482  * the list, free the entry, and call the handler.  The code is like
   4483  * this:
   4484  *
   4485  *	for (i=0; i<8; i++)
   4486  *		if (ih = intrpending[intlev][i]) {
   4487  *			intrpending[intlev][i] = NULL;
   4488  *			if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
   4489  *				return;
   4490  *			strayintr(&frame);
   4491  *			return;
   4492  *		}
   4493  *
   4494  * Otherwise we go back to the old style of polled interrupts.
   4495  *
   4496  * After preliminary setup work, the interrupt is passed to each
   4497  * registered handler in turn.  These are expected to return nonzero if
   4498  * they took care of the interrupt.  If a handler claims the interrupt,
   4499  * we exit (hardware interrupts are latched in the requestor so we'll
   4500  * just take another interrupt in the unlikely event of simultaneous
   4501  * interrupts from two different devices at the same level).  If we go
   4502  * through all the registered handlers and no one claims it, we report a
   4503  * stray interrupt.  This is more or less done as:
   4504  *
   4505  *	for (ih = intrhand[intlev]; ih; ih = ih->ih_next)
   4506  *		if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
   4507  *			return;
   4508  *	strayintr(&frame);
   4509  *
   4510  * Inputs:
   4511  *	%l0 = %tstate
   4512  *	%l1 = return pc
   4513  *	%l2 = return npc
   4514  *	%l3 = interrupt level
   4515  *	(software interrupt only) %l4 = bits to clear in interrupt register
   4516  *
   4517  * Internal:
   4518  *	%l4, %l5: local variables
   4519  *	%l6 = %y
   4520  *	%l7 = %g1
   4521  *	%g2..%g7 go to stack
   4522  *
   4523  * An interrupt frame is built in the space for a full trapframe;
   4524  * this contains the psr, pc, npc, and interrupt level.
   4525  *
   4526  * The level of this interrupt is determined by:
   4527  *
   4528  *       IRQ# = %tt - 0x40
   4529  */
   4530 
/*
 * sparc_interrupt: hardware/soft interrupt trap entry (see the big
 * comment above for inputs and the overall dispatch model).
 * Builds an interrupt frame, bumps the per-level event counter,
 * raises %pil to the interrupt's level, then drains this CPU's
 * intrpending list for that level, calling each handler (and its
 * optional clear/ack hooks) in FIFO order.  Exits via return_from_trap.
 */
ENTRY_NOPROFILE(sparc_interrupt)
#ifdef TRAPS_USE_IG
	! This is for interrupt debugging
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	/*
	 * If this is a %tick or %stick softint, clear it then call
	 * interrupt_vector. Only one of them should be enabled at any given
	 * time.
	 */
	rd	SOFTINT, %g1
	set	TICK_INT|STICK_INT, %g5
	andcc	%g5, %g1, %g5
	bz,pt	%icc, 0f
	 sethi	%hi(CPUINFO_VA+CI_TICK_IH), %g3
	wr	%g0, %g5, CLEAR_SOFTINT
	ba,pt	%icc, setup_sparcintr
	 LDPTR	[%g3 + %lo(CPUINFO_VA+CI_TICK_IH)], %g5
0:

#ifdef TRAPSTATS
	sethi	%hi(_C_LABEL(kintrcnt)), %g1
	sethi	%hi(_C_LABEL(uintrcnt)), %g2
	or	%g1, %lo(_C_LABEL(kintrcnt)), %g1
	or	%g2, %lo(_C_LABEL(uintrcnt)), %g2	! FIX: was `or %g1, ...' which built &uintrcnt from &kintrcnt
	rdpr	%tl, %g3
	dec	%g3
	movrz	%g3, %g2, %g1		! came from %tl==1 -> count as user intr
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
	/* See if we're on the interrupt stack already. */
	set	EINTSTACK, %g2
	set	(EINTSTACK-INTSTACK), %g1
	btst	1, %sp
	add	%sp, BIAS, %g3
	movz	%icc, %sp, %g3
	srl	%g3, 0, %g3
	sub	%g2, %g3, %g3
	cmp	%g3, %g1
	bgu	1f
	 set	_C_LABEL(intristk), %g1
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
1:
#endif
	INTR_SETUP(-CC64FSZ-TF_SIZE)

	! Switch to normal globals so we can save them
#ifdef SUN4V
	sethi	%hi(cputyp), %g5
	ld	[%g5 + %lo(cputyp)], %g5
	cmp	%g5, CPU_SUN4V
	bne,pt	%icc, 1f
	 nop
	NORMAL_GLOBALS_SUN4V
	! Save the normal globals
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]

	/*
	 * In the EMBEDANY memory model %g4 points to the start of the
	 * data segment.  In our case we need to clear it before calling
	 * any C-code.
	 */
	clr	%g4

	ba	2f
	 nop
1:
#endif
	NORMAL_GLOBALS_SUN4U
	! Save the normal globals
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]

	/*
	 * In the EMBEDANY memory model %g4 points to the start of the
	 * data segment.  In our case we need to clear it before calling
	 * any C-code.
	 */
	clr	%g4

	flushw			! Do not remove this insn -- causes interrupt loss

2:
	rd	%y, %l6
	INCR64(CPUINFO_VA+CI_NINTR)	! cnt.v_ints++ (clobbers %o0,%o1)
	rdpr	%tt, %l5		! Find out our current IPL
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%tl, %l3		! Dump our trap frame now we have taken the IRQ
	stw	%l6, [%sp + CC64FSZ + STKB + TF_Y]	! Silly, but we need to save this for rft
	dec	%l3
	wrpr	%g0, %l3, %tl
	sth	%l5, [%sp + CC64FSZ + STKB + TF_TT]! debug
	stx	%l0, [%sp + CC64FSZ + STKB + TF_TSTATE]	! set up intrframe/clockframe
	stx	%l1, [%sp + CC64FSZ + STKB + TF_PC]
	btst	TSTATE_PRIV, %l0		! User mode?
	stx	%l2, [%sp + CC64FSZ + STKB + TF_NPC]

	sub	%l5, 0x40, %l6			! Convert to interrupt level (IRQ# = %tt - 0x40)
	sethi	%hi(_C_LABEL(intr_evcnts)), %l4
	stb	%l6, [%sp + CC64FSZ + STKB + TF_PIL]	! set up intrframe/clockframe
	rdpr	%pil, %o1
	mulx	%l6, EVC_SIZE, %l3
	or	%l4, %lo(_C_LABEL(intr_evcnts)), %l4	! intrcnt[intlev]++;
	stb	%o1, [%sp + CC64FSZ + STKB + TF_OLDPIL]	! old %pil
	ldx	[%l4 + %l3], %o0
	add	%l4, %l3, %l4
	clr	%l5			! Zero handled count
#ifdef MULTIPROCESSOR
	mov	1, %l3			! Ack softint
1:	add	%o0, 1, %l7		! atomic event-counter increment via casxa
	casxa	[%l4] ASI_N, %o0, %l7
	cmp	%o0, %l7
	bne,a,pn %xcc, 1b		! retry if changed
	 mov	%l7, %o0
#else
	inc	%o0
	mov	1, %l3			! Ack softint
	stx	%o0, [%l4]
#endif
	sll	%l3, %l6, %l3		! Generate IRQ mask

	wrpr	%l6, %pil		! block same/lower-level interrupts

#define SOFTINT_INT \
	(1<<IPL_SOFTCLOCK|1<<IPL_SOFTBIO|1<<IPL_SOFTNET|1<<IPL_SOFTSERIAL)

	! Increment the per-cpu interrupt depth in case of hardintrs
	btst	SOFTINT_INT, %l3
	bnz,pn	%icc, sparc_intr_retry
	 sethi	%hi(CPUINFO_VA+CI_IDEPTH), %l1
	ld	[%l1 + %lo(CPUINFO_VA+CI_IDEPTH)], %l2
	inc	%l2
	st	%l2, [%l1 + %lo(CPUINFO_VA+CI_IDEPTH)]

sparc_intr_retry:
	wr	%l3, 0, CLEAR_SOFTINT	! (don't clear possible %tick IRQ)
	sethi	%hi(CPUINFO_VA+CI_INTRPENDING), %l4
	sll	%l6, PTRSHFT, %l2
	or	%l4, %lo(CPUINFO_VA+CI_INTRPENDING), %l4
	add	%l2, %l4, %l4

1:
	membar	#StoreLoad		! Make sure any failed casxa insns complete
	LDPTR	[%l4], %l2		! Check a slot
	cmp	%l2, -1
	beq,pn	CCCR, intrcmplt		! Empty list?
	 mov	-1, %l7
	membar	#LoadStore
	CASPTRA	[%l4] ASI_N, %l2, %l7	! Grab the entire list
	cmp	%l7, %l2
	bne,pn	CCCR, 1b
	 add	%sp, CC64FSZ+STKB, %o2	! tf = %sp + CC64FSZ + STKB
	LDPTR	[%l2 + IH_PEND], %l7
	cmp	%l7, -1			! Last slot?
	be,pt	CCCR, 3f
	 membar	#LoadStore

	/*
	 * Reverse a pending list since setup_sparcintr/send_softint
	 * makes it in a LIFO order.
	 */
	mov	-1, %o0			! prev = -1
1:	STPTR	%o0, [%l2 + IH_PEND]	! ih->ih_pending = prev
	mov	%l2, %o0		! prev = ih
	mov	%l7, %l2		! ih = ih->ih_pending
	LDPTR	[%l2 + IH_PEND], %l7
	cmp	%l7, -1			! Last slot?
	bne,pn	CCCR, 1b
	 membar	#LoadStore
	ba,pt	CCCR, 3f
	 mov	%o0, %l7		! save ih->ih_pending

2:
	add	%sp, CC64FSZ+STKB, %o2	! tf = %sp + CC64FSZ + STKB
	LDPTR	[%l2 + IH_PEND], %l7	! save ih->ih_pending
	membar	#LoadStore
3:
	STPTR	%g0, [%l2 + IH_PEND]	! Clear pending flag
	membar	#Sync
	LDPTR	[%l2 + IH_FUN], %o4	! ih->ih_fun
	LDPTR	[%l2 + IH_ARG], %o0	! ih->ih_arg

#ifdef NOT_DEBUG
	set	_C_LABEL(intrdebug), %o3
	ld	[%o2], %o3
	btst	INTRDEBUG_FUNC, %o3
	bz,a,pt	%icc, 97f
	 nop

	cmp	%l6, 0xa		! ignore clock interrupts?
	bz,pt	%icc, 97f
	 nop

	STACKFRAME(-CC64FSZ)		! Get a clean register window
	LOAD_ASCIZ(%o0, "sparc_interrupt: func %p arg %p\n")
	mov	%i0, %o2		! arg
	GLOBTOLOC
	call	prom_printf
	 mov	%i4, %o1		! func
	LOCTOGLOB
	restore
97:
	mov	%l4, %o1
#endif

	wrpr	%g0, PSTATE_INTR, %pstate	! Reenable interrupts
	jmpl	%o4, %o7		! handled = (*ih->ih_fun)(...)
	 movrz	%o0, %o2, %o0		! arg = (arg == 0) ? tf : arg
	wrpr	%g0, PSTATE_KERN, %pstate	! Disable interrupts
	LDPTR	[%l2 + IH_CLR], %l1
	membar	#Sync

	brz,pn	%l1, 0f
	 add	%l5, %o0, %l5		! accumulate handled count
	stx	%g0, [%l1]		! Clear intr source
	membar	#Sync			! Should not be needed
0:
	LDPTR	[%l2 + IH_ACK], %l1	! ih->ih_ack
	brz,pn	%l1, 1f
	 nop
	jmpl	%l1, %o7		! (*ih->ih_ack)(ih)
	 mov	%l2, %o0
1:
	cmp	%l7, -1
	bne,pn	CCCR, 2b		! 'Nother?
	 mov	%l7, %l2

intrcmplt:
	/*
	 * Re-read SOFTINT to see if any new interrupts are pending
	 * at this level.
	 */
	mov	1, %l3			! Ack softint
	rd	SOFTINT, %l7		! %l5 contains #intr handled.
	sll	%l3, %l6, %l3		! Generate IRQ mask
	btst	%l3, %l7		! leave mask in %l3 for retry code
	bnz,pn	%icc, sparc_intr_retry
	 mov	1, %l5			! initialize intr count for next run

	! Decrement this cpu's interrupt depth in case of hardintrs
	btst	SOFTINT_INT, %l3
	bnz,pn	%icc, 1f
	 sethi	%hi(CPUINFO_VA+CI_IDEPTH), %l4
	ld	[%l4 + %lo(CPUINFO_VA+CI_IDEPTH)], %l5
	dec	%l5
	st	%l5, [%l4 + %lo(CPUINFO_VA+CI_IDEPTH)]
1:

#ifdef NOT_DEBUG
	set	_C_LABEL(intrdebug), %o2
	ld	[%o2], %o2
	btst	INTRDEBUG_FUNC, %o2
	bz,a,pt	%icc, 97f
	 nop

	cmp	%l6, 0xa		! ignore clock interrupts?
	bz,pt	%icc, 97f
	 nop

	STACKFRAME(-CC64FSZ)		! Get a clean register window
	LOAD_ASCIZ(%o0, "sparc_interrupt:  done\n")
	GLOBTOLOC
	call	prom_printf
	 nop
	LOCTOGLOB
	restore
97:
#endif

	ldub	[%sp + CC64FSZ + STKB + TF_OLDPIL], %l3	! restore old %pil
	wrpr	%l3, 0, %pil

	b	return_from_trap
	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1	! Load this for return_from_trap
   4821 
   4822 #ifdef notyet
   4823 /*
   4824  * Level 12 (ZS serial) interrupt.  Handle it quickly, schedule a
   4825  * software interrupt, and get out.  Do the software interrupt directly
   4826  * if we would just take it on the way out.
   4827  *
   4828  * Input:
   4829  *	%l0 = %psr
   4830  *	%l1 = return pc
   4831  *	%l2 = return npc
   4832  * Internal:
   4833  *	%l3 = zs device
   4834  *	%l4, %l5 = temporary
   4835  *	%l6 = rr3 (or temporary data) + 0x100 => need soft int
   4836  *	%l7 = zs soft status
   4837  */
   4838 zshard:
   4839 #endif /* notyet */
   4840 
   4841 	.globl	return_from_trap, rft_kernel, rft_user
   4842 	.globl	softtrap, slowtrap
   4843 
   4844 /*
   4845  * Various return-from-trap routines (see return_from_trap).
   4846  */
   4847 
   4848 /*
   4849  * Return from trap.
   4850  * registers are:
   4851  *
   4852  *	[%sp + CC64FSZ + STKB] => trap frame
   4853  *      %g1 => tstate from trap frame
   4854  *
   4855  * We must load all global, out, and trap registers from the trap frame.
   4856  *
   4857  * If returning to kernel, we should be at the proper trap level because
   4858  * we don't touch %tl.
   4859  *
   4860  * When returning to user mode, the trap level does not matter, as it
   4861  * will be set explicitly.
   4862  *
   4863  * If we are returning to user code, we must:
   4864  *  1.  Check for register windows in the pcb that belong on the stack.
   4865  *	If there are any, reload them
   4866  */
return_from_trap:
#ifdef DEBUG
	!! Make sure we don't have pc == npc == 0 or we suck.
	ldx	[%sp + CC64FSZ + STKB + TF_PC], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_NPC], %g3
	orcc	%g2, %g3, %g0
	tz	%icc, 1			! trap into the debugger if both zero
#endif

	!!
	!! We'll make sure we flush our pcb here, rather than later.
	!!
!	ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1	! already passed in, no need to reload
	btst	TSTATE_PRIV, %g1			! returning to userland?

	!!
	!! Let all pending interrupts drain before returning to userland
	!! (briefly open the interrupt window at %pil 0).
	!!
	bnz,pn	%icc, 1f				! Returning to userland?
	 nop
	ENABLE_INTERRUPTS %g5
	wrpr	%g0, %g0, %pil				! Lower IPL
1:
	!! Make sure we have no IRQs while we rebuild the trap state
	DISABLE_INTERRUPTS %g5

#ifdef SUN4V
	sethi	%hi(cputyp), %g5
	ld	[%g5 + %lo(cputyp)], %g5
	cmp	%g5, CPU_SUN4V
	bne,pt	%icc, 1f
	 nop
	!! Make sure we have normal globals
	NORMAL_GLOBALS_SUN4V
	/* Restore normal globals from the trap frame */
	ldx	[%sp + CC64FSZ + STKB + TF_G + (1*8)], %g1
	ldx	[%sp + CC64FSZ + STKB + TF_G + (2*8)], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_G + (3*8)], %g3
	ldx	[%sp + CC64FSZ + STKB + TF_G + (4*8)], %g4
	ldx	[%sp + CC64FSZ + STKB + TF_G + (5*8)], %g5
	ldx	[%sp + CC64FSZ + STKB + TF_G + (6*8)], %g6
	ldx	[%sp + CC64FSZ + STKB + TF_G + (7*8)], %g7
	/* Switch to alternate globals (so %g1-%g7 below are scratch) */
	ALTERNATE_GLOBALS_SUN4V
	ba	2f
	 nop
1:
#endif
	!! Make sure we have normal globals
	NORMAL_GLOBALS_SUN4U
	/* Restore normal globals from the trap frame */
	ldx	[%sp + CC64FSZ + STKB + TF_G + (1*8)], %g1
	ldx	[%sp + CC64FSZ + STKB + TF_G + (2*8)], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_G + (3*8)], %g3
	ldx	[%sp + CC64FSZ + STKB + TF_G + (4*8)], %g4
	ldx	[%sp + CC64FSZ + STKB + TF_G + (5*8)], %g5
	ldx	[%sp + CC64FSZ + STKB + TF_G + (6*8)], %g6
	ldx	[%sp + CC64FSZ + STKB + TF_G + (7*8)], %g7
	/* Switch to alternate globals */
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#else
	ALTERNATE_GLOBALS_SUN4U
#endif
2:

	/* Load outs (they become the ins of the trapped window on restore) */
	ldx	[%sp + CC64FSZ + STKB + TF_O + (0*8)], %i0
	ldx	[%sp + CC64FSZ + STKB + TF_O + (1*8)], %i1
	ldx	[%sp + CC64FSZ + STKB + TF_O + (2*8)], %i2
	ldx	[%sp + CC64FSZ + STKB + TF_O + (3*8)], %i3
	ldx	[%sp + CC64FSZ + STKB + TF_O + (4*8)], %i4
	ldx	[%sp + CC64FSZ + STKB + TF_O + (5*8)], %i5
	ldx	[%sp + CC64FSZ + STKB + TF_O + (6*8)], %i6
	ldx	[%sp + CC64FSZ + STKB + TF_O + (7*8)], %i7
	/* Now load trap registers into alternate globals */
	ld	[%sp + CC64FSZ + STKB + TF_Y], %g4
	ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1		! load new values
	wr	%g4, 0, %y
	ldx	[%sp + CC64FSZ + STKB + TF_PC], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_NPC], %g3

#ifdef NOTDEF_DEBUG
	ldub	[%sp + CC64FSZ + STKB + TF_PIL], %g5		! restore %pil
	wrpr	%g5, %pil				! DEBUG
#endif

	/* Returning to user mode or kernel mode? */
	btst	TSTATE_PRIV, %g1		! returning to userland?
	bz,pt	%icc, rft_user
	 sethi	%hi(CPUINFO_VA+CI_WANT_AST), %g7	! first instr of rft_user
   4958 
   4959 /*
   4960  * Return from trap, to kernel.
   4961  *
   4962  * We will assume, for the moment, that all kernel traps are properly stacked
   4963  * in the trap registers, so all we have to do is insert the (possibly modified)
   4964  * register values into the trap registers then do a retry.
   4965  *
   4966  */
rft_kernel:
	!! Push %g1/%g2/%g3 (tstate/pc/npc from the trap frame) into a fresh
	!! trap level so "retry" will resume the interrupted kernel code.
	rdpr	%tl, %g4			! Grab a set of trap registers
	inc	%g4
	wrpr	%g4, %g0, %tl
	wrpr	%g3, 0, %tnpc
	wrpr	%g2, 0, %tpc
	wrpr	%g1, 0, %tstate

	!! If no window can be restored, fill one by hand from the kernel
	!! stack so the "restore" below cannot trap.
	rdpr	%canrestore, %g2
	brnz	%g2, 1f
	 nop

	wr	%g0, ASI_NUCLEUS, %asi
	rdpr	%cwp, %g1
	dec	%g1
	wrpr	%g1, %cwp
#ifdef _LP64
	FILL	ldxa, %sp+BIAS, 8, %asi
#else
	FILL	lda, %sp, 4, %asi
#endif
	restored
	inc	%g1
	wrpr	%g1, %cwp
1:
	restore
	rdpr	%tstate, %g1			! Since we may have trapped our regs may be toast
	rdpr	%cwp, %g2
	andn	%g1, CWP, %g1
	wrpr	%g1, %g2, %tstate		! Put %cwp in %tstate
	CLRTT
#ifdef TRAPSTATS
	rdpr	%tl, %g2
	set	_C_LABEL(rftkcnt), %g1
	sllx	%g2, 2, %g2
	add	%g1, %g2, %g1
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
#endif
#if	0
	wrpr	%g0, 0, %cleanwin	! DEBUG
#endif
#if defined(DDB) && defined(MULTIPROCESSOR)
	!! Returning to the IPI pause loop: use "done" to skip the
	!! instruction at %tpc instead of re-executing it.
	set	sparc64_ipi_pause_trap_point, %g1
	rdpr	%tpc, %g2
	cmp	%g1, %g2
	bne,pt	%icc, 0f
	 nop
	done
0:
#endif
	retry
	NOTREACHED
   5021 /*
   5022  * Return from trap, to user.  Checks for scheduling trap (`ast') first;
   5023  * will re-enter trap() if set.  Note that we may have to switch from
   5024  * the interrupt stack to the kernel stack in this case.
   5025  *	%g1 = %tstate
   5026  *	%g2 = return %pc
   5027  *	%g3 = return %npc
   5028  * If returning to a valid window, just set psr and return.
   5029  */
	.data
rft_wcnt:	.word 0		! DEBUG: count of windows restored from the pcb
	.text

rft_user:
!	sethi	%hi(CPUINFO_VA+CI_WANT_AST), %g7	! (done above)
	lduw	[%g7 + %lo(CPUINFO_VA+CI_WANT_AST)], %g7! want AST trap?
	brnz,pn	%g7, softtrap			! yes, re-enter trap with type T_AST
	 mov	T_AST, %g4

#ifdef NOTDEF_DEBUG
	sethi	%hi(CPCB), %g4
	LDPTR	[%g4 + %lo(CPCB)], %g4
	ldub	[%g4 + PCB_NSAVED], %g4		! nsaved
	brz,pt	%g4, 2f		! Only print if nsaved <> 0
	 nop

	set	1f, %o0
	mov	%g4, %o1
	mov	%g2, %o2			! pc
	wr	%g0, ASI_DMMU, %asi		! restore the user context
	ldxa	[CTX_SECONDARY] %asi, %o3	! ctx
	GLOBTOLOC
	mov	%g3, %o5
	call	printf
	 mov	%i6, %o4			! sp
!	wrpr	%g0, PSTATE_INTR, %pstate		! Allow IRQ service
!	wrpr	%g0, PSTATE_KERN, %pstate		! DenyIRQ service
	LOCTOGLOB
1:
	.data
	.asciz	"rft_user: nsaved=%x pc=%d ctx=%x sp=%x npc=%p\n"
	_ALIGN
	.text
#endif

	/*
	 * NB: only need to do this after a cache miss
	 */
#ifdef TRAPSTATS
	set	_C_LABEL(rftucnt), %g6
	lduw	[%g6], %g7
	inc	%g7
	stw	%g7, [%g6]
#endif
	/*
	 * Now check to see if any regs are saved in the pcb and restore them.
	 *
	 * Here we need to undo the damage caused by switching to a kernel
	 * stack.
	 *
	 * We will use alternate globals %g4..%g7 because %g1..%g3 are used
	 * by the data fault trap handlers and we don't want possible conflict.
	 */

	sethi	%hi(CPCB), %g6
	rdpr	%otherwin, %g7			! restore register window controls
#ifdef DEBUG
	rdpr	%canrestore, %g5		! DEBUG
	tst	%g5				! DEBUG
	tnz	%icc, 1; nop			! DEBUG
!	mov	%g0, %g5			! There should be *NO* %canrestore
	add	%g7, %g5, %g7			! DEBUG
#endif
	wrpr	%g0, %g7, %canrestore		! user windows become restorable
	LDPTR	[%g6 + %lo(CPCB)], %g6
	wrpr	%g0, 0, %otherwin

	ldub	[%g6 + PCB_NSAVED], %g7		! Any saved reg windows?
	wrpr	%g0, WSTATE_USER, %wstate	! Need to know where our sp points

#ifdef DEBUG
	set	rft_wcnt, %g4	! Keep track of all the windows we restored
	stw	%g7, [%g4]
#endif

	brz,pt	%g7, 5f				! No saved reg wins
	 nop
	dec	%g7				! We can do this now or later.  Move to last entry

#ifdef DEBUG
	rdpr	%canrestore, %g4			! DEBUG Make sure we've restored everything
	brnz,a,pn	%g4, 0f				! DEBUG
	 sir						! DEBUG we should NOT have any usable windows here
0:							! DEBUG
	wrpr	%g0, 5, %tl
#endif
	rdpr	%otherwin, %g4
	sll	%g7, 7, %g5			! calculate ptr into rw64 array 8*16 == 128 or 7 bits
	brz,pt	%g4, 6f				! We should not have any user windows left
	 add	%g5, %g6, %g5

	!! Inconsistent state: report and panic.
	set	1f, %o0
	mov	%g7, %o1
	mov	%g4, %o2
	call	printf
	 wrpr	%g0, PSTATE_KERN, %pstate
	set	2f, %o0
	call	panic
	 nop
	NOTREACHED
	.data
1:	.asciz	"pcb_nsaved=%x and otherwin=%x\n"
2:	.asciz	"rft_user\n"
	_ALIGN
	.text
6:
3:
	!! Walk the pcb_rw array from the last entry down, loading each
	!! saved user window directly into a freshly-restored window.
	restored					! Load in the window
	restore						! This should not trap!
	ldx	[%g5 + PCB_RW + ( 0*8)], %l0		! Load the window from the pcb
	ldx	[%g5 + PCB_RW + ( 1*8)], %l1
	ldx	[%g5 + PCB_RW + ( 2*8)], %l2
	ldx	[%g5 + PCB_RW + ( 3*8)], %l3
	ldx	[%g5 + PCB_RW + ( 4*8)], %l4
	ldx	[%g5 + PCB_RW + ( 5*8)], %l5
	ldx	[%g5 + PCB_RW + ( 6*8)], %l6
	ldx	[%g5 + PCB_RW + ( 7*8)], %l7

	ldx	[%g5 + PCB_RW + ( 8*8)], %i0
	ldx	[%g5 + PCB_RW + ( 9*8)], %i1
	ldx	[%g5 + PCB_RW + (10*8)], %i2
	ldx	[%g5 + PCB_RW + (11*8)], %i3
	ldx	[%g5 + PCB_RW + (12*8)], %i4
	ldx	[%g5 + PCB_RW + (13*8)], %i5
	ldx	[%g5 + PCB_RW + (14*8)], %i6
	ldx	[%g5 + PCB_RW + (15*8)], %i7

#ifdef DEBUG
	stx	%g0, [%g5 + PCB_RW + (14*8)]		! DEBUG mark that we've saved this one
#endif

	cmp	%g5, %g6
	bgu,pt	%xcc, 3b				! Next one?
	 dec	8*16, %g5

	stb	%g0, [%g6 + PCB_NSAVED]			! Clear them out so we won't do this again
	GET_MAXCWP %g5
	add	%g5, %g7, %g4
	dec	1, %g5					! NWINDOWS-1-1
	wrpr	%g5, 0, %cansave
	wrpr	%g0, 0, %canrestore			! Make sure we have no freeloaders XXX
	wrpr	%g0, WSTATE_USER, %wstate		! Save things to user space
	mov	%g7, %g5				! We already did one restore
4:
	!! Rotate back to the starting window, bumping %cleanwin as we go
	!! so the saves below never take clean-window traps.
	rdpr	%canrestore, %g4
	inc	%g4
	deccc	%g5
	wrpr	%g4, 0, %cleanwin			! Make *sure* we don't trap to cleanwin
	bge,a,pt	%xcc, 4b				! return to starting regwin
	 save	%g0, %g0, %g0				! This may force a datafault

#ifdef DEBUG
	wrpr	%g0, 0, %tl
#endif
#ifdef TRAPSTATS
	set	_C_LABEL(rftuld), %g5
	lduw	[%g5], %g4
	inc	%g4
	stw	%g4, [%g5]
#endif
	!!
	!! We can't take any save faults in here 'cause they will never be serviced
	!!

#ifdef DEBUG
	sethi	%hi(CPCB), %g5
	LDPTR	[%g5 + %lo(CPCB)], %g5
	ldub	[%g5 + PCB_NSAVED], %g5		! Any saved reg windows?
	tst	%g5
	tnz	%icc, 1; nop			! Debugger if we still have saved windows
	bne,a	rft_user			! Try starting over again
	 sethi	%hi(CPUINFO_VA+CI_WANT_AST), %g7
#endif
	/*
	 * Set up our return trapframe so we can recover if we trap from here
	 * on in.
	 */
	wrpr	%g0, 1, %tl			! Set up the trap state
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc
	ba,pt	%icc, 6f
	 wrpr	%g1, %g0, %tstate

5:
	/*
	 * No saved windows in the pcb.
	 * Set up our return trapframe so we can recover if we trap from here
	 * on in.
	 */
	wrpr	%g0, 1, %tl			! Set up the trap state
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc
	wrpr	%g1, %g0, %tstate

	/*
	 * The restore instruction further down may cause the trap level
	 * to exceed the maximum trap level on sun4v, so a manual fill
	 * may be necessary.
	 */

#ifdef SUN4V
	sethi	%hi(cputyp), %g5
	ld	[%g5 + %lo(cputyp)], %g5
	cmp	%g5, CPU_SUN4V
	bne,pt	%icc, 1f
	 nop

	! Only manual fill if the restore instruction will cause a fill trap
	rdpr	%canrestore, %g5
	brnz	%g5, 1f
	 nop

	! Do a manual fill from the user stack (user-secondary ASI)
	wr	%g0, ASI_AIUS, %asi
	rdpr	%cwp, %g4
	dec	%g4
	wrpr	%g4, 0, %cwp
rft_user_fault_start:
	FILL	ldxa, %sp+BIAS, 8, %asi
rft_user_fault_end:
	restored
	inc	%g4
	wrpr	%g4, 0, %cwp
1:
#endif
	restore
6:
	rdpr	%canrestore, %g5
	wrpr	%g5, 0, %cleanwin			! Force cleanup of kernel windows

#ifdef NOTDEF_DEBUG
	ldx	[%g6 + CC64FSZ + STKB + TF_L + (0*8)], %g5! DEBUG -- get proper value for %l0
	cmp	%l0, %g5
	be,a,pt %icc, 1f
	 nop
!	sir			! WATCHDOG
	set	badregs, %g1	! Save the suspect regs
	stw	%l0, [%g1+(4*0)]
	stw	%l1, [%g1+(4*1)]
	stw	%l2, [%g1+(4*2)]
	stw	%l3, [%g1+(4*3)]
	stw	%l4, [%g1+(4*4)]
	stw	%l5, [%g1+(4*5)]
	stw	%l6, [%g1+(4*6)]
	stw	%l7, [%g1+(4*7)]
	stw	%i0, [%g1+(4*8)+(4*0)]
	stw	%i1, [%g1+(4*8)+(4*1)]
	stw	%i2, [%g1+(4*8)+(4*2)]
	stw	%i3, [%g1+(4*8)+(4*3)]
	stw	%i4, [%g1+(4*8)+(4*4)]
	stw	%i5, [%g1+(4*8)+(4*5)]
	stw	%i6, [%g1+(4*8)+(4*6)]
	stw	%i7, [%g1+(4*8)+(4*7)]
	save
	inc	%g7
	wrpr	%g7, 0, %otherwin
	wrpr	%g0, 0, %canrestore
	wrpr	%g0, WSTATE_KERN, %wstate	! Need to know where our sp points
	set	rft_wcnt, %g4	! Restore nsaved before trapping
	sethi	%hi(CPCB), %g6
	LDPTR	[%g6 + %lo(CPCB)], %g6
	lduw	[%g4], %g4
	stb	%g4, [%g6 + PCB_NSAVED]
	ta	1
	sir
	.data
badregs:
	.space	16*4
	.text
1:
#endif

	rdpr	%tstate, %g1
	rdpr	%cwp, %g7			! Find our cur window
	andn	%g1, CWP, %g1			! Clear it from %tstate
	wrpr	%g1, %g7, %tstate		! Set %tstate with %cwp
	mov	CTX_SECONDARY, %g1		! Restore the user context
	GET_MMU_CONTEXTID %g4, %g1, %g3
	mov	CTX_PRIMARY, %g2
	SET_MMU_CONTEXTID %g4, %g2, %g3
	sethi	%hi(KERNBASE), %g7		! Should not be needed due to retry
	membar	#Sync				! Should not be needed due to retry
	flush	%g7				! Should not be needed due to retry

	CLRTT
#ifdef TRAPSTATS
	set	_C_LABEL(rftudone), %g1
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
#endif
#ifdef DEBUG
	sethi	%hi(CPCB), %g5
	LDPTR	[%g5 + %lo(CPCB)], %g5
	ldub	[%g5 + PCB_NSAVED], %g5		! Any saved reg windows?
	tst	%g5
	tnz	%icc, 1; nop			! Debugger if we still have saved windows!
#endif
	wrpr	%g0, 0, %pil			! Enable all interrupts
	retry
   5330 
! exported end marker for kernel gdb (no code follows in the trap section)
	.globl	_C_LABEL(endtrapcode)
_C_LABEL(endtrapcode):
   5334 
   5335 /*
   5336  * Kernel entry point.
   5337  *
   5338  * The contract between bootloader and kernel is:
   5339  *
   5340  * %o0		OpenFirmware entry point, to keep Sun's updaters happy
   5341  * %o1		Address of boot information vector (see bootinfo.h)
   5342  * %o2		Length of the vector, in bytes
   5343  * %o3		OpenFirmware entry point, to mimic Sun bootloader behavior
   5344  * %o4		OpenFirmware, to meet earlier NetBSD kernels expectations
   5345  */
	.align	8
start:
dostart:
	/*
	 * Startup.
	 *
	 * The Sun FCODE bootloader is nice and loads us where we want
	 * to be.  We have a full set of mappings already set up for us.
	 *
	 * I think we end up having an entire 16M allocated to us.
	 *
	 * We enter with the prom entry vector in %o0, dvec in %o1,
	 * and the bootops vector in %o2.
	 *
	 * All we need to do is:
	 *
	 *	1:	Save the prom vector
	 *
	 *	2:	Create a decent stack for ourselves
	 *
	 *	3:	Install the permanent 4MB kernel mapping
	 *
	 *	4:	Call the C language initialization code
	 *
	 */

	/*
	 * Set the psr into a known state:
	 * Set supervisor mode, interrupt level >= 13, traps enabled
	 */
	wrpr	%g0, 13, %pil
	wrpr	%g0, PSTATE_INTR|PSTATE_PEF, %pstate
	wr	%g0, FPRS_FEF, %fprs		! Turn on FPU

	/*
	 * Step 2: Set up a v8-like stack if we need to.
	 * An odd %sp means the stack is already 64-bit biased.
	 */

#ifdef _LP64
	btst	1, %sp
	bnz,pt	%icc, 0f
	 nop
	add	%sp, -BIAS, %sp			! apply the 64-bit stack bias
#else
	btst	1, %sp
	bz,pt	%icc, 0f
	 nop
	add	%sp, BIAS, %sp			! remove the bias for a v8 stack
#endif
0:

	call	_C_LABEL(bootstrap)
	 clr	%g4				! Clear data segment pointer
   5399 
   5400 /*
   5401  * Initialize the boot CPU.  Basically:
   5402  *
   5403  *	Locate the cpu_info structure for this CPU.
   5404  *	Establish a locked mapping for interrupt stack.
   5405  *	Switch to the initial stack.
   5406  *	Call the routine passed in in cpu_info->ci_spinup
   5407  */
   5408 
   5409 #ifdef NO_VCACHE
   5410 #define	SUN4U_TTE_DATABITS	SUN4U_TTE_L|SUN4U_TTE_CP|SUN4U_TTE_P|SUN4U_TTE_W
   5411 #else
   5412 #define	SUN4U_TTE_DATABITS	SUN4U_TTE_L|SUN4U_TTE_CP|SUN4U_TTE_CV|SUN4U_TTE_P|SUN4U_TTE_W
   5413 #endif
   5414 
   5415 
ENTRY_NOPROFILE(cpu_initialize)	/* for cosmetic reasons - nicer backtrace */

	/* Cache the cputyp in %l6 for later use below */
	sethi	%hi(cputyp), %l6
	ld	[%l6 + %lo(cputyp)], %l6

	/*
	 * Step 5: is no more.
	 */

	/*
	 * Step 6: hunt through cpus list and find the one that matches our cpuid
	 */

	call	_C_LABEL(cpu_myid)	! Retrieve cpuid in %o0
	 mov	%g0, %o0

	sethi	%hi(_C_LABEL(cpus)), %l1
	LDPTR	[%l1 + %lo(_C_LABEL(cpus))], %l1
0:
	ld	[%l1 + CI_CPUID], %l3		! Load CPUID
	cmp	%l3, %o0			! Does it match?
	bne,a,pt	%icc, 0b		! no
	 LDPTR	[%l1 + CI_NEXT], %l1		! Load next cpu_info pointer
						! (assumes our cpuid is on the list)

	/*
	 * Get pointer to our cpu_info struct
	 */
	mov	%l1, %l7			! save cpu_info pointer
	ldx	[%l1 + CI_PADDR], %l1		! Load the interrupt stack's PA
#ifdef SUN4V
	cmp	%l6, CPU_SUN4V
	bne,pt	%icc, 3f
	 nop

	/* sun4v */
	call	_C_LABEL(pmap_setup_intstack_sun4v)	! Call nice C function for mapping INTSTACK
	 mov	%l1, %o0
	ba	4f
	 nop
3:
#endif
	/* sun4u: build a locked TTE for the interrupt stack by hand */
	sethi	%hi(0xa0000000), %l2		! V=1|SZ=01|NFO=0|IE=0
	sllx	%l2, 32, %l2			! Shift it into place

	mov	-1, %l3				! Create a nice mask
	sllx	%l3, 43, %l4			! Mask off high bits
	or	%l4, 0xfff, %l4			! We can just load this in 12 (of 13) bits

	andn	%l1, %l4, %l1			! Mask the phys page number

	or	%l2, %l1, %l1			! Now take care of the high bits
	or	%l1, SUN4U_TTE_DATABITS, %l2	! And low bits:	L=1|CP=1|CV=?|E=0|P=1|W=1|G=0

	!!
	!!  Now, map in the interrupt stack as context==0
	!!
	set	TLB_TAG_ACCESS, %l5
	set	INTSTACK, %l0
	stxa	%l0, [%l5] ASI_DMMU		! Make DMMU point to it
	stxa	%l2, [%g0] ASI_DMMU_DATA_IN	! Store it
	membar	#Sync
4:

	!! Setup kernel stack (we rely on curlwp on this cpu
	!! being lwp0 here and its uarea is mapped special
	!! and already accessible here)
	flushw
	LDPTR	[%l7 + CI_CPCB], %l0		! load PCB/uarea pointer
	set	2*USPACE - TF_SIZE - CC64FSZ, %l1
 	add	%l1, %l0, %l0
#ifdef _LP64
	andn	%l0, 0x0f, %l0			! Needs to be 16-byte aligned
	sub	%l0, BIAS, %l0			! and biased
#endif
	mov	%l0, %sp
	flushw

#ifdef DEBUG
	set	_C_LABEL(pmapdebug), %o1
	ld	[%o1], %o1
	sethi	%hi(0x40000), %o2
	btst	%o2, %o1
	bz	0f
						! (delay slot: the set below
						! executes either way; harmless)
	set	1f, %o0		! Debug printf
	call	_C_LABEL(prom_printf)
	 nop
	.data
1:
	.asciz	"Setting trap base...\n"
	_ALIGN
	.text
0:
#endif
	/*
	 * Step 7: change the trap base register, and install our TSB pointers
	 */

	/*
	 * install our TSB pointers
	 */

#ifdef SUN4V
	cmp	%l6, CPU_SUN4V
	bne,pt	%icc, 5f
	 nop

	/* sun4v */
	LDPTR	[%l7 + CI_TSB_DESC], %o0
	call	_C_LABEL(pmap_setup_tsb_sun4v)
	 nop
	ba	1f
	 nop
5:
#endif
	/* sun4u: merge TSB base addresses with the tsbsize field */
	sethi	%hi(_C_LABEL(tsbsize)), %l2
	sethi	%hi(0x1fff), %l3
	sethi	%hi(TSB), %l4
	LDPTR	[%l7 + CI_TSB_DMMU], %l0
	LDPTR	[%l7 + CI_TSB_IMMU], %l1
	ld	[%l2 + %lo(_C_LABEL(tsbsize))], %l2
	or	%l3, %lo(0x1fff), %l3
	or	%l4, %lo(TSB), %l4

	andn	%l0, %l3, %l0			! Mask off size and split bits
	or	%l0, %l2, %l0			! Make a TSB pointer
	stxa	%l0, [%l4] ASI_DMMU		! Install data TSB pointer

	andn	%l1, %l3, %l1			! Mask off size and split bits
	or	%l1, %l2, %l1			! Make a TSB pointer
	stxa	%l1, [%l4] ASI_IMMU		! Install instruction TSB pointer
	membar	#Sync
	set	1f, %l1
	flush	%l1
1:

	/* set trap table */
#ifdef SUN4V
	cmp	%l6, CPU_SUN4V
	bne,pt	%icc, 6f
	 nop
	/* sun4v */
	set	_C_LABEL(trapbase_sun4v), %l1
	GET_MMFSA %o1
	call	_C_LABEL(prom_set_trap_table_sun4v)	! Now we should be running 100% from our handlers
	 mov	%l1, %o0

	ba	7f
	 nop
6:
#endif
	/* sun4u */
	set	_C_LABEL(trapbase), %l1
	call	_C_LABEL(prom_set_trap_table_sun4u)	! Now we should be running 100% from our handlers
	 mov	%l1, %o0
7:
	wrpr	%l1, 0, %tba			! Make sure the PROM didn't foul up.

	/*
	 * Switch to the kernel mode and run away.
	 */
	wrpr	%g0, WSTATE_KERN, %wstate

#ifdef DEBUG
	wrpr	%g0, 1, %tl			! Debug -- start at tl==3 so we'll watchdog
						! NOTE(review): comment says tl==3
						! but the code writes 1 -- confirm
	wrpr	%g0, 0x1ff, %tt			! Debug -- clear out unused trap regs
	wrpr	%g0, 0, %tpc
	wrpr	%g0, 0, %tnpc
	wrpr	%g0, 0, %tstate
	wrpr	%g0, 0, %tl
#endif

#ifdef DEBUG
	set	_C_LABEL(pmapdebug), %o1
	ld	[%o1], %o1
	sethi	%hi(0x40000), %o2
	btst	%o2, %o1
	bz	0f

	LDPTR	[%l7 + CI_SPINUP], %o1
	set	1f, %o0		! Debug printf
	call	_C_LABEL(prom_printf)
	 mov	%sp, %o2

	.data
1:
	.asciz	"Calling startup routine %p with stack at %p...\n"
	_ALIGN
	.text
0:
#endif
	/*
	 * Call our startup routine.
	 */

	LDPTR	[%l7 + CI_SPINUP], %o1

	call	%o1				! Call routine
	 clr	%o0				! our frame arg is ignored

	set	1f, %o0				! Main should never come back here
	call	_C_LABEL(panic)
	 nop
	.data
1:
	.asciz	"main() returned\n"
	_ALIGN
	.text
   5627 
   5628 	.align 8
ENTRY(get_romtba)
	! u_long get_romtba(void): return the current %tba (trap base
	! address) in %o0.  Leaf routine; clobbers nothing else.
	rdpr	%tba, %o0
	retl
	 nop
   5632 
ENTRY(setcputyp)
	! void setcputyp(int t): store t into the global 'cputyp'.
	! The store rides in the retl delay slot; %o1 is scratch.
	sethi	%hi(cputyp), %o1	! Trash %o1 assuming this is ok
	retl
	 st	%o0, [%o1 + %lo(cputyp)]
   5638 
   5639 #ifdef MULTIPROCESSOR
   5640 	/*
   5641 	 * cpu_mp_startup is called with:
   5642 	 *
   5643 	 *	%g2 = cpu_args
   5644 	 */
ENTRY(cpu_mp_startup)
	!! Secondary-CPU entry: reset window/trap state before anything else.
	mov	1, %o0
	sllx	%o0, 63, %o0
	wr	%o0, TICK_CMPR	! XXXXXXX clear and disable %tick_cmpr for now
	wrpr    %g0, 0, %cleanwin
	wrpr	%g0, 0, %tl			! Make sure we're not in NUCLEUS mode
	wrpr	%g0, WSTATE_KERN, %wstate
	wrpr	%g0, PSTATE_KERN, %pstate
	flushw

	/* Cache the cputyp in %l6 for later use below */
	sethi	%hi(cputyp), %l6
	ld	[%l6 + %lo(cputyp)], %l6

	/*
	 * Get pointer to our cpu_info struct
	 */
	ldx	[%g2 + CBA_CPUINFO], %l1	! Load the interrupt stack's PA

#ifdef SUN4V
	cmp	%l6, CPU_SUN4V
	bne,pt	%icc, 3f
	 nop

	/* sun4v: build the TTE and map it via the hypervisor fast trap */

	sethi	%hi(0x80000000), %l2		! V=1|NFO=0|SW=0
	sllx	%l2, 32, %l2			! Shift it into place
	mov	-1, %l3				! Create a nice mask
	sllx	%l3, 56, %l4			! Mask off high 8 bits
	or	%l4, 0xfff, %l4			! We can just load this in 12 (of 13) bits
	andn	%l1, %l4, %l1			! Mask the phys page number into RA
	or	%l2, %l1, %l1			! Now take care of the 8 high bits V|NFO|SW
	or	%l1, 0x0741, %l2		! And low 13 bits IE=0|E=0|CP=1|CV=1|P=1|
						!		  X=0|W=1|SW=00|SZ=0001

	/*
	 *  Now, map in the interrupt stack & cpu_info as context==0
	 */

	set	INTSTACK, %o0			! vaddr
	clr	%o1				! reserved
	mov	%l2, %o2			! tte
	mov	MAP_DTLB, %o3			! flags
	mov	FT_MMU_MAP_PERM_ADDR, %o5	! hv fast trap function
	ta	ST_FAST_TRAP
	cmp	%o0, 0
	be,pt	%icc, 5f
	 nop
	sir					! crash if mapping fails
5:

	/*
	 * Set 0 as primary context XXX
	 */

	mov	CTX_PRIMARY, %o0
	SET_MMU_CONTEXTID_SUN4V %g0, %o0

	ba	4f
	 nop
3:
#endif

	/* sun4u: build a locked TTE by hand and stuff it into the DTLB */

	sethi	%hi(0xa0000000), %l2		! V=1|SZ=01|NFO=0|IE=0
	sllx	%l2, 32, %l2			! Shift it into place
	mov	-1, %l3				! Create a nice mask
	sllx	%l3, 43, %l4			! Mask off high bits
	or	%l4, 0xfff, %l4			! We can just load this in 12 (of 13) bits
	andn	%l1, %l4, %l1			! Mask the phys page number
	or	%l2, %l1, %l1			! Now take care of the high bits
	or	%l1, SUN4U_TTE_DATABITS, %l2	! And low bits:	L=1|CP=1|CV=?|E=0|P=1|W=1|G=0

	/*
	 *  Now, map in the interrupt stack & cpu_info as context==0
	 */

	set	TLB_TAG_ACCESS, %l5
	set	INTSTACK, %l0
	stxa	%l0, [%l5] ASI_DMMU		! Make DMMU point to it
	stxa	%l2, [%g0] ASI_DMMU_DATA_IN	! Store it

	/*
	 * Set 0 as primary context XXX
	 */

	mov	CTX_PRIMARY, %o0
	SET_MMU_CONTEXTID_SUN4U %g0, %o0

4:
	membar	#Sync

	/*
	 * Temporarily use the interrupt stack
	 */
#ifdef _LP64
	! NOTE(review): '& ~0x0f - BIAS' relies on assembler operator
	! precedence; verify it aligns to 16 before applying the bias
	set	((EINTSTACK - CC64FSZ - TF_SIZE)) & ~0x0f - BIAS, %sp
#else
	set	EINTSTACK - CC64FSZ - TF_SIZE, %sp
#endif
	set	1, %fp
	clr	%i7

#ifdef SUN4V
	cmp	%l6, CPU_SUN4V
	bne,pt	%icc, 2f
	 nop

	/* sun4v */

	/*
	 * install our TSB pointers
	 */

	set	CPUINFO_VA, %o0
	LDPTR	[%o0 + CI_TSB_DESC], %o0
	call	_C_LABEL(pmap_setup_tsb_sun4v)
	 nop

	/* set trap table */

	set	_C_LABEL(trapbase_sun4v), %l1
	GET_MMFSA %o1
	call	_C_LABEL(prom_set_trap_table_sun4v)
	 mov	%l1, %o0

	! Now we should be running 100% from our handlers
	ba	3f
	 nop
2:
#endif
	/* sun4u */

	/*
	 * install our TSB pointers
	 */

	sethi	%hi(CPUINFO_VA+CI_TSB_DMMU), %l0
	sethi	%hi(CPUINFO_VA+CI_TSB_IMMU), %l1
	sethi	%hi(_C_LABEL(tsbsize)), %l2
	sethi	%hi(0x1fff), %l3
	sethi	%hi(TSB), %l4
	LDPTR	[%l0 + %lo(CPUINFO_VA+CI_TSB_DMMU)], %l0
	LDPTR	[%l1 + %lo(CPUINFO_VA+CI_TSB_IMMU)], %l1
	ld	[%l2 + %lo(_C_LABEL(tsbsize))], %l2
	or	%l3, %lo(0x1fff), %l3
	or	%l4, %lo(TSB), %l4

	andn	%l0, %l3, %l0			! Mask off size and split bits
	or	%l0, %l2, %l0			! Make a TSB pointer
	stxa	%l0, [%l4] ASI_DMMU		! Install data TSB pointer
	membar	#Sync

	andn	%l1, %l3, %l1			! Mask off size and split bits
	or	%l1, %l2, %l1			! Make a TSB pointer
	stxa	%l1, [%l4] ASI_IMMU		! Install instruction TSB pointer
	membar	#Sync
	set	1f, %o0
	flush	%o0
1:

	/* set trap table */

	set	_C_LABEL(trapbase), %l1
	call	_C_LABEL(prom_set_trap_table_sun4u)
	 mov	%l1, %o0
3:
	wrpr	%l1, 0, %tba			! Make sure the PROM didn't
						! foul up.
	/*
	 * Use this CPUs idlelwp's uarea stack
	 */
	sethi	%hi(CPUINFO_VA+CI_IDLELWP), %l0
	LDPTR	[%l0 + %lo(CPUINFO_VA+CI_IDLELWP)], %l0
	set	USPACE - TF_SIZE - CC64FSZ, %l1
	LDPTR	[%l0 + L_PCB], %l0
	add	%l0, %l1, %l0
#ifdef _LP64
	andn	%l0, 0x0f, %l0			! Needs to be 16-byte aligned
	sub	%l0, BIAS, %l0			! and biased
#endif
	mov	%l0, %sp
	flushw

	/*
	 * Switch to the kernel mode and run away.
	 */
	wrpr	%g0, 13, %pil
	wrpr	%g0, PSTATE_INTR|PSTATE_PEF, %pstate
	wr	%g0, FPRS_FEF, %fprs			! Turn on FPU

	call	_C_LABEL(cpu_hatch)
	 clr %g4

	b	_C_LABEL(idle_loop)
	 clr	%o0

	NOTREACHED

	.globl cpu_mp_startup_end
cpu_mp_startup_end:
   5848 #endif
   5849 
   5850 /*
   5851  * openfirmware(cell* param);
   5852  *
   5853  * OpenFirmware entry point
   5854  *
   5855  * If we're running in 32-bit mode we need to convert to a 64-bit stack
   5856  * and 64-bit cells.  The cells we'll allocate off the stack for simplicity.
   5857  */
   5858 	.align 8
   5859 ENTRY(openfirmware)
   5860 	sethi	%hi(romp), %o4
   5861 	andcc	%sp, 1, %g0
   5862 	bz,pt	%icc, 1f
   5863 	 LDPTR	[%o4+%lo(romp)], %o4		! v9 stack, just load the addr and callit
   5864 	save	%sp, -CC64FSZ, %sp
   5865 	rdpr	%pil, %i2
   5866 	mov	PIL_HIGH, %i3
   5867 	cmp	%i3, %i2
   5868 	movle	%icc, %i2, %i3
   5869 	wrpr	%g0, %i3, %pil
   5870 	mov	%i0, %o0
   5871 	mov	%g1, %l1
   5872 	mov	%g2, %l2
   5873 	mov	%g3, %l3
   5874 	mov	%g4, %l4
   5875 	mov	%g5, %l5
   5876 	mov	%g6, %l6
   5877 	mov	%g7, %l7
   5878 	rdpr	%pstate, %l0
   5879 	jmpl	%i4, %o7
   5880 #if !defined(_LP64)
   5881 	 wrpr	%g0, PSTATE_PROM, %pstate
   5882 #else
   5883 	 wrpr	%g0, PSTATE_PROM|PSTATE_IE, %pstate
   5884 #endif
   5885 	wrpr	%l0, %g0, %pstate
   5886 	mov	%l1, %g1
   5887 	mov	%l2, %g2
   5888 	mov	%l3, %g3
   5889 	mov	%l4, %g4
   5890 	mov	%l5, %g5
   5891 	mov	%l6, %g6
   5892 	mov	%l7, %g7
   5893 	wrpr	%i2, 0, %pil
   5894 	ret
   5895 	 restore	%o0, %g0, %o0
   5896 
   5897 1:	! v8 -- need to screw with stack & params
   5898 #ifdef NOTDEF_DEBUG
   5899 	mov	%o7, %o5
   5900 	call	globreg_check
   5901 	 nop
   5902 	mov	%o5, %o7
   5903 #endif
   5904 	save	%sp, -CC64FSZ, %sp		! Get a new 64-bit stack frame
   5905 	add	%sp, -BIAS, %sp
   5906 	rdpr	%pstate, %l0
   5907 	srl	%sp, 0, %sp
   5908 	rdpr	%pil, %i2	! s = splx(level)
   5909 	mov	%i0, %o0
   5910 	mov	PIL_HIGH, %i3
   5911 	mov	%g1, %l1
   5912 	mov	%g2, %l2
   5913 	cmp	%i3, %i2
   5914 	mov	%g3, %l3
   5915 	mov	%g4, %l4
   5916 	mov	%g5, %l5
   5917 	movle	%icc, %i2, %i3
   5918 	mov	%g6, %l6
   5919 	mov	%g7, %l7
   5920 	wrpr	%i3, %g0, %pil
   5921 	jmpl	%i4, %o7
   5922 	! Enable 64-bit addresses for the prom
   5923 #if defined(_LP64)
   5924 	 wrpr	%g0, PSTATE_PROM, %pstate
   5925 #else
   5926 	 wrpr	%g0, PSTATE_PROM|PSTATE_IE, %pstate
   5927 #endif
   5928 	wrpr	%l0, 0, %pstate
   5929 	wrpr	%i2, 0, %pil
   5930 	mov	%l1, %g1
   5931 	mov	%l2, %g2
   5932 	mov	%l3, %g3
   5933 	mov	%l4, %g4
   5934 	mov	%l5, %g5
   5935 	mov	%l6, %g6
   5936 	mov	%l7, %g7
   5937 	ret
   5938 	 restore	%o0, %g0, %o0
   5939 
   5940 /*
   5941  * void ofw_exit(cell_t args[])
   5942  */
   5943 ENTRY(openfirmware_exit)
   5944 	STACKFRAME(-CC64FSZ)
   5945 	flushw					! Flush register windows
   5946 
   5947 	wrpr	%g0, PIL_HIGH, %pil		! Disable interrupts
   5948 	sethi	%hi(romtba), %l5
   5949 	LDPTR	[%l5 + %lo(romtba)], %l5
   5950 	wrpr	%l5, 0, %tba			! restore the ofw trap table
   5951 
   5952 	/* Arrange locked kernel stack as PROM stack */
   5953 	set	EINTSTACK  - CC64FSZ, %l5
   5954 
   5955 	andn	%l5, 0x0f, %l5			! Needs to be 16-byte aligned
   5956 	sub	%l5, BIAS, %l5			! and biased
   5957 	mov	%l5, %sp
   5958 	flushw
   5959 
   5960 	sethi	%hi(romp), %l6
   5961 	LDPTR	[%l6 + %lo(romp)], %l6
   5962 
   5963 	mov     CTX_PRIMARY, %l3		! set context 0
   5964 	stxa    %g0, [%l3] ASI_DMMU
   5965 	membar	#Sync
   5966 
   5967 	wrpr	%g0, PSTATE_PROM, %pstate	! Disable interrupts
   5968 						! and enable 64-bit addresses
   5969 	wrpr	%g0, 0, %tl			! force trap level 0
   5970 	call	%l6
   5971 	 mov	%i0, %o0
   5972 	NOTREACHED
   5973 
   5974 /*
   5975  * sp_tlb_flush_pte_us(vaddr_t va, int ctx)
   5976  * sp_tlb_flush_pte_usiii(vaddr_t va, int ctx)
   5977  *
   5978  * Flush tte from both IMMU and DMMU.
   5979  *
   5980  * This uses %o0-%o5
   5981  */
   5982 	.align 8
   5983 ENTRY(sp_tlb_flush_pte_us)
   5984 #ifdef DEBUG
   5985 	set	pmapdebug, %o3
   5986 	lduw	[%o3], %o3
   5987 !	movrz	%o1, -1, %o3				! Print on either pmapdebug & PDB_DEMAP or ctx == 0
   5988 	btst	0x0020, %o3
   5989 	bz,pt	%icc, 2f
   5990 	 nop
   5991 	save	%sp, -CC64FSZ, %sp
   5992 	set	1f, %o0
   5993 	mov	%i1, %o1
   5994 	andn	%i0, 0xfff, %o3
   5995 	or	%o3, 0x010, %o3
   5996 	call	_C_LABEL(printf)
   5997 	 mov	%i0, %o2
   5998 	restore
   5999 	.data
   6000 1:
   6001 	.asciz	"sp_tlb_flush_pte_us:	demap ctx=%x va=%08x res=%x\n"
   6002 	_ALIGN
   6003 	.text
   6004 2:
   6005 #endif
   6006 #ifdef MULTIPROCESSOR
   6007 	rdpr	%pstate, %o3
   6008 	andn	%o3, PSTATE_IE, %o4			! disable interrupts
   6009 	wrpr	%o4, 0, %pstate
   6010 #endif
   6011 	srlx	%o0, PG_SHIFT4U, %o0			! drop unused va bits
   6012 	mov	CTX_SECONDARY, %o2
   6013 	sllx	%o0, PG_SHIFT4U, %o0
   6014 	ldxa	[%o2] ASI_DMMU, %o5			! Save secondary context
   6015 	sethi	%hi(KERNBASE), %o4
   6016 	membar	#LoadStore
   6017 	stxa	%o1, [%o2] ASI_DMMU			! Insert context to demap
   6018 	membar	#Sync
   6019 	or	%o0, DEMAP_PAGE_SECONDARY, %o0		! Demap page from secondary context only
   6020 	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
   6021 	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! to both TLBs
   6022 #ifdef TLB_FLUSH_LOWVA
   6023 	srl	%o0, 0, %o0				! and make sure it's both 32- and 64-bit entries
   6024 	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
   6025 	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! Do the demap
   6026 #endif
   6027 	flush	%o4
   6028 	stxa	%o5, [%o2] ASI_DMMU			! Restore secondary context
   6029 	membar	#Sync
   6030 	retl
   6031 #ifdef MULTIPROCESSOR
   6032 	 wrpr	%o3, %pstate				! restore interrupts
   6033 #else
   6034 	 nop
   6035 #endif
   6036 
/*
 * Cheetah (UltraSPARC III) variant of sp_tlb_flush_pte: cheetahs cannot
 * flush the IMMU via the secondary context, so the primary context
 * register is borrowed instead, with %tl raised so we keep running in
 * the NUCLEUS context meanwhile.
 */
ENTRY(sp_tlb_flush_pte_usiii)
#ifdef DEBUG
	set	pmapdebug, %o3
	lduw	[%o3], %o3
!	movrz	%o1, -1, %o3				! Print on either pmapdebug & PDB_DEMAP or ctx == 0
	btst	0x0020, %o3				! PDB_DEMAP set?
	bz,pt	%icc, 2f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	1f, %o0
	mov	%i1, %o1
	andn	%i0, 0xfff, %o3
	or	%o3, 0x010, %o3
	call	_C_LABEL(printf)
	 mov	%i0, %o2
	restore
	.data
1:
	.asciz	"sp_tlb_flush_pte_usiii:	demap ctx=%x va=%08x res=%x\n"
	_ALIGN
	.text
2:
#endif
	! %o0 = VA [in]
	! %o1 = ctx value [in] / KERNBASE
	! %o2 = CTX_PRIMARY
	! %o3 = saved %tl
	! %o4 = saved %pstate
	! %o5 = saved primary ctx

	! Need this for UP as well
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE, %o3			! disable interrupts
	wrpr	%o3, 0, %pstate

	!!
	!! Cheetahs do not support flushing the IMMU from secondary context
	!!
	rdpr	%tl, %o3
	mov	CTX_PRIMARY, %o2
	brnz,pt	%o3, 1f					! already at TL > 0?
	 andn	%o0, 0xfff, %o0				! drop unused va bits
	wrpr	%g0, 1, %tl				! Make sure we're NUCLEUS
1:
	ldxa	[%o2] ASI_DMMU, %o5			! Save primary context
	membar	#LoadStore
	stxa	%o1, [%o2] ASI_DMMU			! Insert context to demap
	sethi	%hi(KERNBASE), %o1
	membar	#Sync
	or	%o0, DEMAP_PAGE_PRIMARY, %o0
	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
	membar	#Sync
	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! to both TLBs
	membar	#Sync
#ifdef TLB_FLUSH_LOWVA
	srl	%o0, 0, %o0				! and make sure it's both 32- and 64-bit entries
	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
	membar	#Sync
	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! Do the demap
	membar	#Sync
#endif
	flush	%o1
	stxa	%o5, [%o2] ASI_DMMU			! Restore primary context
	membar	#Sync
	brnz,pt	%o3, 1f					! entered at TL > 0? leave %tl alone
	 flush	%o1
	wrpr	%g0, %o3, %tl				! Return to kernel mode.
1:
	retl
	 wrpr	%o4, %pstate				! restore interrupts
   6107 
   6108 
   6109 /*
   6110  * sp_tlb_flush_all_us(void)
   6111  * sp_tlb_flush_all_usiii(void)
   6112  *
   6113  * Flush all user TLB entries from both IMMU and DMMU.
   6114  * We have both UltraSPARC I+II, and UltraSPARC >=III versions.
   6115  */
   6116 	.align 8
   6117 ENTRY(sp_tlb_flush_all_us)
   6118 	rdpr	%pstate, %o3
   6119 	andn	%o3, PSTATE_IE, %o4			! disable interrupts
   6120 	wrpr	%o4, 0, %pstate
   6121 	set	((TLB_SIZE_SPITFIRE-1) * 8), %o0
   6122 	set	CTX_SECONDARY, %o4
   6123 	ldxa	[%o4] ASI_DMMU, %o4			! save secondary context
   6124 	set	CTX_MASK, %o5
   6125 	membar	#Sync
   6126 
   6127 	! %o0 = loop counter
   6128 	! %o1 = ctx value
   6129 	! %o2 = TLB tag value
   6130 	! %o3 = saved %pstate
   6131 	! %o4 = saved primary ctx
   6132 	! %o5 = CTX_MASK
   6133 	! %xx = saved %tl
   6134 
   6135 0:
   6136 	ldxa	[%o0] ASI_DMMU_TLB_TAG, %o2		! fetch the TLB tag
   6137 	andcc	%o2, %o5, %o1				! context 0?
   6138 	bz,pt	%xcc, 1f				! if so, skip
   6139 	 mov	CTX_SECONDARY, %o2
   6140 
   6141 	stxa	%o1, [%o2] ASI_DMMU			! set the context
   6142 	set	DEMAP_CTX_SECONDARY, %o2
   6143 	membar	#Sync
   6144 	stxa	%o2, [%o2] ASI_DMMU_DEMAP		! do the demap
   6145 	membar	#Sync
   6146 
   6147 1:
   6148 	dec	8, %o0
   6149 	brgz,pt %o0, 0b					! loop over all entries
   6150 	 nop
   6151 
   6152 /*
   6153  * now do the IMMU
   6154  */
   6155 
   6156 	set	((TLB_SIZE_SPITFIRE-1) * 8), %o0
   6157 
   6158 0:
   6159 	ldxa	[%o0] ASI_IMMU_TLB_TAG, %o2		! fetch the TLB tag
   6160 	andcc	%o2, %o5, %o1				! context 0?
   6161 	bz,pt	%xcc, 1f				! if so, skip
   6162 	 mov	CTX_SECONDARY, %o2
   6163 
   6164 	stxa	%o1, [%o2] ASI_DMMU			! set the context
   6165 	set	DEMAP_CTX_SECONDARY, %o2
   6166 	membar	#Sync
   6167 	stxa	%o2, [%o2] ASI_IMMU_DEMAP		! do the demap
   6168 	membar	#Sync
   6169 
   6170 1:
   6171 	dec	8, %o0
   6172 	brgz,pt %o0, 0b					! loop over all entries
   6173 	 nop
   6174 
   6175 	set	CTX_SECONDARY, %o2
   6176 	stxa	%o4, [%o2] ASI_DMMU			! restore secondary ctx
   6177 	sethi	%hi(KERNBASE), %o4
   6178 	membar	#Sync
   6179 	flush	%o4
   6180 	retl
   6181 	 wrpr	%o3, %pstate
   6182 
	.align 8
/*
 * Cheetah variant: a single DEMAP_ALL operation clears all non-locked
 * entries from both MMUs, but it must be issued at %tl > 0.
 */
ENTRY(sp_tlb_flush_all_usiii)
	rdpr	%tl, %o5				! save %tl so we can
	brnz,pt	%o5, 1f					! restore it; raise it
	 set	DEMAP_ALL, %o2				! if we're at TL 0
	wrpr	1, %tl
1:
	rdpr	%pstate, %o3
	andn	%o3, PSTATE_IE, %o4			! disable interrupts
	wrpr	%o4, 0, %pstate

	stxa	%o2, [%o2] ASI_IMMU_DEMAP		! demap-all in both MMUs
	membar	#Sync
	stxa	%o2, [%o2] ASI_DMMU_DEMAP

	sethi	%hi(KERNBASE), %o4
	membar	#Sync
	flush	%o4

	wrpr	%o5, %tl				! restore the original %tl
	retl
	 wrpr	%o3, %pstate				! restore interrupts
   6205 
   6206 /*
   6207  * sp_blast_dcache(int dcache_size, int dcache_line_size)
   6208  * sp_blast_dcache_disabled(int dcache_size, int dcache_line_size)
   6209  *
   6210  * Clear out all of D$ regardless of contents.  The latter one also
   6211  * disables the D$ while doing so.
   6212  */
   6213 	.align 8
   6214 ENTRY(sp_blast_dcache)
   6215 /*
   6216  * We turn off interrupts for the duration to prevent RED exceptions.
   6217  */
   6218 #ifdef PROF
   6219 	save	%sp, -CC64FSZ, %sp
   6220 #endif
   6221 
   6222 	rdpr	%pstate, %o3
   6223 	sub	%o0, %o1, %o0
   6224 	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
   6225 	wrpr	%o4, 0, %pstate
   6226 1:
   6227 	stxa	%g0, [%o0] ASI_DCACHE_TAG
   6228 	membar	#Sync
   6229 	brnz,pt	%o0, 1b
   6230 	 sub	%o0, %o1, %o0
   6231 
   6232 	sethi	%hi(KERNBASE), %o2
   6233 	flush	%o2
   6234 	membar	#Sync
   6235 #ifdef PROF
   6236 	wrpr	%o3, %pstate
   6237 	ret
   6238 	 restore
   6239 #else
   6240 	retl
   6241 	 wrpr	%o3, %pstate
   6242 #endif
   6243 
	.align 8
/*
 * As sp_blast_dcache, but turns the D$ off (via the MCCR) around the
 * tag-zeroing walk and restores the previous enable state afterwards.
 */
ENTRY(sp_blast_dcache_disabled)
/*
 * We turn off interrupts for the duration to prevent RED exceptions.
 */
#ifdef PROF
	save	%sp, -CC64FSZ, %sp
#endif

	rdpr	%pstate, %o3
	sub	%o0, %o1, %o0				! start at the last line
	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
	wrpr	%o4, 0, %pstate

	ldxa    [%g0] ASI_MCCR, %o5
	andn	%o5, MCCR_DCACHE_EN, %o4		! Turn off the D$
	stxa	%o4, [%g0] ASI_MCCR
	flush 	%g0

1:
	stxa	%g0, [%o0] ASI_DCACHE_TAG		! invalidate this line
	membar	#Sync
	brnz,pt	%o0, 1b					! walk down to line 0
	 sub	%o0, %o1, %o0

	sethi	%hi(KERNBASE), %o2
	flush	%o2
	membar	#Sync

	stxa	%o5, [%g0] ASI_MCCR			! Restore the D$
	flush 	%g0
#ifdef PROF
	wrpr	%o3, %pstate				! restore interrupts
	ret
	 restore
#else
	retl
	 wrpr	%o3, %pstate				! restore interrupts
#endif
   6283 
#ifdef MULTIPROCESSOR
/*
 * void sparc64_ipi_blast_dcache(int dcache_size, int dcache_line_size)
 *
 * Clear out all of D$ regardless of contents
 *
 * IPI handler version of sp_blast_dcache: runs on the interrupt
 * globals (%g2/%g3 are the arguments, %g5 is scratch) and returns
 * through ret_from_intr_vector instead of retl.
 *
 * On entry:
 *	%g2 = dcache_size
 *	%g3 = dcache_line_size
 */
	.align 8
ENTRY(sparc64_ipi_blast_dcache)
	sub	%g2, %g3, %g2				! start at the last line
1:
	stxa	%g0, [%g2] ASI_DCACHE_TAG		! invalidate this line
	membar	#Sync
	brnz,pt	%g2, 1b					! walk down to line 0
	 sub	%g2, %g3, %g2

	sethi	%hi(KERNBASE), %g5
	flush	%g5
	membar	#Sync

	ba,a	ret_from_intr_vector
	 nop
#endif /* MULTIPROCESSOR */
   6310 
   6311 /*
   6312  * blast_icache_us()
   6313  * blast_icache_usiii()
   6314  *
   6315  * Clear out all of I$ regardless of contents
   6316  * Does not modify %o0
   6317  *
   6318  * We turn off interrupts for the duration to prevent RED exceptions.
   6319  * For the Cheetah version, we also have to to turn off the I$ during this as
   6320  * ASI_ICACHE_TAG accesses interfere with coherency.
   6321  */
   6322 	.align 8
   6323 ENTRY(blast_icache_us)
   6324 	rdpr	%pstate, %o3
   6325 	sethi	%hi(icache_size), %o1
   6326 	ld	[%o1 + %lo(icache_size)], %o1
   6327 	sethi	%hi(icache_line_size), %o2
   6328 	ld	[%o2 + %lo(icache_line_size)], %o2
   6329 	sub	%o1, %o2, %o1
   6330 	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
   6331 	wrpr	%o4, 0, %pstate
   6332 1:
   6333 	stxa	%g0, [%o1] ASI_ICACHE_TAG
   6334 	brnz,pt	%o1, 1b
   6335 	 sub	%o1, %o2, %o1
   6336 	sethi	%hi(KERNBASE), %o5
   6337 	flush	%o5
   6338 	membar	#Sync
   6339 	retl
   6340 	 wrpr	%o3, %pstate
   6341 
	.align 8
/*
 * Cheetah variant of blast_icache: disables the I$ in the MCCR around
 * the tag walk (see the comment above) and restores it afterwards.
 */
ENTRY(blast_icache_usiii)
	rdpr	%pstate, %o3
	sethi	%hi(icache_size), %o1
	ld	[%o1 + %lo(icache_size)], %o1
	sethi	%hi(icache_line_size), %o2
	ld	[%o2 + %lo(icache_line_size)], %o2
	sub	%o1, %o2, %o1				! start at the last line
	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
	wrpr	%o4, 0, %pstate
	ldxa    [%g0] ASI_MCCR, %o5
	andn	%o5, MCCR_ICACHE_EN, %o4		! Turn off the I$
	stxa	%o4, [%g0] ASI_MCCR
	flush 	%g0
1:
	stxa	%g0, [%o1] ASI_ICACHE_TAG		! invalidate this line
	membar	#Sync
	brnz,pt	%o1, 1b					! walk down to line 0
	 sub	%o1, %o2, %o1
	stxa	%o5, [%g0] ASI_MCCR			! Restore the I$
	flush 	%g0
	retl
	 wrpr	%o3, %pstate				! restore interrupts
   6365 
   6366 /*
   6367  * dcache_flush_page_us(paddr_t pa)
   6368  * dcache_flush_page_usiii(paddr_t pa)
   6369  *
   6370  * Clear one page from D$.
   6371  *
   6372  */
   6373 	.align 8
   6374 ENTRY(dcache_flush_page_us)
   6375 #ifndef _LP64
   6376 	COMBINE(%o0, %o1, %o0)
   6377 #endif
   6378 	mov	-1, %o1		! Generate mask for tag: bits [29..2]
   6379 	srlx	%o0, 13-2, %o2	! Tag is PA bits <40:13> in bits <29:2>
   6380 	clr	%o4
   6381 	srl	%o1, 2, %o1	! Now we have bits <29:0> set
   6382 	set	(2*NBPG), %o5
   6383 	ba,pt	%icc, 1f
   6384 	 andn	%o1, 3, %o1	! Now we have bits <29:2> set
   6385 
   6386 	.align 8
   6387 1:
   6388 	ldxa	[%o4] ASI_DCACHE_TAG, %o3
   6389 	mov	%o4, %o0
   6390 	deccc	32, %o5
   6391 	bl,pn	%icc, 2f
   6392 	 inc	32, %o4
   6393 
   6394 	xor	%o3, %o2, %o3
   6395 	andcc	%o3, %o1, %g0
   6396 	bne,pt	%xcc, 1b
   6397 	 membar	#LoadStore
   6398 
   6399 	stxa	%g0, [%o0] ASI_DCACHE_TAG
   6400 	ba,pt	%icc, 1b
   6401 	 membar	#StoreLoad
   6402 2:
   6403 
   6404 	sethi	%hi(KERNBASE), %o5
   6405 	flush	%o5
   6406 	retl
   6407 	 membar	#Sync
   6408 
	.align 8
/*
 * Cheetah variant: uses the displacement-flush ASI_DCACHE_INVALIDATE
 * to invalidate each cache line covering the page directly by PA.
 */
ENTRY(dcache_flush_page_usiii)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)
#endif
	set	NBPG, %o1
	sethi	%hi(dcache_line_size), %o2
	add	%o0, %o1, %o1	! end address
	ld	[%o2 + %lo(dcache_line_size)], %o2

1:
	stxa	%g0, [%o0] ASI_DCACHE_INVALIDATE	! invalidate one line
	add	%o0, %o2, %o0				! step one line forward
	cmp	%o0, %o1
	bl,pt	%xcc, 1b				! until end of page
	 nop

	sethi	%hi(KERNBASE), %o5
	flush	%o5
	retl
	 membar	#Sync
   6430 
   6431 /*
   6432  *	cache_flush_phys_us(paddr_t, psize_t, int);
   6433  *	cache_flush_phys_usiii(paddr_t, psize_t, int);
   6434  *
   6435  *	Clear a set of paddrs from the D$, I$ and if param3 is
   6436  *	non-zero, E$.  (E$ is not supported yet).
   6437  */
   6438 
   6439 	.align 8
   6440 ENTRY(cache_flush_phys_us)
   6441 #ifndef _LP64
   6442 	COMBINE(%o0, %o1, %o0)
   6443 	COMBINE(%o2, %o3, %o1)
   6444 	mov	%o4, %o2
   6445 #endif
   6446 #ifdef DEBUG
   6447 	tst	%o2		! Want to clear E$?
   6448 	tnz	1		! Error!
   6449 #endif
   6450 	add	%o0, %o1, %o1	! End PA
   6451 	dec	%o1
   6452 
   6453 	!!
   6454 	!! Both D$ and I$ tags match pa bits 42-13, but
   6455 	!! they are shifted different amounts.  So we'll
   6456 	!! generate a mask for bits 40-13.
   6457 	!!
   6458 
   6459 	mov	-1, %o2		! Generate mask for tag: bits [40..13]
   6460 	srl	%o2, 5, %o2	! 32-5 = [27..0]
   6461 	sllx	%o2, 13, %o2	! 27+13 = [40..13]
   6462 
   6463 	and	%o2, %o0, %o0	! Mask away uninteresting bits
   6464 	and	%o2, %o1, %o1	! (probably not necessary)
   6465 
   6466 	set	(2*NBPG), %o5
   6467 	clr	%o4
   6468 1:
   6469 	ldxa	[%o4] ASI_DCACHE_TAG, %o3
   6470 	sllx	%o3, 40-29, %o3	! Shift D$ tag into place
   6471 	and	%o3, %o2, %o3	! Mask out trash
   6472 
   6473 	cmp	%o0, %o3
   6474 	blt,pt	%xcc, 2f	! Too low
   6475 	 cmp	%o1, %o3
   6476 	bgt,pt	%xcc, 2f	! Too high
   6477 	 nop
   6478 
   6479 	membar	#LoadStore
   6480 	stxa	%g0, [%o4] ASI_DCACHE_TAG ! Just right
   6481 	membar	#Sync
   6482 2:
   6483 	ldda	[%o4] ASI_ICACHE_TAG, %g0	! Tag goes in %g1
   6484 	sllx	%g1, 40-35, %g1			! Shift I$ tag into place
   6485 	and	%g1, %o2, %g1			! Mask out trash
   6486 	cmp	%o0, %g1
   6487 	blt,pt	%xcc, 3f
   6488 	 cmp	%o1, %g1
   6489 	bgt,pt	%xcc, 3f
   6490 	 nop
   6491 	stxa	%g0, [%o4] ASI_ICACHE_TAG
   6492 3:
   6493 	membar	#StoreLoad
   6494 	dec	32, %o5
   6495 	brgz,pt	%o5, 1b
   6496 	 inc	32, %o4
   6497 
   6498 	sethi	%hi(KERNBASE), %o5
   6499 	flush	%o5
   6500 	retl
   6501 	 membar	#Sync
   6502 
	.align 8
/*
 * Cheetah variant: displacement-flush the D$ lines covering the range
 * by PA; the I$ does not need flushing on cheetah (see below).
 */
ENTRY(cache_flush_phys_usiii)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)
	COMBINE(%o2, %o3, %o1)
	mov	%o4, %o2
#endif
#ifdef DEBUG
	tst	%o2		! Want to clear E$?
	tnz	1		! Error!
#endif
	add	%o0, %o1, %o1	! End PA
	sethi	%hi(dcache_line_size), %o3
	ld	[%o3 + %lo(dcache_line_size)], %o3
	sethi	%hi(KERNBASE), %o5
1:
	stxa	%g0, [%o0] ASI_DCACHE_INVALIDATE	! invalidate one line
	add	%o0, %o3, %o0				! step one line forward
	cmp	%o0, %o1
	bl,pt	%xcc, 1b				! until end of range
	 nop

	/* don't need to flush the I$ on cheetah */

	flush	%o5
	retl
	 membar	#Sync
   6530 
#ifdef COMPAT_16
#ifdef _LP64
/*
 * XXXXX Still needs lotsa cleanup after sendsig is complete and offsets are known
 *
 * The following code is copied to the top of the user stack when each
 * process is exec'ed, and signals are `trampolined' off it.
 *
 * When this code is run, the stack looks like:
 *	[%sp]			128 bytes to which registers can be dumped
 *	[%sp + 128]		signal number (goes in %o0)
 *	[%sp + 128 + 4]		signal code (goes in %o1)
 *	[%sp + 128 + 8]		first word of saved state (sigcontext)
 *	    .
 *	    .
 *	    .
 *	[%sp + NNN]	last word of saved state
 * (followed by previous stack contents or top of signal stack).
 * The address of the function to call is in %g1; the old %g1 and %o0
 * have already been saved in the sigcontext.  We are running in a clean
 * window, all previous windows now being saved to the stack.
 *
 * Note that [%sp + 128 + 8] == %sp + 128 + 16.  The copy at %sp+128+8
 * will eventually be removed, with a hole left in its place, if things
 * work out.
 */
ENTRY_NOPROFILE(sigcode)
	/*
	 * XXX  the `save' and `restore' below are unnecessary: should
	 *	replace with simple arithmetic on %sp
	 *
	 * Make room on the stack for 64 %f registers + %fsr.  This comes
	 * out to 64*4+8 or 264 bytes, but this must be aligned to a multiple
	 * of 64, or 320 bytes.
	 */
	save	%sp, -CC64FSZ - 320, %sp
	mov	%g2, %l2		! save globals in %l registers
	mov	%g3, %l3
	mov	%g4, %l4
	mov	%g5, %l5
	mov	%g6, %l6
	mov	%g7, %l7
	/*
	 * Saving the fpu registers is expensive, so do it iff it is
	 * enabled and dirty.  %l0 records which halves were dirty so
	 * they can be restored after the handler returns.
	 */
	rd	%fprs, %l0
	btst	FPRS_DL|FPRS_DU, %l0	! All clean?
	bz,pt	%icc, 2f
	 btst	FPRS_DL, %l0		! test dl
	bz,pt	%icc, 1f
	 btst	FPRS_DU, %l0		! test du

	! fpu is enabled, oh well
	stx	%fsr, [%sp + CC64FSZ + BIAS + 0]
	add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block store
	stda	%f0, [%l0] ASI_BLK_P
	inc	BLOCK_SIZE, %l0
	stda	%f16, [%l0] ASI_BLK_P
1:
	bz,pt	%icc, 2f		! upper half clean? (cc still from du test)
	 add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block store
	add	%l0, 2*BLOCK_SIZE, %l0	! and skip what we already stored
	stda	%f32, [%l0] ASI_BLK_P
	inc	BLOCK_SIZE, %l0
	stda	%f48, [%l0] ASI_BLK_P
2:
	membar	#Sync
	rd	%fprs, %l0		! reload fprs copy, for checking after
	rd	%y, %l1			! in any case, save %y
	lduw	[%fp + BIAS + 128], %o0	! sig
	lduw	[%fp + BIAS + 128 + 4], %o1	! code
	call	%g1			! (*sa->sa_handler)(sig,code,scp)
	 add	%fp, BIAS + 128 + 8, %o2	! scp
	wr	%l1, %g0, %y		! in any case, restore %y

	/*
	 * Now that the handler has returned, re-establish all the state
	 * we just saved above, then do a sigreturn.
	 */
	btst	FPRS_DL|FPRS_DU, %l0	! All clean?
	bz,pt	%icc, 2f
	 btst	FPRS_DL, %l0		! test dl
	bz,pt	%icc, 1f
	 btst	FPRS_DU, %l0		! test du

	ldx	[%sp + CC64FSZ + BIAS + 0], %fsr
	add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block load
	ldda	[%l0] ASI_BLK_P, %f0
	inc	BLOCK_SIZE, %l0
	ldda	[%l0] ASI_BLK_P, %f16
1:
	bz,pt	%icc, 2f		! upper half clean? (cc still from du test)
	 nop
	add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block load
	inc	2*BLOCK_SIZE, %l0	! and skip what we already loaded
	ldda	[%l0] ASI_BLK_P, %f32
	inc	BLOCK_SIZE, %l0
	ldda	[%l0] ASI_BLK_P, %f48
2:
	mov	%l2, %g2		! restore the globals
	mov	%l3, %g3
	mov	%l4, %g4
	mov	%l5, %g5
	mov	%l6, %g6
	mov	%l7, %g7
	membar	#Sync

	restore	%g0, SYS_compat_16___sigreturn14, %g1 ! get registers back & set syscall #
	add	%sp, BIAS + 128 + 8, %o0! compute scp
!	andn	%o0, 0x0f, %o0
	t	ST_SYSCALL		! sigreturn(scp)
	! sigreturn does not return unless it fails
	mov	SYS_exit, %g1		! exit(errno)
	t	ST_SYSCALL
	/* NOTREACHED */

	.globl	_C_LABEL(esigcode)
_C_LABEL(esigcode):
#endif
   6654 #endif
   6655 
   6656 #if !defined(_LP64)
   6657 
   6658 #define SIGCODE_NAME		sigcode
   6659 #define ESIGCODE_NAME		esigcode
   6660 #define SIGRETURN_NAME		SYS_compat_16___sigreturn14
   6661 #define EXIT_NAME		SYS_exit
   6662 
   6663 #include "sigcode32.s"
   6664 
   6665 #endif
   6666 #endif
   6667 
   6668 /*
   6669  * getfp() - get stack frame pointer
   6670  */
   6671 ENTRY(getfp)
   6672 	retl
   6673 	 mov %fp, %o0
   6674 
   6675 /*
   6676  * Call optional cpu_idle handler if provided
   6677  */
   6678 ENTRY(cpu_idle)
   6679 	set	CPUINFO_VA, %o0
   6680 	LDPTR	[%o0 + CI_IDLESPIN], %o1
   6681 	tst	%o1
   6682 	bz	1f
   6683 	 nop
   6684 	jmp	%o1
   6685 	 nop
   6686 1:
   6687 	retl
   6688 	nop
   6689 
   6690 /*
   6691  * cpu_switchto() switches to an lwp to run and runs it, saving the
   6692  * current one away.
   6693  *
   6694  * struct lwp * cpu_switchto(struct lwp *current, struct lwp *next)
   6695  * Switch to the specified next LWP
   6696  * Arguments:
   6697  *	i0	'struct lwp *' of the current LWP
   6698  *	i1	'struct lwp *' of the LWP to switch to
   6699  *	i2	'bool' of the flag returning to a softint LWP or not
   6700  * Returns:
   6701  *	the old lwp switched away from
   6702  */
   6703 ENTRY(cpu_switchto)
   6704 	save	%sp, -CC64FSZ, %sp
   6705 	/*
   6706 	 * REGISTER USAGE AT THIS POINT:
   6707 	 *	%l1 = newpcb
   6708 	 *	%l3 = new trapframe
   6709 	 *	%l4 = new l->l_proc
   6710 	 *	%l5 = pcb of oldlwp
   6711 	 *	%l6 = %hi(CPCB)
   6712 	 *	%l7 = %hi(CURLWP)
   6713 	 *	%i0 = oldlwp
   6714 	 *	%i1 = lwp
   6715 	 *	%i2 = returning
   6716 	 *	%o0 = tmp 1
   6717 	 *	%o1 = tmp 2
   6718 	 *	%o2 = tmp 3
   6719 	 *	%o3 = tmp 4
   6720 	 */
   6721 
   6722 	flushw				! save all register windows except this one
   6723 	wrpr	%g0, PSTATE_KERN, %pstate	! make sure we're on normal globals
   6724 						! with traps turned off
   6725 
   6726 	sethi	%hi(CPCB), %l6
   6727 
   6728 	rdpr	%pstate, %o1			! oldpstate = %pstate;
   6729 	LDPTR	[%i0 + L_PCB], %l5
   6730 
   6731 	stx	%i7, [%l5 + PCB_PC]
   6732 	stx	%i6, [%l5 + PCB_SP]
   6733 	sth	%o1, [%l5 + PCB_PSTATE]
   6734 
   6735 	rdpr	%cwp, %o2		! Useless
   6736 	stb	%o2, [%l5 + PCB_CWP]
   6737 
   6738 	sethi	%hi(CURLWP), %l7
   6739 
   6740 	LDPTR   [%i1 + L_PCB], %l1	! newpcb = l->l_pcb;
   6741 
   6742 	/*
   6743 	 * Load the new lwp.  To load, we must change stacks and
   6744 	 * alter cpcb and the window control registers, hence we must
   6745 	 * keep interrupts disabled.
   6746 	 *
   6747 	 * Issue barriers to coordinate mutex_exit on this CPU with
   6748 	 * mutex_vector_enter on another CPU.
   6749 	 *
   6750 	 * 1. Any prior mutex_exit by oldlwp must be visible to other
   6751 	 *    CPUs before we set ci_curlwp := newlwp on this one,
   6752 	 *    requiring a store-before-store barrier.
   6753 	 *
   6754 	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
   6755 	 *    before any subsequent mutex_exit by newlwp can even test
   6756 	 *    whether there might be waiters, requiring a
   6757 	 *    store-before-load barrier.
   6758 	 *
   6759 	 * See kern_mutex.c for details -- this is necessary for
   6760 	 * adaptive mutexes to detect whether the lwp is on the CPU in
   6761 	 * order to safely block without requiring atomic r/m/w in
   6762 	 * mutex_exit.
   6763 	 */
   6764 
   6765 	membar	#StoreStore
   6766 	STPTR	%i1, [%l7 + %lo(CURLWP)]	! curlwp = l;
   6767 	membar	#StoreLoad
   6768 	STPTR	%l1, [%l6 + %lo(CPCB)]		! cpcb = newpcb;
   6769 
   6770 	ldx	[%l1 + PCB_SP], %i6
   6771 	ldx	[%l1 + PCB_PC], %i7
   6772 
   6773 	wrpr	%g0, 0, %otherwin	! These two insns should be redundant
   6774 	wrpr	%g0, 0, %canrestore
   6775 	GET_MAXCWP %o3
   6776 	wrpr	%g0, %o3, %cleanwin
   6777 	dec	1, %o3			! CANSAVE + CANRESTORE + OTHERWIN = MAXCWP - 1
   6778 	/* Skip the rest if returning to a interrupted LWP. */
   6779 	brnz,pn	%i2, Lsw_noras
   6780 	 wrpr	%o3, %cansave
   6781 
   6782 	/* finally, enable traps */
   6783 	wrpr	%g0, PSTATE_INTR, %pstate
   6784 
   6785 	!flushw
   6786 	!membar #Sync
   6787 
   6788 	/*
   6789 	 * Check for restartable atomic sequences (RAS)
   6790 	 */
   6791 	LDPTR	[%i1 + L_PROC], %l4		! now %l4 points to p
   6792 	mov	%l4, %o0		! p is first arg to ras_lookup
   6793 	LDPTR	[%o0 + P_RASLIST], %o1	! any RAS in p?
   6794 	brz,pt	%o1, Lsw_noras		! no, skip RAS check
   6795 	 LDPTR	[%i1 + L_TF], %l3	! pointer to trap frame
   6796 	call	_C_LABEL(ras_lookup)
   6797 	 ldx	[%l3 + TF_PC], %o1
   6798 	cmp	%o0, -1
   6799 	be,pt	CCCR, Lsw_noras
   6800 	 add	%o0, 4, %o1
   6801 	stx	%o0, [%l3 + TF_PC]	! store rewound %pc
   6802 	stx	%o1, [%l3 + TF_NPC]	! and %npc
   6803 
   6804 Lsw_noras:
   6805 
   6806 	/*
   6807 	 * We are resuming the process that was running at the
   6808 	 * call to switch().  Just set psr ipl and return.
   6809 	 */
   6810 !	wrpr	%g0, 0, %cleanwin	! DEBUG
   6811 	clr	%g4		! This needs to point to the base of the data segment
   6812 	wr	%g0, ASI_PRIMARY_NOFAULT, %asi		! Restore default ASI
   6813 	!wrpr	%g0, PSTATE_INTR, %pstate
   6814 	ret
   6815 	 restore %i0, %g0, %o0				! return old curlwp
   6816 
   6817 #ifdef __HAVE_FAST_SOFTINTS
   6818 /*
   6819  * Switch to the LWP assigned to handle interrupts from the given
   6820  * source.  We borrow the VM context from the interrupted LWP.
   6821  *
   6822  * int softint_fastintr(void *l)
   6823  *
   6824  * Arguments:
   6825  *	i0	softint lwp
   6826  */
   6827 ENTRY(softint_fastintr)
   6828 	save	%sp, -CC64FSZ, %sp
   6829 	set	CPUINFO_VA, %l0			! l0 = curcpu()
   6830 	rdpr	%pil, %l7			! l7 = splhigh()
   6831 	wrpr	%g0, PIL_HIGH, %pil
   6832 	LDPTR	[%l0 + CI_EINTSTACK], %l6	! l6 = ci_eintstack
   6833 	add	%sp, -CC64FSZ, %l2		! ci_eintstack = sp - CC64FSZ
   6834 	STPTR	%l2, [%l0 + CI_EINTSTACK]	! save intstack for nested intr
   6835 
   6836 	mov	%i0, %o0			! o0/i0 = softint lwp
   6837 	mov	%l7, %o1			! o1/i1 = ipl
   6838 	save	%sp, -CC64FSZ, %sp		! make one more register window
   6839 	flushw					! and save all
   6840 
   6841 	sethi	%hi(CURLWP), %l7
   6842 	sethi	%hi(CPCB), %l6
   6843 	LDPTR	[%l7 + %lo(CURLWP)], %l0	! l0 = interrupted lwp (curlwp)
   6844 
   6845 	/* save interrupted lwp/pcb info */
   6846 	sethi	%hi(softint_fastintr_ret - 8), %o0	! trampoline function
   6847 	LDPTR	[%l0 + L_PCB], %l5		! l5 = interrupted pcb
   6848 	or	%o0, %lo(softint_fastintr_ret - 8), %o0
   6849 	stx	%i6, [%l5 + PCB_SP]
   6850 	stx	%o0, [%l5 + PCB_PC]
   6851 	rdpr	%pstate, %o1
   6852 	rdpr	%cwp, %o2
   6853 	sth	%o1, [%l5 + PCB_PSTATE]
   6854 	stb	%o2, [%l5 + PCB_CWP]
   6855 
   6856 	/* switch to softint lwp */
   6857 	sethi	%hi(USPACE - TF_SIZE - CC64FSZ - STKB), %o3
   6858 	LDPTR	[%i0 + L_PCB], %l1		! l1 = softint pcb
   6859 	or	%o3, %lo(USPACE - TF_SIZE - CC64FSZ - STKB), %o3
   6860 	membar	#StoreStore		/* for mutex_enter; see cpu_switchto */
   6861 	STPTR	%i0, [%l7 + %lo(CURLWP)]
   6862 	/*
   6863 	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
   6864 	 * enter a softint lwp, it can't be holding any mutexes, so it
   6865 	 * can't release any until after it has acquired them, so we
   6866 	 * need not participate in the protocol with mutex_vector_enter
   6867 	 * barriers here.
   6868 	 */
   6869 	add	%l1, %o3, %i6
   6870 	STPTR	%l1, [%l6 + %lo(CPCB)]
   6871 	stx	%i6, [%l1 + PCB_SP]
   6872 	add	%i6, -CC64FSZ, %sp		! new stack
   6873 
   6874 	/* now switched, then invoke MI dispatcher */
   6875 	mov	%i1, %o1
   6876 	call	_C_LABEL(softint_dispatch)
   6877 	 mov	%l0, %o0
   6878 
   6879 	/* switch back to interrupted lwp */
   6880 	ldx	[%l5 + PCB_SP], %i6
   6881 	membar	#StoreStore		/* for mutex_enter; see cpu_switchto */
   6882 	STPTR	%l0, [%l7 + %lo(CURLWP)]
   6883 	membar	#StoreLoad		/* for mutex_enter; see cpu_switchto */
   6884 	STPTR	%l5, [%l6 + %lo(CPCB)]
   6885 
   6886 	restore					! rewind register window
   6887 
   6888 	STPTR	%l6, [%l0 + CI_EINTSTACK]	! restore ci_eintstack
   6889 	wrpr	%g0, %l7, %pil			! restore ipl
   6890 	ret
   6891 	 restore	%g0, 1, %o0
   6892 
   6893 /*
   6894  * Trampoline function that gets returned to by cpu_switchto() when
   6895  * an interrupt handler blocks.
   6896  *
   6897  * Arguments:
   6898  *	o0	old lwp from cpu_switchto()
   6899  *
   6900  * from softint_fastintr():
   6901  *	l0	CPUINFO_VA
   6902  *	l6	saved ci_eintstack
   6903  *	l7	saved ipl
   6904  */
softint_fastintr_ret:
	/*
	 * Resumed by cpu_switchto() after the softint handler blocked.
	 * %o0 = old lwp; %l0/%l6/%l7 were preserved across the switch
	 * (CPUINFO_VA, saved ci_eintstack, saved ipl -- see comment above).
	 */
	/* re-adjust after mi_switch() */
	ld	[%l0 + CI_MTX_COUNT], %o1
	inc	%o1				! ci_mtx_count++
	st	%o1, [%l0 + CI_MTX_COUNT]

	STPTR	%l6, [%l0 + CI_EINTSTACK]	! restore ci_eintstack
	wrpr	%g0, %l7, %pil			! restore ipl
	ret					! return 1 to caller, as the
	 restore	%g0, 1, %o0		!  non-blocking path above does
   6915 
   6916 #endif /* __HAVE_FAST_SOFTINTS */
   6917 
   6918 /*
   6919  * Snapshot the current process so that stack frames are up to date.
   6920  * Only used just before a crash dump.
   6921  */
ENTRY(snapshot)
	! %o0 = struct pcb * to record the current state into
	rdpr	%pstate, %o1		! save psr
	stx	%o7, [%o0 + PCB_PC]	! save pc (our return address)
	stx	%o6, [%o0 + PCB_SP]	! save sp
	rdpr	%pil, %o2
	sth	%o1, [%o0 + PCB_PSTATE]
	rdpr	%cwp, %o3
	stb	%o2, [%o0 + PCB_PIL]
	stb	%o3, [%o0 + PCB_CWP]

	! Spill every register window to the stack so the stack frames
	! are consistent for the crash dump.
	flushw
	save	%sp, -CC64FSZ, %sp	! one more window, then spill again
	flushw
	ret
	 restore
   6937 
   6938 /*
   6939  * cpu_lwp_fork() arranges for lwp_trampoline() to run when the
   6940  * nascent lwp is selected by switch().
   6941  *
   6942  * The switch frame will contain pointer to struct lwp of this lwp in
   6943  * %l2, a pointer to the function to call in %l0, and an argument to
   6944  * pass to it in %l1 (we abuse the callee-saved registers).
   6945  *
   6946  * We enter lwp_trampoline as if we are "returning" from
   6947  * cpu_switchto(), so %o0 contains previous lwp (the one we are
   6948  * switching from) that we pass to lwp_startup().
   6949  *
   6950  * If the function *(%l0) returns, we arrange for an immediate return
   6951  * to user mode.  This happens in two known cases: after execve(2) of
   6952  * init, and when returning a child to user mode after a fork(2).
   6953  *
   6954  * If were setting up a kernel thread, the function *(%l0) will not
   6955  * return.
   6956  */
ENTRY(lwp_trampoline)
	/*
	 * Note: cpu_lwp_fork() has set up a stack frame for us to run
	 * in, so we can call other functions from here without using
	 * `save ... restore'.
	 */

	! newlwp in %l2, oldlwp in %o0
	call    lwp_startup		! lwp_startup(oldlwp, newlwp)
	 mov    %l2, %o1

	call	%l0			! re-use current frame: (*func)(arg)
	 mov	%l1, %o0		!  func in %l0, arg in %l1

	/*
	 * Here we finish up as in syscall, but simplified.
	 * Only reached if func returned: go straight to user mode.
	 */
	b	return_from_trap
	 nop
   6976 
   6977 /*
   6978  * pmap_zero_page_phys(pa)
   6979  *
   6980  * Zero one page physically addressed
   6981  *
   6982  * Block load/store ASIs do not exist for physical addresses,
   6983  * so we won't use them.
   6984  *
   6985  * We will execute a flush at the end to sync the I$.
   6986  *
   6987  * This version expects to have the dcache_flush_page_all(pa)
   6988  * to have been called before calling into here.
   6989  */
ENTRY(pmap_zero_page_phys)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)	! 32-bit ABI: merge %o0:%o1 into one 64-bit pa
#endif
#ifdef DEBUG
	! Optionally trace the call when pmapdebug has PDB_COPY set.
	set	pmapdebug, %o4
	ld	[%o4], %o4
	btst	0x80, %o4	! PDB_COPY
	bz,pt	%icc, 3f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	2f, %o0
	call	printf
	 mov	%i0, %o1
!	ta	1; nop
	restore
	.data
2:	.asciz	"pmap_zero_page(%p)\n"
	_ALIGN
	.text
3:
#endif
	set	NBPG, %o2		! Loop count (bytes remaining)
	wr	%g0, ASI_PHYS_CACHED, %asi
1:
	/* Unroll the loop 8 times */
	stxa	%g0, [%o0 + 0x00] %asi	! zero 64 bytes per iteration
	deccc	0x40, %o2
	stxa	%g0, [%o0 + 0x08] %asi
	stxa	%g0, [%o0 + 0x10] %asi
	stxa	%g0, [%o0 + 0x18] %asi
	stxa	%g0, [%o0 + 0x20] %asi
	stxa	%g0, [%o0 + 0x28] %asi
	stxa	%g0, [%o0 + 0x30] %asi
	stxa	%g0, [%o0 + 0x38] %asi
	bg,pt	%icc, 1b
	 inc	0x40, %o0

	sethi	%hi(KERNBASE), %o3	! sync the I$ (see block comment above)
	flush	%o3
	retl
	 wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Make C code happy
   7032 
   7033 /*
   7034  * pmap_copy_page_phys(paddr_t src, paddr_t dst)
   7035  *
   7036  * Copy one page physically addressed
   7037  * We need to use a global reg for ldxa/stxa
   7038  * so the top 32-bits cannot be lost if we take
   7039  * a trap and need to save our stack frame to a
   7040  * 32-bit stack.  We will unroll the loop by 4 to
   7041  * improve performance.
   7042  *
   7043  * This version expects to have the dcache_flush_page_all(pa)
   7044  * to have been called before calling into here.
   7045  *
   7046  */
ENTRY(pmap_copy_page_phys)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)	! 32-bit ABI: src pa from %o0:%o1
	COMBINE(%o2, %o3, %o1)	! 32-bit ABI: dst pa from %o2:%o3
#endif
#ifdef DEBUG
	! Optionally trace the call when pmapdebug has PDB_COPY set.
	set	pmapdebug, %o4
	ld	[%o4], %o4
	btst	0x80, %o4	! PDB_COPY
	bz,pt	%icc, 3f
	 nop
	save	%sp, -CC64FSZ, %sp
	mov	%i0, %o1
	set	2f, %o0
	call	printf
	 mov	%i1, %o2
!	ta	1; nop
	restore
	.data
2:	.asciz	"pmap_copy_page(%p,%p)\n"
	_ALIGN
	.text
3:
#endif
#if 1
	! Copy 32 bytes per iteration; %g1 is used as one of the data
	! registers (see block comment above re: 32-bit stack traps).
	set	NBPG, %o2
	wr	%g0, ASI_PHYS_CACHED, %asi
1:
	ldxa	[%o0 + 0x00] %asi, %g1
	ldxa	[%o0 + 0x08] %asi, %o3
	ldxa	[%o0 + 0x10] %asi, %o4
	ldxa	[%o0 + 0x18] %asi, %o5
	inc	0x20, %o0
	deccc	0x20, %o2
	stxa	%g1, [%o1 + 0x00] %asi
	stxa	%o3, [%o1 + 0x08] %asi
	stxa	%o4, [%o1 + 0x10] %asi
	stxa	%o5, [%o1 + 0x18] %asi
	bg,pt	%icc, 1b		! We don't care about pages >4GB
	 inc	0x20, %o1
	retl
	 wr	%g0, ASI_PRIMARY_NOFAULT, %asi
#else
	! Simple one-doubleword-at-a-time variant (disabled).
	set	NBPG, %o3
	add	%o3, %o0, %o3
	mov	%g1, %o4		! Save g1
1:
	ldxa	[%o0] ASI_PHYS_CACHED, %g1
	inc	8, %o0
	cmp	%o0, %o3
	stxa	%g1, [%o1] ASI_PHYS_CACHED
	bl,pt	%icc, 1b		! We don't care about pages >4GB
	 inc	8, %o1
	retl
	 mov	%o4, %g1		! Restore g1
#endif
   7103 
   7104 /*
   7105  * extern int64_t pseg_get_real(struct pmap *pm, vaddr_t addr);
   7106  *
   7107  * Return TTE at addr in pmap.  Uses physical addressing only.
   7108  * pmap->pm_physaddr must by the physical address of pm_segs
   7109  *
   7110  */
ENTRY(pseg_get_real)
!	flushw			! Make sure we don't have stack probs & lose hibits of %o
#ifndef _LP64
	clruw	%o1					! Zero extend
#endif
	ldx	[%o0 + PM_PHYS], %o2			! pmap->pm_segs

	srax	%o1, HOLESHIFT, %o3			! Check for valid address
	brz,pt	%o3, 0f					! Should be zero or -1
	 inc	%o3					! Make -1 -> 0
	brnz,pn	%o3, 1f					! Error! In hole!
0:
	! Walk the 3-level page table: segment -> directory -> page table.
	srlx	%o1, STSHIFT, %o3
	and	%o3, STMASK, %o3			! Index into pm_segs
	sll	%o3, 3, %o3				! *8 for 64-bit entries
	add	%o2, %o3, %o2
	DLFLUSH(%o2,%o3)
	ldxa	[%o2] ASI_PHYS_CACHED, %o2		! Load page directory pointer
	DLFLUSH2(%o3)

	srlx	%o1, PDSHIFT, %o3
	and	%o3, PDMASK, %o3
	sll	%o3, 3, %o3
	brz,pn	%o2, 1f					! NULL entry? check somewhere else
	 add	%o2, %o3, %o2
	DLFLUSH(%o2,%o3)
	ldxa	[%o2] ASI_PHYS_CACHED, %o2		! Load page table pointer
	DLFLUSH2(%o3)

	srlx	%o1, PTSHIFT, %o3			! Convert to ptab offset
	and	%o3, PTMASK, %o3
	sll	%o3, 3, %o3
	brz,pn	%o2, 1f					! NULL entry? check somewhere else
	 add	%o2, %o3, %o2
	DLFLUSH(%o2,%o3)
	ldxa	[%o2] ASI_PHYS_CACHED, %o0		! %o0 = TTE
	DLFLUSH2(%o3)
	brgez,pn %o0, 1f				! Entry invalid?  Punt
	 btst	1, %sp					! biased (odd) %sp -> 64-bit caller
	bz,pn	%icc, 0f				! 64-bit mode?
	 nop
	retl						! Yes, return full value
	 nop
0:
#if 1
	srl	%o0, 0, %o1
	retl						! No, generate a %o0:%o1 double
	 srlx	%o0, 32, %o0
#else
	DLFLUSH(%o2,%o3)
	ldda	[%o2] ASI_PHYS_CACHED, %o0
	DLFLUSH2(%o3)
	retl						! No, generate a %o0:%o1 double
	 nop
#endif
1:
	! Not mapped (or address in hole): return 0.
#ifndef _LP64
	clr	%o1
#endif
	retl
	 clr	%o0
   7172 
   7173 /*
   7174  * In 32-bit mode:
   7175  *
   7176  * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
   7177  *			    int64_t tte %o2:%o3, paddr_t spare %o4:%o5);
   7178  *
   7179  * In 64-bit mode:
   7180  *
   7181  * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
   7182  *			    int64_t tte %o2, paddr_t spare %o3);
   7183  *
   7184  * Set a pseg entry to a particular TTE value.  Return values are:
   7185  *
   7186  *	-2	addr in hole
   7187  *	0	success	(spare was not used if given)
   7188  *	1	failure	(spare was not given, but one is needed)
   7189  *	2	success	(spare was given, used for L2)
   7190  *	3	failure	(spare was given, used for L2, another is needed for L3)
   7191  *	4	success	(spare was given, used for L3)
   7192  *
   7193  *	rv == 0	success, spare not used if one was given
   7194  *	rv & 4	spare was used for L3
   7195  *	rv & 2	spare was used for L2
   7196  *	rv & 1	failure, spare is needed
   7197  *
   7198  * (NB: nobody in pmap checks for the virtual hole, so the system will hang.)
   7199  * The way to call this is:  first just call it without a spare page.
   7200  * If that fails, allocate a page and try again, passing the paddr of the
   7201  * new page as the spare.
   7202  * If spare is non-zero it is assumed to be the address of a zeroed physical
   7203  * page that can be used to generate a directory table or page table if needed.
   7204  *
   7205  * We keep track of valid (A_TLB_V bit set) and wired (A_TLB_TSB_LOCK bit set)
   7206  * mappings that are set here. We check both bits on the new data entered
   7207  * and increment counts, as well as decrementing counts if the bits are set
   7208  * in the value replaced by this call.
   7209  * The counters are 32 bit or 64 bit wide, depending on the kernel type we are
   7210  * running!
   7211  */
ENTRY(pseg_set_real)
#ifndef _LP64
	clruw	%o1					! Zero extend
	COMBINE(%o2, %o3, %o2)
	COMBINE(%o4, %o5, %o3)
#endif
	!!
	!! However we managed to get here we now have:
	!!
	!! %o0 = *pmap
	!! %o1 = addr
	!! %o2 = tte
	!! %o3 = paddr of spare page
	!!
	srax	%o1, HOLESHIFT, %o4			! Check for valid address
	brz,pt	%o4, 0f					! Should be zero or -1
	 inc	%o4					! Make -1 -> 0
	brz,pt	%o4, 0f
	 nop
#ifdef DEBUG
	ta	1					! Break into debugger
#endif
	retl
	 mov -2, %o0					! Error -- in hole!

0:
	! Walk/build the 3-level table.  %g1 accumulates the return value.
	ldx	[%o0 + PM_PHYS], %o4			! pmap->pm_segs
	clr	%g1
	srlx	%o1, STSHIFT, %o5
	and	%o5, STMASK, %o5
	sll	%o5, 3, %o5				! *8 for 64-bit entries
	add	%o4, %o5, %o4
0:
	DLFLUSH(%o4,%g5)
	ldxa	[%o4] ASI_PHYS_CACHED, %o5		! Load page directory pointer
	DLFLUSH2(%g5)

	brnz,a,pt %o5, 0f				! Null pointer?
	 mov	%o5, %o4
	brz,pn	%o3, 9f					! Have a spare?
	 mov	%o3, %o5
	casxa	[%o4] ASI_PHYS_CACHED, %g0, %o5		! Atomically install spare as L2
	brnz,pn	%o5, 0b					! Something changed?
	DLFLUSH(%o4, %o5)
	mov	%o3, %o4
	mov	2, %g1					! record spare used for L2
	clr	%o3					! and not available for L3
0:
	srlx	%o1, PDSHIFT, %o5
	and	%o5, PDMASK, %o5
	sll	%o5, 3, %o5
	add	%o4, %o5, %o4
0:
	DLFLUSH(%o4,%g5)
	ldxa	[%o4] ASI_PHYS_CACHED, %o5		! Load table directory pointer
	DLFLUSH2(%g5)

	brnz,a,pt %o5, 0f				! Null pointer?
	 mov	%o5, %o4
	brz,pn	%o3, 9f					! Have a spare?
	 mov	%o3, %o5
	casxa	[%o4] ASI_PHYS_CACHED, %g0, %o5		! Atomically install spare as L3
	brnz,pn	%o5, 0b					! Something changed?
	DLFLUSH(%o4, %o4)
	mov	%o3, %o4
	mov	4, %g1					! record spare used for L3
0:
	srlx	%o1, PTSHIFT, %o5			! Convert to ptab offset
	and	%o5, PTMASK, %o5
	sll	%o5, 3, %o5
	add	%o5, %o4, %o4

	DLFLUSH(%o4,%g5)
	ldxa	[%o4] ASI_PHYS_CACHED, %o5		! save old value in %o5
	stxa	%o2, [%o4] ASI_PHYS_CACHED		! Easier than shift+or
	DLFLUSH2(%g5)

	!! at this point we have:
	!!  %g1 = return value
	!!  %o0 = struct pmap * (where the counts are)
	!!  %o2 = new TTE
	!!  %o5 = old TTE

	!! see if stats needs an update
#ifdef SUN4V
	! Pick the TSB_LOCK (wired) bit for the CPU type we run on.
	sethi	%hi(cputyp), %g5
	ld	[%g5 + %lo(cputyp)], %g5
	cmp	%g5, CPU_SUN4V
	bne,pt	%icc, 0f
	 nop
	sethi	%hh(SUN4V_TLB_TSB_LOCK), %g5		! bit lives in the top 32 bits
	sllx	%g5, 32, %g5
	ba	1f
	 nop
0:
#endif
	set	SUN4U_TLB_TSB_LOCK, %g5
1:
	xor	%o2, %o5, %o3			! %o3 - what changed

	! Valid bit is bit 63 (A_TLB_V), so sign tests check "resident".
	brgez,pn %o3, 5f			! has resident changed? (we predict it has)
	 btst	%g5, %o3			! has wired changed?

	LDPTR	[%o0 + PM_RESIDENT], %o1	! gonna update resident count
	brlz	%o2, 0f
	 mov	1, %o4
	neg	%o4				! new is not resident -> decrement
0:	add	%o1, %o4, %o1
	STPTR	%o1, [%o0 + PM_RESIDENT]
	btst	%g5, %o3			! has wired changed?
5:	bz,pt	%xcc, 8f			! we predict it's not
	 btst	%g5, %o2			! don't waste delay slot, check if new one is wired
	LDPTR	[%o0 + PM_WIRED], %o1		! gonna update wired count
	bnz,pt	%xcc, 0f			! if wired changes, we predict it increments
	 mov	1, %o4
	neg	%o4				! new is not wired -> decrement
0:	add	%o1, %o4, %o1
	STPTR	%o1, [%o0 + PM_WIRED]
8:	retl
	 mov	%g1, %o0			! return %g1

9:	retl
	 or	%g1, 1, %o0			! spare needed, return flags + 1
   7335 
   7336 
   7337 /*
   7338  * clearfpstate()
   7339  *
   7340  * Drops the current fpu state, without saving it.
   7341  */
ENTRY(clearfpstate)
	rdpr	%pstate, %o1		! enable FPU
	wr	%g0, FPRS_FEF, %fprs	! FEF set, DL/DU dirty bits cleared
	or	%o1, PSTATE_PEF, %o1
	retl
	 wrpr	%o1, 0, %pstate
   7348 
   7349 /*
   7350  * savefpstate(f) struct fpstate *f;
   7351  *
   7352  * Store the current FPU state.
   7353  *
   7354  * Since the kernel may need to use the FPU and we have problems atomically
   7355  * testing and enabling the FPU, we leave here with the FPRS_FEF bit set.
   7356  * Normally this should be turned on in loadfpstate().
   7357  */
   7358  /* XXXXXXXXXX  Assume caller created a proper stack frame */
ENTRY(savefpstate)
!	flushw			! Make sure we don't have stack probs & lose hibits of %o
	! %o0 = struct fpstate *; %o5 = entry %fprs (its DL/DU bits tell us
	! which FPU halves are dirty and need saving).
	rdpr	%pstate, %o1		! enable FP before we begin
	rd	%fprs, %o5
	wr	%g0, FPRS_FEF, %fprs
	or	%o1, PSTATE_PEF, %o1
	wrpr	%o1, 0, %pstate

	stx	%fsr, [%o0 + FS_FSR]	! f->fs_fsr = getfsr();
	rd	%gsr, %o4		! Save %gsr
	st	%o4, [%o0 + FS_GSR]

	add	%o0, FS_REGS, %o2	! %o2 = destination for block stores
#ifdef DIAGNOSTIC
	btst	BLOCK_ALIGN, %o2	! Needs to be re-executed
	bnz,pn	%icc, 6f		! Check alignment
#endif
	 st	%g0, [%o0 + FS_QSIZE]	! f->fs_qsize = 0;
	btst	FPRS_DL|FPRS_DU, %o5	! Both FPU halves clean?
	bz,pt	%icc, 5f		! Then skip it

	 btst	FPRS_DL, %o5		! Lower FPU clean?
	membar	#Sync
	bz,a,pt	%icc, 1f		! Then skip it, but upper FPU not clean
	 add	%o2, 2*BLOCK_SIZE, %o2	! Skip a block

	stda	%f0, [%o2] ASI_BLK_P	! f->fs_f0 = etc;
	inc	BLOCK_SIZE, %o2
	stda	%f16, [%o2] ASI_BLK_P

	btst	FPRS_DU, %o5		! Upper FPU clean?
	bz,pt	%icc, 2f		! Then skip it
	 inc	BLOCK_SIZE, %o2
1:
	stda	%f32, [%o2] ASI_BLK_P
	inc	BLOCK_SIZE, %o2
	stda	%f48, [%o2] ASI_BLK_P
2:
	membar	#Sync			! Finish operation so we can
5:
	retl
	 wr	%g0, FPRS_FEF, %fprs	! Mark FPU clean

#ifdef DIAGNOSTIC
	!!
	!! Damn thing is *NOT* aligned on a 64-byte boundary
	!!
6:
	wr	%g0, FPRS_FEF, %fprs
	! XXX -- we should panic instead of silently entering debugger
	ta	1
	retl
	 nop
#endif
   7413 
   7414 /*
   7415  * Load FPU state.
   7416  */
   7417  /* XXXXXXXXXX  Should test to see if we only need to do a partial restore */
ENTRY(loadfpstate)
	! %o0 = struct fpstate * previously filled in by savefpstate().
	flushw			! Make sure we don't have stack probs & lose hibits of %o
	rdpr	%pstate, %o1		! enable FP before we begin
	ld	[%o0 + FS_GSR], %o4	! Restore %gsr
	set	PSTATE_PEF, %o2
	wr	%g0, FPRS_FEF, %fprs
	or	%o1, %o2, %o1
	wrpr	%o1, 0, %pstate
	ldx	[%o0 + FS_FSR], %fsr	! setfsr(f->fs_fsr);
	add	%o0, FS_REGS, %o3	! This is zero...
#ifdef DIAGNOSTIC
	btst	BLOCK_ALIGN, %o3
	bne,pn	%icc, 1f	! Only use block loads on aligned blocks
#endif
	 wr	%o4, %g0, %gsr
	membar	#Sync
	ldda	[%o3] ASI_BLK_P, %f0	! reload all four 16-register blocks
	inc	BLOCK_SIZE, %o3
	ldda	[%o3] ASI_BLK_P, %f16
	inc	BLOCK_SIZE, %o3
	ldda	[%o3] ASI_BLK_P, %f32
	inc	BLOCK_SIZE, %o3
	ldda	[%o3] ASI_BLK_P, %f48
	membar	#Sync			! Make sure loads are complete
	retl
	 wr	%g0, FPRS_FEF, %fprs	! Clear dirty bits

#ifdef DIAGNOSTIC
	!!
	!! Damn thing is *NOT* aligned on a 64-byte boundary
	!!
1:
	wr	%g0, FPRS_FEF, %fprs	! Clear dirty bits
	! XXX -- we should panic instead of silently entering debugger
	ta	1
	retl
	 nop
#endif
   7456 
   7457 /*
   7458  * ienab_bis(bis) int bis;
   7459  * ienab_bic(bic) int bic;
   7460  *
   7461  * Set and clear bits in the interrupt register.
   7462  */
   7463 
   7464 /*
   7465  * sun4u has separate asr's for clearing/setting the interrupt mask.
   7466  */
ENTRY(ienab_bis)
	retl				! set soft-interrupt bits in %o0
	 wr	%o0, 0, SET_SOFTINT	! SET_SOFTINT
   7470 
ENTRY(ienab_bic)
	retl				! clear soft-interrupt bits in %o0
	 wr	%o0, 0, CLEAR_SOFTINT	! CLEAR_SOFTINT
   7474 
   7475 /*
   7476  * send_softint(cpu, level, intrhand)
   7477  *
   7478  * Send a softint with an intrhand pointer so we can cause a vectored
   7479  * interrupt instead of a polled interrupt.  This does pretty much the same
   7480  * as interrupt_vector.  If cpu is -1 then send it to this CPU, if it's -2
   7481  * send it to any CPU, otherwise send it to a particular CPU.
   7482  *
   7483  * XXXX Dispatching to different CPUs is not implemented yet.
   7484  */
ENTRY(send_softint)
	! %o0 = cpu (unused -- see XXXX above), %o1 = level, %o2 = intrhand
	rdpr	%pstate, %g1
	andn	%g1, PSTATE_IE, %g2	! clear PSTATE.IE
	wrpr	%g2, 0, %pstate

	sethi	%hi(CPUINFO_VA+CI_INTRPENDING), %o3
	LDPTR	[%o2 + IH_PEND], %o5
	or	%o3, %lo(CPUINFO_VA+CI_INTRPENDING), %o3
	brnz	%o5, 1f			! already pending? then nothing to do
	 sll	%o1, PTRSHFT, %o5	! Find start of table for this IPL
	add	%o3, %o5, %o3
2:
	! Lock-free push of %o2 onto the per-IPL pending list via CAS.
	LDPTR	[%o3], %o5		! Load list head
	STPTR	%o5, [%o2+IH_PEND]	! Link our intrhand node in
	mov	%o2, %o4
	CASPTRA	[%o3] ASI_N, %o5, %o4
	cmp	%o4, %o5		! Did it work?
	bne,pn	CCCR, 2b		! No, try again
	 .empty

	mov	1, %o4			! Change from level to bitmask
	sllx	%o4, %o1, %o4
	wr	%o4, 0, SET_SOFTINT	! SET_SOFTINT
1:
	retl
	 wrpr	%g1, 0, %pstate		! restore PSTATE.IE
   7511 
   7512 
   7513 #define MICROPERSEC	(1000000)
   7514 
   7515 /*
   7516  * delay function
   7517  *
   7518  * void delay(N)  -- delay N microseconds
   7519  *
   7520  * Register usage: %o0 = "N" number of usecs to go (counts down to zero)
   7521  *		   %o1 = "timerblurb" (stays constant)
   7522  *		   %o2 = counter for 1 usec (counts down from %o1 to zero)
   7523  *
   7524  *
   7525  *	ci_cpu_clockrate should be tuned during CPU probe to the CPU
   7526  *	clockrate in Hz
   7527  *
   7528  */
ENTRY(delay)			! %o0 = n
#if 1
	rdpr	%tick, %o1					! Take timer snapshot
	sethi	%hi(CPUINFO_VA + CI_CLOCKRATE), %o2
	sethi	%hi(MICROPERSEC), %o3
	ldx	[%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)], %o4	! Get scale factor
	brnz,pt	%o4, 0f						! cached ticks/usec valid?
	 or	%o3, %lo(MICROPERSEC), %o3

	!! Calculate ticks/usec
	ldx	[%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %o4	! No, we need to calculate it
	udivx	%o4, %o3, %o4
	stx	%o4, [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)]	! Save it so we don't need to divide again
0:

	mulx	%o0, %o4, %o0					! Convert usec -> ticks
	rdpr	%tick, %o2					! Top of next itr
1:
	! Spin, decrementing the tick budget by the elapsed delta each
	! iteration; the delta form tolerates %tick wrap.
	sub	%o2, %o1, %o3					! How many ticks have gone by?
	sub	%o0, %o3, %o4					! Decrement count by that much
	movrgz	%o3, %o4, %o0					! But only if we're decrementing
	mov	%o2, %o1					! Remember last tick
	brgz,pt	%o0, 1b						! Done?
	 rdpr	%tick, %o2					! Get new tick

	retl
	 nop
#else
/* This code only works if %tick does not wrap */
	rdpr	%tick, %g1					! Take timer snapshot
	sethi	%hi(CPUINFO_VA + CI_CLOCKRATE), %g2
	sethi	%hi(MICROPERSEC), %o2
	ldx	[%g2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %g2	! Get scale factor
	or	%o2, %lo(MICROPERSEC), %o2
!	sethi	%hi(_C_LABEL(timerblurb), %o5			! This is if we plan to tune the clock
!	ld	[%o5 + %lo(_C_LABEL(timerblurb))], %o5		!  with respect to the counter/timer
	mulx	%o0, %g2, %g2					! Scale it: (usec * Hz) / 1 x 10^6 = ticks
	udivx	%g2, %o2, %g2
	add	%g1, %g2, %g2
!	add	%o5, %g2, %g2			5, %g2, %g2					! But this gets complicated
	rdpr	%tick, %g1					! Top of next itr
	mov	%g1, %g1	! Erratum 50
1:
	cmp	%g1, %g2
	bl,a,pn %xcc, 1b					! Done?
	 rdpr	%tick, %g1

	retl
	 nop
#endif
	/*
	 * If something's wrong with the standard setup do this stupid loop
	 * calibrated for a 143MHz processor.
	 */
Lstupid_delay:
	set	142857143/MICROPERSEC, %o1	! inner iterations per usec
Lstupid_loop:
	brnz,pt	%o1, Lstupid_loop
	 dec	%o1
	brnz,pt	%o0, Lstupid_delay
	 dec	%o0
	retl
	 nop
   7592 
   7593 /*
   7594  * next_tick(long increment)
   7595  *
   7596  * Sets the %tick_cmpr register to fire off in `increment' machine
   7597  * cycles in the future.  Also handles %tick wraparound.  In 32-bit
   7598  * mode we're limited to a 32-bit increment.
   7599  */
ENTRY(next_tick)
	! %o0 = increment (cycles); reprogram %tick_cmpr relative to the
	! previous compare value, keeping it in the future w.r.t. %tick.
	rd	TICK_CMPR, %o2
	rdpr	%tick, %o1

	mov	1, %o3		! Mask off high bits of these registers
	sllx	%o3, 63, %o3	! %o3 = bit 63 (INT_DIS / NPT bit position)
	andn	%o1, %o3, %o1
	andn	%o2, %o3, %o2
	cmp	%o1, %o2	! Did we wrap?  (tick < tick_cmpr)
	bgt,pt	%icc, 1f
	 add	%o1, 1000, %o1	! Need some slack so we don't lose intrs.

	/*
	 * Handle the unlikely case of %tick wrapping.
	 *
	 * This should only happen every 10 years or more.
	 *
	 * We need to increment the time base by the size of %tick in
	 * microseconds.  This will require some divides and multiplies
	 * which can take time.  So we re-read %tick.
	 *
	 */

	/* XXXXX NOT IMPLEMENTED */



1:
	add	%o2, %o0, %o2	! advance compare value by increment
	andn	%o2, %o3, %o4
	brlz,pn	%o4, Ltick_ovflw
	 cmp	%o2, %o1	! Has this tick passed?
	blt,pn	%xcc, 1b	! Yes
	 nop

#ifdef BB_ERRATA_1
	ba,a	2f		! workaround: write %tick_cmpr from aligned code
	 nop
#else
	retl
	 wr	%o2, TICK_CMPR
#endif

Ltick_ovflw:
/*
 * When we get here tick_cmpr has wrapped, but we don't know if %tick
 * has wrapped.  If bit 62 is set then we have not wrapped and we can
 * use the current value of %o4 as %tick.  Otherwise we need to return
 * to our loop with %o4 as %tick_cmpr (%o2).
 */
	srlx	%o3, 1, %o5	! %o5 = bit 62
	btst	%o5, %o1
	bz,pn	%xcc, 1b
	 mov	%o4, %o2
#ifdef BB_ERRATA_1
	ba,a	2f
	 nop
	.align	64
2:	wr	%o2, TICK_CMPR
	rd	TICK_CMPR, %g0
	retl
	 nop
#else
	retl
	 wr	%o2, TICK_CMPR
#endif
   7666 
   7667 /*
   7668  * next_stick(long increment)
   7669  *
   7670  * Sets the %stick_cmpr register to fire off in `increment' machine
   7671  * cycles in the future.  Also handles %stick wraparound.  In 32-bit
   7672  * mode we're limited to a 32-bit increment.
   7673  */
ENTRY(next_stick)
	! %o0 = increment (cycles); same algorithm as next_tick but for
	! the system tick (%stick / %stick_cmpr) registers.
	rd	STICK_CMPR, %o2
	rd	STICK, %o1

	mov	1, %o3		! Mask off high bits of these registers
	sllx	%o3, 63, %o3
	andn	%o1, %o3, %o1
	andn	%o2, %o3, %o2
	cmp	%o1, %o2	! Did we wrap?  (stick < stick_cmpr)
	bgt,pt	%xcc, 1f
	 add	%o1, 1000, %o1	! Need some slack so we don't lose intrs.

	/*
	 * Handle the unlikely case of %stick wrapping.
	 *
	 * This should only happen every 10 years or more.
	 *
	 * We need to increment the time base by the size of %stick in
	 * microseconds.  This will require some divides and multiplies
	 * which can take time.  So we re-read %stick.
	 *
	 */

	/* XXXXX NOT IMPLEMENTED */



1:
	add	%o2, %o0, %o2	! advance compare value by increment
	andn	%o2, %o3, %o4
	brlz,pn	%o4, Lstick_ovflw
	 cmp	%o2, %o1	! Has this stick passed?
	blt,pn	%xcc, 1b	! Yes
	 nop
	retl
	 wr	%o2, STICK_CMPR

Lstick_ovflw:
/*
 * When we get here %stick_cmpr has wrapped, but we don't know if %stick
 * has wrapped.  If bit 62 is set then we have not wrapped and we can
 * use the current value of %o4 as %stick.  Otherwise we need to return
 * to our loop with %o4 as %stick_cmpr (%o2).
 */
	srlx	%o3, 1, %o5	! %o5 = bit 62
	btst	%o5, %o1
	bz,pn	%xcc, 1b
	 mov	%o4, %o2
	retl
	 wr	%o2, STICK_CMPR
   7724 
   7725 /*
   7726  * next_stick_init()
   7727  *
   7728  * Sets the %stick_cmpr register to the value retrieved from %stick so
   7729  * next_stick() does not spend too much time in the function when called
   7730  * for the first time.
   7731  * This has been observed on (at least) a SPARC-T5 (sun4v) system where
   7732  * the %stick_cmpr ends up being less than the %stick value and then
   7733  * the stickitr() interrupt is never triggered.
   7734  */
ENTRY(next_stick_init)
	rd	STICK, %o0
	mov	1, %o1		! Mask off high bits of the register
	sllx	%o1, 63, %o1
	andn	%o0, %o1, %o0
	retl			! %stick_cmpr = %stick (see comment above)
	 wr	%o0, STICK_CMPR
   7742 
ENTRY(setjmp)
	! %o0 = jmp_buf: save %fp and return pc; returns 0 on direct call
	! (longjmp() later returns nonzero from here).
	save	%sp, -CC64FSZ, %sp	! Need a frame to return to.
	flushw				! spill windows so %fp/%i7 are in memory
	stx	%fp, [%i0+0]	! 64-bit stack pointer
	stx	%i7, [%i0+8]	! 64-bit return pc
	ret
	 restore	%g0, 0, %o0
   7750 
   7751 	.data
   7752 Lpanic_ljmp:
   7753 	.asciz	"longjmp botch"
   7754 	_ALIGN
   7755 	.text
   7756 
ENTRY(longjmp)
	! %o0 = jmp_buf written by setjmp(); resumes there returning 1.
	save	%sp, -CC64FSZ, %sp	! prepare to restore to (old) frame
	flushw
	mov	1, %i2
	ldx	[%i0+0], %fp	! get return stack
	ldx	[%i0+8], %i7	! get rpc
	ret
	 restore	%i2, 0, %o0
   7765 
   7766 #if defined(DDB) || defined(KGDB)
   7767 	/*
   7768 	 * Debug stuff.  Dump the trap registers into buffer & set tl=0.
   7769 	 *
   7770 	 *  %o0 = *ts
   7771 	 */
ENTRY(savetstate)
	! %o0 = *ts; dumps {%tstate,%tpc,%tnpc,%tt} for each trap level
	! into the buffer, unwinding %tl to 0; returns the original %tl.
	mov	%o0, %o1
	rdpr	%tl, %o0
	brz	%o0, 2f			! already at TL 0? nothing to save
	 mov	%o0, %o2
1:
	rdpr	%tstate, %o3
	stx	%o3, [%o1]
	deccc	%o2
	inc	8, %o1
	rdpr	%tpc, %o4
	stx	%o4, [%o1]
	inc	8, %o1
	rdpr	%tnpc, %o5
	stx	%o5, [%o1]
	inc	8, %o1
	rdpr	%tt, %o4
	stx	%o4, [%o1]
	inc	8, %o1
	bnz	1b
	 wrpr	%o2, 0, %tl		! step down to the next trap level
2:
	retl
	 nop
   7796 
   7797 	/*
   7798 	 * Debug stuff.  Restore trap registers from buffer.
   7799 	 *
   7800 	 *  %o0 = %tl
   7801 	 *  %o1 = *ts
   7802 	 *
   7803 	 * Maybe this should be re-written to increment tl instead of decrementing.
   7804 	 */
ENTRY(restoretstate)
	! %o0 = %tl, %o1 = *ts: inverse of savetstate(); reloads the
	! per-trap-level registers from the buffer and leaves %tl = %o0.
	flushw			! Make sure we don't have stack probs & lose hibits of %o
	brz,pn	%o0, 2f
	 mov	%o0, %o2
	wrpr	%o0, 0, %tl
1:
	ldx	[%o1], %o3
	deccc	%o2
	inc	8, %o1
	wrpr	%o3, 0, %tstate
	ldx	[%o1], %o4
	inc	8, %o1
	wrpr	%o4, 0, %tpc
	ldx	[%o1], %o5
	inc	8, %o1
	wrpr	%o5, 0, %tnpc
	ldx	[%o1], %o4
	inc	8, %o1
	wrpr	%o4, 0, %tt
	bnz	1b
	 wrpr	%o2, 0, %tl		! step down to the next trap level
2:
	retl
	 wrpr	%o0, 0, %tl
   7829 
   7830 	/*
   7831 	 * Switch to context in abs(%o0)
   7832 	 */
ENTRY(switchtoctx_us)
	! %o0 = context number; demap the secondary context from both
	! MMUs, then install %o0 as the new secondary context.
	set	DEMAP_CTX_SECONDARY, %o3
	stxa	%o3, [%o3] ASI_DMMU_DEMAP
	mov	CTX_SECONDARY, %o4
	stxa	%o3, [%o3] ASI_IMMU_DEMAP
	membar	#Sync
	stxa	%o0, [%o4] ASI_DMMU		! Maybe we should invalid
	sethi	%hi(KERNBASE), %o2
	membar	#Sync
	flush	%o2
	retl
	 nop
   7845 
ENTRY(switchtoctx_usiii)
	! %o0 = context number.  USIII demaps via the primary context, so
	! temporarily install the secondary context as primary, demap it,
	! set the new secondary, then restore the original primary.
	mov	CTX_SECONDARY, %o4
	ldxa	[%o4] ASI_DMMU, %o2		! Load secondary context
	mov	CTX_PRIMARY, %o5
	ldxa	[%o5] ASI_DMMU, %o1		! Save primary context
	membar	#LoadStore
	stxa	%o2, [%o5] ASI_DMMU		! Insert secondary for demap
	membar	#Sync
	set	DEMAP_CTX_PRIMARY, %o3
	stxa	%o3, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%o0, [%o4] ASI_DMMU		! Maybe we should invalid
	membar	#Sync
	stxa	%o1, [%o5] ASI_DMMU		! Restore primary context
	sethi	%hi(KERNBASE), %o2
	membar	#Sync
	flush	%o2
	retl
	 nop
   7865 
   7866 #ifndef _LP64
   7867 	/*
   7868 	 * Convert to 32-bit stack then call OF_sym2val()
   7869 	 */
ENTRY(OF_sym2val32)
	! %i0 = arg for OF_sym2val(); must be 8-byte aligned or we bail.
	save	%sp, -CC64FSZ, %sp
	btst	7, %i0
	bnz,pn	%icc, 1f
	 add	%sp, BIAS, %o1
	btst	1, %sp			! biased (64-bit) stack?
	movnz	%icc, %o1, %sp		! then strip the bias for the 32-bit callee
	call	_C_LABEL(OF_sym2val)
	 mov	%i0, %o0
1:
	ret
	 restore	%o0, 0, %o0
   7882 
   7883 	/*
   7884 	 * Convert to 32-bit stack then call OF_val2sym()
   7885 	 */
ENTRY(OF_val2sym32)
	! %i0 = arg for OF_val2sym(); must be 8-byte aligned or we bail.
	save	%sp, -CC64FSZ, %sp
	btst	7, %i0
	bnz,pn	%icc, 1f
	 add	%sp, BIAS, %o1
	btst	1, %sp			! biased (64-bit) stack?
	movnz	%icc, %o1, %sp		! then strip the bias for the 32-bit callee
	call	_C_LABEL(OF_val2sym)
	 mov	%i0, %o0
1:
	ret
	 restore	%o0, 0, %o0
   7898 #endif /* _LP64 */
   7899 #endif /* DDB */
   7900 
   7901 
   7902 #if defined(MULTIPROCESSOR)
   7903 /*
   7904  * IPI target function to setup a C compatible environment and call a MI function.
   7905  *
   7906  * On entry:
   7907  *	We are on one of the alternate set of globals
   7908  *	%g2 = function to call
   7909  *	%g3 = single argument to called function
   7910  */
   7911 ENTRY(sparc64_ipi_ccall)
   7912 #ifdef TRAPS_USE_IG
   7913 	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
   7914 #endif
   7915 	TRAP_SETUP(-CC64FSZ-TF_SIZE)
   7916 
   7917 #ifdef DEBUG
   7918 	rdpr	%tt, %o1	! debug
   7919 	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]! debug
   7920 #endif
   7921 	mov	%g3, %o0			! save argument of function to call
   7922 	mov	%g2, %o5			! save function pointer
   7923 
   7924 	wrpr	%g0, PSTATE_KERN, %pstate	! Get back to normal globals
   7925 	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
   7926 	rdpr	%tpc, %o2			! (pc)
   7927 	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
   7928 	rdpr	%tstate, %g1
   7929 	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
   7930 	rdpr	%tnpc, %o3
   7931 	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
   7932 	rd	%y, %o4
   7933 	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
   7934 	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
   7935 	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]
   7936 
   7937 	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
   7938 	stx	%o2, [%sp + CC64FSZ + STKB + TF_PC]
   7939 	stx	%o3, [%sp + CC64FSZ + STKB + TF_NPC]
   7940 	st	%o4, [%sp + CC64FSZ + STKB + TF_Y]
   7941 
   7942 	rdpr	%pil, %g5
   7943 	stb	%g5, [%sp + CC64FSZ + STKB + TF_PIL]
   7944 	stb	%g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]
   7945 
   7946 	rdpr	%tl, %g7
   7947 	dec	%g7
   7948 	movrlz	%g7, %g0, %g7
   7949 	wrpr	%g0, %g7, %tl
   7950 	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
   7951 	!! In our case we need to clear it before calling any C-code
   7952 	clr	%g4
   7953 	wr	%g0, ASI_NUCLEUS, %asi			! default kernel ASI
   7954 
   7955 	call %o5					! call function
   7956 	 nop
   7957 
   7958 	b	return_from_trap			! and return from IPI
   7959 	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1	! Load this for return_from_trap
   7960 
   7961 #endif
   7962 
ENTRY(paravirt_membar_sync)
	/*
	 * Store-before-load ordering with respect to matching logic
	 * on the hypervisor side.
	 *
	 * This is the same as membar_sync, but without patching or
	 * conditionalizing away the MEMBAR instruction on uniprocessor
	 * builds or boots -- because under virtualization, we still
	 * have to coordinate with a `device' backed by a hypervisor
	 * that is potentially on another physical CPU even if we
	 * observe only one virtual CPU as the guest.
	 *
	 * See common/lib/libc/arch/sparc64/atomic/membar_ops.S for why
	 * we avoid using the delay slot and keep this in sync with the
	 * implementation of membar_sync there.
	 *
	 * Takes no arguments, returns nothing, clobbers no registers.
	 */
	membar	#StoreLoad
	retl
	 nop
END(paravirt_membar_sync)
   7983 
	.data
	_ALIGN
#if NKSYMS || defined(DDB) || defined(MODULAR)
	! End/start pointers for the kernel symbol table.  Only the
	! storage is defined here; presumably filled in during early
	! startup -- verify against the users of esym/ssym.
	.globl	_C_LABEL(esym)
_C_LABEL(esym):
	POINTER	0
	.globl	_C_LABEL(ssym)
_C_LABEL(ssym):
	POINTER	0
#endif
	! One pointer-sized slot for the PROM vector (uninitialized .bss).
	.comm	_C_LABEL(promvec), PTRSZ

#ifdef DEBUG
	! 32-bit debug flag words referenced by debug-only code.
	.comm	_C_LABEL(trapdebug), 4
	.comm	_C_LABEL(pmapdebug), 4
#endif
   8000