/*	$NetBSD: locore.s,v 1.64.10.1 2014/05/18 17:45:27 rmind Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE3X,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
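					| (Decoding 0x3F8107, assuming the
					| 68030 TTx register layout: logical
					| base 0x00, mask 0x3F -> VAs 0 to
					| 0x3FFFFFFF, E (enable) bit set,
					| R/W masked, FC base 0, FC mask 7,
					| so all function codes match.)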
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address
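	| (0x3e0 is presumably the byte offset, within the level-A table,
	| of the descriptor that translates KERNBASE3X; copying its type
	| word and physical address duplicates the PROM's low-memory
	| mapping at the kernel's linked high address.)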

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for the
| low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp
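	| (The .long above is the hand-assembled encoding of that pmove,
	| presumably because the assembler did not accept a tt0 operand;
	| pmove'ing the zero long just pushed clears the E bit, which
	| disables the transparent-translation window.)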

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| get lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK3X-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
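	| (%sp now points at the base of the fake frame: 16 saved
	| registers, then stackadj, sr, pc and format/vector, which
	| should match the trapframe layout cpu_lwp_fork() copies.)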
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.l_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| a real bus error need not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup

/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
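	| (A user-level caller is expected to load the command into %d0,
	| the address into %a1 and the length into %d1, then execute a
	| "trap #12" instruction, per the calling convention noted above.)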

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)
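
| (INTERRUPT_SAVEREG and INTERRUPT_RESTOREREG, presumably provided by
| <machine/asm.h>, save and restore the scratch registers
| %d0/%d1/%a0/%a1 listed in the frame layout comment above.)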

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTOREREG

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>


/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get the caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee-saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC-generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts
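
| (Both getsp and getvbr copy the result into %a0 as well as %d0,
| presumably so that callers built for an ABI that returns pointers
| in %a0 see the same value.)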

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
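
| (_splraise only ever raises the IPL: if the current level is already
| at or above the requested one, the SR is left unchanged.  Either way
| the previous SR value is returned in %d0.)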

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts
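
| (For reference: the C-level delay(us) wrapper is presumably what
| passes (us << 8) here, so the loop above runs roughly
| (us << 8) / delay_divisor iterations.)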

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

| The end!