| NetBSD/sun3x locore.s, revision 1.46
| (HTML source-viewer navigation header converted to a comment)
      1 /*	$NetBSD: locore.s,v 1.46 2001/05/12 01:11:50 kleink Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1988 University of Utah.
      5  * Copyright (c) 1980, 1990, 1993
      6  *	The Regents of the University of California.  All rights reserved.
      7  *
      8  * This code is derived from software contributed to Berkeley by
      9  * the Systems Programming Group of the University of Utah Computer
     10  * Science Department.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the University of
     23  *	California, Berkeley and its contributors.
     24  * 4. Neither the name of the University nor the names of its contributors
     25  *    may be used to endorse or promote products derived from this software
     26  *    without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     38  * SUCH DAMAGE.
     39  *
     40  *	from: Utah $Hdr: locore.s 1.66 92/12/22$
     41  *	@(#)locore.s	8.6 (Berkeley) 5/27/94
     42  */
     43 
     44 #include "opt_compat_netbsd.h"
     45 #include "opt_compat_svr4.h"
     46 #include "opt_compat_sunos.h"
     47 #include "opt_lockdebug.h"
     48 
     49 #include "assym.h"
     50 #include <machine/asm.h>
     51 #include <machine/trap.h>
     52 
| Machine-dependent startup, trap, interrupt, and context-switch code
| for the sun3x.  Assembled with the GNU assembler, MIT (%-prefixed)
| register syntax; '|' introduces a comment.
     54 
	.data
| Save area for the PROM monitor's MMU CPU Root Pointer (CRP), two
| longwords, filled in by the startup code below.  (The startup code
| also briefly borrows this storage to build the tt0 register value.)
GLOBAL(mon_crp)
	.long	0,0
     58 
| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start).
| The temporary stack grows downward from 'start'.
ASGLOBAL(tmpstk)
ASGLOBAL(start)
     68 
| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0			| linked (high) VA -> current (low) load address
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
					| (hand-assembled; the assembler of the
					| day could not emit this instruction)
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump
    104 
L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for
| the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-			| zero = "tt0 disabled"
	.long	0xf0170800		| pmove	sp@,tt0  (hand-assembled)
	addql	#4,%sp
 
| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1| get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in p->p_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%sp,%a1			| %a1=trapframe
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|   trapframe
	movl	%a2,%a1@(FR_SP)		| %a2 == usp (from above)
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,%sp			| help DDB backtrace
	trap	#15			| should not get here
    168 
| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
GLOBAL(proc_do_uret)
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,%sp			| pop SSP and stack adjust count
	rte				| return to user via the hardware frame
    177 
/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8:	switchframe from before cpu_set_kpc
 * SP+4:	void *arg;
 * SP:  	u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	%sp@+,%a0		| function pointer
	jbsr	%a0@			| (*func)(arg) -- arg is at %sp@ during the call
	addql	#4,%sp			| toss the arg
	rts				| as cpu_switch would do
    198 
    199 | That is all the assembly startup code we need on the sun3x!
    200 | The rest of this is like the hp300/locore.s where possible.
    201 
    202 /*
    203  * Trap/interrupt vector routines
    204  */
    205 #include <m68k/m68k/trap_subr.s>
    206 
/*
 * Bus error and address error handlers.
 * Decode the 68030 exception frame's special status word (SSW) to
 * recover the faulting address, then (for bus errors) use ptestr to
 * ask the MMU why the access faulted, and dispatch to trap() via
 * faultstkadj with T_MMUFLT, T_ADDRERR, or T_BUSERR.
 */
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we dont separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    299 
/*
 * FP exceptions.
 */
| Unimplemented FP instruction: route to the FP emulator via trap().
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

| Unsupported FP data type: also routed to the FP emulator.
GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
    314 
/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
    340 
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.  Unexpected traps land here and are
 * reported via straytrap() before resuming.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
    352 
/*
 * Trap 0 is for system calls.
 * %d0 holds the syscall number on entry; the full register set is
 * saved so syscall() can read and modify the user's registers.
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
    369 
/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
    383 
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRACE,%d0		| trap type for fault/kbrkpt

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
    404 
/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRAP15,%d0		| trap type for fault/kbrkpt
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
    419 
ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2 		| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1		| bytes remaining to copy
Lbrkpt1:
	movl	%a0@+,%a1@+		| copy one longword at a time
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done
    463 
    464 /* Use common m68k sigreturn */
    465 #include <m68k/m68k/sigreturn.s>
    466 
/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

| Save/restore only the volatile registers (%d0,%d1,%a0,%a1) around
| the call into the C interrupt handler; the handler preserves the
| rest under the normal calling convention.  (The moveml register
| mask is bit-reversed between the predecrement and postincrement
| forms, hence 0xC0C0 vs 0x0303.)
#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE
    522 
/*
 * Interrupt counters (needed by vmstat).
 * intrnames[] and intrcnt[] are parallel arrays bracketed by the
 * eintrnames/eintrcnt end markers.
 */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
    541 
/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return
    599 
    600 /*
    601  * Initialization is at the beginning of this file, because the
    602  * kernel entry point needs to be at zero for compatibility with
    603  * the Sun boot loader.  This works on Sun machines because the
    604  * interrupt vector table for reset is NOT at address zero.
    605  * (The MMU has a "boot" bit that forces access to the PROM)
    606  */
    607 
    608 /*
    609  * Use common m68k sigcode.
    610  */
    611 #include <m68k/m68k/sigcode.s>
    612 #ifdef COMPAT_SUNOS
    613 #include <m68k/m68k/sunos_sigcode.s>
    614 #endif
    615 #ifdef COMPAT_SVR4
    616 #include <m68k/m68k/svr4_sigcode.s>
    617 #endif
    618 
    619 	.text
    620 
    621 /*
    622  * Primitives
    623  */
    624 
    625 /*
    626  * Use common m68k support routines.
    627  */
    628 #include <m68k/m68k/support.s>
    629 
| Reschedule-requested flag; cleared in cpu_switch() below.
| (Set by machine-independent scheduler code not visible in this file.)
BSS(want_resched,4)
    631 
    632 /*
    633  * Use common m68k process manipulation routines.
    634  */
    635 #include <m68k/m68k/proc_subr.s>
    636 
| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0		| pcb of the currently running process
ASBSS(nullpcb,SIZEOF_PCB)	| throw-away pcb used by switch_exit()
	.text
    648 
/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 *
 * MUST BE CALLED AT SPLHIGH!
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-			| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp		| pop the arg

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)	| never returns here
    673 
/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL		| wait for an interrupt at low IPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr	| block interrupts while checking queues
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle			| still nothing runnable
	jra	Lsw1			| found work; rejoin cpu_switch

Lbadsw:
	movl	#Lsw0,%sp@-		| panic("cpu_switch")
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/
    698 
/*
 * cpu_switch()
 * Hacked for sun3
 *
 * Select the highest-priority runnable process from sched_qs, save
 * the outgoing process's registers and FP state into its pcb, switch
 * the MMU context (CRP) if needed, and restore the incoming process's
 * state.  Returns 1 in %a0 (for alternate returns).
 * Entered with interrupts blocked and (LOCKDEBUG) sched_lock held.
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movw	%sr,%a1@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	movl	%d0,%d1
	negl	%d0
	andl	%d1,%d0			| isolate lowest set bit
	bfffo	%d0{#0:#32},%d1		| find its offset from the msb
	eorib	#31,%d1			| convert to queue number

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)		| should not be on a sleep channel
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)	| must be runnable
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1		| XXX - Make this work!
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
	movl	%usp,%a2		| grab USP (a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
	fsave	%a2@			| save FP state
	tstb	%a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	%fp0-%fp7,%a2@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  %a0=curproc, %a1=curpcb
	 */

	clrl	%a0@(P_BACK)		| clear back link
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	%a0@(P_VMSPACE),%a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	%a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#ifdef PMAP_DEBUG
	/* When debugging just call _pmap_switch(). */
	movl	%a2@(VM_PMAP),%a2 	| pmap = vm->vm_map.pmap
					| (fixed: destination was missing the
					| '%' register prefix, which fails to
					| assemble with %-prefixed syntax)
	pea	%a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr
#else
	/* Otherwise, use this inline version. */
	lea	_C_LABEL(kernel_crp),%a3 | our CPU Root Ptr. (CRP)
	movl	%a2@(VM_PMAP),%a2 	| pmap = vm->vm_map.pmap
	movl	%a2@(PM_A_PHYS),%d0	| phys = pmap->pm_a_phys
	cmpl	%a3@(4),%d0		|  == kernel_crp.rp_addr ?
	jeq	Lsame_mmuctx		| skip loadcrp/flush
	/* OK, it is a new MMU context.  Load it up. */
	movl	%d0,%a3@(4)
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a3@,%crp		| load new user root pointer
Lsame_mmuctx:
#endif

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use %d0,%d1,%a0,%a1
	 */
	moveml	%a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	tstb	%a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lresfprest:
	frestore %a0@			| restore state
Lres_skip:
	movw	%a1@(PCB_PS),%d0	| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,%d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	%d0,%sr			| OK, restore PS
	movl	#1,%a0			| return 1 (for alternate returns)
	rts
    858 
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 *
 * In:      %sp@(4) = pointer to the pcb to fill in.
 * Saves:   SR, USP, the callee-saved registers %d2-%d7/%a2-%a7
 *          (moveml mask 0xFCFC), and FPU state when an FPU exists.
 * Out:     0 is loaded into %a0 before returning.
 * Clobbers: %d0 is untouched; uses %a0/%a1 as scratch.
 */
ENTRY(savectx)
	movl	%sp@(4),%a1		| %a1 = pcb argument
	movw	%sr,%a1@(PCB_PS)	| save status register
	movl	%usp,%a0		| grab USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	fsave	%a0@			| save FP state
	tstb	%a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	movl	#0,%a0			| return 0
	rts
    881 
/* suline() */

#ifdef DEBUG
	.data
| Debug knobs: when fulltflush is non-zero, the TBIS/TBIAS/TBIAU
| entry points below fall back to flushing the entire TLB.
| NOTE(review): fullcflush is not referenced anywhere in this part
| of the file; presumably used by debug code elsewhere -- confirm.
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif
    892 
/*
 * Invalidate entire TLB.
 * Also invalidates the on-chip data cache.  Clobbers %d0.
 * _TBIA is an internal entry point used by the other TBI* routines.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha				| flush all TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate on-chip d-cache
	rts
    902 
/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 *
 * In:  %sp@(4) = virtual address to flush.
 * Clobbers %d0/%a0.  Under DEBUG the fulltflush knob forces a
 * full TLB flush via _TBIA instead.
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	%sp@(4),%a0		| %a0 = VA argument
	pflush	#0,#0,%a0@		| flush address from both sides
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate on-chip data cache
	rts
    916 
/*
 * Invalidate supervisor side of TLB
 * Clobbers %d0; also invalidates the on-chip data cache.
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate on-chip d-cache
	rts
    929 
/*
 * Invalidate user side of TLB
 * Clobbers %d0; also invalidates the on-chip data cache.
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate on-chip d-cache
	rts
    942 
/*
 * Invalidate instruction cache
 * Clobbers %d0.
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate i-cache
	rts
    950 
/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts				| deliberate no-op (see NOTE above)
    961 
| Deliberate no-op; see the "Invalidate data cache" NOTE above DCIA.
ENTRY(DCIS)
__DCIS:
	rts
    965 
/*
 * Invalidate data cache.
 * Clobbers %d0.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate on-chip d-cache
	rts
    973 
/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

| Invalidate the on-chip data cache.  Clobbers %d0.
ENTRY(PCIA)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr			| invalidate on-chip d-cache
	rts
    980 
| No-op stub: presumably no external cache on this hardware -- confirm.
ENTRY(ecacheon)
	rts
    983 
| No-op stub: presumably no external cache on this hardware -- confirm.
ENTRY(ecacheoff)
	rts
    986 
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 *
 * The result is left in both %d0 and %a0.
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0			| copy result to %a0 as well
	rts
   1000 
| Return the source function code register (%sfc) in %d0 and %a0.
ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts
   1005 
| Return the destination function code register (%dfc) in %d0 and %a0.
ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts
   1010 
| Return the vector base register (%vbr) in %d0 and %a0.
ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts
   1015 
| setvbr(va): load the vector base register from %sp@(4).  Clobbers %d0.
ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts
   1020 
/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 *
 * Invalidates the caches and flushes the whole TLB before loading
 * the new root pointer.  Clobbers %d0/%a0.
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts
   1032 
| getcrp(crpp): store the current CPU Root Pointer at *crpp.
ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts
   1037 
/*
 * Get the physical address of the PTE for a given VA.
 *
 * In:  %sp@(4) = virtual address.
 * Out: %a0 = address of the PTE (from the MMU table search).
 * The ptestr searches function code 5 (supervisor data space)
 * down to level 7.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	rts
   1045 
   1046 /*
   1047  * Set processor priority level calls.  Most are implemented with
   1048  * inline asm expansions.  However, we need one instantiation here
   1049  * in case some non-optimized code makes external references.
   1050  * Most places will use the inlined functions param.h supplies.
   1051  */
   1052 
| _getsr(): return the current status register, zero-extended to
| 32 bits, in %d0.
| Fix: the former "movl %a1,%d0" here overwrote the just-read SR
| with the undefined contents of %a1, returning garbage.  Removed
| (compare _spl below, which returns the old SR the same way).
ENTRY(_getsr)
	clrl	%d0			| zero all 32 bits first...
	movw	%sr,%d0			| ...then read the 16-bit SR
	rts
   1058 
| _spl(newpsl): install a new processor status word and return the
| old SR (zero-extended) in %d0.  Clobbers %d1.
ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0			| %d0 = old SR (return value)
	movl	%sp@(4),%d1
	movw	%d1,%sr			| install the new value
	rts
   1065 
| _splraise(newpsl): like _spl(), but never lowers the interrupt
| priority -- the new PSL is installed only when the current IPL
| (old SR & PSL_HIGHIPL) is below it.  Returns the old SR in %d0;
| clobbers %d1.
ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0			| %d0 = old SR (return value)
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1 	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr			| old >= new: leave SR alone
	movl	%sp@(4),%d1
	movw	%d1,%sr			| raise to the requested level
Lsplr:
	rts
   1077 
/*
 * Save and restore 68881 state.
 *
 * m68881_save(fpf): fsave the FP state frame at %sp@(4); when the
 * frame is non-null, also save the FP data and control registers.
 * Clobbers %a0.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts
   1090 
| m68881_restore(fpf): inverse of m68881_save -- reload the FP data
| and control registers when the frame at %sp@(4) is non-null, then
| frestore the state frame.  Clobbers %a0.
ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts
   1100 
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 *
 * Clobbers %d0/%d1; loops until the scaled count is exhausted.
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0			| count -= divisor
	jgt	L_delay			| until it goes non-positive
	rts
   1132 
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
| (The right-hand-side constants come from headers included
| earlier in this file.)
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

|The end!
   1143