| Home | History | Annotate | Line # | Download | only in sun3x
| locore.s revision 1.6
      1 /*	$NetBSD: locore.s,v 1.6 1997/02/11 00:58:33 gwr Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1988 University of Utah.
      5  * Copyright (c) 1980, 1990, 1993
      6  *	The Regents of the University of California.  All rights reserved.
      7  *
      8  * This code is derived from software contributed to Berkeley by
      9  * the Systems Programming Group of the University of Utah Computer
     10  * Science Department.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the University of
     23  *	California, Berkeley and its contributors.
     24  * 4. Neither the name of the University nor the names of its contributors
     25  *    may be used to endorse or promote products derived from this software
     26  *    without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     38  * SUCH DAMAGE.
     39  *
     40  *	from: Utah $Hdr: locore.s 1.66 92/12/22$
     41  *	@(#)locore.s	8.6 (Berkeley) 5/27/94
     42  */
     43 
     44 #include "assym.h"
     45 #include <machine/trap.h>
     46 
      47 | Low-level startup, trap, and interrupt handling for the sun3x.
     48 
     49 	.data
     50 	.globl	_mon_crp
     51 _mon_crp:
     52 	.long	0,0
     53 
     54 | This is for kvm_mkdb, and should be the address of the beginning
     55 | of the kernel text segment (not necessarily the same as kernbase).
     56 	.text
     57 	.globl	_kernel_text
     58 _kernel_text:
     59 
     60 | This is the entry point, as well as the end of the temporary stack
     61 | used during process switch (one 8K page ending at start)
     62 	.globl tmpstk
     63 tmpstk:
     64 	.globl start
     65 start:
     66 | The first step, after disabling interrupts, is to map enough of the kernel
     67 | into high virtual address space so that we can use position dependent code.
     68 | This is a tricky task on the sun3x because the MMU is already enabled and
     69 | the ROM monitor provides no indication of where the root MMU table is mapped.
     70 | Therefore we must use one of the 68030's 'transparent translation' registers
     71 | to define a range in the address space where the MMU translation is
     72 | turned off.  Once this is complete we can modify the MMU table directly
     73 | without the need for it to be mapped into virtual memory.
     74 | All code must be position independent until otherwise noted, as the
     75 | boot loader has loaded us into low memory but all the symbols in this
     76 | code have been linked high.
     77 	movw	#PSL_HIGHIPL, sr	| no interrupts
     78 	movl	#KERNBASE, a5		| for vtop conversion
     79 	lea	_mon_crp, a0		| where to store the CRP
     80 	subl	a5, a0
     81 	| Note: borrowing mon_crp for tt0 setup...
     82 	movl	#0x3F8107, a0@		| map the low 1GB v=p with the
     83 	pmove	a0@, tt0		| transparent translation reg0
     84 
     85 | In order to map the kernel into high memory we will copy the root table
     86 | entry which maps the 16 megabytes of memory starting at 0x0 into the
     87 | entry which maps the 16 megabytes starting at KERNBASE.
     88 	pmove	crp, a0@		| Get monitor CPU root pointer
     89 	movl	a0@(4), a1		| 2nd word is PA of level A table
     90 
     91 	movl	a1, a0			| compute the descriptor address
     92 	addl	#0x3e0, a1		| for VA starting at KERNBASE
     93 	movl	a0@, a1@		| copy descriptor type
     94 	movl	a0@(4), a1@(4)		| copy physical address
     95 
     96 | Kernel is now double mapped at zero and KERNBASE.
     97 | Force a long jump to the relocated code (high VA).
     98 	movl	#IC_CLEAR, d0		| Flush the I-cache
     99 	movc	d0, cacr
    100 	jmp L_high_code:l		| long jump
    101 
    102 L_high_code:
    103 | We are now running in the correctly relocated kernel, so
    104 | we are no longer restricted to position-independent code.
    105 | It is handy to leave transparent translation enabled while
    106 | for the low 1GB while __bootstrap() is doing its thing.
    107 
    108 | Do bootstrap stuff needed before main() gets called.
    109 | Our boot loader leaves a copy of the kernel's exec header
    110 | just before the start of the kernel text segment, so the
    111 | kernel can sanity-check the DDB symbols at [end...esym].
    112 | Pass the struct exec at tmpstk-32 to __bootstrap().
    113 	lea	tmpstk-32, sp
    114 	movl	#0,a6
    115 	jsr	__bootstrap		| See _startup.c
    116 
    117 | Now turn off the transparent translation of the low 1GB.
    118 | (this also flushes the ATC)
    119 	clrl	sp@-
    120 	pmove	sp@,tt0
    121 	addql	#4,sp
    122 
    123 | Now that __bootstrap() is done using the PROM functions,
    124 | we can safely set the sfc/dfc to something != FC_CONTROL
    125 	moveq	#FC_USERD, d0		| make movs access "user data"
    126 	movc	d0, sfc			| space for copyin/copyout
    127 	movc	d0, dfc
    128 
    129 | Setup process zero user/kernel stacks.
    130 	movl	_proc0paddr,a1		| get proc0 pcb addr
    131 	lea	a1@(USPACE-4),sp	| set SSP to last word
    132 	movl	#USRSTACK-4,a2
    133 	movl	a2,usp			| init user SP
    134 
    135 | Note curpcb was already set in __bootstrap().
    136 | Will do fpu initialization during autoconfig (see fpu.c)
    137 | The interrupt vector table and stack are now ready.
    138 | Interrupts will be enabled later, AFTER  autoconfiguration
    139 | is finished, to avoid spurrious interrupts.
    140 
    141 /*
    142  * Final preparation for calling main.
    143  *
    144  * Create a fake exception frame that returns to user mode,
    145  * and save its address in p->p_md.md_regs for cpu_fork().
    146  * The new frames for process 1 and 2 will be adjusted by
    147  * cpu_set_kpc() to arrange for a call to a kernel function
    148  * before the new process does its rte out to user mode.
    149  */
    150 	clrw	sp@-			| tf_format,tf_vector
    151 	clrl	sp@-			| tf_pc (filled in later)
    152 	movw	#PSL_USER,sp@-		| tf_sr for user mode
    153 	clrl	sp@-			| tf_stackadj
    154 	lea	sp@(-64),sp		| tf_regs[16]
    155 	movl	sp,a1			| a1=trapframe
    156 	lea	_proc0,a0		| proc0.p_md.md_regs =
    157 	movl	a1,a0@(P_MDREGS)	|   trapframe
    158 	movl	a2,a1@(FR_SP)		| a2 == usp (from above)
    159 	pea	a1@			|
    160 	jbsr	_main			| main(&trapframe)
    161 	addql	#4,sp			| help backtrace work
    162 	trap	#15			| should not get here
    163 
    164 | This is used by cpu_fork() to return to user mode.
    165 | It is called with SP pointing to a struct trapframe.
    166 	.globl	_proc_do_uret
    167 _proc_do_uret:
    168 	movl	sp@(FR_SP),a0		| grab and load
    169 	movl	a0,usp			|   user SP
    170 	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
    171 	addql	#8,sp			| pop SSP and stack adjust count
    172 	rte
    173 
    174 /*
    175  * proc_trampoline:
    176  * This is used by cpu_set_kpc() to "push" a function call onto the
    177  * kernel stack of some process, very much like a signal delivery.
    178  * When we get here, the stack has:
    179  *
    180  * SP+8:	switchframe from before cpu_set_kpc
    181  * SP+4:	void *proc;
    182  * SP:  	u_long func;
    183  *
    184  * On entry, the switchframe pushed by cpu_set_kpc has already been
    185  * popped off the stack, so all this needs to do is pop the function
    186  * pointer into a register, call it, then pop the arg, and finally
    187  * return using the switchframe that remains on the stack.
    188  */
    189 	.globl	_proc_trampoline
    190 _proc_trampoline:
    191 	movl	sp@+,a0			| function pointer
    192 	jbsr	a0@			| (*func)(procp)
    193 	addql	#4,sp			| toss the arg
    194 	rts				| as cpu_switch would do
    195 
    196 | That is all the assembly startup code we need on the sun3x!
    197 | The rest of this is like the hp300/locore.s where possible.
    198 
    199 /*
    200  * Trap/interrupt vector routines
    201  */
    202 
    203 	.globl _buserr, _addrerr, _illinst, _zerodiv, _chkinst
    204 	.globl _trapvinst, _privinst, _trace, _badtrap, _fmterr
    205 	.globl _trap0, _trap1, _trap2, _trap12, _trap15
    206 	.globl _coperr, _fpfline, _fpunsupp
    207 
    208 	.globl	_trap, _nofault, _longjmp
    209 _buserr:
    210 	tstl	_nofault		| device probe?
    211 	jeq	_addrerr		| no, handle as usual
    212 	movl	_nofault,sp@-		| yes,
    213 	jbsr	_longjmp		|  longjmp(nofault)
    214 _addrerr:
    215 	clrl	sp@-			| stack adjust count
    216 	moveml	#0xFFFF,sp@-		| save user registers
    217 	movl	usp,a0			| save the user SP
    218 	movl	a0,sp@(FR_SP)		|   in the savearea
    219 	lea	sp@(FR_HW),a1		| grab base of HW berr frame
    220 	moveq	#0,d0
    221 	movw	a1@(10),d0		| grab SSW for fault processing
    222 	btst	#12,d0			| RB set?
    223 	jeq	LbeX0			| no, test RC
    224 	bset	#14,d0			| yes, must set FB
    225 	movw	d0,a1@(10)		| for hardware too
    226 LbeX0:
    227 	btst	#13,d0			| RC set?
    228 	jeq	LbeX1			| no, skip
    229 	bset	#15,d0			| yes, must set FC
    230 	movw	d0,a1@(10)		| for hardware too
    231 LbeX1:
    232 	btst	#8,d0			| data fault?
    233 	jeq	Lbe0			| no, check for hard cases
    234 	movl	a1@(16),d1		| fault address is as given in frame
    235 	jra	Lbe10			| thats it
    236 Lbe0:
    237 	btst	#4,a1@(6)		| long (type B) stack frame?
    238 	jne	Lbe4			| yes, go handle
    239 	movl	a1@(2),d1		| no, can use save PC
    240 	btst	#14,d0			| FB set?
    241 	jeq	Lbe3			| no, try FC
    242 	addql	#4,d1			| yes, adjust address
    243 	jra	Lbe10			| done
    244 Lbe3:
    245 	btst	#15,d0			| FC set?
    246 	jeq	Lbe10			| no, done
    247 	addql	#2,d1			| yes, adjust address
    248 	jra	Lbe10			| done
    249 Lbe4:
    250 	movl	a1@(36),d1		| long format, use stage B address
    251 	btst	#15,d0			| FC set?
    252 	jeq	Lbe10			| no, all done
    253 	subql	#2,d1			| yes, adjust address
    254 Lbe10:
    255 	movl	d1,sp@-			| push fault VA
    256 	movl	d0,sp@-			| and padded SSW
    257 	movw	a1@(6),d0		| get frame format/vector offset
    258 	andw	#0x0FFF,d0		| clear out frame format
    259 	cmpw	#12,d0			| address error vector?
    260 	jeq	Lisaerr			| yes, go to it
    261 
    262 /* MMU-specific code to determine reason for bus error. */
    263 	movl	d1,a0			| fault address
    264 	movl	sp@,d0			| function code from ssw
    265 	btst	#8,d0			| data fault?
    266 	jne	Lbe10a
    267 	movql	#1,d0			| user program access FC
    268 					| (we dont separate data/program)
    269 	btst	#5,a1@			| supervisor mode?
    270 	jeq	Lbe10a			| if no, done
    271 	movql	#5,d0			| else supervisor program access
    272 Lbe10a:
    273 	ptestr	d0,a0@,#7		| do a table search
    274 	pmove	psr,sp@			| save result
    275 	movb	sp@,d1
    276 	btst	#2,d1			| invalid? (incl. limit viol and berr)
    277 	jeq	Lmightnotbemerr		| no -> wp check
    278 	btst	#7,d1			| is it MMU table berr?
    279 	jeq	Lismerr			| no, must be fast
    280 	jra	Lisberr1		| real bus err needs not be fast
    281 Lmightnotbemerr:
    282 	btst	#3,d1			| write protect bit set?
    283 	jeq	Lisberr1		| no, must be bus error
    284 	movl	sp@,d0			| ssw into low word of d0
    285 	andw	#0xc0,d0		| write protect is set on page:
    286 	cmpw	#0x40,d0		| was it read cycle?
    287 	jeq	Lisberr1		| yes, was not WPE, must be bus err
    288 /* End of MMU-specific bus error code. */
    289 
    290 Lismerr:
    291 	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
    292 	jra	Ltrapnstkadj		| and deal with it
    293 Lisaerr:
    294 	movl	#T_ADDRERR,sp@-		| mark address error
    295 	jra	Ltrapnstkadj		| and deal with it
    296 Lisberr1:
    297 	clrw	sp@			| re-clear pad word
    298 Lisberr:
    299 	movl	#T_BUSERR,sp@-		| mark bus error
    300 Ltrapnstkadj:
    301 	jbsr	_trap			| handle the error
    302 	lea	sp@(12),sp		| pop value args
    303 	movl	sp@(FR_SP),a0		| restore user SP
    304 	movl	a0,usp			|   from save area
    305 	movw	sp@(FR_ADJ),d0		| need to adjust stack?
    306 	jne	Lstkadj			| yes, go to it
    307 	moveml	sp@+,#0x7FFF		| no, restore most user regs
    308 	addql	#8,sp			| toss SSP and stkadj
    309 	jra	rei			| all done
    310 Lstkadj:
    311 	lea	sp@(FR_HW),a1		| pointer to HW frame
    312 	addql	#8,a1			| source pointer
    313 	movl	a1,a0			| source
    314 	addw	d0,a0			|  + hole size = dest pointer
    315 	movl	a1@-,a0@-		| copy
    316 	movl	a1@-,a0@-		|  8 bytes
    317 	movl	a0,sp@(FR_SP)		| new SSP
    318 	moveml	sp@+,#0x7FFF		| restore user registers
    319 	movl	sp@,sp			| and our SP
    320 	jra	rei			| all done
    321 
    322 /*
    323  * FP exceptions.
    324  */
    325 _fpfline:
    326 	clrl	sp@-			| stack adjust count
    327 	moveml	#0xFFFF,sp@-		| save registers
    328 	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
    329 	jra	fault			| do it
    330 
    331 _fpunsupp:
    332 	clrl	sp@-			| stack adjust count
    333 	moveml	#0xFFFF,sp@-		| save registers
    334 	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
    335 	jra	fault			| do it
    336 
    337 /*
    338  * Handles all other FP coprocessor exceptions.
    339  * Note that since some FP exceptions generate mid-instruction frames
    340  * and may cause signal delivery, we need to test for stack adjustment
    341  * after the trap call.
    342  */
    343 	.globl	_fpfault
    344 _fpfault:
    345 	clrl	sp@-		| stack adjust count
    346 	moveml	#0xFFFF,sp@-	| save user registers
    347 	movl	usp,a0		| and save
    348 	movl	a0,sp@(FR_SP)	|   the user stack pointer
    349 	clrl	sp@-		| no VA arg
    350 	movl	_curpcb,a0	| current pcb
    351 	lea	a0@(PCB_FPCTX),a0 | address of FP savearea
    352 	fsave	a0@		| save state
    353 	tstb	a0@		| null state frame?
    354 	jeq	Lfptnull	| yes, safe
    355 	clrw	d0		| no, need to tweak BIU
    356 	movb	a0@(1),d0	| get frame size
    357 	bset	#3,a0@(0,d0:w)	| set exc_pend bit of BIU
    358 Lfptnull:
    359 	fmovem	fpsr,sp@-	| push fpsr as code argument
    360 	frestore a0@		| restore state
    361 	movl	#T_FPERR,sp@-	| push type arg
    362 	jra	Ltrapnstkadj	| call trap and deal with stack cleanup
    363 
    364 /*
    365  * Coprocessor and format errors can generate mid-instruction stack
    366  * frames and cause signal delivery hence we need to check for potential
    367  * stack adjustment.
    368  */
    369 _coperr:
    370 	clrl	sp@-		| stack adjust count
    371 	moveml	#0xFFFF,sp@-
    372 	movl	usp,a0		| get and save
    373 	movl	a0,sp@(FR_SP)	|   the user stack pointer
    374 	clrl	sp@-		| no VA arg
    375 	clrl	sp@-		| or code arg
    376 	movl	#T_COPERR,sp@-	| push trap type
    377 	jra	Ltrapnstkadj	| call trap and deal with stack adjustments
    378 
    379 _fmterr:
    380 	clrl	sp@-		| stack adjust count
    381 	moveml	#0xFFFF,sp@-
    382 	movl	usp,a0		| get and save
    383 	movl	a0,sp@(FR_SP)	|   the user stack pointer
    384 	clrl	sp@-		| no VA arg
    385 	clrl	sp@-		| or code arg
    386 	movl	#T_FMTERR,sp@-	| push trap type
    387 	jra	Ltrapnstkadj	| call trap and deal with stack adjustments
    388 
    389 /*
    390  * Other exceptions only cause four and six word stack frame and require
    391  * no post-trap stack adjustment.
    392  */
    393 _illinst:
    394 	clrl	sp@-
    395 	moveml	#0xFFFF,sp@-
    396 	moveq	#T_ILLINST,d0
    397 	jra	fault
    398 
    399 _zerodiv:
    400 	clrl	sp@-
    401 	moveml	#0xFFFF,sp@-
    402 	moveq	#T_ZERODIV,d0
    403 	jra	fault
    404 
    405 _chkinst:
    406 	clrl	sp@-
    407 	moveml	#0xFFFF,sp@-
    408 	moveq	#T_CHKINST,d0
    409 	jra	fault
    410 
    411 _trapvinst:
    412 	clrl	sp@-
    413 	moveml	#0xFFFF,sp@-
    414 	moveq	#T_TRAPVINST,d0
    415 	jra	fault
    416 
    417 _privinst:
    418 	clrl	sp@-
    419 	moveml	#0xFFFF,sp@-
    420 	moveq	#T_PRIVINST,d0
    421 	jra	fault
    422 
    423 	.globl	fault
    424 fault:
    425 	movl	usp,a0			| get and save
    426 	movl	a0,sp@(FR_SP)		|   the user stack pointer
    427 	clrl	sp@-			| no VA arg
    428 	clrl	sp@-			| or code arg
    429 	movl	d0,sp@-			| push trap type
    430 	jbsr	_trap			| handle trap
    431 	lea	sp@(12),sp		| pop value args
    432 	movl	sp@(FR_SP),a0		| restore
    433 	movl	a0,usp			|   user SP
    434 	moveml	sp@+,#0x7FFF		| restore most user regs
    435 	addql	#8,sp			| pop SP and stack adjust
    436 	jra	rei			| all done
    437 
    438 	.globl	_straytrap
    439 _badtrap:
    440 	clrl	sp@-			| stack adjust count
    441 	moveml	#0xFFFF,sp@-		| save std frame regs
    442 	jbsr	_straytrap		| report
    443 	moveml	sp@+,#0xFFFF		| restore regs
    444 	addql	#4, sp			| stack adjust count
    445 	jra	rei			| all done
    446 
    447 /*
    448  * Trap 0 is for system calls
    449  */
    450 	.globl	_syscall
    451 _trap0:
    452 	clrl	sp@-			| stack adjust count
    453 	moveml	#0xFFFF,sp@-		| save user registers
    454 	movl	usp,a0			| save the user SP
    455 	movl	a0,sp@(FR_SP)		|   in the savearea
    456 	movl	d0,sp@-			| push syscall number
    457 	jbsr	_syscall		| handle it
    458 	addql	#4,sp			| pop syscall arg
    459 	movl	sp@(FR_SP),a0		| grab and restore
    460 	movl	a0,usp			|   user SP
    461 	moveml	sp@+,#0x7FFF		| restore most registers
    462 	addql	#8,sp			| pop SP and stack adjust
    463 	jra	rei			| all done
    464 
    465 /*
    466  * Trap 1 is either:
    467  * sigreturn (native NetBSD executable)
    468  * breakpoint (HPUX executable)
    469  */
    470 _trap1:
    471 #if 0 /* COMPAT_HPUX */
    472 	/* If process is HPUX, this is a user breakpoint. */
    473 	jne	trap15			| breakpoint
    474 #endif
    475 	/* fall into sigreturn */
    476 
    477 /*
    478  * The sigreturn() syscall comes here.  It requires special handling
    479  * because we must open a hole in the stack to fill in the (possibly much
    480  * larger) original stack frame.
    481  */
    482 sigreturn:
    483 	lea	sp@(-84),sp		| leave enough space for largest frame
    484 	movl	sp@(84),sp@		| move up current 8 byte frame
    485 	movl	sp@(88),sp@(4)
    486 	movl	#84,sp@-		| default: adjust by 84 bytes
    487 	moveml	#0xFFFF,sp@-		| save user registers
    488 	movl	usp,a0			| save the user SP
    489 	movl	a0,sp@(FR_SP)		|   in the savearea
    490 	movl	#SYS_sigreturn,sp@-	| push syscall number
    491 	jbsr	_syscall		| handle it
    492 	addql	#4,sp			| pop syscall#
    493 	movl	sp@(FR_SP),a0		| grab and restore
    494 	movl	a0,usp			|   user SP
    495 	lea	sp@(FR_HW),a1		| pointer to HW frame
    496 	movw	sp@(FR_ADJ),d0		| do we need to adjust the stack?
    497 	jeq	Lsigr1			| no, just continue
    498 	moveq	#92,d1			| total size
    499 	subw	d0,d1			|  - hole size = frame size
    500 	lea	a1@(92),a0		| destination
    501 	addw	d1,a1			| source
    502 	lsrw	#1,d1			| convert to word count
    503 	subqw	#1,d1			| minus 1 for dbf
    504 Lsigrlp:
    505 	movw	a1@-,a0@-		| copy a word
    506 	dbf	d1,Lsigrlp		| continue
    507 	movl	a0,a1			| new HW frame base
    508 Lsigr1:
    509 	movl	a1,sp@(FR_SP)		| new SP value
    510 	moveml	sp@+,#0x7FFF		| restore user registers
    511 	movl	sp@,sp			| and our SP
    512 	jra	rei			| all done
    513 
    514 /*
    515  * Trap 2 is one of:
    516  * NetBSD: not used (ignore)
    517  * SunOS:  Some obscure FPU operation
    518  * HPUX:   sigreturn
    519  */
    520 _trap2:
    521 #if 0 /* COMPAT_HPUX */
    522 	/* XXX:	If HPUX, this is a user breakpoint. */
    523 	jne	sigreturn
    524 #endif
    525 	/* fall into trace (NetBSD or SunOS) */
    526 
    527 /*
    528  * Trace (single-step) trap.  Kernel-mode is special.
    529  * User mode traps are simply passed on to trap().
    530  */
    531 _trace:
    532 	clrl	sp@-			| stack adjust count
    533 	moveml	#0xFFFF,sp@-
    534 	moveq	#T_TRACE,d0
    535 	movw	sp@(FR_HW),d1		| get PSW
    536 	andw	#PSL_S,d1		| from system mode?
    537 	jne	kbrkpt			| yes, kernel breakpoint
    538 	jra	fault			| no, user-mode fault
    539 
    540 /*
    541  * Trap 15 is used for:
    542  *	- GDB breakpoints (in user programs)
    543  *	- KGDB breakpoints (in the kernel)
    544  *	- trace traps for SUN binaries (not fully supported yet)
    545  * User mode traps are passed simply passed to trap()
    546  */
    547 _trap15:
    548 	clrl	sp@-			| stack adjust count
    549 	moveml	#0xFFFF,sp@-
    550 	moveq	#T_TRAP15,d0
    551 	movw	sp@(FR_HW),d1		| get PSW
    552 	andw	#PSL_S,d1		| from system mode?
    553 	jne	kbrkpt			| yes, kernel breakpoint
    554 	jra	fault			| no, user-mode fault
    555 
    556 kbrkpt:	| Kernel-mode breakpoint or trace trap. (d0=trap_type)
    557 	| Save the system sp rather than the user sp.
    558 	movw	#PSL_HIGHIPL,sr		| lock out interrupts
    559 	lea	sp@(FR_SIZE),a6		| Save stack pointer
    560 	movl	a6,sp@(FR_SP)		|  from before trap
    561 
    562 	| If we are not on tmpstk switch to it.
    563 	| (so debugger can change the stack pointer)
    564 	movl	a6,d1
    565 	cmpl	#tmpstk,d1
    566 	jls	Lbrkpt2 		| already on tmpstk
    567 	| Copy frame to the temporary stack
    568 	movl	sp,a0			| a0=src
    569 	lea	tmpstk-96,a1		| a1=dst
    570 	movl	a1,sp			| sp=new frame
    571 	moveq	#FR_SIZE,d1
    572 Lbrkpt1:
    573 	movl	a0@+,a1@+
    574 	subql	#4,d1
    575 	bgt	Lbrkpt1
    576 
    577 Lbrkpt2:
    578 	| Call the special kernel debugger trap handler.
    579 	| Do not call trap() to handle it, so that we can
    580 	| set breakpoints in trap() if we want.  We know
    581 	| the trap type is either T_TRACE or T_BREAKPOINT.
    582 	movl	d0,sp@-			| push trap type
    583 	jbsr	_trap_kdebug
    584 	addql	#4,sp			| pop args
    585 
    586 	| The stack pointer may have been modified, or
    587 	| data below it modified (by kgdb push call),
    588 	| so push the hardware frame at the current sp
    589 	| before restoring registers and returning.
    590 	movl	sp@(FR_SP),a0		| modified sp
    591 	lea	sp@(FR_SIZE),a1		| end of our frame
    592 	movl	a1@-,a0@-		| copy 2 longs with
    593 	movl	a1@-,a0@-		| ... predecrement
    594 	movl	a0,sp@(FR_SP)		| sp = h/w frame
    595 	moveml	sp@+,#0x7FFF		| restore all but sp
    596 	movl	sp@,sp			| ... and sp
    597 	rte				| all done
    598 
    599 /*
    600  * Trap 12 is the entry point for the cachectl "syscall"
    601  *	cachectl(command, addr, length)
    602  * command in d0, addr in a1, length in d1
    603  */
    604 	.globl	_cachectl
    605 _trap12:
    606 	movl	d1,sp@-			| push length
    607 	movl	a1,sp@-			| push addr
    608 	movl	d0,sp@-			| push command
    609 	jbsr	_cachectl		| do it
    610 	lea	sp@(12),sp		| pop args
    611 	jra	rei			| all done
    612 
    613 /*
    614  * Interrupt handlers.  Most are auto-vectored,
    615  * and hard-wired the same way on all sun3 models.
    616  * Format in the stack is:
    617  *   d0,d1,a0,a1, sr, pc, vo
    618  */
    619 
    620 #define INTERRUPT_SAVEREG \
    621 	moveml	#0xC0C0,sp@-
    622 
    623 #define INTERRUPT_RESTORE \
    624 	moveml	sp@+,#0x0303
    625 
    626 /*
    627  * This is the common auto-vector interrupt handler,
    628  * for which the CPU provides the vector=0x18+level.
    629  * These are installed in the interrupt vector table.
    630  */
    631 	.align	2
    632 	.globl	__isr_autovec, _isr_autovec
    633 __isr_autovec:
    634 	INTERRUPT_SAVEREG
    635 	jbsr	_isr_autovec
    636 	INTERRUPT_RESTORE
    637 	jra	rei
    638 
    639 /* clock: see clock.c */
    640 	.align	2
    641 	.globl	__isr_clock, _clock_intr
    642 __isr_clock:
    643 	INTERRUPT_SAVEREG
    644 	jbsr	_clock_intr
    645 	INTERRUPT_RESTORE
    646 	jra	rei
    647 
    648 | Handler for all vectored interrupts (i.e. VME interrupts)
    649 	.align	2
    650 	.globl	__isr_vectored, _isr_vectored
    651 __isr_vectored:
    652 	INTERRUPT_SAVEREG
    653 	jbsr	_isr_vectored
    654 	INTERRUPT_RESTORE
    655 	jra	rei
    656 
    657 #undef	INTERRUPT_SAVEREG
    658 #undef	INTERRUPT_RESTORE
    659 
    660 /* interrupt counters (needed by vmstat) */
    661 	.globl	_intrcnt,_eintrcnt,_intrnames,_eintrnames
    662 _intrnames:
    663 	.asciz	"spur"	| 0
    664 	.asciz	"lev1"	| 1
    665 	.asciz	"lev2"	| 2
    666 	.asciz	"lev3"	| 3
    667 	.asciz	"lev4"	| 4
    668 	.asciz	"clock"	| 5
    669 	.asciz	"lev6"	| 6
    670 	.asciz	"nmi"	| 7
    671 _eintrnames:
    672 
    673 	.data
    674 	.even
    675 _intrcnt:
    676 	.long	0,0,0,0,0,0,0,0,0,0
    677 _eintrcnt:
    678 	.text
    679 
    680 /*
    681  * Emulation of VAX REI instruction.
    682  *
    683  * This code is (mostly) un-altered from the hp300 code,
    684  * except that sun machines do not need a simulated SIR
    685  * because they have a real software interrupt register.
    686  *
    687  * This code deals with checking for and servicing ASTs
    688  * (profiling, scheduling) and software interrupts (network, softclock).
    689  * We check for ASTs first, just like the VAX.  To avoid excess overhead
    690  * the T_ASTFLT handling code will also check for software interrupts so we
    691  * do not have to do it here.  After identifying that we need an AST we
    692  * drop the IPL to allow device interrupts.
    693  *
    694  * This code is complicated by the fact that sendsig may have been called
    695  * necessitating a stack cleanup.
    696  */
    697 
    698 	.globl	_astpending
    699 	.globl	rei
    700 rei:
    701 #ifdef	DIAGNOSTIC
    702 	tstl	_panicstr		| have we paniced?
    703 	jne	Ldorte			| yes, do not make matters worse
    704 #endif
    705 	tstl	_astpending		| AST pending?
    706 	jeq	Ldorte			| no, done
    707 Lrei1:
    708 	btst	#5,sp@			| yes, are we returning to user mode?
    709 	jne	Ldorte			| no, done
    710 	movw	#PSL_LOWIPL,sr		| lower SPL
    711 	clrl	sp@-			| stack adjust
    712 	moveml	#0xFFFF,sp@-		| save all registers
    713 	movl	usp,a1			| including
    714 	movl	a1,sp@(FR_SP)		|    the users SP
    715 	clrl	sp@-			| VA == none
    716 	clrl	sp@-			| code == none
    717 	movl	#T_ASTFLT,sp@-		| type == async system trap
    718 	jbsr	_trap			| go handle it
    719 	lea	sp@(12),sp		| pop value args
    720 	movl	sp@(FR_SP),a0		| restore user SP
    721 	movl	a0,usp			|   from save area
    722 	movw	sp@(FR_ADJ),d0		| need to adjust stack?
    723 	jne	Laststkadj		| yes, go to it
    724 	moveml	sp@+,#0x7FFF		| no, restore most user regs
    725 	addql	#8,sp			| toss SP and stack adjust
    726 	rte				| and do real RTE
    727 Laststkadj:
    728 	lea	sp@(FR_HW),a1		| pointer to HW frame
    729 	addql	#8,a1			| source pointer
    730 	movl	a1,a0			| source
    731 	addw	d0,a0			|  + hole size = dest pointer
    732 	movl	a1@-,a0@-		| copy
    733 	movl	a1@-,a0@-		|  8 bytes
    734 	movl	a0,sp@(FR_SP)		| new SSP
    735 	moveml	sp@+,#0x7FFF		| restore user registers
    736 	movl	sp@,sp			| and our SP
    737 Ldorte:
    738 	rte				| real return
    739 
    740 /*
    741  * Initialization is at the beginning of this file, because the
    742  * kernel entry point needs to be at zero for compatibility with
    743  * the Sun boot loader.  This works on Sun machines because the
    744  * interrupt vector table for reset is NOT at address zero.
    745  * (The MMU has a "boot" bit that forces access to the PROM)
    746  */
    747 
    748 /*
    749  * Signal "trampoline" code (18 bytes).  Invoked from RTE setup by sendsig().
    750  *
    751  * Stack looks like:
    752  *
    753  *	sp+0 ->	signal number
    754  *	sp+4	signal specific code
    755  *	sp+8	pointer to signal context frame (scp)
    756  *	sp+12	address of handler
    757  *	sp+16	saved hardware state
    758  *			.
    759  *			.
    760  *	scp+0->	beginning of signal context frame
    761  */
    762 	.globl	_sigcode, _esigcode
    763 	.data
    764 	.align	2
    765 _sigcode:	/* Found at address: 0x0DFFffdc */
    766 	movl	sp@(12),a0		| signal handler addr	(4 bytes)
    767 	jsr	a0@			| call signal handler	(2 bytes)
    768 	addql	#4,sp			| pop signo		(2 bytes)
    769 	trap	#1			| special syscall entry	(2 bytes)
    770 	movl	d0,sp@(4)		| save errno		(4 bytes)
    771 	moveq	#1,d0			| syscall == exit	(2 bytes)
    772 	trap	#0			| exit(errno)		(2 bytes)
    773 	.align	2
    774 _esigcode:
    775 	.text
    776 
    777 /* XXX - hp300 still has icode here... */
    778 
    779 /*
    780  * Primitives
    781  */
    782 #include <machine/asm.h>
    783 
    784 /*
    785  * non-local gotos
    786  */
    787 ENTRY(setjmp)
    788 	movl	sp@(4),a0	| savearea pointer
    789 	moveml	#0xFCFC,a0@	| save d2-d7/a2-a7
    790 	movl	sp@,a0@(48)	| and return address
    791 	moveq	#0,d0		| return 0
    792 	rts
    793 
    794 ENTRY(longjmp)
    795 	movl	sp@(4),a0
    796 	moveml	a0@+,#0xFCFC
    797 	movl	a0@,sp@
    798 	moveq	#1,d0
    799 	rts
    800 
    801 /*
    802  * The following primitives manipulate the run queues.
    803  * _whichqs tells which of the 32 queues _qs have processes in them.
    804  * Setrunqueue puts processes into queues, Remrunqueue removes them
    805  * from queues.  The running process is on no queue, other processes
    806  * are on a queue related to p->p_priority, divided by 4 actually to
    807  * shrink the 0-127 range of priorities into the 32 available queues.
    808  */
    809 
    810 	.globl	_whichqs,_qs,_cnt,_panic
    811 	.globl	_curproc
    812 	.comm	_want_resched,4
    813 
    814 /*
    815  * setrunqueue(p)
    816  *
    817  * Call should be made at splclock(), and p->p_stat should be SRUN
    818  */
    819 ENTRY(setrunqueue)
    820 	movl	sp@(4),a0
    821 #ifdef DIAGNOSTIC
    822 	tstl	a0@(P_BACK)
    823 	jne	Lset1
    824 	tstl	a0@(P_WCHAN)
    825 	jne	Lset1
    826 	cmpb	#SRUN,a0@(P_STAT)
    827 	jne	Lset1
    828 #endif
    829 	clrl	d0
    830 	movb	a0@(P_PRIORITY),d0
    831 	lsrb	#2,d0
    832 	movl	_whichqs,d1
    833 	bset	d0,d1
    834 	movl	d1,_whichqs
    835 	lslb	#3,d0
    836 	addl	#_qs,d0
    837 	movl	d0,a0@(P_FORW)
    838 	movl	d0,a1
    839 	movl	a1@(P_BACK),a0@(P_BACK)
    840 	movl	a0,a1@(P_BACK)
    841 	movl	a0@(P_BACK),a1
    842 	movl	a0,a1@(P_FORW)
    843 	rts
    844 #ifdef DIAGNOSTIC
    845 Lset1:
    846 	movl	#Lset2,sp@-
    847 	jbsr	_panic
    848 Lset2:
    849 	.asciz	"setrunqueue"
    850 	.even
    851 #endif
    852 
    853 /*
    854  * remrunqueue(p)
    855  *
    856  * Call should be made at splclock().
    857  */
ENTRY(remrunqueue)
	movl	sp@(4),a0		| proc *p
	clrl	d0
	movb	a0@(P_PRIORITY),d0	| d0 = p->p_priority
	lsrb	#2,d0			| d0 = queue number (priority / 4)
	movl	_whichqs,d1
	bclr	d0,d1			| if ((d1 & (1 << d0)) == 0)
	jeq	Lrem2			|   panic (empty queue)
	movl	d1,_whichqs		| tentatively mark the queue empty
	movl	a0@(P_FORW),a1
	movl	a0@(P_BACK),a1@(P_BACK)	| p->p_forw->p_back = p->p_back
	movl	a0@(P_BACK),a1
	movl	a0@(P_FORW),a1@(P_FORW)	| p->p_back->p_forw = p->p_forw
	movl	#_qs,a1
	movl	d0,d1
	lslb	#3,d1			| qnum * 8 (size of a queue header)
	addl	d1,a1			| a1 = &qs[qnum]
	cmpl	a1@(P_FORW),a1		| queue really empty now?
	jeq	Lrem1			| yes, leave its whichqs bit clear
	movl	_whichqs,d1
	bset	d0,d1			| no, set the bit back
	movl	d1,_whichqs
Lrem1:
	clrl	a0@(P_BACK)		| mark p as no longer on a run queue
	rts
Lrem2:
	movl	#Lrem3,sp@-
	jbsr	_panic			| panic("remrunqueue")
Lrem3:
	.asciz	"remrunqueue"
	.even
    889 
| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.globl	_curpcb
	.globl	_masterpaddr	| XXX compatibility (debuggers)
	.data
_masterpaddr:			| XXX compatibility (debuggers)
_curpcb:
	.long	0		| pcb of the currently running process
	.comm	nullpcb,SIZEOF_PCB	| throw-away pcb used by switch_exit
	.text
    903 
    904 /*
    905  * At exit of a process, do a cpu_switch for the last time.
    906  * Switch to a safe stack and PCB, and deallocate the process's resources.
    907  * The ipl is high enough to prevent the memory from being reallocated.
    908  */
ENTRY(switch_exit)
	movl	sp@(4),a0		| struct proc *p
	movl	#nullpcb,_curpcb	| save state into garbage pcb
	lea	tmpstk,sp		| goto a tmp stack
	movl	a0,sp@-			| pass proc ptr down

	/* Free old process's u-area. */
	movl	#USPACE,sp@-		| size of u-area
	movl	a0@(P_ADDR),sp@-	| address of process's u-area
	movl	_kernel_map,sp@-	| map it was allocated in
	jbsr	_kmem_free		| deallocate it
	lea	sp@(12),sp		| pop args

	jra	_cpu_switch		| find and run some other process
    923 
    924 /*
    925  * When no processes are on the runq, cpu_switch() branches to idle
    926  * to wait for something to come ready.
    927  */
	.data
	.globl _Idle_count
_Idle_count:
	.long	0		| statistic: number of idle loop iterations
	.text

	.globl	Idle
Lidle:
	stop	#PSL_LOWIPL	| wait (interrupts enabled) for something to do
Idle:
	movw	#PSL_HIGHIPL,sr	| lock out interrupts while checking
	addql	#1, _Idle_count
	tstl	_whichqs	| anything become runnable?
	jeq	Lidle		| no, stop again
	movw	#PSL_LOWIPL,sr	| yes, drop ipl and
	jra	Lsw1		|   go pick a process

Lbadsw:
	movl	#Lsw0,sp@-
	jbsr	_panic		| panic("cpu_switch")
	/*NOTREACHED*/
    949 
    950 /*
    951  * cpu_switch()
    952  * Hacked for sun3
    953  * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
 * XXX - Should we use p->p_addr instead of curpcb? -gwr
    955  */
ENTRY(cpu_switch)
	movl	_curpcb,a1		| current pcb
	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
#ifdef notyet
	movl	_curproc,sp@-		| remember last proc running
#endif
	clrl	_curproc		| no process is "current" while we look

Lsw1:
	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	clrl	d0			| d0 = candidate queue number
	lea	_whichqs,a0
	movl	a0@,d1			| d1 = run queue bitmask
Lswchk:
	btst	d0,d1			| queue d0 non-empty?
	jne	Lswfnd			| yes, go take it
	addqb	#1,d0			| no, try the next queue
	cmpb	#32,d0
	jne	Lswchk
	jra	Idle			| all 32 queues empty: go idle
Lswfnd:
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	movl	a0@,d1			| and check again...
	bclr	d0,d1
	jeq	Lsw1			| proc moved, rescan
	movl	d1,a0@			| update whichqs
	moveq	#1,d1			| double check for higher priority
	lsll	d0,d1			| process (which may have snuck in
	subql	#1,d1			| while we were finding this one)
	andl	a0@,d1
	jeq	Lswok			| no one got in, continue
	movl	a0@,d1
	bset	d0,d1			| otherwise put this one back
	movl	d1,a0@
	jra	Lsw1			| and rescan
Lswok:
	movl	d0,d1
	lslb	#3,d1			| convert queue number to index
	addl	#_qs,d1			| locate queue (q)
	movl	d1,a1
	cmpl	a1@(P_FORW),a1		| anyone on queue?
	jeq	Lbadsw			| no, panic
	movl	a1@(P_FORW),a0		| p = q->p_forw
	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	a0@(P_FORW),a1		| q = p->p_forw
	movl	a0@(P_BACK),a1@(P_BACK)	| q->p_back = p->p_back
	cmpl	a0@(P_FORW),d1		| anyone left on queue?
	jeq	Lsw2			| no, skip
	movl	_whichqs,d1
	bset	d0,d1			| yes, reset bit
	movl	d1,_whichqs
Lsw2:
	movl	a0,_curproc		| curproc = p
	clrl	_want_resched		| we are acting on the request now
#ifdef notyet
	movl	sp@+,a1			| XXX - Make this work!
	cmpl	a0,a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_curpcb,a1
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
	movl	usp,a2			| grab USP (a2 has been saved)
	movl	a2,a1@(PCB_USP)		| and save it

	tstl	_fpu_type		| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
	fsave	a2@			| save FP state
	tstb	a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  a0=curproc, a1=curpcb
	 */

#ifdef DIAGNOSTIC
	tstl	a0@(P_WCHAN)		| runnable proc should not be asleep
	jne	Lbadsw
	cmpb	#SRUN,a0@(P_STAT)	| and must be in state SRUN
	jne	Lbadsw
#endif
	clrl	a0@(P_BACK)		| clear back link
	movl	a0@(P_ADDR),a1		| get p_addr
	movl	a1,_curpcb

	/* Our pmap does not need pmap_activate() */
	/* Just load the new CPU Root Pointer (MMU) */
	/* XXX - Skip if oldproc has same pm_a_tbl? */
	movl	a0@(P_VMSPACE),a0	| vm = p->p_vmspace
	lea	a0@(VM_PMAP_MMUCRP),a0	| a0 = &vm->vm_pmap.pm_mmucrp

	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a0@,crp			| load new user root pointer

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use d0,d1,a0,a1
	 */
	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	a1@(PCB_USP),a0
	movl	a0,usp			| and USP

	tstl	_fpu_type		| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	tstb	a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lresfprest:
	frestore a0@			| restore state
Lres_skip:
	movw	a1@(PCB_PS),d0		| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	d0,sr			| OK, restore PS
	moveq	#1,d0			| return 1 (for alternate returns)
	rts
   1090 
   1091 /*
   1092  * savectx(pcb)
   1093  * Update pcb, saving current processor state.
   1094  */
ENTRY(savectx)
	movl	sp@(4),a1		| a1 = (struct pcb *) arg
	movw	sr,a1@(PCB_PS)		| save current status register
	movl	usp,a0			| grab USP
	movl	a0,a1@(PCB_USP)		| and save it
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers

	tstl	_fpu_type		| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	fsave	a0@			| save FP state
	tstb	a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	moveq	#0,d0			| return 0
	rts
   1113 
   1114 /* suline() `040 only */
   1115 
#ifdef DEBUG
	.data
	.globl	fulltflush, fullcflush
fulltflush:
	.long	0		| nonzero: TBIS/TBIAS/TBIAU flush the whole TLB
fullcflush:
	.long	0		| debug flag (not referenced in this file chunk)
	.text
#endif
   1125 
   1126 /*
   1127  * Invalidate entire TLB.
   1128  */
ENTRY(TBIA)
__TBIA:
	pflusha				| flush all TLB (ATC) entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
   1135 
   1136 /*
   1137  * Invalidate any TLB entry for given VA (TB Invalidate Single)
   1138  */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	fulltflush		| being conservative?
	jne	__TBIA			| yes, flush entire TLB
#endif
	movl	sp@(4),a0		| a0 = virtual address to invalidate
	pflush	#0,#0,a0@		| flush address from both sides
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip data cache
	rts
   1149 
   1150 /*
   1151  * Invalidate supervisor side of TLB
   1152  */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	fulltflush		| being conservative?
	jne	__TBIA			| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries (fc=4, mask=4)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
   1162 
   1163 /*
   1164  * Invalidate user side of TLB
   1165  */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	fulltflush		| being conservative?
	jne	__TBIA			| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries (fc=0, mask=4)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
   1175 
   1176 /*
   1177  * Invalidate instruction cache
   1178  */
ENTRY(ICIA)
	movl	#IC_CLEAR,d0		| d0 = i-cache clear bits
	movc	d0,cacr			| invalidate i-cache
	rts
   1183 
   1184 /*
   1185  * Invalidate data cache.
   1186  * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
   1187  * problems with DC_WA.  The only cases we have to worry about are context
   1188  * switch and TLB changes, both of which are handled "in-line" in resume
   1189  * and TBI*.
   1190  */
ENTRY(DCIA)
__DCIA:
	rts				| nothing to do

ENTRY(DCIS)
__DCIS:
	rts				| nothing to do

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	rts				| nothing to do
   1204 
   1205 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
   1206 
ENTRY(PCIA)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts				| nothing to do

ENTRY(ecacheoff)
	rts				| nothing to do
   1217 
   1218 /*
 * Get the caller's current SP value.
   1220  * Note that simply taking the address of a local variable in a C function
   1221  * doesn't work because callee saved registers may be outside the stack frame
   1222  * defined by A6 (e.g. GCC generated code).
   1223  *
   1224  * [I don't think the ENTRY() macro will do the right thing with this -- glass]
   1225  */
	.globl	_getsp
_getsp:
	movl	sp,d0			| get current SP
	addql	#4,d0			| compensate for return address,
	rts				|  i.e. return the caller's SP
   1231 
| Accessors for the MMU function-code and vector-base control registers.
ENTRY(getsfc)
	movc	sfc,d0		| return source function code
	rts

ENTRY(getdfc)
	movc	dfc,d0		| return destination function code
	rts

ENTRY(getvbr)
	movc vbr, d0		| return vector base register
	rts

ENTRY(setvbr)
	movl sp@(4), d0
	movc d0, vbr		| install new vector base register
	rts
   1248 
   1249 /*
   1250  * Load a new CPU Root Pointer (CRP) into the MMU.
   1251  *	void	loadcrp(struct mmu_rootptr *);
   1252  */
ENTRY(loadcrp)
	movl	sp@(4),a0		| arg1: &CRP
	movl	#CACHE_CLR,d0		| d0 = cache clear bits
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a0@,crp			| load new user root pointer
	rts
   1260 
   1261 /*
   1262  * Set processor priority level calls.  Most are implemented with
   1263  * inline asm expansions.  However, we need one instantiation here
   1264  * in case some non-optimized code makes external references.
   1265  * Most places will use the inlined function param.h supplies.
   1266  */
   1267 
| int _spl(int newsr) -- install a new status register value,
| returning the previous one (zero-extended to a long).
ENTRY(_spl)
	moveq	#0,d0		| clear upper bits of the return value
	movl	sp@(4),d1	| d1 = requested sr value
	movw	sr,d0		| fetch the old sr to return
	movw	d1,sr		| and install the new one
	rts
   1274 
| int getsr(void) -- return the status register, zero-extended.
ENTRY(getsr)
	clrl	d0		| zero the whole register first
	movw	sr,d0		| then drop in the 16-bit sr
	rts
   1279 
ENTRY(_insque)
	movw	sr,d0			| save current ipl
	movw	#PSL_HIGHIPL,sr		| atomic
	movl	sp@(8),a0		| where to insert (after)
	movl	sp@(4),a1		| element to insert (e)
	movl	a0@,a1@			| e->next = after->next
	movl	a0,a1@(4)		| e->prev = after
	movl	a1,a0@			| after->next = e
	movl	a1@,a0			| a0 = e->next
	movl	a1,a0@(4)		| e->next->prev = e
	movw	d0,sr			| restore previous ipl
	rts
   1292 
ENTRY(_remque)
	movw	sr,d0			| save current ipl
	movw	#PSL_HIGHIPL,sr		| atomic
	movl	sp@(4),a0		| element to remove (e)
	movl	a0@,a1			| a1 = e->next
	movl	a0@(4),a0		| a0 = e->prev
	movl	a0,a1@(4)		| e->next->prev = e->prev
	movl	a1,a0@			| e->prev->next = e->next
	movw	d0,sr			| restore previous ipl
	rts
   1303 
   1304 /*
   1305  * Save and restore 68881 state.
   1306  */
| m68881_save(fp) -- dump FPU state into the given save area.
ENTRY(m68881_save)
	movl	sp@(4),a0		| save area pointer
	fsave	a0@			| save state
	tstb	a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts
   1316 
| m68881_restore(fp) -- reload FPU state from the given save area.
ENTRY(m68881_restore)
	movl	sp@(4),a0		| save area pointer
	tstb	a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lm68881rdone:
	frestore a0@			| restore state
	rts
   1326 
   1327 /*
   1328  * _delay(unsigned N)
   1329  * Delay for at least (N/256) microseconds.
   1330  * This routine depends on the variable:  delay_divisor
   1331  * which should be set based on the CPU clock rate.
   1332  * XXX: Currently this is set in sun3_startup.c based on the
   1333  * XXX: CPU model but this should be determined at run time...
   1334  */
	.globl	__delay
__delay:
	| d0 = arg = (usecs << 8)
	movl	sp@(4),d0
	| d1 = delay_divisor;
	movl	_delay_divisor,d1
L_delay:
	subl	d1,d0		| spin until the scaled count is consumed
	jgt	L_delay		| (loop timing is what calibrates the delay)
	rts
   1345 
   1346 
| Define some address aliases (absolute symbols), mostly so DDB
| can print useful info.
	.globl	_kernbase
	.set	_kernbase,KERNBASE
	.globl	_dvma_base
	.set	_dvma_base,DVMA_SPACE_START
	.globl	_prom_start
	.set	_prom_start,MONSTART
	.globl	_prom_base
	.set	_prom_base,PROM_BASE
   1356 
   1357 |The end!
   1358