      1 /*	$NetBSD: locore.s,v 1.36.6.1 1999/12/27 18:34:09 wrstuden Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1988 University of Utah.
      5  * Copyright (c) 1980, 1990, 1993
      6  *	The Regents of the University of California.  All rights reserved.
      7  *
      8  * This code is derived from software contributed to Berkeley by
      9  * the Systems Programming Group of the University of Utah Computer
     10  * Science Department.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the University of
     23  *	California, Berkeley and its contributors.
     24  * 4. Neither the name of the University nor the names of its contributors
     25  *    may be used to endorse or promote products derived from this software
     26  *    without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     38  * SUCH DAMAGE.
     39  *
     40  *	from: Utah $Hdr: locore.s 1.66 92/12/22$
     41  *	@(#)locore.s	8.6 (Berkeley) 5/27/94
     42  */
     43 
     44 #include "opt_compat_netbsd.h"
     45 #include "opt_compat_svr4.h"
     46 #include "opt_compat_sunos.h"
     47 
     48 #include "assym.h"
     49 #include <machine/asm.h>
     50 #include <machine/trap.h>
     51 
     52 | Remember this is a fun project!
     53 
     54 	.data
     55 GLOBAL(mon_crp)
     56 	.long	0,0
     57 
     58 | This is for kvm_mkdb, and should be the address of the beginning
     59 | of the kernel text segment (not necessarily the same as kernbase).
     60 	.text
     61 GLOBAL(kernel_text)
     62 
     63 | This is the entry point, as well as the end of the temporary stack
     64 | used during process switch (one 8K page ending at start)
     65 ASGLOBAL(tmpstk)
     66 ASGLOBAL(start)
     67 
     68 | The first step, after disabling interrupts, is to map enough of the kernel
     69 | into high virtual address space so that we can use position dependent code.
     70 | This is a tricky task on the sun3x because the MMU is already enabled and
     71 | the ROM monitor provides no indication of where the root MMU table is mapped.
     72 | Therefore we must use one of the 68030's 'transparent translation' registers
     73 | to define a range in the address space where the MMU translation is
     74 | turned off.  Once this is complete we can modify the MMU table directly
     75 | without the need for it to be mapped into virtual memory.
     76 | All code must be position independent until otherwise noted, as the
     77 | boot loader has loaded us into low memory but all the symbols in this
     78 | code have been linked high.
     79 	movw	#PSL_HIGHIPL, sr	| no interrupts
     80 	movl	#KERNBASE, a5		| for vtop conversion
     81 	lea	_C_LABEL(mon_crp), a0	| where to store the CRP
     82 	subl	a5, a0
     83 	| Note: borrowing mon_crp for tt0 setup...
     84 	movl	#0x3F8107, a0@		| map the low 1GB v=p with the
     85 	.long	0xf0100800		| transparent translation reg0
     86 					| [ pmove a0@, tt0 ]
     87 | In order to map the kernel into high memory we will copy the root table
     88 | entry which maps the 16 megabytes of memory starting at 0x0 into the
     89 | entry which maps the 16 megabytes starting at KERNBASE.
     90 	pmove	crp, a0@		| Get monitor CPU root pointer
     91 	movl	a0@(4), a1		| 2nd word is PA of level A table
     92 
     93 	movl	a1, a0			| compute the descriptor address
     94 	addl	#0x3e0, a1		| for VA starting at KERNBASE
     95 	movl	a0@, a1@		| copy descriptor type
     96 	movl	a0@(4), a1@(4)		| copy physical address
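
        | In C terms the copy above is roughly this (a sketch only; "a_tbl"
        | and "desc" are illustrative names, not real symbols):
        |
        |	struct desc { u_long attr, pa; } *a_tbl = level A table PA;
        |	a_tbl[index of KERNBASE] = a_tbl[0];
        |
        | where 0x3e0 above is the byte offset of KERNBASE's descriptor
        | within the level A table.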
     97 
     98 | Kernel is now double mapped at zero and KERNBASE.
     99 | Force a long jump to the relocated code (high VA).
    100 	movl	#IC_CLEAR, d0		| Flush the I-cache
    101 	movc	d0, cacr
    102 	jmp L_high_code:l		| long jump
    103 
    104 L_high_code:
    105 | We are now running in the correctly relocated kernel, so
    106 | we are no longer restricted to position-independent code.
     107 | It is handy to leave transparent translation enabled for the
     108 | low 1GB while _bootstrap() is doing its thing.
    109 
    110 | Do bootstrap stuff needed before main() gets called.
    111 | Our boot loader leaves a copy of the kernel's exec header
    112 | just before the start of the kernel text segment, so the
    113 | kernel can sanity-check the DDB symbols at [end...esym].
    114 | Pass the struct exec at tmpstk-32 to _bootstrap().
    115 | Also, make sure the initial frame pointer is zero so that
    116 | the backtrace algorithm used by KGDB terminates nicely.
    117 	lea	_ASM_LABEL(tmpstk)-32, sp
    118 	movl	#0,a6
    119 	jsr	_C_LABEL(_bootstrap)	| See locore2.c
    120 
    121 | Now turn off the transparent translation of the low 1GB.
    122 | (this also flushes the ATC)
    123 	clrl	sp@-
    124 	.long	0xf0170800		| pmove	sp@,tt0
    125 	addql	#4,sp
    126 
    127 | Now that _bootstrap() is done using the PROM functions,
    128 | we can safely set the sfc/dfc to something != FC_CONTROL
    129 	moveq	#FC_USERD, d0		| make movs access "user data"
    130 	movc	d0, sfc			| space for copyin/copyout
    131 	movc	d0, dfc
    132 
    133 | Setup process zero user/kernel stacks.
    134 	movl	_C_LABEL(proc0paddr),a1	| get proc0 pcb addr
    135 	lea	a1@(USPACE-4),sp	| set SSP to last word
    136 	movl	#USRSTACK-4,a2
    137 	movl	a2,usp			| init user SP
    138 
    139 | Note curpcb was already set in _bootstrap().
    140 | Will do fpu initialization during autoconfig (see fpu.c)
    141 | The interrupt vector table and stack are now ready.
     142 | Interrupts will be enabled later, AFTER autoconfiguration
     143 | is finished, to avoid spurious interrupts.
    144 
    145 /*
    146  * Final preparation for calling main.
    147  *
    148  * Create a fake exception frame that returns to user mode,
    149  * and save its address in p->p_md.md_regs for cpu_fork().
    150  * The new frames for process 1 and 2 will be adjusted by
    151  * cpu_set_kpc() to arrange for a call to a kernel function
    152  * before the new process does its rte out to user mode.
    153  */
    154 	clrw	sp@-			| tf_format,tf_vector
    155 	clrl	sp@-			| tf_pc (filled in later)
    156 	movw	#PSL_USER,sp@-		| tf_sr for user mode
    157 	clrl	sp@-			| tf_stackadj
    158 	lea	sp@(-64),sp		| tf_regs[16]
    159 	movl	sp,a1			| a1=trapframe
    160 	lea	_C_LABEL(proc0),a0	| proc0.p_md.md_regs =
    161 	movl	a1,a0@(P_MDREGS)	|   trapframe
    162 	movl	a2,a1@(FR_SP)		| a2 == usp (from above)
    163 	pea	a1@			| push &trapframe
    164 	jbsr	_C_LABEL(main)		| main(&trapframe)
    165 	addql	#4,sp			| help DDB backtrace
    166 	trap	#15			| should not get here
    167 
    168 | This is used by cpu_fork() to return to user mode.
    169 | It is called with SP pointing to a struct trapframe.
    170 GLOBAL(proc_do_uret)
    171 	movl	sp@(FR_SP),a0		| grab and load
    172 	movl	a0,usp			|   user SP
    173 	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
    174 	addql	#8,sp			| pop SSP and stack adjust count
    175 	rte
    176 
    177 /*
    178  * proc_trampoline:
    179  * This is used by cpu_set_kpc() to "push" a function call onto the
    180  * kernel stack of some process, very much like a signal delivery.
    181  * When we get here, the stack has:
    182  *
    183  * SP+8:	switchframe from before cpu_set_kpc
    184  * SP+4:	void *arg;
    185  * SP:  	u_long func;
    186  *
    187  * On entry, the switchframe pushed by cpu_set_kpc has already been
    188  * popped off the stack, so all this needs to do is pop the function
    189  * pointer into a register, call it, then pop the arg, and finally
    190  * return using the switchframe that remains on the stack.
    191  */
    192 GLOBAL(proc_trampoline)
    193 	movl	sp@+,a0			| function pointer
    194 	jbsr	a0@			| (*func)(arg)
    195 	addql	#4,sp			| toss the arg
    196 	rts				| as cpu_switch would do
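
        | Viewed from C, the code above behaves roughly like this sketch
        | ("func" and "arg" are the two words cpu_set_kpc left on the
        | stack, not real symbols):
        |
        |	func = pop();		| function pointer
        |	(*func)(arg);		| arg is still on top of the stack
        |	pop();			| discard arg
        |	return;			| unwinds through the old switchframe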
    197 
    198 | That is all the assembly startup code we need on the sun3x!
    199 | The rest of this is like the hp300/locore.s where possible.
    200 
    201 /*
    202  * Trap/interrupt vector routines
    203  */
    204 #include <m68k/m68k/trap_subr.s>
    205 
    206 GLOBAL(buserr)
    207 	tstl	_C_LABEL(nofault)	| device probe?
    208 	jeq	_C_LABEL(addrerr)	| no, handle as usual
    209 	movl	_C_LABEL(nofault),sp@-	| yes,
    210 	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
    211 GLOBAL(addrerr)
    212 	clrl	sp@-			| stack adjust count
    213 	moveml	#0xFFFF,sp@-		| save user registers
    214 	movl	usp,a0			| save the user SP
    215 	movl	a0,sp@(FR_SP)		|   in the savearea
    216 	lea	sp@(FR_HW),a1		| grab base of HW berr frame
    217 	moveq	#0,d0
    218 	movw	a1@(10),d0		| grab SSW for fault processing
    219 	btst	#12,d0			| RB set?
    220 	jeq	LbeX0			| no, test RC
    221 	bset	#14,d0			| yes, must set FB
    222 	movw	d0,a1@(10)		| for hardware too
    223 LbeX0:
    224 	btst	#13,d0			| RC set?
    225 	jeq	LbeX1			| no, skip
    226 	bset	#15,d0			| yes, must set FC
    227 	movw	d0,a1@(10)		| for hardware too
    228 LbeX1:
    229 	btst	#8,d0			| data fault?
    230 	jeq	Lbe0			| no, check for hard cases
    231 	movl	a1@(16),d1		| fault address is as given in frame
     232 	jra	Lbe10			| that's it
    233 Lbe0:
    234 	btst	#4,a1@(6)		| long (type B) stack frame?
    235 	jne	Lbe4			| yes, go handle
    236 	movl	a1@(2),d1		| no, can use save PC
    237 	btst	#14,d0			| FB set?
    238 	jeq	Lbe3			| no, try FC
    239 	addql	#4,d1			| yes, adjust address
    240 	jra	Lbe10			| done
    241 Lbe3:
    242 	btst	#15,d0			| FC set?
    243 	jeq	Lbe10			| no, done
    244 	addql	#2,d1			| yes, adjust address
    245 	jra	Lbe10			| done
    246 Lbe4:
    247 	movl	a1@(36),d1		| long format, use stage B address
    248 	btst	#15,d0			| FC set?
    249 	jeq	Lbe10			| no, all done
    250 	subql	#2,d1			| yes, adjust address
    251 Lbe10:
    252 	movl	d1,sp@-			| push fault VA
    253 	movl	d0,sp@-			| and padded SSW
    254 	movw	a1@(6),d0		| get frame format/vector offset
    255 	andw	#0x0FFF,d0		| clear out frame format
    256 	cmpw	#12,d0			| address error vector?
    257 	jeq	Lisaerr			| yes, go to it
    258 
    259 /* MMU-specific code to determine reason for bus error. */
    260 	movl	d1,a0			| fault address
    261 	movl	sp@,d0			| function code from ssw
    262 	btst	#8,d0			| data fault?
    263 	jne	Lbe10a
    264 	movql	#1,d0			| user program access FC
     265 					| (we don't separate data/program)
    266 	btst	#5,a1@			| supervisor mode?
    267 	jeq	Lbe10a			| if no, done
    268 	movql	#5,d0			| else supervisor program access
    269 Lbe10a:
    270 	ptestr	d0,a0@,#7		| do a table search
    271 	pmove	psr,sp@			| save result
    272 	movb	sp@,d1
    273 	btst	#2,d1			| invalid? (incl. limit viol and berr)
    274 	jeq	Lmightnotbemerr		| no -> wp check
    275 	btst	#7,d1			| is it MMU table berr?
    276 	jeq	Lismerr			| no, must be fast
     277 	jra	Lisberr1		| real bus err need not be fast
    278 Lmightnotbemerr:
    279 	btst	#3,d1			| write protect bit set?
    280 	jeq	Lisberr1		| no, must be bus error
    281 	movl	sp@,d0			| ssw into low word of d0
    282 	andw	#0xc0,d0		| write protect is set on page:
    283 	cmpw	#0x40,d0		| was it read cycle?
    284 	jeq	Lisberr1		| yes, was not WPE, must be bus err
    285 /* End of MMU-specific bus error code. */
    286 
    287 Lismerr:
    288 	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
    289 	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    290 Lisaerr:
    291 	movl	#T_ADDRERR,sp@-		| mark address error
    292 	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    293 Lisberr1:
    294 	clrw	sp@			| re-clear pad word
    295 Lisberr:
    296 	movl	#T_BUSERR,sp@-		| mark bus error
    297 	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    298 
    299 /*
    300  * FP exceptions.
    301  */
    302 GLOBAL(fpfline)
    303 	clrl	sp@-			| stack adjust count
    304 	moveml	#0xFFFF,sp@-		| save registers
    305 	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
    306 	jra	_ASM_LABEL(fault)	| do it
    307 
    308 GLOBAL(fpunsupp)
    309 	clrl	sp@-			| stack adjust count
    310 	moveml	#0xFFFF,sp@-		| save registers
    311 	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
    312 	jra	_ASM_LABEL(fault)	| do it
    313 
    314 /*
    315  * Handles all other FP coprocessor exceptions.
    316  * Note that since some FP exceptions generate mid-instruction frames
    317  * and may cause signal delivery, we need to test for stack adjustment
    318  * after the trap call.
    319  */
    320 GLOBAL(fpfault)
    321 	clrl	sp@-		| stack adjust count
    322 	moveml	#0xFFFF,sp@-	| save user registers
    323 	movl	usp,a0		| and save
    324 	movl	a0,sp@(FR_SP)	|   the user stack pointer
    325 	clrl	sp@-		| no VA arg
    326 	movl	_C_LABEL(curpcb),a0	| current pcb
    327 	lea	a0@(PCB_FPCTX),a0 | address of FP savearea
    328 	fsave	a0@		| save state
    329 	tstb	a0@		| null state frame?
    330 	jeq	Lfptnull	| yes, safe
    331 	clrw	d0		| no, need to tweak BIU
    332 	movb	a0@(1),d0	| get frame size
    333 	bset	#3,a0@(0,d0:w)	| set exc_pend bit of BIU
    334 Lfptnull:
    335 	fmovem	fpsr,sp@-	| push fpsr as code argument
    336 	frestore a0@		| restore state
    337 	movl	#T_FPERR,sp@-	| push type arg
    338 	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
    339 
    340 /*
     341  * Other exceptions only cause four- and six-word stack frames and require
    342  * no post-trap stack adjustment.
    343  */
    344 GLOBAL(badtrap)
    345 	clrl	sp@-			| stack adjust count
    346 	moveml	#0xFFFF,sp@-		| save std frame regs
    347 	jbsr	_C_LABEL(straytrap)	| report
    348 	moveml	sp@+,#0xFFFF		| restore regs
    349 	addql	#4, sp			| stack adjust count
    350 	jra	_ASM_LABEL(rei)		| all done
    351 
    352 /*
    353  * Trap 0 is for system calls
    354  */
    355 GLOBAL(trap0)
    356 	clrl	sp@-			| stack adjust count
    357 	moveml	#0xFFFF,sp@-		| save user registers
    358 	movl	usp,a0			| save the user SP
    359 	movl	a0,sp@(FR_SP)		|   in the savearea
    360 	movl	d0,sp@-			| push syscall number
    361 	jbsr	_C_LABEL(syscall)	| handle it
    362 	addql	#4,sp			| pop syscall arg
    363 	movl	sp@(FR_SP),a0		| grab and restore
    364 	movl	a0,usp			|   user SP
    365 	moveml	sp@+,#0x7FFF		| restore most registers
    366 	addql	#8,sp			| pop SP and stack adjust
    367 	jra	_ASM_LABEL(rei)		| all done
    368 
    369 /*
    370  * Trap 12 is the entry point for the cachectl "syscall"
    371  *	cachectl(command, addr, length)
    372  * command in d0, addr in a1, length in d1
    373  */
    374 GLOBAL(trap12)
    375 	movl	_C_LABEL(curproc),sp@-	| push curproc pointer
    376 	movl	d1,sp@-			| push length
    377 	movl	a1,sp@-			| push addr
    378 	movl	d0,sp@-			| push command
    379 	jbsr	_C_LABEL(cachectl1)	| do it
    380 	lea	sp@(16),sp		| pop args
    381 	jra	_ASM_LABEL(rei)		| all done
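
        | A user-level call would look roughly like this (a sketch; CMD,
        | BUF and LEN are placeholders, not symbols defined anywhere):
        |
        |	movl	#CMD,d0		| command
        |	movl	#BUF,a1		| addr
        |	movl	#LEN,d1		| length
        |	trap	#12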
    382 
    383 /*
    384  * Trace (single-step) trap.  Kernel-mode is special.
    385  * User mode traps are simply passed on to trap().
    386  */
    387 GLOBAL(trace)
    388 	clrl	sp@-			| stack adjust count
    389 	moveml	#0xFFFF,sp@-
    390 	moveq	#T_TRACE,d0
    391 
     392 	| Check PSW and see what happened.
    393 	|   T=0 S=0	(should not happen)
    394 	|   T=1 S=0	trace trap from user mode
    395 	|   T=0 S=1	trace trap on a trap instruction
    396 	|   T=1 S=1	trace trap from system mode (kernel breakpoint)
    397 
    398 	movw	sp@(FR_HW),d1		| get PSW
    399 	notw	d1			| XXX no support for T0 on 680[234]0
    400 	andw	#PSL_TS,d1		| from system mode (T=1, S=1)?
    401 	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
    402 	jra	_ASM_LABEL(fault)	| no, user-mode fault
    403 
    404 /*
    405  * Trap 15 is used for:
    406  *	- GDB breakpoints (in user programs)
    407  *	- KGDB breakpoints (in the kernel)
    408  *	- trace traps for SUN binaries (not fully supported yet)
    409  * User mode traps are simply passed to trap().
    410  */
    411 GLOBAL(trap15)
    412 	clrl	sp@-			| stack adjust count
    413 	moveml	#0xFFFF,sp@-
    414 	moveq	#T_TRAP15,d0
    415 	btst	#5,sp@(FR_HW)		| was supervisor mode?
    416 	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
    417 	jra	_ASM_LABEL(fault)	| no, user-mode fault
    418 
    419 ASLOCAL(kbrkpt)
    420 	| Kernel-mode breakpoint or trace trap. (d0=trap_type)
    421 	| Save the system sp rather than the user sp.
    422 	movw	#PSL_HIGHIPL,sr		| lock out interrupts
    423 	lea	sp@(FR_SIZE),a6		| Save stack pointer
    424 	movl	a6,sp@(FR_SP)		|  from before trap
    425 
     426 	| If we are not on tmpstk, switch to it.
     427 	| (so the debugger can change the stack pointer)
    428 	movl	a6,d1
    429 	cmpl	#_ASM_LABEL(tmpstk),d1
    430 	jls	Lbrkpt2 		| already on tmpstk
    431 	| Copy frame to the temporary stack
    432 	movl	sp,a0			| a0=src
    433 	lea	_ASM_LABEL(tmpstk)-96,a1	| a1=dst
    434 	movl	a1,sp			| sp=new frame
    435 	moveq	#FR_SIZE,d1
    436 Lbrkpt1:
    437 	movl	a0@+,a1@+
    438 	subql	#4,d1
    439 	bgt	Lbrkpt1
    440 
    441 Lbrkpt2:
    442 	| Call the trap handler for the kernel debugger.
    443 	| Do not call trap() to handle it, so that we can
    444 	| set breakpoints in trap() if we want.  We know
    445 	| the trap type is either T_TRACE or T_BREAKPOINT.
    446 	movl	d0,sp@-			| push trap type
    447 	jbsr	_C_LABEL(trap_kdebug)
    448 	addql	#4,sp			| pop args
    449 
    450 	| The stack pointer may have been modified, or
    451 	| data below it modified (by kgdb push call),
    452 	| so push the hardware frame at the current sp
    453 	| before restoring registers and returning.
    454 	movl	sp@(FR_SP),a0		| modified sp
    455 	lea	sp@(FR_SIZE),a1		| end of our frame
    456 	movl	a1@-,a0@-		| copy 2 longs with
    457 	movl	a1@-,a0@-		| ... predecrement
    458 	movl	a0,sp@(FR_SP)		| sp = h/w frame
    459 	moveml	sp@+,#0x7FFF		| restore all but sp
    460 	movl	sp@,sp			| ... and sp
    461 	rte				| all done
    462 
    463 /* Use common m68k sigreturn */
    464 #include <m68k/m68k/sigreturn.s>
    465 
    466 /*
    467  * Interrupt handlers.  Most are auto-vectored,
    468  * and hard-wired the same way on all sun3 models.
    469  * Format in the stack is:
    470  *   d0,d1,a0,a1, sr, pc, vo
    471  */
    472 
    473 #define INTERRUPT_SAVEREG \
    474 	moveml	#0xC0C0,sp@-
    475 
    476 #define INTERRUPT_RESTORE \
    477 	moveml	sp@+,#0x0303
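
        | With the predecrement mode, mask 0xC0C0 pushes d0,d1,a0,a1;
        | with postincrement, mask 0x0303 pops the same four registers.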
    478 
    479 /*
    480  * This is the common auto-vector interrupt handler,
    481  * for which the CPU provides the vector=0x18+level.
    482  * These are installed in the interrupt vector table.
    483  */
    484 	.align	2
    485 GLOBAL(_isr_autovec)
    486 	INTERRUPT_SAVEREG
    487 	jbsr	_C_LABEL(isr_autovec)
    488 	INTERRUPT_RESTORE
    489 	jra	_ASM_LABEL(rei)
    490 
    491 /* clock: see clock.c */
    492 	.align	2
    493 GLOBAL(_isr_clock)
    494 	INTERRUPT_SAVEREG
    495 	jbsr	_C_LABEL(clock_intr)
    496 	INTERRUPT_RESTORE
    497 	jra	_ASM_LABEL(rei)
    498 
    499 | Handler for all vectored interrupts (i.e. VME interrupts)
    500 	.align	2
    501 GLOBAL(_isr_vectored)
    502 	INTERRUPT_SAVEREG
    503 	jbsr	_C_LABEL(isr_vectored)
    504 	INTERRUPT_RESTORE
    505 	jra	_ASM_LABEL(rei)
    506 
    507 #undef	INTERRUPT_SAVEREG
    508 #undef	INTERRUPT_RESTORE
    509 
    510 /* interrupt counters (needed by vmstat) */
    511 GLOBAL(intrnames)
    512 	.asciz	"spur"	| 0
    513 	.asciz	"lev1"	| 1
    514 	.asciz	"lev2"	| 2
    515 	.asciz	"lev3"	| 3
    516 	.asciz	"lev4"	| 4
    517 	.asciz	"clock"	| 5
    518 	.asciz	"lev6"	| 6
    519 	.asciz	"nmi"	| 7
    520 GLOBAL(eintrnames)
    521 
    522 	.data
    523 	.even
    524 GLOBAL(intrcnt)
    525 	.long	0,0,0,0,0,0,0,0,0,0
    526 GLOBAL(eintrcnt)
    527 	.text
    528 
    529 /*
    530  * Emulation of VAX REI instruction.
    531  *
    532  * This code is (mostly) un-altered from the hp300 code,
    533  * except that sun machines do not need a simulated SIR
    534  * because they have a real software interrupt register.
    535  *
    536  * This code deals with checking for and servicing ASTs
    537  * (profiling, scheduling) and software interrupts (network, softclock).
    538  * We check for ASTs first, just like the VAX.  To avoid excess overhead
    539  * the T_ASTFLT handling code will also check for software interrupts so we
    540  * do not have to do it here.  After identifying that we need an AST we
    541  * drop the IPL to allow device interrupts.
    542  *
    543  * This code is complicated by the fact that sendsig may have been called
    544  * necessitating a stack cleanup.
    545  */
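
        /*
         * Roughly, in C (a sketch only; "frame" stands for the exception
         * frame on the stack and is not a real symbol):
         *
         *	if (astpending == 0 || (frame.sr & PSL_S))
         *		rte();			-- plain return
         *	lower IPL to PSL_LOWIPL;
         *	trap(T_ASTFLT, 0, 0);		-- may call sendsig()
         *	if (frame stack adjust count != 0)
         *		slide the hardware frame before returning;
         *	rte();
         */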
    546 
    547 ASGLOBAL(rei)
    548 #ifdef	DIAGNOSTIC
    549 	tstl	_C_LABEL(panicstr)	| have we paniced?
    550 	jne	Ldorte			| yes, do not make matters worse
    551 #endif
    552 	tstl	_C_LABEL(astpending)	| AST pending?
    553 	jeq	Ldorte			| no, done
    554 Lrei1:
    555 	btst	#5,sp@			| yes, are we returning to user mode?
    556 	jne	Ldorte			| no, done
    557 	movw	#PSL_LOWIPL,sr		| lower SPL
    558 	clrl	sp@-			| stack adjust
    559 	moveml	#0xFFFF,sp@-		| save all registers
    560 	movl	usp,a1			| including
    561 	movl	a1,sp@(FR_SP)		|    the users SP
    562 	clrl	sp@-			| VA == none
    563 	clrl	sp@-			| code == none
    564 	movl	#T_ASTFLT,sp@-		| type == async system trap
    565 	jbsr	_C_LABEL(trap)		| go handle it
    566 	lea	sp@(12),sp		| pop value args
    567 	movl	sp@(FR_SP),a0		| restore user SP
    568 	movl	a0,usp			|   from save area
    569 	movw	sp@(FR_ADJ),d0		| need to adjust stack?
    570 	jne	Laststkadj		| yes, go to it
    571 	moveml	sp@+,#0x7FFF		| no, restore most user regs
    572 	addql	#8,sp			| toss SP and stack adjust
    573 	rte				| and do real RTE
    574 Laststkadj:
    575 	lea	sp@(FR_HW),a1		| pointer to HW frame
    576 	addql	#8,a1			| source pointer
    577 	movl	a1,a0			| source
    578 	addw	d0,a0			|  + hole size = dest pointer
    579 	movl	a1@-,a0@-		| copy
    580 	movl	a1@-,a0@-		|  8 bytes
    581 	movl	a0,sp@(FR_SP)		| new SSP
    582 	moveml	sp@+,#0x7FFF		| restore user registers
    583 	movl	sp@,sp			| and our SP
    584 Ldorte:
    585 	rte				| real return
    586 
    587 /*
    588  * Initialization is at the beginning of this file, because the
    589  * kernel entry point needs to be at zero for compatibility with
    590  * the Sun boot loader.  This works on Sun machines because the
    591  * interrupt vector table for reset is NOT at address zero.
    592  * (The MMU has a "boot" bit that forces access to the PROM)
    593  */
    594 
    595 /*
    596  * Use common m68k sigcode.
    597  */
    598 #include <m68k/m68k/sigcode.s>
    599 
    600 	.text
    601 
    602 /*
    603  * Primitives
    604  */
    605 
    606 /*
    607  * Use common m68k support routines.
    608  */
    609 #include <m68k/m68k/support.s>
    610 
    611 BSS(want_resched,4)
    612 
    613 /*
    614  * Use common m68k process manipulation routines.
    615  */
    616 #include <m68k/m68k/proc_subr.s>
    617 
    618 | Message for Lbadsw panic
    619 Lsw0:
    620 	.asciz	"cpu_switch"
    621 	.even
    622 
    623 	.data
    624 GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
    625 GLOBAL(curpcb)
    626 	.long	0
    627 ASBSS(nullpcb,SIZEOF_PCB)
    628 	.text
    629 
    630 /*
    631  * At exit of a process, do a cpu_switch for the last time.
    632  * Switch to a safe stack and PCB, and select a new process to run.  The
    633  * old stack and u-area will be freed by the reaper.
    634  */
    635 ENTRY(switch_exit)
    636 	movl	sp@(4),a0		| struct proc *p
    637 					| save state into garbage pcb
    638 	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
    639 	lea	_ASM_LABEL(tmpstk),sp	| goto a tmp stack
    640 
    641 	/* Schedule the vmspace and stack to be freed. */
    642 	movl	a0,sp@-			| exit2(p)
    643 	jbsr	_C_LABEL(exit2)
    644 
    645 	/* Don't pop the proc; pass it to cpu_switch(). */
    646 
    647 	jra	_C_LABEL(cpu_switch)
    648 
    649 /*
    650  * When no processes are on the runq, cpu_switch() branches to idle
    651  * to wait for something to come ready.
    652  */
    653 	.data
    654 GLOBAL(Idle_count)
    655 	.long	0
    656 	.text
    657 
    658 Lidle:
    659 	stop	#PSL_LOWIPL
    660 GLOBAL(_Idle)				| See clock.c
    661 	movw	#PSL_HIGHIPL,sr
    662 	addql	#1, _C_LABEL(Idle_count)
    663 	tstl	_C_LABEL(whichqs)
    664 	jeq	Lidle
    665 	movw	#PSL_LOWIPL,sr
    666 	jra	Lsw1
    667 
    668 Lbadsw:
    669 	movl	#Lsw0,sp@-
    670 	jbsr	_C_LABEL(panic)
    671 	/*NOTREACHED*/
    672 
    673 /*
    674  * cpu_switch()
    675  * Hacked for sun3
    676  * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
     677  * XXX - Should we use p->p_addr instead of curpcb? -gwr
    678  */
    679 ENTRY(cpu_switch)
    680 	movl	_C_LABEL(curpcb),a1	| current pcb
    681 	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
    682 #ifdef notyet
    683 	movl	_C_LABEL(curproc),sp@-	| remember last proc running
    684 #endif
    685 	clrl	_C_LABEL(curproc)
    686 
    687 Lsw1:
    688 	/*
    689 	 * Find the highest-priority queue that isn't empty,
    690 	 * then take the first proc from that queue.
    691 	 */
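        	/*
        	 * Roughly, in C (a sketch; "qs" is the run queue array and
        	 * the other names below are illustrative, not real symbols):
        	 *
        	 *	while ((bits = whichqs) == 0)
        	 *		Idle();
        	 *	i = lowest set bit in bits;	-- highest priority queue
        	 *	p = qs[i].first;		-- head of that queue
        	 *	remove p from the queue;
        	 *	if (the queue is not now empty)
        	 *		set bit i in whichqs again;
        	 */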
    692 	clrl	d0
    693 	lea	_C_LABEL(whichqs),a0
    694 	movl	a0@,d1
    695 Lswchk:
    696 	btst	d0,d1
    697 	jne	Lswfnd
    698 	addqb	#1,d0
    699 	cmpb	#32,d0
    700 	jne	Lswchk
    701 	jra	_C_LABEL(_Idle)
    702 Lswfnd:
    703 	movw	#PSL_HIGHIPL,sr		| lock out interrupts
    704 	movl	a0@,d1			| and check again...
    705 	bclr	d0,d1
    706 	jeq	Lsw1			| proc moved, rescan
    707 	movl	d1,a0@			| update whichqs
    708 	moveq	#1,d1			| double check for higher priority
    709 	lsll	d0,d1			| process (which may have snuck in
    710 	subql	#1,d1			| while we were finding this one)
    711 	andl	a0@,d1
    712 	jeq	Lswok			| no one got in, continue
    713 	movl	a0@,d1
    714 	bset	d0,d1			| otherwise put this one back
    715 	movl	d1,a0@
    716 	jra	Lsw1			| and rescan
    717 Lswok:
    718 	movl	d0,d1
    719 	lslb	#3,d1			| convert queue number to index
    720 	addl	#_qs,d1			| locate queue (q)
    721 	movl	d1,a1
    722 	cmpl	a1@(P_FORW),a1		| anyone on queue?
    723 	jeq	Lbadsw			| no, panic
    724 	movl	a1@(P_FORW),a0		| p = q->p_forw
    725 	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
    726 	movl	a0@(P_FORW),a1		| q = p->p_forw
    727 	movl	a0@(P_BACK),a1@(P_BACK)	| q->p_back = p->p_back
    728 	cmpl	a0@(P_FORW),d1		| anyone left on queue?
    729 	jeq	Lsw2			| no, skip
    730 	movl	_C_LABEL(whichqs),d1
    731 	bset	d0,d1			| yes, reset bit
    732 	movl	d1,_C_LABEL(whichqs)
    733 Lsw2:
    734 	movl	a0,_C_LABEL(curproc)
    735 	clrl	_C_LABEL(want_resched)
    736 #ifdef notyet
    737 	movl	sp@+,a1			| XXX - Make this work!
    738 	cmpl	a0,a1			| switching to same proc?
    739 	jeq	Lswdone			| yes, skip save and restore
    740 #endif
    741 	/*
    742 	 * Save state of previous process in its pcb.
    743 	 */
    744 	movl	_C_LABEL(curpcb),a1
    745 	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
    746 	movl	usp,a2			| grab USP (a2 has been saved)
    747 	movl	a2,a1@(PCB_USP)		| and save it
    748 
    749 	tstl	_C_LABEL(fputype)	| Do we have an fpu?
    750 	jeq	Lswnofpsave		| No?  Then don't try save.
    751 	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
    752 	fsave	a2@			| save FP state
    753 	tstb	a2@			| null state frame?
    754 	jeq	Lswnofpsave		| yes, all done
    755 	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
    756 	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
    757 Lswnofpsave:
    758 
    759 	/*
    760 	 * Now that we have saved all the registers that must be
    761 	 * preserved, we are free to use those registers until
    762 	 * we load the registers for the switched-to process.
    763 	 * In this section, keep:  a0=curproc, a1=curpcb
    764 	 */
    765 
    766 #ifdef DIAGNOSTIC
    767 	tstl	a0@(P_WCHAN)
    768 	jne	Lbadsw
    769 	cmpb	#SRUN,a0@(P_STAT)
    770 	jne	Lbadsw
    771 #endif
    772 	clrl	a0@(P_BACK)		| clear back link
    773 	movl	a0@(P_ADDR),a1		| get p_addr
    774 	movl	a1,_C_LABEL(curpcb)
    775 
    776 	/*
    777 	 * Load the new VM context (new MMU root pointer)
    778 	 */
    779 	movl	a0@(P_VMSPACE),a2	| vm = p->p_vmspace
    780 #ifdef DIAGNOSTIC
    781 	tstl	a2			| vm == VM_MAP_NULL?
    782 	jeq	Lbadsw			| panic
    783 #endif
    784 #ifdef PMAP_DEBUG
    785 	/* When debugging just call _pmap_switch(). */
    786 	movl	a2@(VM_PMAP),a2 	| pmap = vm->vm_map.pmap
    787 	pea	a2@			| push pmap
    788 	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
    789 	addql	#4,sp
    790 	movl	_C_LABEL(curpcb),a1	| restore p_addr
    791 #else
    792 	/* Otherwise, use this inline version. */
    793 	lea	_C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
    794 	movl	a2@(VM_PMAP),a2 	| pmap = vm->vm_map.pmap
    795 	movl	a2@(PM_A_PHYS),d0	| phys = pmap->pm_a_phys
    796 	cmpl	a3@(4),d0		|  == kernel_crp.rp_addr ?
    797 	jeq	Lsame_mmuctx		| skip loadcrp/flush
    798 	/* OK, it is a new MMU context.  Load it up. */
    799 	movl	d0,a3@(4)
    800 	movl	#CACHE_CLR,d0
    801 	movc	d0,cacr			| invalidate cache(s)
    802 	pflusha				| flush entire TLB
    803 	pmove	a3@,crp			| load new user root pointer
    804 Lsame_mmuctx:
    805 #endif
    806 
    807 	/*
    808 	 * Reload the registers for the new process.
    809 	 * After this point we can only use d0,d1,a0,a1
    810 	 */
    811 	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
    812 	movl	a1@(PCB_USP),a0
    813 	movl	a0,usp			| and USP
    814 
    815 	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
    816 	jeq	Lres_skip		|  don't try to restore it.
    817 	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
    818 	tstb	a0@			| null state frame?
    819 	jeq	Lresfprest		| yes, easy
    820 	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
    821 	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
    822 Lresfprest:
    823 	frestore a0@			| restore state
    824 Lres_skip:
    825 	movw	a1@(PCB_PS),d0		| no, restore PS
    826 #ifdef DIAGNOSTIC
    827 	btst	#13,d0			| supervisor mode?
    828 	jeq	Lbadsw			| no? panic!
    829 #endif
    830 	movw	d0,sr			| OK, restore PS
    831 	moveq	#1,d0			| return 1 (for alternate returns)
    832 	rts
    833 
    834 /*
    835  * savectx(pcb)
    836  * Update pcb, saving current processor state.
    837  */
    838 ENTRY(savectx)
    839 	movl	sp@(4),a1
    840 	movw	sr,a1@(PCB_PS)
    841 	movl	usp,a0			| grab USP
    842 	movl	a0,a1@(PCB_USP)		| and save it
    843 	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
    844 
    845 	tstl	_C_LABEL(fputype)	| Do we have FPU?
    846 	jeq	Lsavedone		| No?  Then don't save state.
    847 	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
    848 	fsave	a0@			| save FP state
    849 	tstb	a0@			| null state frame?
    850 	jeq	Lsavedone		| yes, all done
    851 	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
    852 	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
    853 Lsavedone:
    854 	moveq	#0,d0			| return 0
    855 	rts
    856 
    857 /* suline() */
    858 
    859 #ifdef DEBUG
    860 	.data
    861 ASGLOBAL(fulltflush)
    862 	.long	0
    863 ASGLOBAL(fullcflush)
    864 	.long	0
    865 	.text
    866 #endif
    867 
    868 /*
    869  * Invalidate entire TLB.
    870  */
    871 ENTRY(TBIA)
    872 _C_LABEL(_TBIA):
    873 	pflusha
    874 	movl	#DC_CLEAR,d0
    875 	movc	d0,cacr			| invalidate on-chip d-cache
    876 	rts
    877 
    878 /*
    879  * Invalidate any TLB entry for given VA (TB Invalidate Single)
    880  */
    881 ENTRY(TBIS)
    882 #ifdef DEBUG
    883 	tstl	_ASM_LABEL(fulltflush)	| being conservative?
    884 	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
    885 #endif
    886 	movl	sp@(4),a0
    887 	pflush	#0,#0,a0@		| flush address from both sides
    888 	movl	#DC_CLEAR,d0
    889 	movc	d0,cacr			| invalidate on-chip data cache
    890 	rts
    891 
    892 /*
    893  * Invalidate supervisor side of TLB
    894  */
    895 ENTRY(TBIAS)
    896 #ifdef DEBUG
    897 	tstl	_ASM_LABEL(fulltflush)	| being conservative?
    898 	jne	_C_LABEL(_TBIA)		| yes, flush everything
    899 #endif
    900 	pflush	#4,#4			| flush supervisor TLB entries
    901 	movl	#DC_CLEAR,d0
    902 	movc	d0,cacr			| invalidate on-chip d-cache
    903 	rts
    904 
    905 /*
    906  * Invalidate user side of TLB
    907  */
    908 ENTRY(TBIAU)
    909 #ifdef DEBUG
    910 	tstl	_ASM_LABEL(fulltflush)	| being conservative?
    911 	jne	_C_LABEL(_TBIA)		| yes, flush everything
    912 #endif
    913 	pflush	#0,#4			| flush user TLB entries
    914 	movl	#DC_CLEAR,d0
    915 	movc	d0,cacr			| invalidate on-chip d-cache
    916 	rts
    917 
    918 /*
    919  * Invalidate instruction cache
    920  */
    921 ENTRY(ICIA)
    922 	movl	#IC_CLEAR,d0
    923 	movc	d0,cacr			| invalidate i-cache
    924 	rts
    925 
    926 /*
    927  * Invalidate data cache.
    928  * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
    929  * problems with DC_WA.  The only cases we have to worry about are context
    930  * switch and TLB changes, both of which are handled "in-line" in resume
    931  * and TBI*.
    932  */
    933 ENTRY(DCIA)
    934 __DCIA:
    935 	rts
    936 
    937 ENTRY(DCIS)
    938 __DCIS:
    939 	rts
    940 
    941 /*
    942  * Invalidate data cache.
    943  */
    944 ENTRY(DCIU)
    945 	movl	#DC_CLEAR,d0
    946 	movc	d0,cacr			| invalidate on-chip d-cache
    947 	rts
    948 
    949 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
    950 
    951 ENTRY(PCIA)
    952 	movl	#DC_CLEAR,d0
    953 	movc	d0,cacr			| invalidate on-chip d-cache
    954 	rts
    955 
    956 ENTRY(ecacheon)
    957 	rts
    958 
    959 ENTRY(ecacheoff)
    960 	rts
    961 
    962 /*
    963  * Get callers current SP value.
    964  * Note that simply taking the address of a local variable in a C function
    965  * doesn't work because callee saved registers may be outside the stack frame
    966  * defined by A6 (e.g. GCC generated code).
    967  *
    968  * [I don't think the ENTRY() macro will do the right thing with this -- glass]
    969  */
    970 GLOBAL(getsp)
    971 	movl	sp,d0			| get current SP
    972 	addql	#4,d0			| compensate for return address
    973 	rts
    974 
    975 ENTRY(getsfc)
    976 	movc	sfc,d0
    977 	rts
    978 
    979 ENTRY(getdfc)
    980 	movc	dfc,d0
    981 	rts
    982 
    983 ENTRY(getvbr)
    984 	movc vbr, d0
    985 	rts
    986 
    987 ENTRY(setvbr)
    988 	movl sp@(4), d0
    989 	movc d0, vbr
    990 	rts
    991 
    992 /*
    993  * Load a new CPU Root Pointer (CRP) into the MMU.
    994  *	void	loadcrp(struct mmu_rootptr *);
    995  */
    996 ENTRY(loadcrp)
    997 	movl	sp@(4),a0		| arg1: &CRP
    998 	movl	#CACHE_CLR,d0
    999 	movc	d0,cacr			| invalidate cache(s)
   1000 	pflusha				| flush entire TLB
   1001 	pmove	a0@,crp			| load new user root pointer
   1002 	rts
   1003 
   1004 /*
   1005  * Get the physical address of the PTE for a given VA.
   1006  */
   1007 ENTRY(ptest_addr)
   1008 	movl	sp@(4),a0		| VA
   1009 	ptestr	#5,a0@,#7,a1		| a1 = addr of PTE
   1010 	movl	a1,d0
   1011 	rts
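
        /*
         * Called from C roughly as follows (a sketch; the return type
         * here is illustrative, not the declared prototype):
         *
         *	u_int pte_pa = ptest_addr(va);	-- PA of the PTE mapping va
         */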
   1012 
   1013 /*
   1014  * Set processor priority level calls.  Most are implemented with
   1015  * inline asm expansions.  However, we need one instantiation here
   1016  * in case some non-optimized code makes external references.
   1017  * Most places will use the inlined functions param.h supplies.
   1018  */
   1019 
   1020 ENTRY(_getsr)
   1021 	clrl	d0
   1022 	movw	sr,d0
   1023 	rts
   1024 
   1025 ENTRY(_spl)
   1026 	clrl	d0
   1027 	movw	sr,d0
   1028 	movl	sp@(4),d1
   1029 	movw	d1,sr
   1030 	rts
   1031 
   1032 ENTRY(_splraise)
   1033 	clrl	d0
   1034 	movw	sr,d0
   1035 	movl	d0,d1
   1036 	andl	#PSL_HIGHIPL,d1 	| old &= PSL_HIGHIPL
   1037 	cmpl	sp@(4),d1		| (old - new)
   1038 	bge	Lsplr
   1039 	movl	sp@(4),d1
   1040 	movw	d1,sr
   1041 Lsplr:
   1042 	rts
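
        /*
         * In C, the three entry points above behave roughly as follows
         * (a sketch; getsr()/setsr() stand in for the movw-to/from-sr
         * instructions and are not real functions):
         *
         *	int _getsr(void) { return getsr(); }
         *	int _spl(int s)  { int old = getsr(); setsr(s); return old; }
         *	int _splraise(int s)
         *	{
         *		int old = getsr();
         *		if ((old & PSL_HIGHIPL) < s)
         *			setsr(s);
         *		return old;
         *	}
         */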
   1043 
   1044 /*
   1045  * Save and restore 68881 state.
   1046  */
   1047 ENTRY(m68881_save)
   1048 	movl	sp@(4),a0		| save area pointer
   1049 	fsave	a0@			| save state
   1050 	tstb	a0@			| null state frame?
   1051 	jeq	Lm68881sdone		| yes, all done
   1052 	fmovem fp0-fp7,a0@(FPF_REGS)		| save FP general regs
   1053 	fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
   1054 Lm68881sdone:
   1055 	rts
   1056 
   1057 ENTRY(m68881_restore)
   1058 	movl	sp@(4),a0		| save area pointer
   1059 	tstb	a0@			| null state frame?
   1060 	jeq	Lm68881rdone		| yes, easy
   1061 	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
   1062 	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
   1063 Lm68881rdone:
   1064 	frestore a0@			| restore state
   1065 	rts
   1066 
   1067 /*
   1068  * _delay(unsigned N)
   1069  * Delay for at least (N/256) microseconds.
    1070  * This routine depends on the variable delay_divisor, which
    1071  * should be set based on the CPU clock rate.
   1072  * XXX: Currently this is set based on the CPU model,
   1073  * XXX: but this should be determined at run time...
   1074  */
   1075 GLOBAL(_delay)
   1076 	| d0 = arg = (usecs << 8)
   1077 	movl	sp@(4),d0
   1078 	| d1 = delay_divisor;
   1079 	movl	_C_LABEL(delay_divisor),d1
   1080 	jra	L_delay			/* Jump into the loop! */
   1081 
   1082 	/*
   1083 	 * Align the branch target of the loop to a half-line (8-byte)
   1084 	 * boundary to minimize cache effects.  This guarantees both
   1085 	 * that there will be no prefetch stalls due to cache line burst
   1086 	 * operations and that the loop will run from a single cache
   1087 	 * half-line.
   1088 	 */
   1089 	.align	8
   1090 L_delay:
   1091 	subl	d1,d0
   1092 	jgt	L_delay
   1093 	rts
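
        /*
         * Equivalent C, as a sketch (delay_divisor is set elsewhere from
         * the CPU model, as noted above):
         *
         *	void _delay(unsigned int n)	-- n = microseconds * 256
         *	{
         *		int d = (int)n;
         *		do {
         *			d -= delay_divisor;
         *		} while (d > 0);
         *	}
         */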
   1094 
   1095 
   1096 | Define some addresses, mostly so DDB can print useful info.
   1097 | Not using _C_LABEL() here because these symbols are never
   1098 | referenced by any C code, and if the leading underscore
   1099 | ever goes away, these lines turn into syntax errors...
   1100 	.set	_KERNBASE,KERNBASE
   1101 	.set	_MONSTART,SUN3X_MONSTART
   1102 	.set	_PROM_BASE,SUN3X_PROM_BASE
   1103 	.set	_MONEND,SUN3X_MONEND
   1104 
    1105 | The end!
   1106