      1 /*	$NetBSD: locore.s,v 1.21 1997/05/29 22:20:01 gwr Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1988 University of Utah.
      5  * Copyright (c) 1980, 1990, 1993
      6  *	The Regents of the University of California.  All rights reserved.
      7  *
      8  * This code is derived from software contributed to Berkeley by
      9  * the Systems Programming Group of the University of Utah Computer
     10  * Science Department.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the University of
     23  *	California, Berkeley and its contributors.
     24  * 4. Neither the name of the University nor the names of its contributors
     25  *    may be used to endorse or promote products derived from this software
     26  *    without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     38  * SUCH DAMAGE.
     39  *
     40  *	from: Utah $Hdr: locore.s 1.66 92/12/22$
     41  *	@(#)locore.s	8.6 (Berkeley) 5/27/94
     42  */
     43 
     44 #include "assym.h"
     45 #include <machine/asm.h>
     46 #include <machine/trap.h>
     47 
     48 | Remember this is a fun project!
     49 
     50 	.data
     51 GLOBAL(mon_crp)
     52 	.long	0,0
     53 
     54 | This is for kvm_mkdb, and should be the address of the beginning
     55 | of the kernel text segment (not necessarily the same as kernbase).
     56 	.text
     57 GLOBAL(kernel_text)
     58 
     59 | This is the entry point, as well as the end of the temporary stack
     60 | used during process switch (one 8K page ending at start)
     61 ASGLOBAL(tmpstk)
     62 ASGLOBAL(start)
     63 
     64 | The first step, after disabling interrupts, is to map enough of the kernel
     65 | into high virtual address space so that we can use position dependent code.
     66 | This is a tricky task on the sun3x because the MMU is already enabled and
     67 | the ROM monitor provides no indication of where the root MMU table is mapped.
     68 | Therefore we must use one of the 68030's 'transparent translation' registers
     69 | to define a range in the address space where the MMU translation is
     70 | turned off.  Once this is complete we can modify the MMU table directly
     71 | without the need for it to be mapped into virtual memory.
     72 | All code must be position independent until otherwise noted, as the
     73 | boot loader has loaded us into low memory but all the symbols in this
     74 | code have been linked high.
     75 	movw	#PSL_HIGHIPL, sr	| no interrupts
     76 	movl	#KERNBASE, a5		| for vtop conversion
     77 	lea	_C_LABEL(mon_crp), a0	| where to store the CRP
     78 	subl	a5, a0
     79 	| Note: borrowing mon_crp for tt0 setup...
     80 	movl	#0x3F8107, a0@		| map the low 1GB v=p with the
     81 	.long	0xf0100800		| transparent translation reg0
     82 					| [ pmove a0@, tt0 ]
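         | The pmove to tt0 above is emitted as a raw opcode, presumably
         | because the assembler did not accept the 68030-only tt registers.
         | The constant 0x3F8107 is assumed to enable transparent translation
         | of 0x00000000-0x3FFFFFFF for all function codes, reads and writes
         | (the "low 1GB v=p" mapping mentioned above).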
     83 | In order to map the kernel into high memory we will copy the root table
     84 | entry which maps the 16 megabytes of memory starting at 0x0 into the
     85 | entry which maps the 16 megabytes starting at KERNBASE.
     86 	pmove	crp, a0@		| Get monitor CPU root pointer
     87 	movl	a0@(4), a1		| 2nd word is PA of level A table
     88 
     89 	movl	a1, a0			| compute the descriptor address
     90 	addl	#0x3e0, a1		| for VA starting at KERNBASE
     91 	movl	a0@, a1@		| copy descriptor type
     92 	movl	a0@(4), a1@(4)		| copy physical address
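         | (0x3e0 is assumed to be the byte offset, within the monitor's
         | level-A table, of the long-format descriptor covering KERNBASE;
         | copying both longwords duplicates the whole descriptor.)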
     93 
     94 | Kernel is now double mapped at zero and KERNBASE.
     95 | Force a long jump to the relocated code (high VA).
     96 	movl	#IC_CLEAR, d0		| Flush the I-cache
     97 	movc	d0, cacr
     98 	jmp L_high_code:l		| long jump
     99 
    100 L_high_code:
    101 | We are now running in the correctly relocated kernel, so
    102 | we are no longer restricted to position-independent code.
     103 | It is handy to leave transparent translation enabled
     104 | for the low 1GB while _bootstrap() is doing its thing.
    105 
    106 | Do bootstrap stuff needed before main() gets called.
    107 | Our boot loader leaves a copy of the kernel's exec header
    108 | just before the start of the kernel text segment, so the
    109 | kernel can sanity-check the DDB symbols at [end...esym].
    110 | Pass the struct exec at tmpstk-32 to _bootstrap().
    111 | Also, make sure the initial frame pointer is zero so that
    112 | the backtrace algorithm used by KGDB terminates nicely.
    113 	lea	_ASM_LABEL(tmpstk)-32, sp
    114 	movl	#0,a6
    115 	jsr	_C_LABEL(_bootstrap)	| See _startup.c
    116 
    117 | Now turn off the transparent translation of the low 1GB.
    118 | (this also flushes the ATC)
    119 	clrl	sp@-
    120 	.long	0xf0170800		| pmove	sp@,tt0
    121 	addql	#4,sp
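         | (A value of all zeroes has the enable bit clear, so the low 1GB
         | is once again translated through the MMU tables.)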
    122 
    123 | Now that _bootstrap() is done using the PROM functions,
    124 | we can safely set the sfc/dfc to something != FC_CONTROL
    125 	moveq	#FC_USERD, d0		| make movs access "user data"
    126 	movc	d0, sfc			| space for copyin/copyout
    127 	movc	d0, dfc
    128 
    129 | Setup process zero user/kernel stacks.
    130 	movl	_C_LABEL(proc0paddr),a1	| get proc0 pcb addr
    131 	lea	a1@(USPACE-4),sp	| set SSP to last word
    132 	movl	#USRSTACK-4,a2
    133 	movl	a2,usp			| init user SP
    134 
    135 | Note curpcb was already set in _bootstrap().
    136 | Will do fpu initialization during autoconfig (see fpu.c)
    137 | The interrupt vector table and stack are now ready.
     138 | Interrupts will be enabled later, AFTER autoconfiguration
     139 | is finished, to avoid spurious interrupts.
    140 
    141 /*
    142  * Final preparation for calling main.
    143  *
    144  * Create a fake exception frame that returns to user mode,
    145  * and save its address in p->p_md.md_regs for cpu_fork().
    146  * The new frames for process 1 and 2 will be adjusted by
    147  * cpu_set_kpc() to arrange for a call to a kernel function
    148  * before the new process does its rte out to user mode.
    149  */
    150 	clrw	sp@-			| tf_format,tf_vector
    151 	clrl	sp@-			| tf_pc (filled in later)
    152 	movw	#PSL_USER,sp@-		| tf_sr for user mode
    153 	clrl	sp@-			| tf_stackadj
    154 	lea	sp@(-64),sp		| tf_regs[16]
    155 	movl	sp,a1			| a1=trapframe
    156 	lea	_C_LABEL(proc0),a0	| proc0.p_md.md_regs =
    157 	movl	a1,a0@(P_MDREGS)	|   trapframe
    158 	movl	a2,a1@(FR_SP)		| a2 == usp (from above)
    159 	pea	a1@			| push &trapframe
    160 	jbsr	_C_LABEL(main)		| main(&trapframe)
    161 	addql	#4,sp			| help DDB backtrace
    162 	trap	#15			| should not get here
    163 
    164 | This is used by cpu_fork() to return to user mode.
    165 | It is called with SP pointing to a struct trapframe.
    166 GLOBAL(proc_do_uret)
    167 	movl	sp@(FR_SP),a0		| grab and load
    168 	movl	a0,usp			|   user SP
    169 	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
    170 	addql	#8,sp			| pop SSP and stack adjust count
    171 	rte
    172 
    173 /*
    174  * proc_trampoline:
    175  * This is used by cpu_set_kpc() to "push" a function call onto the
    176  * kernel stack of some process, very much like a signal delivery.
    177  * When we get here, the stack has:
    178  *
    179  * SP+8:	switchframe from before cpu_set_kpc
    180  * SP+4:	void *proc;
    181  * SP:  	u_long func;
    182  *
    183  * On entry, the switchframe pushed by cpu_set_kpc has already been
    184  * popped off the stack, so all this needs to do is pop the function
    185  * pointer into a register, call it, then pop the arg, and finally
    186  * return using the switchframe that remains on the stack.
    187  */
    188 GLOBAL(proc_trampoline)
    189 	movl	sp@+,a0			| function pointer
    190 	jbsr	a0@			| (*func)(procp)
    191 	addql	#4,sp			| toss the arg
    192 	rts				| as cpu_switch would do
    193 
    194 | That is all the assembly startup code we need on the sun3x!
    195 | The rest of this is like the hp300/locore.s where possible.
    196 
    197 /*
    198  * Trap/interrupt vector routines
    199  */
    200 #include <m68k/m68k/trap_subr.s>
    201 
    202 GLOBAL(buserr)
    203 	tstl	_C_LABEL(nofault)	| device probe?
    204 	jeq	_C_LABEL(addrerr)	| no, handle as usual
    205 	movl	_C_LABEL(nofault),sp@-	| yes,
    206 	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
    207 GLOBAL(addrerr)
    208 	clrl	sp@-			| stack adjust count
    209 	moveml	#0xFFFF,sp@-		| save user registers
    210 	movl	usp,a0			| save the user SP
    211 	movl	a0,sp@(FR_SP)		|   in the savearea
    212 	lea	sp@(FR_HW),a1		| grab base of HW berr frame
    213 	moveq	#0,d0
    214 	movw	a1@(10),d0		| grab SSW for fault processing
    215 	btst	#12,d0			| RB set?
    216 	jeq	LbeX0			| no, test RC
    217 	bset	#14,d0			| yes, must set FB
    218 	movw	d0,a1@(10)		| for hardware too
    219 LbeX0:
    220 	btst	#13,d0			| RC set?
    221 	jeq	LbeX1			| no, skip
    222 	bset	#15,d0			| yes, must set FC
    223 	movw	d0,a1@(10)		| for hardware too
    224 LbeX1:
    225 	btst	#8,d0			| data fault?
    226 	jeq	Lbe0			| no, check for hard cases
    227 	movl	a1@(16),d1		| fault address is as given in frame
     228 	jra	Lbe10			| that's it
    229 Lbe0:
    230 	btst	#4,a1@(6)		| long (type B) stack frame?
    231 	jne	Lbe4			| yes, go handle
    232 	movl	a1@(2),d1		| no, can use save PC
    233 	btst	#14,d0			| FB set?
    234 	jeq	Lbe3			| no, try FC
    235 	addql	#4,d1			| yes, adjust address
    236 	jra	Lbe10			| done
    237 Lbe3:
    238 	btst	#15,d0			| FC set?
    239 	jeq	Lbe10			| no, done
    240 	addql	#2,d1			| yes, adjust address
    241 	jra	Lbe10			| done
    242 Lbe4:
    243 	movl	a1@(36),d1		| long format, use stage B address
    244 	btst	#15,d0			| FC set?
    245 	jeq	Lbe10			| no, all done
    246 	subql	#2,d1			| yes, adjust address
    247 Lbe10:
    248 	movl	d1,sp@-			| push fault VA
    249 	movl	d0,sp@-			| and padded SSW
    250 	movw	a1@(6),d0		| get frame format/vector offset
    251 	andw	#0x0FFF,d0		| clear out frame format
    252 	cmpw	#12,d0			| address error vector?
    253 	jeq	Lisaerr			| yes, go to it
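         	| (The address error exception is vector 3, so its vector
         	| offset in the frame is 3*4 == 12.)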
    254 
    255 /* MMU-specific code to determine reason for bus error. */
    256 	movl	d1,a0			| fault address
    257 	movl	sp@,d0			| function code from ssw
    258 	btst	#8,d0			| data fault?
    259 	jne	Lbe10a
    260 	movql	#1,d0			| user program access FC
     261 					| (we don't separate data/program)
    262 	btst	#5,a1@			| supervisor mode?
    263 	jeq	Lbe10a			| if no, done
    264 	movql	#5,d0			| else supervisor program access
    265 Lbe10a:
    266 	ptestr	d0,a0@,#7		| do a table search
    267 	pmove	psr,sp@			| save result
    268 	movb	sp@,d1
    269 	btst	#2,d1			| invalid? (incl. limit viol and berr)
    270 	jeq	Lmightnotbemerr		| no -> wp check
    271 	btst	#7,d1			| is it MMU table berr?
    272 	jeq	Lismerr			| no, must be fast
    273 	jra	Lisberr1		| real bus err needs not be fast
    274 Lmightnotbemerr:
    275 	btst	#3,d1			| write protect bit set?
    276 	jeq	Lisberr1		| no, must be bus error
    277 	movl	sp@,d0			| ssw into low word of d0
    278 	andw	#0xc0,d0		| write protect is set on page:
    279 	cmpw	#0x40,d0		| was it read cycle?
    280 	jeq	Lisberr1		| yes, was not WPE, must be bus err
    281 /* End of MMU-specific bus error code. */
    282 
    283 Lismerr:
    284 	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
    285 	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    286 Lisaerr:
    287 	movl	#T_ADDRERR,sp@-		| mark address error
    288 	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    289 Lisberr1:
    290 	clrw	sp@			| re-clear pad word
    291 Lisberr:
    292 	movl	#T_BUSERR,sp@-		| mark bus error
    293 	jra	_ASM_LABEL(faultstkadj)	| and deal with it
    294 
    295 /*
    296  * FP exceptions.
    297  */
    298 GLOBAL(fpfline)
    299 	clrl	sp@-			| stack adjust count
    300 	moveml	#0xFFFF,sp@-		| save registers
    301 	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
    302 	jra	_ASM_LABEL(fault)	| do it
    303 
    304 GLOBAL(fpunsupp)
    305 	clrl	sp@-			| stack adjust count
    306 	moveml	#0xFFFF,sp@-		| save registers
    307 	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
    308 	jra	_ASM_LABEL(fault)	| do it
    309 
    310 /*
    311  * Handles all other FP coprocessor exceptions.
    312  * Note that since some FP exceptions generate mid-instruction frames
    313  * and may cause signal delivery, we need to test for stack adjustment
    314  * after the trap call.
    315  */
    316 GLOBAL(fpfault)
    317 	clrl	sp@-		| stack adjust count
    318 	moveml	#0xFFFF,sp@-	| save user registers
    319 	movl	usp,a0		| and save
    320 	movl	a0,sp@(FR_SP)	|   the user stack pointer
    321 	clrl	sp@-		| no VA arg
    322 	movl	_C_LABEL(curpcb),a0	| current pcb
    323 	lea	a0@(PCB_FPCTX),a0 | address of FP savearea
    324 	fsave	a0@		| save state
    325 	tstb	a0@		| null state frame?
    326 	jeq	Lfptnull	| yes, safe
    327 	clrw	d0		| no, need to tweak BIU
    328 	movb	a0@(1),d0	| get frame size
    329 	bset	#3,a0@(0,d0:w)	| set exc_pend bit of BIU
    330 Lfptnull:
    331 	fmovem	fpsr,sp@-	| push fpsr as code argument
    332 	frestore a0@		| restore state
    333 	movl	#T_FPERR,sp@-	| push type arg
    334 	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
    335 
    336 /*
     337  * Other exceptions only cause four- and six-word stack frames and require
    338  * no post-trap stack adjustment.
    339  */
    340 GLOBAL(badtrap)
    341 	clrl	sp@-			| stack adjust count
    342 	moveml	#0xFFFF,sp@-		| save std frame regs
    343 	jbsr	_C_LABEL(straytrap)	| report
    344 	moveml	sp@+,#0xFFFF		| restore regs
    345 	addql	#4, sp			| stack adjust count
    346 	jra	_ASM_LABEL(rei)		| all done
    347 
    348 /*
    349  * Trap 0 is for system calls
    350  */
    351 GLOBAL(trap0)
    352 	clrl	sp@-			| stack adjust count
    353 	moveml	#0xFFFF,sp@-		| save user registers
    354 	movl	usp,a0			| save the user SP
    355 	movl	a0,sp@(FR_SP)		|   in the savearea
    356 	movl	d0,sp@-			| push syscall number
    357 	jbsr	_C_LABEL(syscall)	| handle it
    358 	addql	#4,sp			| pop syscall arg
    359 	movl	sp@(FR_SP),a0		| grab and restore
    360 	movl	a0,usp			|   user SP
    361 	moveml	sp@+,#0x7FFF		| restore most registers
    362 	addql	#8,sp			| pop SP and stack adjust
    363 	jra	_ASM_LABEL(rei)		| all done
    364 
    365 /*
    366  * Trap 1 action depends on the emulation type:
    367  * NetBSD: sigreturn "syscall"
    368  *   HPUX: user breakpoint
    369  */
    370 GLOBAL(trap1)
    371 #if 0 /* COMPAT_HPUX */
    372 	/* If process is HPUX, this is a user breakpoint. */
    373 	jne	_C_LABEL(trap15)	| HPUX user breakpoint
    374 #endif
    375 	jra	_ASM_LABEL(sigreturn)	| NetBSD
    376 
    377 /*
    378  * Trap 2 action depends on the emulation type:
    379  * NetBSD: user breakpoint -- See XXX below...
    380  *  SunOS: cache flush
    381  *   HPUX: sigreturn
    382  */
    383 GLOBAL(trap2)
    384 #if 0 /* COMPAT_HPUX */
    385 	/* If process is HPUX, this is a sigreturn call */
    386 	jne	_ASM_LABEL(sigreturn)
    387 #endif
    388 	jra	_C_LABEL(trap15)	| NetBSD user breakpoint
    389 | XXX - Make NetBSD use trap 15 for breakpoints?
    390 | XXX - That way, we can allow this cache flush...
    391 | XXX SunOS trap #2 (and NetBSD?)
    392 | Flush on-chip cache (leave it enabled)
    393 |	movl	#CACHE_CLR,d0
    394 |	movc	d0,cacr
    395 |	rte
    396 
    397 /*
    398  * Trap 12 is the entry point for the cachectl "syscall"
    399  *	cachectl(command, addr, length)
    400  * command in d0, addr in a1, length in d1
    401  */
    402 GLOBAL(trap12)
    403 	movl	d1,sp@-			| push length
    404 	movl	a1,sp@-			| push addr
    405 	movl	d0,sp@-			| push command
    406 	jbsr	_C_LABEL(cachectl)	| do it
    407 	lea	sp@(12),sp		| pop args
    408 	jra	_ASM_LABEL(rei)		| all done
    409 
    410 /*
    411  * Trace (single-step) trap.  Kernel-mode is special.
    412  * User mode traps are simply passed on to trap().
    413  */
    414 GLOBAL(trace)
    415 	clrl	sp@-			| stack adjust count
    416 	moveml	#0xFFFF,sp@-
    417 	moveq	#T_TRACE,d0
    418 	btst	#5,sp@(FR_HW)		| was supervisor mode?
    419 	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
    420 	jra	_ASM_LABEL(fault)	| no, user-mode fault
    421 
    422 /*
    423  * Trap 15 is used for:
    424  *	- GDB breakpoints (in user programs)
    425  *	- KGDB breakpoints (in the kernel)
    426  *	- trace traps for SUN binaries (not fully supported yet)
    427  * User mode traps are simply passed to trap().
    428  */
    429 GLOBAL(trap15)
    430 	clrl	sp@-			| stack adjust count
    431 	moveml	#0xFFFF,sp@-
    432 	moveq	#T_TRAP15,d0
    433 	btst	#5,sp@(FR_HW)		| was supervisor mode?
    434 	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
    435 	jra	_ASM_LABEL(fault)	| no, user-mode fault
    436 
    437 ASLOCAL(kbrkpt)
    438 	| Kernel-mode breakpoint or trace trap. (d0=trap_type)
    439 	| Save the system sp rather than the user sp.
    440 	movw	#PSL_HIGHIPL,sr		| lock out interrupts
    441 	lea	sp@(FR_SIZE),a6		| Save stack pointer
    442 	movl	a6,sp@(FR_SP)		|  from before trap
    443 
    444 	| If we are not on tmpstk switch to it.
    445 	| (so debugger can change the stack pointer)
    446 	movl	a6,d1
    447 	cmpl	#_ASM_LABEL(tmpstk),d1
    448 	jls	Lbrkpt2 		| already on tmpstk
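         	| (tmpstk labels the high end of the temporary stack, which
         	| grows down, so an SP at or below that label is taken to
         	| already be on the temporary stack.)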
    449 	| Copy frame to the temporary stack
    450 	movl	sp,a0			| a0=src
    451 	lea	_ASM_LABEL(tmpstk)-96,a1	| a1=dst
    452 	movl	a1,sp			| sp=new frame
    453 	moveq	#FR_SIZE,d1
    454 Lbrkpt1:
    455 	movl	a0@+,a1@+
    456 	subql	#4,d1
    457 	bgt	Lbrkpt1
    458 
    459 Lbrkpt2:
    460 	| Call the trap handler for the kernel debugger.
    461 	| Do not call trap() to handle it, so that we can
    462 	| set breakpoints in trap() if we want.  We know
    463 	| the trap type is either T_TRACE or T_BREAKPOINT.
    464 	movl	d0,sp@-			| push trap type
    465 	jbsr	_C_LABEL(trap_kdebug)
    466 	addql	#4,sp			| pop args
    467 
    468 	| The stack pointer may have been modified, or
    469 	| data below it modified (by kgdb push call),
    470 	| so push the hardware frame at the current sp
    471 	| before restoring registers and returning.
    472 	movl	sp@(FR_SP),a0		| modified sp
    473 	lea	sp@(FR_SIZE),a1		| end of our frame
    474 	movl	a1@-,a0@-		| copy 2 longs with
    475 	movl	a1@-,a0@-		| ... predecrement
    476 	movl	a0,sp@(FR_SP)		| sp = h/w frame
    477 	moveml	sp@+,#0x7FFF		| restore all but sp
    478 	movl	sp@,sp			| ... and sp
    479 	rte				| all done
    480 
    481 /* Use common m68k sigreturn */
    482 #include <m68k/m68k/sigreturn.s>
    483 
    484 /*
    485  * Interrupt handlers.  Most are auto-vectored,
    486  * and hard-wired the same way on all sun3 models.
    487  * Format in the stack is:
    488  *   d0,d1,a0,a1, sr, pc, vo
    489  */
    490 
    491 #define INTERRUPT_SAVEREG \
    492 	moveml	#0xC0C0,sp@-
    493 
    494 #define INTERRUPT_RESTORE \
    495 	moveml	sp@+,#0x0303
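         | (moveml register masks are reversed between the two directions:
         | #0xC0C0 with predecrement pushes d0-d1/a0-a1, and #0x0303 with
         | postincrement pops the same four scratch registers.)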
    496 
    497 /*
    498  * This is the common auto-vector interrupt handler,
    499  * for which the CPU provides the vector=0x18+level.
    500  * These are installed in the interrupt vector table.
    501  */
    502 	.align	2
    503 GLOBAL(_isr_autovec)
    504 	INTERRUPT_SAVEREG
    505 	jbsr	_C_LABEL(isr_autovec)
    506 	INTERRUPT_RESTORE
    507 	jra	_ASM_LABEL(rei)
    508 
    509 /* clock: see clock.c */
    510 	.align	2
    511 GLOBAL(_isr_clock)
    512 	INTERRUPT_SAVEREG
    513 	jbsr	_C_LABEL(clock_intr)
    514 	INTERRUPT_RESTORE
    515 	jra	_ASM_LABEL(rei)
    516 
    517 | Handler for all vectored interrupts (i.e. VME interrupts)
    518 	.align	2
    519 GLOBAL(_isr_vectored)
    520 	INTERRUPT_SAVEREG
    521 	jbsr	_C_LABEL(isr_vectored)
    522 	INTERRUPT_RESTORE
    523 	jra	_ASM_LABEL(rei)
    524 
    525 #undef	INTERRUPT_SAVEREG
    526 #undef	INTERRUPT_RESTORE
    527 
    528 /* interrupt counters (needed by vmstat) */
    529 GLOBAL(intrnames)
    530 	.asciz	"spur"	| 0
    531 	.asciz	"lev1"	| 1
    532 	.asciz	"lev2"	| 2
    533 	.asciz	"lev3"	| 3
    534 	.asciz	"lev4"	| 4
    535 	.asciz	"clock"	| 5
    536 	.asciz	"lev6"	| 6
    537 	.asciz	"nmi"	| 7
    538 GLOBAL(eintrnames)
    539 
    540 	.data
    541 	.even
    542 GLOBAL(intrcnt)
    543 	.long	0,0,0,0,0,0,0,0,0,0
    544 GLOBAL(eintrcnt)
    545 	.text
    546 
    547 /*
    548  * Emulation of VAX REI instruction.
    549  *
    550  * This code is (mostly) un-altered from the hp300 code,
    551  * except that sun machines do not need a simulated SIR
    552  * because they have a real software interrupt register.
    553  *
    554  * This code deals with checking for and servicing ASTs
    555  * (profiling, scheduling) and software interrupts (network, softclock).
    556  * We check for ASTs first, just like the VAX.  To avoid excess overhead
    557  * the T_ASTFLT handling code will also check for software interrupts so we
    558  * do not have to do it here.  After identifying that we need an AST we
    559  * drop the IPL to allow device interrupts.
    560  *
    561  * This code is complicated by the fact that sendsig may have been called
    562  * necessitating a stack cleanup.
    563  */
    564 
    565 ASGLOBAL(rei)
    566 #ifdef	DIAGNOSTIC
     567 	tstl	_C_LABEL(panicstr)	| have we panicked?
    568 	jne	Ldorte			| yes, do not make matters worse
    569 #endif
    570 	tstl	_C_LABEL(astpending)	| AST pending?
    571 	jeq	Ldorte			| no, done
    572 Lrei1:
    573 	btst	#5,sp@			| yes, are we returning to user mode?
    574 	jne	Ldorte			| no, done
    575 	movw	#PSL_LOWIPL,sr		| lower SPL
    576 	clrl	sp@-			| stack adjust
    577 	moveml	#0xFFFF,sp@-		| save all registers
    578 	movl	usp,a1			| including
    579 	movl	a1,sp@(FR_SP)		|    the users SP
    580 	clrl	sp@-			| VA == none
    581 	clrl	sp@-			| code == none
    582 	movl	#T_ASTFLT,sp@-		| type == async system trap
    583 	jbsr	_C_LABEL(trap)		| go handle it
    584 	lea	sp@(12),sp		| pop value args
    585 	movl	sp@(FR_SP),a0		| restore user SP
    586 	movl	a0,usp			|   from save area
    587 	movw	sp@(FR_ADJ),d0		| need to adjust stack?
    588 	jne	Laststkadj		| yes, go to it
    589 	moveml	sp@+,#0x7FFF		| no, restore most user regs
    590 	addql	#8,sp			| toss SP and stack adjust
    591 	rte				| and do real RTE
    592 Laststkadj:
    593 	lea	sp@(FR_HW),a1		| pointer to HW frame
    594 	addql	#8,a1			| source pointer
    595 	movl	a1,a0			| source
    596 	addw	d0,a0			|  + hole size = dest pointer
    597 	movl	a1@-,a0@-		| copy
    598 	movl	a1@-,a0@-		|  8 bytes
    599 	movl	a0,sp@(FR_SP)		| new SSP
    600 	moveml	sp@+,#0x7FFF		| restore user registers
    601 	movl	sp@,sp			| and our SP
    602 Ldorte:
    603 	rte				| real return
    604 
    605 /*
    606  * Initialization is at the beginning of this file, because the
    607  * kernel entry point needs to be at zero for compatibility with
    608  * the Sun boot loader.  This works on Sun machines because the
    609  * interrupt vector table for reset is NOT at address zero.
    610  * (The MMU has a "boot" bit that forces access to the PROM)
    611  */
    612 
    613 /*
    614  * Use common m68k sigcode.
    615  */
    616 #include <m68k/m68k/sigcode.s>
    617 
    618 	.text
    619 
    620 /*
    621  * Primitives
    622  */
    623 
    624 /*
    625  * Use common m68k support routines.
    626  */
    627 #include <m68k/m68k/support.s>
    628 
    629 BSS(want_resched,4)
    630 
    631 /*
    632  * Use common m68k process manipulation routines.
    633  */
    634 #include <m68k/m68k/proc_subr.s>
    635 
    636 | Message for Lbadsw panic
    637 Lsw0:
    638 	.asciz	"cpu_switch"
    639 	.even
    640 
    641 	.data
    642 GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
    643 GLOBAL(curpcb)
    644 	.long	0
    645 ASBSS(nullpcb,SIZEOF_PCB)
    646 	.text
    647 
    648 /*
    649  * At exit of a process, do a cpu_switch for the last time.
    650  * Switch to a safe stack and PCB, and deallocate the process's resources.
    651  * The ipl is high enough to prevent the memory from being reallocated.
    652  */
    653 ENTRY(switch_exit)
    654 	movl	sp@(4),a0		| struct proc *p
    655 					| save state into garbage pcb
    656 	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
    657 	lea	_ASM_LABEL(tmpstk),sp	| goto a tmp stack
    658 	movl	a0,sp@-			| pass proc ptr down
    659 
    660 	/* Free old process's u-area. */
    661 	movl	#USPACE,sp@-		| size of u-area
    662 	movl	a0@(P_ADDR),sp@-	| address of process's u-area
    663 	movl	_C_LABEL(kernel_map),sp@-	| map it was allocated in
    664 	jbsr	_C_LABEL(kmem_free)		| deallocate it
    665 	lea	sp@(12),sp		| pop args
    666 
    667 	jra	_C_LABEL(cpu_switch)
    668 
    669 /*
    670  * When no processes are on the runq, cpu_switch() branches to idle
    671  * to wait for something to come ready.
    672  */
    673 	.data
    674 GLOBAL(Idle_count)
    675 	.long	0
    676 	.text
    677 
    678 Lidle:
    679 	stop	#PSL_LOWIPL
    680 GLOBAL(_Idle)				| See clock.c
    681 	movw	#PSL_HIGHIPL,sr
    682 	addql	#1, _C_LABEL(Idle_count)
    683 	tstl	_C_LABEL(whichqs)
    684 	jeq	Lidle
    685 	movw	#PSL_LOWIPL,sr
    686 	jra	Lsw1
    687 
    688 Lbadsw:
    689 	movl	#Lsw0,sp@-
    690 	jbsr	_C_LABEL(panic)
    691 	/*NOTREACHED*/
    692 
    693 /*
    694  * cpu_switch()
    695  * Hacked for sun3
    696  * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
     697  * XXX - Should we use p->p_addr instead of curpcb? -gwr
    698  */
    699 ENTRY(cpu_switch)
    700 	movl	_C_LABEL(curpcb),a1	| current pcb
    701 	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
    702 #ifdef notyet
    703 	movl	_C_LABEL(curproc),sp@-	| remember last proc running
    704 #endif
    705 	clrl	_C_LABEL(curproc)
    706 
    707 Lsw1:
    708 	/*
    709 	 * Find the highest-priority queue that isn't empty,
    710 	 * then take the first proc from that queue.
    711 	 */
    712 	clrl	d0
    713 	lea	_C_LABEL(whichqs),a0
    714 	movl	a0@,d1
    715 Lswchk:
    716 	btst	d0,d1
    717 	jne	Lswfnd
    718 	addqb	#1,d0
    719 	cmpb	#32,d0
    720 	jne	Lswchk
    721 	jra	_C_LABEL(_Idle)
    722 Lswfnd:
    723 	movw	#PSL_HIGHIPL,sr		| lock out interrupts
    724 	movl	a0@,d1			| and check again...
    725 	bclr	d0,d1
    726 	jeq	Lsw1			| proc moved, rescan
    727 	movl	d1,a0@			| update whichqs
    728 	moveq	#1,d1			| double check for higher priority
    729 	lsll	d0,d1			| process (which may have snuck in
    730 	subql	#1,d1			| while we were finding this one)
    731 	andl	a0@,d1
    732 	jeq	Lswok			| no one got in, continue
    733 	movl	a0@,d1
    734 	bset	d0,d1			| otherwise put this one back
    735 	movl	d1,a0@
    736 	jra	Lsw1			| and rescan
    737 Lswok:
    738 	movl	d0,d1
    739 	lslb	#3,d1			| convert queue number to index
    740 	addl	#_qs,d1			| locate queue (q)
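         	| (Each run queue head is assumed to be a pair of pointers,
         	| 8 bytes, hence the shift by 3 to form a byte offset into _qs.)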
    741 	movl	d1,a1
    742 	cmpl	a1@(P_FORW),a1		| anyone on queue?
    743 	jeq	Lbadsw			| no, panic
    744 	movl	a1@(P_FORW),a0		| p = q->p_forw
    745 	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
    746 	movl	a0@(P_FORW),a1		| q = p->p_forw
    747 	movl	a0@(P_BACK),a1@(P_BACK)	| q->p_back = p->p_back
    748 	cmpl	a0@(P_FORW),d1		| anyone left on queue?
    749 	jeq	Lsw2			| no, skip
    750 	movl	_C_LABEL(whichqs),d1
    751 	bset	d0,d1			| yes, reset bit
    752 	movl	d1,_C_LABEL(whichqs)
    753 Lsw2:
    754 	movl	a0,_C_LABEL(curproc)
    755 	clrl	_C_LABEL(want_resched)
    756 #ifdef notyet
    757 	movl	sp@+,a1			| XXX - Make this work!
    758 	cmpl	a0,a1			| switching to same proc?
    759 	jeq	Lswdone			| yes, skip save and restore
    760 #endif
    761 	/*
    762 	 * Save state of previous process in its pcb.
    763 	 */
    764 	movl	_C_LABEL(curpcb),a1
    765 	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
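         	| (#0xFCFC selects d2-d7 and a2-a7; the scratch registers
         	| d0-d1/a0-a1 need not survive a context switch.)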
    766 	movl	usp,a2			| grab USP (a2 has been saved)
    767 	movl	a2,a1@(PCB_USP)		| and save it
    768 
    769 	tstl	_C_LABEL(fputype)	| Do we have an fpu?
     770 	jeq	Lswnofpsave		| No?  Then don't try to save.
    771 	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
    772 	fsave	a2@			| save FP state
    773 	tstb	a2@			| null state frame?
    774 	jeq	Lswnofpsave		| yes, all done
    775 	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
    776 	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
    777 Lswnofpsave:
    778 
    779 	/*
    780 	 * Now that we have saved all the registers that must be
    781 	 * preserved, we are free to use those registers until
    782 	 * we load the registers for the switched-to process.
    783 	 * In this section, keep:  a0=curproc, a1=curpcb
    784 	 */
    785 
    786 #ifdef DIAGNOSTIC
    787 	tstl	a0@(P_WCHAN)
    788 	jne	Lbadsw
    789 	cmpb	#SRUN,a0@(P_STAT)
    790 	jne	Lbadsw
    791 #endif
    792 	clrl	a0@(P_BACK)		| clear back link
    793 	movl	a0@(P_ADDR),a1		| get p_addr
    794 	movl	a1,_C_LABEL(curpcb)
    795 
    796 	/*
    797 	 * Load the new VM context (new MMU root pointer)
    798 	 */
    799 	movl	a0@(P_VMSPACE),a2	| vm = p->p_vmspace
    800 #ifdef DIAGNOSTIC
    801 	tstl	a2			| vm == VM_MAP_NULL?
    802 	jeq	Lbadsw			| panic
    803 #endif
    804 #ifdef PMAP_DEBUG
    805 	/*
    806 	 * Just call pmap_activate() for now.  Later on,
    807 	 * use the in-line version below (for speed).
    808 	 */
    809 	movl	a2@(VM_PMAP),a2 	| pmap = vm->vm_map.pmap
    810 	pea	a2@			| push pmap
    811 	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(pmap)
    812 	addql	#4,sp
    813 	movl	_C_LABEL(curpcb),a1	| restore p_addr
    814 #else
    815 	/* XXX - Later, use this inline version. */
    816 	/* Just load the new CPU Root Pointer (MMU) */
    817 	lea	_C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
    818 	movl	a2@(VM_PMAP),a2 	| pmap = vm->vm_map.pmap
    819 	movl	a2@(PM_A_PHYS),d0	| phys = pmap->pm_a_phys
    820 	cmpl	a3@(4),d0		|  == kernel_crp.rp_addr ?
    821 	jeq	Lsame_mmuctx		| skip loadcrp/flush
    822 	/* OK, it is a new MMU context.  Load it up. */
    823 	movl	d0,a3@(4)
    824 	movl	#CACHE_CLR,d0
    825 	movc	d0,cacr			| invalidate cache(s)
    826 	pflusha				| flush entire TLB
    827 	pmove	a3@,crp			| load new user root pointer
    828 Lsame_mmuctx:
    829 #endif
    830 
    831 	/*
    832 	 * Reload the registers for the new process.
    833 	 * After this point we can only use d0,d1,a0,a1
    834 	 */
    835 	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
    836 	movl	a1@(PCB_USP),a0
    837 	movl	a0,usp			| and USP
    838 
    839 	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
    840 	jeq	Lres_skip		|  don't try to restore it.
    841 	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
    842 	tstb	a0@			| null state frame?
    843 	jeq	Lresfprest		| yes, easy
    844 	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
    845 	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
    846 Lresfprest:
    847 	frestore a0@			| restore state
    848 Lres_skip:
    849 	movw	a1@(PCB_PS),d0		| no, restore PS
    850 #ifdef DIAGNOSTIC
    851 	btst	#13,d0			| supervisor mode?
    852 	jeq	Lbadsw			| no? panic!
    853 #endif
    854 	movw	d0,sr			| OK, restore PS
    855 	moveq	#1,d0			| return 1 (for alternate returns)
    856 	rts
    857 
    858 /*
    859  * savectx(pcb)
    860  * Update pcb, saving current processor state.
    861  */
    862 ENTRY(savectx)
    863 	movl	sp@(4),a1
    864 	movw	sr,a1@(PCB_PS)
    865 	movl	usp,a0			| grab USP
    866 	movl	a0,a1@(PCB_USP)		| and save it
    867 	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
    868 
    869 	tstl	_C_LABEL(fputype)	| Do we have FPU?
    870 	jeq	Lsavedone		| No?  Then don't save state.
    871 	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
    872 	fsave	a0@			| save FP state
    873 	tstb	a0@			| null state frame?
    874 	jeq	Lsavedone		| yes, all done
    875 	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
    876 	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
    877 Lsavedone:
    878 	moveq	#0,d0			| return 0
    879 	rts
    880 
    881 /* suline() */
    882 
    883 #ifdef DEBUG
    884 	.data
    885 ASGLOBAL(fulltflush)
    886 	.long	0
    887 ASGLOBAL(fullcflush)
    888 	.long	0
    889 	.text
    890 #endif
    891 
    892 /*
    893  * Invalidate entire TLB.
    894  */
    895 ENTRY(TBIA)
    896 _C_LABEL(_TBIA):
    897 	pflusha
    898 	movl	#DC_CLEAR,d0
    899 	movc	d0,cacr			| invalidate on-chip d-cache
    900 	rts
    901 
    902 /*
    903  * Invalidate any TLB entry for given VA (TB Invalidate Single)
    904  */
    905 ENTRY(TBIS)
    906 #ifdef DEBUG
    907 	tstl	_ASM_LABEL(fulltflush)	| being conservative?
    908 	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
    909 #endif
    910 	movl	sp@(4),a0
    911 	pflush	#0,#0,a0@		| flush address from both sides
    912 	movl	#DC_CLEAR,d0
    913 	movc	d0,cacr			| invalidate on-chip data cache
    914 	rts
    915 
    916 /*
    917  * Invalidate supervisor side of TLB
    918  */
    919 ENTRY(TBIAS)
    920 #ifdef DEBUG
    921 	tstl	_ASM_LABEL(fulltflush)	| being conservative?
    922 	jne	_C_LABEL(_TBIA)		| yes, flush everything
    923 #endif
    924 	pflush	#4,#4			| flush supervisor TLB entries
    925 	movl	#DC_CLEAR,d0
    926 	movc	d0,cacr			| invalidate on-chip d-cache
    927 	rts
    928 
    929 /*
    930  * Invalidate user side of TLB
    931  */
    932 ENTRY(TBIAU)
    933 #ifdef DEBUG
    934 	tstl	_ASM_LABEL(fulltflush)	| being conservative?
    935 	jne	_C_LABEL(_TBIA)		| yes, flush everything
    936 #endif
    937 	pflush	#0,#4			| flush user TLB entries
    938 	movl	#DC_CLEAR,d0
    939 	movc	d0,cacr			| invalidate on-chip d-cache
    940 	rts
    941 
    942 /*
    943  * Invalidate instruction cache
    944  */
    945 ENTRY(ICIA)
    946 	movl	#IC_CLEAR,d0
    947 	movc	d0,cacr			| invalidate i-cache
    948 	rts
    949 
    950 /*
    951  * Invalidate data cache.
    952  * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
    953  * problems with DC_WA.  The only cases we have to worry about are context
    954  * switch and TLB changes, both of which are handled "in-line" in resume
    955  * and TBI*.
    956  */
    957 ENTRY(DCIA)
    958 __DCIA:
    959 	rts
    960 
    961 ENTRY(DCIS)
    962 __DCIS:
    963 	rts
    964 
    965 /*
    966  * Invalidate data cache.
    967  */
    968 ENTRY(DCIU)
    969 	movl	#DC_CLEAR,d0
    970 	movc	d0,cacr			| invalidate on-chip d-cache
    971 	rts
    972 
    973 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
    974 
    975 ENTRY(PCIA)
    976 	movl	#DC_CLEAR,d0
    977 	movc	d0,cacr			| invalidate on-chip d-cache
    978 	rts
    979 
    980 ENTRY(ecacheon)
    981 	rts
    982 
    983 ENTRY(ecacheoff)
    984 	rts
    985 
    986 /*
     987  * Get caller's current SP value.
    988  * Note that simply taking the address of a local variable in a C function
     989  * doesn't work because callee-saved registers may be outside the stack frame
    990  * defined by A6 (e.g. GCC generated code).
    991  *
    992  * [I don't think the ENTRY() macro will do the right thing with this -- glass]
    993  */
    994 GLOBAL(getsp)
    995 	movl	sp,d0			| get current SP
    996 	addql	#4,d0			| compensate for return address
    997 	rts
    998 
    999 ENTRY(getsfc)
   1000 	movc	sfc,d0
   1001 	rts
   1002 
   1003 ENTRY(getdfc)
   1004 	movc	dfc,d0
   1005 	rts
   1006 
   1007 ENTRY(getvbr)
   1008 	movc vbr, d0
   1009 	rts
   1010 
   1011 ENTRY(setvbr)
   1012 	movl sp@(4), d0
   1013 	movc d0, vbr
   1014 	rts
   1015 
   1016 /*
   1017  * Load a new CPU Root Pointer (CRP) into the MMU.
   1018  *	void	loadcrp(struct mmu_rootptr *);
   1019  */
   1020 ENTRY(loadcrp)
   1021 	movl	sp@(4),a0		| arg1: &CRP
   1022 	movl	#CACHE_CLR,d0
   1023 	movc	d0,cacr			| invalidate cache(s)
   1024 	pflusha				| flush entire TLB
   1025 	pmove	a0@,crp			| load new user root pointer
   1026 	rts
   1027 
   1028 /*
   1029  * Get the physical address of the PTE for a given VA.
   1030  */
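         | (ptestr with function code #5, i.e. supervisor data space, and
         | level #7 searches all table levels and leaves the physical address
         | of the last descriptor fetched in a1.)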
   1031 ENTRY(ptest_addr)
   1032 	movl	sp@(4),a0		| VA
   1033 	ptestr	#5,a0@,#7,a1		| a1 = addr of PTE
   1034 	movl	a1,d0
   1035 	rts
   1036 
   1037 /*
   1038  * Set processor priority level calls.  Most are implemented with
   1039  * inline asm expansions.  However, we need one instantiation here
   1040  * in case some non-optimized code makes external references.
   1041  * Most places will use the inlined functions param.h supplies.
   1042  */
   1043 
   1044 ENTRY(_getsr)
   1045 	clrl	d0
   1046 	movw	sr,d0
   1047 	rts
   1048 
   1049 ENTRY(_spl)
   1050 	clrl	d0
   1051 	movw	sr,d0
   1052 	movl	sp@(4),d1
   1053 	movw	d1,sr
   1054 	rts
   1055 
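         | Note: _splraise() never lowers the IPL; the new value is written
         | to sr only when it is above the current interrupt priority.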
   1056 ENTRY(_splraise)
   1057 	clrl	d0
   1058 	movw	sr,d0
   1059 	movl	d0,d1
   1060 	andl	#PSL_HIGHIPL,d1 	| old &= PSL_HIGHIPL
   1061 	cmpl	sp@(4),d1		| (old - new)
   1062 	bge	Lsplr
   1063 	movl	sp@(4),d1
   1064 	movw	d1,sr
   1065 Lsplr:
   1066 	rts
   1067 
   1068 /*
   1069  * Save and restore 68881 state.
   1070  */
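         | In both routines, a zero first byte in the fsave area is the null
         | frame, meaning the FPU holds no live state, so the data and
         | control registers are skipped.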
   1071 ENTRY(m68881_save)
   1072 	movl	sp@(4),a0		| save area pointer
   1073 	fsave	a0@			| save state
   1074 	tstb	a0@			| null state frame?
   1075 	jeq	Lm68881sdone		| yes, all done
   1076 	fmovem fp0-fp7,a0@(FPF_REGS)		| save FP general regs
   1077 	fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
   1078 Lm68881sdone:
   1079 	rts
   1080 
   1081 ENTRY(m68881_restore)
   1082 	movl	sp@(4),a0		| save area pointer
   1083 	tstb	a0@			| null state frame?
   1084 	jeq	Lm68881rdone		| yes, easy
   1085 	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
   1086 	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
   1087 Lm68881rdone:
   1088 	frestore a0@			| restore state
   1089 	rts
   1090 
   1091 /*
   1092  * _delay(unsigned N)
   1093  * Delay for at least (N/256) microseconds.
   1094  * This routine depends on the variable:  delay_divisor
   1095  * which should be set based on the CPU clock rate.
   1096  * XXX: Currently this is set in sun3_startup.c based on the
   1097  * XXX: CPU model but this should be determined at run time...
   1098  */
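         | Each pass around L_delay subtracts delay_divisor from (usecs << 8),
         | so one loop iteration is assumed to cost about delay_divisor/256
         | microseconds on the CPU in question.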
   1099 GLOBAL(_delay)
   1100 	| d0 = arg = (usecs << 8)
   1101 	movl	sp@(4),d0
   1102 	| d1 = delay_divisor;
   1103 	movl	_C_LABEL(delay_divisor),d1
   1104 L_delay:
   1105 	subl	d1,d0
   1106 	jgt	L_delay
   1107 	rts
   1108 
   1109 
   1110 | Define some addresses, mostly so DDB can print useful info.
   1111 	.globl	_C_LABEL(kernbase)
   1112 	.set	_C_LABEL(kernbase),KERNBASE
   1113 	.globl	_C_LABEL(dvma_base)
   1114 	.set	_C_LABEL(dvma_base),DVMA_SPACE_START
   1115 	.globl	_C_LABEL(prom_start)
   1116 	.set	_C_LABEL(prom_start),MONSTART
   1117 	.globl	_C_LABEL(prom_base)
   1118 	.set	_C_LABEL(prom_base),PROM_BASE
   1119 
   1120 |The end!
   1121