      1 /*	$NetBSD: locore.s,v 1.52 2025/12/04 02:55:23 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1980, 1990, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * the Systems Programming Group of the University of Utah Computer
      9  * Science Department.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. Neither the name of the University nor the names of its contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  *
     35  * from: Utah $Hdr: locore.s 1.66 92/12/22$
     36  *
     37  *	@(#)locore.s	8.6 (Berkeley) 5/27/94
     38  */
     39 
     40 /*
     41  * Copyright (c) 1994, 1995 Gordon W. Ross
     42  * Copyright (c) 1988 University of Utah.
     43  *
     44  * This code is derived from software contributed to Berkeley by
     45  * the Systems Programming Group of the University of Utah Computer
     46  * Science Department.
     47  *
     48  * Redistribution and use in source and binary forms, with or without
     49  * modification, are permitted provided that the following conditions
     50  * are met:
     51  * 1. Redistributions of source code must retain the above copyright
     52  *    notice, this list of conditions and the following disclaimer.
     53  * 2. Redistributions in binary form must reproduce the above copyright
     54  *    notice, this list of conditions and the following disclaimer in the
     55  *    documentation and/or other materials provided with the distribution.
     56  * 3. All advertising materials mentioning features or use of this software
     57  *    must display the following acknowledgement:
     58  *	This product includes software developed by the University of
     59  *	California, Berkeley and its contributors.
     60  * 4. Neither the name of the University nor the names of its contributors
     61  *    may be used to endorse or promote products derived from this software
     62  *    without specific prior written permission.
     63  *
     64  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     74  * SUCH DAMAGE.
     75  *
     76  * from: Utah $Hdr: locore.s 1.66 92/12/22$
     77  *
     78  *	@(#)locore.s	8.6 (Berkeley) 5/27/94
     79  */
     80 
     81 #include "opt_compat_netbsd.h"
     82 #include "opt_compat_sunos.h"
     83 #include "opt_ddb.h"
     84 #include "opt_fpsp.h"
     85 #include "opt_fpu_emulate.h"
     86 #include "opt_m68k_arch.h"
     87 
     88 #include "ksyms.h"
     89 #include "assym.h"
     90 #include <machine/asm.h>
     91 #include <machine/trap.h>
     92 
     93 /*
     94  * This is for kvm_mkdb, and should be the address of the beginning
     95  * of the kernel text segment (not necessarily the same as kernbase).
     96  */
     97 	.text
     98 GLOBAL(kernel_text)
     99 
    100 /*
    101  * Temporary stack for a variety of purposes.
     102  * Try to make this the first thing in the data segment so it
    103  * is page aligned.  Note that if we overflow here, we run into
    104  * our text segment.
    105  */
    106 	.data
    107 	.space	PAGE_SIZE
    108 ASLOCAL(tmpstk)
    109 
    110 	.text
    111 /*
    112  * Macro to relocate a symbol, used before MMU is enabled.
    113  */
    114 #define	_RELOC(var, ar)		\
    115 	lea	var-KERNBASE,ar;		\
    116 	addl	%a5,ar
    117 
    118 #define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
    119 #define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
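         /*
          * Illustrative expansion (not assembled, it only restates the macro
          * above): RELOC(physmem, %a0), as used further down, becomes
          *
          *	lea	_C_LABEL(physmem)-KERNBASE,%a0	| link-time offset
          *	addl	%a5,%a0				| + phys load point in %a5
          *
          * leaving the physical address of physmem in %a0.  Once the MMU is
          * enabled, symbols are referenced directly and %a5 is free again.
          */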
    120 
    121 /*
    122  * Initialization
    123  *
    124  * A4 contains the address of the end of the symtab
    125  * A5 contains physical load point from boot
    126  * VBR contains zero from ROM.  Exceptions will continue to vector
     127  * through ROM until the MMU is turned on, at which time they will vector
    128  * through our table (vectors.s).
    129  */
    130 
    131 BSS(lowram,4)
    132 BSS(esym,4)
    133 
    134 	.text
    135 ASENTRY_NOPROFILE(start)
    136 	movw	#PSL_HIGHIPL, %sr	| no interrupts
    137 	movl	#CACHE_OFF, %d0
    138 	movc	%d0, %cacr		| clear and disable on-chip cache(s)
    139 
    140 	/* XXX fixed load address */
    141 	movl	#0x20100000, %a5
    142 
    143 	movl	#0x20000000, %a0
    144 	RELOC(edata, %a1)
    145 1:
    146 	movl	%a5@+, %a0@+
    147 	cmpl	%a5, %a1
    148 	bne	1b
    149 
    150 	movl	#0x20000000, %a5
    151 
    152 	ASRELOC(tmpstk, %a0)
    153 	movl	%a0, %sp		| give ourselves a temporary stack
    154 
    155 	RELOC(edata, %a0)
    156 	RELOC(end, %a1)
    157 2:
    158 	clrb	%a0@+
    159 	cmpl	%a0, %a1
    160 	bne	2b
    161 
    162 	RELOC(esym, %a0)
    163 #if 0
    164 	movl	%a4, %a0@		| store end of symbol table
    165 #else
    166 	clrl	%a0@			| no symbol table, yet
    167 #endif
    168 
    169 	RELOC(lowram, %a0)
    170 	movl	%a5, %a0@		| store start of physical memory
    171 
    172 #if 0
    173 	RELOC(boothowto, %a0)		| save reboot flags
    174 	movl	%d7, %a0@
    175 	RELOC(bootdev, %a0)		|   and boot device
    176 	movl	%d6, %a0@
    177 #endif
    178 
    179 	/*
    180 	 * All data registers are now free.  All address registers
    181 	 * except a5 are free.  a5 is used by the RELOC() macro,
    182 	 * and cannot be used until after the MMU is enabled.
    183 	 */
    184 
    185 /* determine our CPU/MMU combo - check for all regardless of kernel config */
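         /*
          * The probe relies on %cacr bits that exist only on particular CPU
          * models; a bit written to a nonexistent %cacr position reads back
          * as zero.  Bit 9 (the 68030 data-freeze bit) sticking means 68030;
          * otherwise bit 31 (the 68040 data-cache enable bit) sticking means
          * 68040.
          */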
    186 	movl	#0x200,%d0		| data freeze bit
    187 	movc	%d0,%cacr		|   only exists on 68030
    188 	movc	%cacr,%d0		| read it back
    189 	tstl	%d0			| zero?
    190 	jeq	Lnot68030		| yes, we have 68020/68040
    191 	RELOC(mmutype, %a0)		| no, we have 68030
    192 	movl	#MMU_68030,%a0@		| set to reflect 68030 PMMU
    193 	RELOC(cputype, %a0)
    194 	movl	#CPU_68030,%a0@		| and 68030 CPU
    195 	jra	Lstart1
    196 Lnot68030:
    197 	bset	#31,%d0			| data cache enable bit
    198 	movc	%d0,%cacr		|   only exists on 68040
    199 	movc	%cacr,%d0		| read it back
    200 	tstl	%d0			| zero?
    201 	beq	Lis68020		| yes, we have 68020
    202 	moveq	#0,%d0			| now turn it back off
    203 	movec	%d0,%cacr		|   before we access any data
    204 	RELOC(mmutype, %a0)
    205 	movl	#MMU_68040,%a0@		| with a 68040 MMU
    206 	RELOC(cputype, %a0)
    207 	movl	#CPU_68040,%a0@		| and a 68040 CPU
    208 	RELOC(fputype, %a0)
    209 	movl	#FPU_68040,%a0@		| ...and FPU
    210 	jra	Lstart1
    211 Lis68020:
    212 	/* impossible */
    213 
    214 Lstart1:
    215 
    216 /* initialize memory size (for pmap_bootstrap) */
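         /*
          * The location 0x5c00ac00 is read here as a board configuration
          * register (an assumption based on the decode below): with the low
          * byte masked by 0x60, a value of 0 means 8MB of RAM, 0x20 means
          * 32MB, and anything else is unrecognized, in which case we fall
          * back to assuming at least 4MB.
          */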
    217 	movl	0x5c00ac00, %d0
    218 	andb	#0x60, %d0
    219 	jne	Lnot8M
    220 	movl	#0x20800000, %d1	| memory end, 8M
    221 	jra	Lmemok
    222 Lnot8M:
    223 	cmpb	#0x20, %d0
    224 	jne	Lunkmem
    225 	movl	#0x22000000, %d1	| memory end, 32M
    226 	jra	Lmemok
    227 Lunkmem:
     228 	/* ??? -- unrecognized memory configuration */
    229 	movl	#0x20400000, %d1	| memory end, assume at least 4M
    230 
    231 Lmemok:
    232 	moveq	#PGSHIFT,%d2
    233 	lsrl	%d2,%d1			| convert to page (click) number
    234 	movl	%a5,%d0			| lowram value from ROM via boot
    235 	lsrl	%d2,%d0			| convert to page number
    236 	subl	%d0,%d1			| compute amount of RAM present
    237 	RELOC(physmem, %a0)
    238 	movl	%d1,%a0@		| and physmem
    239 /* configure kernel and lwp0 VA space so we can get going */
    240 	.globl	_Sysseg_pa, _pmap_bootstrap, _avail_start
    241 #if NKSYMS || defined(DDB) || defined(MODULAR)
     242 	RELOC(esym,%a0)			| end of static kernel text/data/syms
    243 	movl	%a0@,%a4
    244 	tstl	%a4
    245 	jne	Lstart2
    246 #endif
    247 	movl	#_C_LABEL(end),%a4	| end of static kernel text/data
    248 Lstart2:
    249 	addl	%a5,%a4			| convert to PA
    250 	subl	#KERNBASE, %a4
    251 	pea	%a5@			| firstpa
    252 	pea	%a4@			| nextpa
    253 	RELOC(pmap_bootstrap,%a0)
    254 	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
    255 	addql	#8,%sp
    256 
    257 /*
    258  * Prepare to enable MMU.
    259  */
    260 	RELOC(Sysseg_pa, %a0)		| system segment table addr
    261 	movl	%a0@,%d1		| read value (a PA)
    262 	subl	#KERNBASE, %d1
    263 
    264 	RELOC(mmutype, %a0)
    265 	cmpl	#MMU_68040,%a0@		| 68040?
    266 	jne	Lmotommu1		| no, skip
    267 	.long	0x4e7b1807		| movc d1,srp
    268 	jra	Lstploaddone
    269 Lmotommu1:
    270 #ifdef M68030
    271 	RELOC(protorp, %a0)
    272 	movl	%d1,%a0@(4)		| segtable address
    273 	pmove	%a0@,%srp		| load the supervisor root pointer
    274 #endif /* M68030 */
    275 Lstploaddone:
    276 
    277 	RELOC(mmutype, %a0)
    278 	cmpl	#MMU_68040,%a0@		| 68040?
    279 	jne	Lmotommu2		| no, skip
    280 
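         	/*
         	 * A reading of 0x2000c000 as a 68040 transparent-translation
         	 * value (decoded from the TTR layout, not stated in the original
         	 * source): address base 0x20, address mask 0x00 (one 16MB block
         	 * at 0x20000000), E=1 (enabled), S-field=10 (match both user and
         	 * supervisor accesses).  This keeps physical RAM addressable
         	 * while the MMU is being switched on.
         	 */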
    281 	movel #0x2000c000, %d0		| double map RAM
    282 	.long	0x4e7b0004		| movc d0,itt0
    283 	.long	0x4e7b0006		| movc d0,dtt0
    284 	moveq	#0, %d0			| ensure TT regs are disabled
    285 	.long	0x4e7b0005		| movc d0,itt1
    286 	.long	0x4e7b0007		| movc d0,dtt1
    287 
    288 	.word	0xf4d8			| cinva bc
    289 	.word	0xf518			| pflusha
    290 
    291 	movl	#MMU40_TCR_BITS, %d0
    292 	.long	0x4e7b0003		| movc d0,tc
    293 	movl	#0x80008000, %d0
    294 	movc	%d0, %cacr		| turn on both caches
    295 
    296 	jmp	Lenab1:l		| avoid pc-relative
    297 Lmotommu2:
    298 	/* XXX do TT here */
    299 	pflusha
    300 	RELOC(prototc, %a2)
    301 	movl	#MMU51_TCR_BITS,%a2@	| value to load TC with
    302 	pmove	%a2@,%tc		| load it
    303 	jmp	Lenab1
    304 
    305 /*
    306  * Should be running mapped from this point on
    307  */
    308 Lenab1:
    309 	.word	0xf4d8			| cinva bc
    310 	.word	0xf518			| pflusha
    311 	nop
    312 	nop
    313 	nop
    314 	nop
    315 	nop
    316 	moveq	#0,%d0			| ensure TT regs are disabled
    317 	.long	0x4e7b0004		| movc d0,itt0
    318 	.long	0x4e7b0005		| movc d0,itt1
    319 	.long	0x4e7b0006		| movc d0,dtt0
    320 	.long	0x4e7b0007		| movc d0,dtt1
    321 
    322 	lea	_ASM_LABEL(tmpstk),%sp	| re-load temporary stack
    323 	jbsr	_C_LABEL(vec_init)	| initialize vector table
    324 /* phase 2 of pmap setup, returns pointer to lwp0 uarea in %a0 */
    325 	jbsr	_C_LABEL(pmap_bootstrap2)
    326 /* set kernel stack, user SP */
    327 	lea	%a0@(USPACE-4),%sp	| set kernel stack to end of area
    328 	movl	#USRSTACK-4,%a2
    329 	movl	%a2,%usp		| init user SP
    330 
    331 	tstl	_C_LABEL(fputype)	| Have an FPU?
    332 	jeq	Lenab2			| No, skip.
    333 	clrl	%a0@(PCB_FPCTX)		| ensure null FP context
    334 	pea	%a0@(PCB_FPCTX)
    335 	jbsr	_C_LABEL(m68881_restore)   | restore it (does not kill %a0)
    336 	addql	#4,%sp
    337 Lenab2:
    338 
    339 /* flush TLB and turn on caches */
    340 	jbsr	_C_LABEL(_TBIA)		| invalidate TLB
    341 	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
    342 	jeq	Lnocache0		| yes, cache already on
    343 	movl	#CACHE_ON,%d0
    344 	movc	%d0,%cacr		| clear cache(s)
    345 Lnocache0:
    346 
    347 /* Final setup for call to main(). */
    348 	jbsr	_C_LABEL(fic_init)
    349 
    350 /*
    351  * Create a fake exception frame so that cpu_lwp_fork() can copy it.
     352  * main() never returns; we exit to user mode from a forked process
    353  * later on.
    354  */
    355 	clrw	%sp@-			| vector offset/frame type
    356 	clrl	%sp@-			| PC - filled in by "execve"
    357 	movw	#PSL_USER,%sp@-		| in user mode
    358 	clrl	%sp@-			| stack adjust count and padding
    359 	lea	%sp@(-64),%sp		| construct space for D0-D7/A0-A7
    360 	lea	_C_LABEL(lwp0),%a0	| save pointer to frame
    361 	movl	%sp,%a0@(L_MD_REGS)	|   in lwp0.l_md.md_regs
    362 
    363 	jra	_C_LABEL(main)		| main()
    364 
    365 	pea	Lmainreturned		| Yow!  Main returned!
    366 	jbsr	_C_LABEL(panic)
    367 	/* NOTREACHED */
    368 Lmainreturned:
    369 	.asciz	"main() returned"
    370 	.even
    371 
    372 /*
    373  * Trap/interrupt vector routines
    374  */
    375 #include <m68k/m68k/trap_subr.s>
    376 
    377 /*
    378  * Use common m68k bus error and address error handlers.
    379  */
    380 #include <m68k/m68k/busaddrerr.s>
    381 
    382 /*
    383  * FP exceptions.
    384  */
    385 ENTRY_NOPROFILE(fpfline)
    386 #if defined(M68040)
    387 	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
    388 	jne	Lfp_unimp		| no, skip FPSP
    389 	cmpw	#0x202c,%sp@(6)		| format type 2?
    390 	jne	_C_LABEL(illinst)	| no, not an FP emulation
    391 Ldofp_unimp:
    392 #ifdef FPSP
    393 #if 0
    394 	addl	#1, _C_LABEL(evcnt_fpsp_unimp)+EVCNT_COUNT
    395 #endif
    396 	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
    397 #endif
    398 Lfp_unimp:
    399 #endif /* M68040 */
    400 #ifdef FPU_EMULATE
    401 	clrl	%sp@-			| stack adjust count
    402 	moveml	#0xFFFF,%sp@-		| save registers
    403 	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
    404 	jra	_ASM_LABEL(fault)	| do it
    405 #else
    406 	jra	_C_LABEL(illinst)
    407 #endif
    408 
    409 ENTRY_NOPROFILE(fpunsupp)
    410 #if defined(M68040)
    411 	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
    412 	jne	_C_LABEL(illinst)	| no, treat as illinst
    413 #ifdef FPSP
    414 #if 0
    415 	addl	#1, _C_LABEL(evcnt_fpsp_unsupp)+EVCNT_COUNT
    416 #endif
    417 	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
    418 #endif
    419 Lfp_unsupp:
    420 #endif /* M68040 */
    421 #ifdef FPU_EMULATE
    422 	clrl	%sp@-			| stack adjust count
    423 	moveml	#0xFFFF,%sp@-		| save registers
    424 	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
    425 	jra	_ASM_LABEL(fault)	| do it
    426 #else
    427 	jra	_C_LABEL(illinst)
    428 #endif
    429 
    430 /*
    431  * Handles all other FP coprocessor exceptions.
    432  * Note that since some FP exceptions generate mid-instruction frames
    433  * and may cause signal delivery, we need to test for stack adjustment
    434  * after the trap call.
    435  */
    436 ENTRY_NOPROFILE(fpfault)
    437 	clrl	%sp@-		| stack adjust count
    438 	moveml	#0xFFFF,%sp@-	| save user registers
    439 	movl	%usp,%a0		| and save
    440 	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
    441 	clrl	%sp@-		| no VA arg
    442 	movl	_C_LABEL(curpcb),%a0 | current pcb
    443 	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
    444 	fsave	%a0@		| save state
    445 #if defined(M68040) || defined(M68060)
    446 	/* always null state frame on 68040, 68060 */
    447 	cmpl	#FPU_68040,_C_LABEL(fputype)
    448 	jge	Lfptnull
    449 #endif
    450 	tstb	%a0@		| null state frame?
    451 	jeq	Lfptnull	| yes, safe
    452 	clrw	%d0		| no, need to tweak BIU
    453 	movb	%a0@(1),%d0	| get frame size
    454 	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
    455 Lfptnull:
    456 	fmovem	%fpsr,%sp@-	| push %fpsr as code argument
    457 	frestore %a0@		| restore state
    458 	movl	#T_FPERR,%sp@-	| push type arg
    459 	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
    460 
    461 
    462 ENTRY_NOPROFILE(badtrap)
    463 	moveml	#0xC0C0,%sp@-		| save scratch regs
    464 	movw	%sp@(22),%sp@-		| push exception vector info
    465 	clrw	%sp@-
    466 	movl	%sp@(22),%sp@-		| and PC
    467 	jbsr	_C_LABEL(straytrap)	| report
    468 	addql	#8,%sp			| pop args
    469 	moveml	%sp@+,#0x0303		| restore regs
    470 	jra	_ASM_LABEL(rei)		| all done
    471 
    472 ENTRY_NOPROFILE(trap0)
    473 	clrl	%sp@-			| stack adjust count
    474 	moveml	#0xFFFF,%sp@-		| save user registers
    475 	movl	%usp,%a0			| save the user SP
    476 	movl	%a0,%sp@(FR_SP)		|   in the savearea
    477 	movl	%d0,%sp@-			| push syscall number
    478 	jbsr	_C_LABEL(syscall)	| handle it
    479 	addql	#4,%sp			| pop syscall arg
    480 	tstl	_C_LABEL(astpending)
    481 	jne	Lrei2
    482 	tstb	_C_LABEL(ssir)
    483 	jeq	Ltrap1
    484 	movw	#SPL1,%sr
    485 	tstb	_C_LABEL(ssir)
    486 	jne	Lsir1
    487 Ltrap1:
    488 	movl	%sp@(FR_SP),%a0		| grab and restore
    489 	movl	%a0,%usp			|   user SP
    490 	moveml	%sp@+,#0x7FFF		| restore most registers
    491 	addql	#8,%sp			| pop SP and stack adjust
    492 	rte
    493 
    494 /*
    495  * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
    496  *	cachectl(command, addr, length)
    497  * command in d0, addr in a1, length in d1
    498  */
    499 ENTRY_NOPROFILE(trap12)
    500 	movl	_C_LABEL(curlwp),%a0
    501 	movl	%a0@(L_PROC),%sp@-	| push current proc pointer
    502 	movl	%d1,%sp@-			| push length
    503 	movl	%a1,%sp@-			| push addr
    504 	movl	%d0,%sp@-			| push command
    505 	jbsr	_C_LABEL(cachectl1)	| do it
    506 	lea	%sp@(16),%sp		| pop args
    507 	jra	_ASM_LABEL(rei)		| all done
    508 
    509 /*
    510  * Trace (single-step) trap.  Kernel-mode is special.
    511  * User mode traps are simply passed on to trap().
    512  */
    513 ENTRY_NOPROFILE(trace)
    514 	clrl	%sp@-			| stack adjust count
    515 	moveml	#0xFFFF,%sp@-
    516 	moveq	#T_TRACE,%d0
    517 
     518 	| Check the PSW to see what happened.
    519 	|   T=0 S=0	(should not happen)
    520 	|   T=1 S=0	trace trap from user mode
    521 	|   T=0 S=1	trace trap on a trap instruction
    522 	|   T=1 S=1	trace trap from system mode (kernel breakpoint)
    523 
    524 	movw	%sp@(FR_HW),%d1		| get PSW
    525 	notw	%d1			| XXX no support for T0 on 680[234]0
    526 	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
    527 	jeq	Lkbrkpt			| yes, kernel breakpoint
    528 	jra	_ASM_LABEL(fault)	| no, user-mode fault
    529 
    530 
    531 /*
    532  * Trap 15 is used for:
    533  *	- GDB breakpoints (in user programs)
    534  *	- KGDB breakpoints (in the kernel)
    535  *	- trace traps for SUN binaries (not fully supported yet)
    536  * User mode traps are simply passed to trap().
    537  */
    538 ENTRY_NOPROFILE(trap15)
    539 	clrl	%sp@-			| stack adjust count
    540 	moveml	#0xFFFF,%sp@-
    541 	moveq	#T_TRAP15,%d0
    542 	movw	%sp@(FR_HW),%d1		| get PSW
    543 	andw	#PSL_S,%d1		| from system mode?
    544 	jne	Lkbrkpt			| yes, kernel breakpoint
    545 	jra	_ASM_LABEL(fault)	| no, user-mode fault
    546 
    547 Lkbrkpt: | Kernel-mode breakpoint or trace trap. (%d0=trap_type)
    548 	| Save the system sp rather than the user sp.
    549 	movw	#PSL_HIGHIPL,%sr		| lock out interrupts
    550 	lea	%sp@(FR_SIZE),%a6		| Save stack pointer
    551 	movl	%a6,%sp@(FR_SP)		|  from before trap
    552 
     553 	| If we are not on tmpstk, switch to it
     554 	| (so the debugger can change the stack pointer).
    555 	movl	%a6,%d1
    556 	cmpl	#_ASM_LABEL(tmpstk),%d1
    557 	jls	Lbrkpt2			| already on tmpstk
    558 	| Copy frame to the temporary stack
    559 	movl	%sp,%a0			| %a0=src
    560 	lea	_ASM_LABEL(tmpstk)-96,%a1 | a1=dst
    561 	movl	%a1,%sp			| %sp=new frame
    562 	moveq	#FR_SIZE,%d1
    563 Lbrkpt1:
    564 	movl	%a0@+,%a1@+
    565 	subql	#4,%d1
    566 	bgt	Lbrkpt1
    567 
    568 Lbrkpt2:
    569 	| Call the trap handler for the kernel debugger.
    570 	| Do not call trap() to do it, so that we can
    571 	| set breakpoints in trap() if we want.  We know
    572 	| the trap type is either T_TRACE or T_BREAKPOINT.
    573 	movl	%d0,%sp@-		| push trap type
    574 	jbsr	_C_LABEL(trap_kdebug)
    575 	addql	#4,%sp			| pop args
    576 
    577 	| The stack pointer may have been modified, or
    578 	| data below it modified (by kgdb push call),
    579 	| so push the hardware frame at the current sp
    580 	| before restoring registers and returning.
    581 
    582 	movl	%sp@(FR_SP),%a0		| modified %sp
    583 	lea	%sp@(FR_SIZE),%a1		| end of our frame
    584 	movl	%a1@-,%a0@-		| copy 2 longs with
    585 	movl	%a1@-,%a0@-		| ... predecrement
    586 	movl	%a0,%sp@(FR_SP)		| %sp = h/w frame
    587 	moveml	%sp@+,#0x7FFF		| restore all but %sp
    588 	movl	%sp@,%sp			| ... and %sp
    589 	rte				| all done
    590 
    591 /*
    592  * Interrupt handlers.
    593  */
    594 
    595 ENTRY_NOPROFILE(lev6intr)	/* Level 6: clock */
    596 	INTERRUPT_SAVEREG
    597 	/* XXX */
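         	/*
         	 * Sketch of what follows, inferred from the code (the meaning of
         	 * the hardware bits is an assumption): read the status word at
         	 * *clockbase, write the same value back (presumably to
         	 * acknowledge the interrupt), then dispatch on bit 2: set means
         	 * a system clock tick (hardclock), clear means the other timer
         	 * (otherclock), which gets the status word as its argument.
         	 */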
    598 	movl _C_LABEL(clockbase), %a0
    599 	movl %a0@, %d0
    600 	movl %d0, %a0@
    601 	btst #2, %d0
    602 	jeq 1f
    603 	addql	#1,_C_LABEL(m68k_intr_evcnt)+CLOCK_INTRCNT
    604 	lea	%sp@(0), %a1		| a1 = &clockframe
    605 	movl	%a1, %sp@-
    606 	jbsr	_C_LABEL(hardclock)	| hardclock(&frame)
    607 	addql	#4, %sp
    608 	jra 2f
    609 1:
    610 	movl	%d0, %sp@-
    611 	jbsr	_C_LABEL(otherclock)
    612 	addql	#4, %sp
    613 2:
    614 	INTERRUPT_RESTOREREG
    615 	jra	_ASM_LABEL(rei)		| all done
    616 
    617 ENTRY_NOPROFILE(lev7intr)	/* level 7: parity errors, reset key */
    618 	addql	#1,_C_LABEL(m68k_intr_evcnt)+NMI_INTRCNT
    619 	clrl	%sp@-
    620 	moveml	#0xFFFF,%sp@-		| save registers
    621 	movl	%usp,%a0			| and save
    622 	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
    623 	jbsr	_C_LABEL(nmihand)	| call handler
    624 	movl	%sp@(FR_SP),%a0		| restore
    625 	movl	%a0,%usp			|   user SP
    626 	moveml	%sp@+,#0x7FFF		| and remaining registers
    627 	addql	#8,%sp			| pop SP and stack adjust
    628 	jra	_ASM_LABEL(rei)		| all done
    629 
    630 /*
    631  * Emulation of VAX REI instruction.
    632  *
    633  * This code deals with checking for and servicing ASTs
    634  * (profiling, scheduling) and software interrupts (network, softclock).
    635  * We check for ASTs first, just like the VAX.  To avoid excess overhead
    636  * the T_ASTFLT handling code will also check for software interrupts so we
    637  * do not have to do it here.  After identifying that we need an AST we
    638  * drop the IPL to allow device interrupts.
    639  *
    640  * This code is complicated by the fact that sendsig may have been called
    641  * necessitating a stack cleanup.
    642  */
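         /*
          * Flow summary (restating the code below): if an AST is pending and
          * we are returning to user mode, build a full trap frame and call
          * trap(T_ASTFLT); otherwise, if a software interrupt is pending and
          * the frame we are returning to was at IPL 0, raise to SPL1 and call
          * trap(T_SSIR); in all other cases just rte.
          */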
    643 BSS(ssir,1)
    644 
    645 ASENTRY_NOPROFILE(rei)
    646 	tstl	_C_LABEL(astpending)	| AST pending?
    647 	jeq	Lchksir			| no, go check for SIR
    648 Lrei1:
    649 	btst	#5,%sp@			| yes, are we returning to user mode?
    650 	jne	Lchksir			| no, go check for SIR
    651 	movw	#PSL_LOWIPL,%sr		| lower SPL
    652 	clrl	%sp@-			| stack adjust
    653 	moveml	#0xFFFF,%sp@-		| save all registers
    654 	movl	%usp,%a1			| including
    655 	movl	%a1,%sp@(FR_SP)		|    the users SP
    656 Lrei2:
    657 	clrl	%sp@-			| VA == none
    658 	clrl	%sp@-			| code == none
    659 	movl	#T_ASTFLT,%sp@-		| type == async system trap
    660 	pea	%sp@(12)		| fp == address of trap frame
    661 	jbsr	_C_LABEL(trap)		| go handle it
    662 	lea	%sp@(16),%sp		| pop value args
    663 	movl	%sp@(FR_SP),%a0		| restore user SP
    664 	movl	%a0,%usp			|   from save area
    665 	movw	%sp@(FR_ADJ),%d0		| need to adjust stack?
    666 	jne	Laststkadj		| yes, go to it
    667 	moveml	%sp@+,#0x7FFF		| no, restore most user regs
    668 	addql	#8,%sp			| toss SP and stack adjust
    669 	rte				| and do real RTE
    670 Laststkadj:
    671 	lea	%sp@(FR_HW),%a1		| pointer to HW frame
    672 	addql	#8,%a1			| source pointer
    673 	movl	%a1,%a0			| source
    674 	addw	%d0,%a0			|  + hole size = dest pointer
    675 	movl	%a1@-,%a0@-		| copy
    676 	movl	%a1@-,%a0@-		|  8 bytes
    677 	movl	%a0,%sp@(FR_SP)		| new SSP
    678 	moveml	%sp@+,#0x7FFF		| restore user registers
    679 	movl	%sp@,%sp			| and our SP
    680 	rte				| and do real RTE
    681 Lchksir:
    682 	tstb	_C_LABEL(ssir)		| SIR pending?
    683 	jeq	Ldorte			| no, all done
    684 	movl	%d0,%sp@-			| need a scratch register
    685 	movw	%sp@(4),%d0		| get SR
    686 	andw	#PSL_IPL7,%d0		| mask all but IPL
    687 	jne	Lnosir			| came from interrupt, no can do
    688 	movl	%sp@+,%d0			| restore scratch register
    689 Lgotsir:
    690 	movw	#SPL1,%sr		| prevent others from servicing int
    691 	tstb	_C_LABEL(ssir)		| too late?
    692 	jeq	Ldorte			| yes, oh well...
    693 	clrl	%sp@-			| stack adjust
    694 	moveml	#0xFFFF,%sp@-		| save all registers
    695 	movl	%usp,%a1			| including
    696 	movl	%a1,%sp@(FR_SP)		|    the users SP
    697 Lsir1:
    698 	clrl	%sp@-			| VA == none
    699 	clrl	%sp@-			| code == none
    700 	movl	#T_SSIR,%sp@-		| type == software interrupt
    701 	pea	%sp@(12)		| fp == address of trap frame
    702 	jbsr	_C_LABEL(trap)		| go handle it
    703 	lea	%sp@(16),%sp		| pop value args
    704 	movl	%sp@(FR_SP),%a0		| restore
    705 	movl	%a0,%usp			|   user SP
    706 	moveml	%sp@+,#0x7FFF		| and all remaining registers
    707 	addql	#8,%sp			| pop SP and stack adjust
    708 	rte
    709 Lnosir:
    710 	movl	%sp@+,%d0			| restore scratch register
    711 Ldorte:
    712 	rte				| real return
    713 
    714 /*
    715  * Primitives
    716  */
    717 
    718 /*
    719  * Use common m68k process/lwp switch and context save subroutines.
    720  */
    721 #define	FPCOPROC	/* XXX: Temporarily required */
    722 #include <m68k/m68k/switch_subr.s>
    723 
    724 
    725 #if defined(M68040)
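         /*
          * suline(user_addr, kernel_line) -- sketch of intent, the argument
          * names are mine: copy one 16-byte, cache-line-sized chunk of four
          * longwords from the kernel buffer to user space with movsl, with
          * curpcb->pcb_onfault set so that a fault returns -1 instead of
          * panicking; returns 0 on success.
          */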
    726 ENTRY(suline)
    727 	movl	%sp@(4),%a0		| address to write
    728 	movl	_C_LABEL(curpcb),%a1	| current pcb
    729 	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
    730 	movl	%sp@(8),%a1		| address of line
    731 	movl	%a1@+,%d0			| get lword
    732 	movsl	%d0,%a0@+			| put lword
    733 	nop				| sync
    734 	movl	%a1@+,%d0			| get lword
    735 	movsl	%d0,%a0@+			| put lword
    736 	nop				| sync
    737 	movl	%a1@+,%d0			| get lword
    738 	movsl	%d0,%a0@+			| put lword
    739 	nop				| sync
    740 	movl	%a1@+,%d0			| get lword
    741 	movsl	%d0,%a0@+			| put lword
    742 	nop				| sync
    743 	moveq	#0,%d0			| indicate no fault
    744 	jra	Lsldone
    745 Lslerr:
    746 	moveq	#-1,%d0
    747 Lsldone:
    748 	movl	_C_LABEL(curpcb),%a1	| current pcb
    749 	clrl	%a1@(PCB_ONFAULT) 	| clear fault address
    750 	rts
    751 #endif
    752 
    753 /*
    754  * Set processor priority level calls.  Most are implemented with
    755  * inline asm expansions.  However, spl0 requires special handling
    756  * as we need to check for our emulated software interrupts.
    757  */
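         /*
          * Note on the mechanism (restating the code below): when a soft
          * interrupt is pending, spl0 cannot simply rts.  It opens a 4-byte
          * hole on its stack and builds a format-0 exception frame there
          * (new SR, the caller's return address as the PC, a cleared
          * format/vector word), then jumps into the common Lgotsir path,
          * which eventually returns to spl0's caller via rte.
          */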
    758 
    759 ENTRY(spl0)
    760 	moveq	#0,%d0
    761 	movw	%sr,%d0			| get old SR for return
    762 	movw	#PSL_LOWIPL,%sr		| restore new SR
    763 	tstb	_C_LABEL(ssir)		| software interrupt pending?
    764 	jeq	Lspldone		| no, all done
    765 	subql	#4,%sp			| make room for RTE frame
    766 	movl	%sp@(4),%sp@(2)		| position return address
    767 	clrw	%sp@(6)			| set frame type 0
    768 	movw	#PSL_LOWIPL,%sp@		| and new SR
    769 	jra	Lgotsir			| go handle it
    770 Lspldone:
    771 	rts
    772 
    773 /*
    774  * _delay(u_int N)
    775  *
    776  * Delay for at least (N/256) microseconds.
     777  * This routine depends on the variable delay_divisor,
     778  * which should be set based on the CPU clock rate.
    779  */
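         /*
          * Worked example with an illustrative value (not taken from this
          * file): the loop subtracts delay_divisor from N until the result
          * goes non-positive, so it runs about N / delay_divisor times.
          * Since N is usecs << 8, a delay_divisor of, say, 82 costs roughly
          * 256/82 ~= 3 iterations per microsecond requested.
          */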
    780 ENTRY_NOPROFILE(_delay)
    781 	| d0 = arg = (usecs << 8)
    782 	movl	%sp@(4),%d0
    783 	| d1 = delay_divisor
    784 	movl	_C_LABEL(delay_divisor),%d1
    785 L_delay:
    786 	subl	%d1,%d0
    787 	jgt	L_delay
    788 	rts
    789 
    790 ENTRY_NOPROFILE(doboot)
    791 	movl #0x5c00c060, %d0		| want phys addressing
    792 	.long	0x4e7b0006		| movc d0,dtt0
    793 	movl	#1, 0x5c00b800		| reset
    794 	stop	#0x2700			| paranoia
    795 
    796 	.data
    797 GLOBAL(mmutype)
    798 	.long	MMU_HP		| default to HP MMU
    799 GLOBAL(cputype)
    800 	.long	CPU_68020	| default to 68020 CPU
    801 GLOBAL(fputype)
    802 	.long	FPU_68881	| default to 68881 FPU
    803 GLOBAL(prototc)
    804 	.long	0		| prototype translation control
    805 
    806 #ifdef DEBUG
    807 	.globl	fulltflush, fullcflush
    808 fulltflush:
    809 	.long	0
    810 fullcflush:
    811 	.long	0
    812 #endif
    813