/*	$NetBSD: subr.S,v 1.1 2002/02/23 23:48:04 matt Exp $	   */

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :

		.text

#ifdef	KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at its correct
 * address, so a small copy routine is first moved out of the way and
 * then used to copy the kernel to its proper address, after which
 * execution jumps back into the relocated kernel.
 */
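/*
 * In outline (an illustrative pseudo-C sketch only, not part of the build;
 * the addresses are the ones used by the code below):
 *
 *	copy the cps..cpe routine below to scratch memory at 0x300000;
 *	rei into that relocated copy routine, which then:
 *	    copies the kernel image from 0x80000200 down to 0x80000000
 *	    (up to _edata) and clears bss up to _end;
 *	    rei's back into the kernel at label 2:, now 0x200 bytes lower.
 */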
/* Copy routine */
cps:
2:	movb	(r0)+,(r1)+
	cmpl	r0,r7
	bneq	2b

3:	clrb	(r1)+
	incl	r0
	cmpl	r0,r6
	bneq	3b
	clrl	-(sp)
	movl	sp,ap
	movl	$_cca,r7
	movl	r8,(r7)
	movpsl	-(sp)
	pushl	r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,r0
	movab	cpe,r1
	movl	$0x300000,sp
	movl	sp,r3
4:	movb	(r0)+,(r3)+
	cmpl	r0,r1
	bneq	4b
	movl	r7,r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,r7
	movab	_end,r6
	movl	$0x80000000,r1
	movl	$0x80000200,r0
	subl3	$0x200,r6,r9
	movab	2f,r2
	subl2	$0x200,r2
	movpsl	-(sp)
	pushab	4(sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(ap),$3				# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(ap),_C_LABEL(symtab_nsyms)	#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,r0	# Round symbol table end
	bicl3	$0x3ff,r0,_C_LABEL(proc0paddr)	# save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),r0 # get phys proc0 uarea addr
	mtpr	r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),r0	# Get kernel stack top
	mtpr	r0,$PR_KSP			# put in IPR KSP
	movl	r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),r0		# get PCB virtual address
	movab	IFTRAP(r0),4(r0)		# Save trap address in ESP
	mtpr	4(r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	_C_LABEL(proc0paddr),r0
	clrl	P0LR(r0)
	clrl	P1LR(r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,r1
	movl	r1,P0BR(r0)
	movl	r1,P1BR(r0)
	mtpr	r1,$PR_P0BR
	mtpr	r1,$PR_P1BR
	clrl	IFTRAP(r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	r11,_C_LABEL(boothowto)		# Howto boot (single etc...)
#	movl	r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */

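/*
 * The sigcode below is copied out to user space and runs in user mode
 * after a signal has been delivered.  Roughly, in C (an illustrative
 * sketch only; the argument layout is the one sendsig() builds and the
 * names are descriptive):
 *
 *	handler(sig, code, scp);	call the signal handler
 *	__sigreturn14(scp);		syscall: restore interrupted context
 *	_exit(...);			only reached if sigreturn fails
 */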
	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,sp
	movl	0x24(sp),r0
	calls	$3,(r0)
	popr	$0x3f
	chmk	$SYS___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_IBCS2
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f
	subl2	$0xc,sp
	movl	0x24(sp),r0
	calls	$3,(r0)
	popr	$0x3f
	chmk	$SYS___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,sp
	movl	0x24(sp),r0
	calls	$3,(r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):	pushr	$0x3f
	.word	0x9f16		# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0		# the callback interrupt routine
	.long	0		# its argument
	.long	0		# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):

_C_LABEL(cmn_idsptch):
	movl	(sp)+,r0	# get pointer to idspvec
	movl	8(r0),r1	# get evcnt pointer
	beql	1f		# no ptr, skip increment
	incl	EV_COUNT(r1)	# increment low longword
	adwc	$0,EV_COUNT+4(r1) # add any carry to hi longword
1:	pushl	4(r0)		# push argument
	calls	$1,*(r0)	# call interrupt routine
	popr	$0x3f		# pop registers
	rei			# return from interrupt
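
/*
 * Each device interrupt vector gets its own copy of the idsptch template
 * above; the three longwords following the jsb form a small per-vector
 * record, and the jsb's return address points at it.  Viewed from C it is
 * roughly (an illustrative sketch, field names are descriptive only):
 *
 *	struct idspvec {
 *		int	(*func)(void *);	interrupt callback
 *		void	*arg;			its argument
 *		struct evcnt *ev;		event counter, may be 0
 *	};
 *
 * cmn_idsptch pops that pointer into r0, bumps the event counter if one
 * is present, calls (*func)(arg) and returns with rei.
 */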

ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,r0	# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(ap),r2	# First argument, the address
	movl	8(ap),r1	# Sec arg, b,w,l
	pushl	r0		# Save old IPL
	clrl	r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	r1,$1,$4	# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b		# This is unused
	.word	3f-1b

1:	movb	(r2),r1		# Test a byte
	brb	5f

2:	movw	(r2),r1		# Test a word
	brb	5f

3:	movl	(r2),r1		# Test a long
	brb	5f

4:	incl	r3		# Got machine chk => addr bad
5:	mtpr	(sp)+,$PR_IPL
	movl	r3,r0
	ret
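
/*
 * C-level contract (an illustrative sketch; the machine check handler
 * uses memtest to resume at local label 4 above when the probe faults):
 *
 *	int badaddr(caddr_t addr, int size);	size is 1, 2 or 4
 *
 * returns 0 if the location could be read and non-zero if the access
 * caused a machine check; interrupts are blocked around the probe.
 */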

#ifdef DDB
/*
 * DDB is the only routine that uses setjmp/longjmp.
 */
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(ap), r0
	movl	8(fp), (r0)
	movl	12(fp), 4(r0)
	movl	16(fp), 8(r0)
	moval	28(fp),12(r0)
	clrl	r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(ap), r1
	movl	8(ap), r0
	movl	(r1), ap
	movl	4(r1), fp
	movl	12(r1), sp
	jmp	*8(r1)
#endif

#
# setrunqueue/remrunqueue fast variants.
#
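#
# Roughly what Setrq does, in C (an illustrative sketch; the
# machine-independent setrunqueue() has the same semantics):
#
#	void setrunqueue(struct proc *p) {
#		int i = p->p_priority >> 2;		/* queue index */
#		insert p at the tail of sched_qs[i];
#		sched_whichqs |= 1 << i;		/* queue non-empty */
#	}
#
# Remrq is the inverse: unlink p and clear the bit if its queue empties.
#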

JSBENTRY(Setrq)
#ifdef DIAGNOSTIC
	tstl	4(r0)	# Check that the process actually is off the queue
	beql	1f
	pushab	setrq
	calls	$1,_C_LABEL(panic)
setrq:	.asciz	"setrunqueue"
#endif
1:	extzv	$2,$6,P_PRIORITY(r0),r1		# get priority
	movaq	_C_LABEL(sched_qs)[r1],r2	# get address of queue
	insque	(r0),*PH_RLINK(r2)		# put proc last in queue
	bbss	r1,_C_LABEL(sched_whichqs),1f	# set queue bit.
1:	rsb

JSBENTRY(Remrq)
	extzv	$2,$6,P_PRIORITY(r0),r1
#ifdef DIAGNOSTIC
	bbs	r1,_C_LABEL(sched_whichqs),1f
	pushab	remrq
	calls	$1,_C_LABEL(panic)
remrq:	.asciz	"remrunqueue"
#endif
1:	remque	(r0),r2
	bneq	2f			# Not last process on queue
	bbsc	r1,_C_LABEL(sched_whichqs),2f
2:	clrl	P_BACK(r0)		# safety belt
	rsb

#
# Idle loop. Here we could do something fun, maybe, like calculating
# pi or something.
#
idle:
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_unlock_idle)
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# release sched lock
#endif
	mtpr	$1,$PR_IPL 		# IPL cannot be 0 because we are
					# running on the interrupt stack
					# and may get interrupts

1:	tstl	_C_LABEL(sched_whichqs)	# Anything ready to run?
	beql	1b			# no, run the idle loop again.
/* Now try the test the long way */
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
3:	bbssi	$0,_C_LABEL(sched_lock),3b	# acquire sched lock
#endif
	brb	lp			# check sched_whichqs again

#
# cpu_switch, cpu_exit and the idle loop implemented in assembler
# for efficiency. r6 contains pointer to last process.  This is
# called at IPL_HIGH.
#
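#
# Roughly, the code below does (an illustrative C-style sketch; the labels
# and symbols are the ones used in this file):
#
#	for (;;) {
#		i = lowest set bit in sched_whichqs;	/* lp: ffs */
#		if (none set) { idle at IPL 1; continue; }
#		p = head of sched_qs[i]; remove it;	/* remque */
#		if (queue now empty) sched_whichqs &= ~(1 << i);
#		p->p_stat = SONPROC; curcpu()->ci_curproc = p;
#		point ESP at p's PCB fault-recovery slot;
#		load p's saved context (ldpctx) and rei into it;
#	}
#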

JSBENTRY(Swtch)
	mfpr	$PR_SSP,r1		# Get ptr to this cpu_info struct
	clrl	CI_CURPROC(r1)		# Stop process accounting
	svpctx				# Save context if another CPU
					# gets control first (must be on
					# the interrupt stack when idling)


lp:	ffs	$0,$32,_C_LABEL(sched_whichqs),r3 # Search for bit set
	beql	idle			# no bit set, go to idle loop

	movaq	_C_LABEL(sched_qs)[r3],r1	# get address of queue head
	remque	*(r1),r2		# remove proc pointed to by queue head
					# proc ptr is now in r2
#ifdef DIAGNOSTIC
	bvc	1f			# check if something on queue
	pushab	noque
	calls	$1,_C_LABEL(panic)
#endif

1:	bneq	2f			# more processes on queue?
	bbsc	r3,_C_LABEL(sched_whichqs),2f	# no, clear bit in whichqs
2:	clrl	P_BACK(r2)		# clear proc backpointer
	mfpr	$PR_SSP,r1		# Get ptr to this cpu_info struct
	/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
	movl	r1,P_CPU(r2)		# p->p_cpu = curcpu();
#endif
	movb	$SONPROC,P_STAT(r2)	# p->p_stat = SONPROC;
	movl	r2,CI_CURPROC(r1)	# set new process running
	clrl	CI_WANT_RESCHED(r1)	# we are now changing process
	movl	P_ADDR(r2),r0		# Get pointer to new pcb.
	addl3	r0,$IFTRAP,r1		# Save for copy* functions.
	mtpr	r1,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	r1,ESP(r0)		# Must save in PCB also.
	mfpr	$PR_SSP,r1		# New process must inherit cpu_info
	movl	r1,SSP(r0)		# Put it in new PCB

#
# Nice routine to get physical from virtual addresses.
#
	extzv	$9,$21,r0,r1		# extract offset
	ashl	$9,*_C_LABEL(Sysmap)[r1],r3
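#
# In effect (an illustrative formula; VAX pages are 512 bytes, the PCB is
# page aligned so no byte offset needs adding back, and PG_FRAME stands
# for the PTE's page-frame-number mask):
#
#	vpn = (va >> 9) & 0x1fffff;		/* page number within S0 */
#	pa  = (Sysmap[vpn] & PG_FRAME) << 9;	/* frame number -> physical */
#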

	mtpr	r3,$PR_PCBB
	ldpctx
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_unlock_idle)
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# clear sched lock
#endif
	rei

#if defined(MULTIPROCESSOR)
	.align 2
	.globl	_C_LABEL(tramp)	# used to kick off multiprocessor systems.
_C_LABEL(tramp):
	ldpctx
	rei
#endif

#
# the last routine called by a process.
#

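#
# Roughly (an illustrative C sketch; CI_EXIT below is the per-CPU scratch
# page used as a temporary stack and PCB):
#
#	void cpu_exit(struct proc *p) {
#		raise IPL to IPL_CLOCK;
#		switch sp and PCBB to the cpu_info scratch page;
#		exit2(p);		/* frees p's U area/kernel stack */
#		raise IPL to IPL_HIGH and take the sched lock;
#		goto Swtch;		/* pick another process; no return */
#	}
#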
ENTRY(cpu_exit,0)
	movl	4(ap),r6	# Process pointer in r6
	mtpr	$IPL_CLOCK,$PR_IPL # Block almost everything
	mfpr	$PR_SSP,r7	# get cpu_info ptr
	movl	CI_EXIT(r7),r8	# scratch page address
	movab	512(r8),sp	# change stack
	bicl2	$0xc0000000,r8	# get physical address
	mtpr	r8,$PR_PCBB	# new PCB
	mtpr	r7,$PR_SSP	# In case...
	pushl	r6
	calls	$1,_C_LABEL(exit2)	# release last resources.
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
1:	bbssi	$0,_C_LABEL(sched_lock),1b	# acquire sched lock
#endif
	clrl	r6
	brw	Swtch

#
# copy/fetch/store routines.
#
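#
# All of these use the same fault-recovery scheme: the ESP processor
# register points at a longword in the current PCB (set up in Swtch above),
# and storing a label there tells the page-fault handler where to resume
# if a user address turns out to be invalid.  The pattern, roughly
# (illustrative only):
#
#	*(void **)mfpr(PR_ESP) = resume_label;	/* arm recovery */
#	... access user memory ...
#	*(void **)mfpr(PR_ESP) = 0;		/* disarm; success */
#
# with the fault handler transferring control to resume_label on error.
#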

ENTRY(copyout, 0)
	movl	8(ap),r2
	blss	3f		# kernel space
	movl	4(ap),r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(ap),r1
	blss	3f		# kernel space
	movl	8(ap),r2
2:	mfpr	$PR_ESP,r3
	movab	1f,(r3)
	movc3	12(ap),(r1),(r2)
1:	mfpr	$PR_ESP,r3
	clrl	(r3)
	ret

3:	mnegl	$1,r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,r3
	movl	(r3),-(sp)
	movab	1f,(r3)
	movl	4(ap),r1
	movl	8(ap),r2
	movc3	12(ap),(r1), (r2)
	clrl	r1
1:	mfpr	$PR_ESP,r3
	movl	(sp)+,(r3)
	movl	r1,r0
	ret

/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
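/*
 * C-level contract (an illustrative sketch; the real declarations live in
 * <sys/systm.h>):
 *
 *	int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done);
 *	int copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done);
 *
 * Both return 0 on success, EFAULT for a bad user-space address and
 * ENAMETOOLONG if no NUL was found within len bytes; *done, if non-NULL,
 * is set to the number of bytes copied including the terminating NUL.
 */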
ENTRY(copyinstr, 0)
	tstl	4(ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,r0
	movl	16(ap),r2
	beql	7f
	clrl	(r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(ap),r5	# from
	movl	8(ap),r4	# to
	movl	12(ap),r3	# len
	movl	16(ap),r2	# copied
	clrl	r0
	mfpr	$PR_ESP,r1
	movab	3f,(r1)

	tstl	r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	r2		# save copied len?
	beql	2f		# no
	subl3	4(ap),r5,(r2)	# save copied len
2:	ret

1:	movb	(r5)+,(r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	r3,1b		# copy one more
	movl	$ENAMETOOLONG,r0 # inform about too long string
	brb	0b		# out of chars

3:	mfpr	$PR_ESP,r1
	clrl	(r1)
	brb	0b

ENTRY(subyte,0)
	movl	4(ap),r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,r1
	movab	1f,(r1)
	movb	8(ap),(r0)
	clrl	r1
1:	mfpr	$PR_ESP,r2
	clrl	(r2)
	movl	r1,r0
	ret

ENTRY(suword,0)
	movl	4(ap),r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,r1
	movab	1f,(r1)
	movl	8(ap),(r0)
	clrl	r1
1:	mfpr	$PR_ESP,r2
	clrl	(r2)
	movl	r1,r0
	ret

ENTRY(suswintr,0)
	movl	4(ap),r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,r1
	movab	1f,(r1)
	movw	8(ap),(r0)
	clrl	r1
1:	mfpr	$PR_ESP,r2
	clrl	(r2)
	movl	r1,r0
	ret

3:	mnegl	$1,r0
	ret

	.align	2
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(ap),r0
	blss	3b
	mfpr	$PR_ESP,r1
	movab	1f,(r1)
	movzwl	(r0),r1
1:	mfpr	$PR_ESP,r2
	clrl	(r2)
	movl	r1,r0
	ret

#if defined(MULTIPROCESSOR)

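#
# Simple-lock primitives, called via jsb with the lock address in r1:
# Slock spins with an interlocked bit-set (bbssi) until it acquires the
# lock, Slocktry makes a single attempt and returns 1 in r0 on success or
# 0 if the lock was busy, and Sunlock releases the lock with an
# interlocked bit-clear (bbcci).
#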
JSBENTRY(Slock)
1:	bbssi	$0,(r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	r0
	bbssi	$0,(r1),1f
	incl	r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(r1),1f
1:	rsb

#endif

#
# data department
#
	.data

	.globl _C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long 0

#ifdef __ELF__
	.section	.rodata
#endif
noque:	.asciz	"swtch"
    592