/*	$NetBSD: subr.S,v 1.43 2023/12/18 22:40:01 kalvisd Exp $	   */

/*
 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
#define SCBENTRY(name) \
	.text			; \
	.align 2		; \
	.globl __CONCAT(X,name)	; \
__CONCAT(X,name):
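/*
 * JSBENTRY() defines a global, longword-aligned label that is entered
 * with jsb (no CALLS frame, hence no entry mask).  SCBENTRY() defines
 * the X-prefixed labels that the interrupt vectors in the System
 * Control Block are pointed at.
 */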

		.text

#ifdef	KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky.  The kernel is not loaded at its correct
 * address, so it must be moved into place: a small copy routine is
 * first relocated out of the way, that routine then copies the kernel
 * to its proper address, and finally control jumps back into the
 * relocated kernel.
 */
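/*
 * Roughly, in illustrative C (the real work must be position-independent
 * assembler since the kernel moves underneath itself; names are
 * descriptive only):
 *
 *	memcpy((void *)0x300000, cps, cpe - cps);	// copier out of the way
 *	memcpy((void *)0x80000000, (void *)0x80000200,	// slide kernel into place
 *	    _edata - 0x80000200);
 *	memset(_edata, 0, _end - _edata);		// zero bss
 *	goto 2f;					// re-enter relocated kernel
 */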
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
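/*
 * Common start-up path; both the MOP-loaded and the regular case end up
 * at "2:" below.  Record the end of the loaded image and of the symbol
 * table (if one was passed in), set up lwp0's PCB and the kernel stack,
 * give the P0/P1 region registers harmless initial values, pick up the
 * old-style boot arguments from registers if COMPAT_14 and no argument
 * block was supplied, and finally call _start() with the address of the
 * RPB.
 */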
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbol table info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symbols
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,%r1			#
	movl	%r1,_C_LABEL(lwp0)+L_PCB	# lwp0 pcb, XXXuvm_lwp_getuarea
	bicl3	$0x80000000,%r1,%r0		# get phys lwp0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,%r1,%r0			# Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(lwp0)+L_PCB,%r0	# get PCB virtual address
	mfpr	$PR_PCBB,PCB_PADDR(%r0)		# save PCB physical address
	movab	PCB_ONFAULT(%r0),ESP(%r0)	# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers to a known state
	movl	%r1,%r0				# get lwp0 pcb
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	PCB_ONFAULT(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */

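/*
 * Signal trampoline, copied out to user space by the kernel.  It saves
 * r0-r5, fetches the handler address that sendsig placed in the signal
 * frame, calls the handler with its three arguments, restores the
 * registers and issues the (compat) sigreturn14 syscall to resume the
 * interrupted context; should that ever return, the process exits.
 */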
	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):
	pushr	$0x3f
	.word	0x9f16		# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0		# the callback interrupt routine
	.long	0		# its argument
	.long	0		# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):

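/*
 * The idsptch..eidsptch template above is copied once per interrupt
 * vector; its three zero longwords are patched with the handler, its
 * argument and an optional evcnt pointer.  The "jsb @#cmn_idsptch"
 * leaves the address of that patched area on the stack, which is how
 * cmn_idsptch below finds out which device interrupted.  As an
 * illustrative C sketch (not a real declaration in this tree):
 *
 *	struct idspvec {
 *		void	(*routine)(void *);	// callback interrupt routine
 *		void	*arg;			// its argument
 *		struct evcnt *ev;		// statistics, may be NULL
 *	};
 */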
_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)
#endif
	movl	(%sp)+,%r0	# get pointer to idspvec
	mtpr	$IPL_VM,$PR_IPL	# Make sure we are at IPL_VM
	movl	8(%r0),%r1	# get evcnt pointer
	beql	1f		# no ptr, skip increment
	incl	EV_COUNT(%r1)	# increment low longword
	adwc	$0,EV_COUNT+4(%r1) # add any carry to hi longword
1:	mfpr	$PR_SSP, %r2	# get curlwp
	movl	L_CPU(%r2), %r2 # get curcpu
	incl	CI_NINTR(%r2)	# increment ci_data.cpu_nintr
	adwc	$0,(CI_NINTR+4)(%r2)
#if 0
	pushl	%r0
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)		# push argument
	calls	$1,*(%r0)	# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)
#endif
	popr	$0x3f		# pop registers
	rei			# return from interrupt
#if 0
2:	.asciz	"intr %p(%p)\n"
#endif

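/*
 * badaddr() probes an address without crashing: it raises the IPL,
 * points the global "memtest" at the recovery label "4:" below and then
 * tries a byte/word/long access.  If the access machine checks, the
 * machine check handler resumes execution at that label and the routine
 * returns non-zero; otherwise it returns 0.  Rough, illustrative C
 * prototype:
 *
 *	int badaddr(void *addr, int size);	// size is 1, 2 or 4
 */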
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0	# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2	# First argument, the address
	movl	8(%ap),%r1	# Sec arg, b,w,l
	pushl	%r0		# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4	# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b		# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3		# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

#ifdef DDB
/*
 * DDB is the only routine that uses setjmp/longjmp.
 */
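/*
 * The jmp_buf layout used here is simply { ap, fp, pc, sp }, inferred
 * from the code below: setjmp copies the caller's saved AP, FP and
 * return PC out of its own call frame and records where the stack will
 * be after return; longjmp reloads those registers and jumps to the
 * saved PC.  Only DDB uses these, so no signal mask handling is needed.
 */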
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp),12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#if defined(MULTIPROCESSOR)
	.align 2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif

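/*
 * Software interrupt dispatch.  Each SCBENTRY(soft...) entry point below
 * saves r0/r1, loads the SOFTINT_* level into %r0 and branches to
 * softint_common, which saves the interrupted context with svpctx and
 * switches to the per-level soft LWP.  That LWP starts in
 * softint_process, which calls softint_dispatch(); when it returns, the
 * pinned (interrupted) LWP is resumed via softint_exit/softint_cleanup.
 */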
	.globl	softint_cleanup,softint_exit,softint_process
	.type	softint_cleanup@function
	.type	softint_exit@function
	.type	softint_process@function
softint_cleanup:
	movl    L_CPU(%r0),%r1		/* get cpu_info */
	incl    CI_MTX_COUNT(%r1)	/* increment mutex count */
	movl	L_PCB(%r0),%r1		/* get PCB of softint LWP */
softint_exit:
	popr	$0x3			/* restore r0 and r1 */
	rei				/* return from interrupt */

softint_process:
	/*
	 * R6 contains pinned LWP
	 * R7 contains ipl to dispatch with
	 */
	movq	%r6,-(%sp)		/* push old lwp and ipl onto stack */
	calls	$2,_C_LABEL(softint_dispatch) /* dispatch it */

	/* We can use any register because ldpctx will overwrite them */
	movl	L_PCB(%r6),%r3		/* get pcb */
	movab	softint_exit,PCB_PC(%r3)/* do a quick exit */
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r6),%r8
	/* XXX store-before-store barrier -- see cpu_switchto */
	movl	%r6,CI_CURLWP(%r8)
	/* XXX store-before-load barrier -- see cpu_switchto */
#endif
	/* copy AST level from current LWP to pinned LWP, reset
	   current AST level */
	mfpr	$PR_SSP,%r4		/* current LWP */
	movl	L_PCB(%r4),%r4		/* PCB address */
	movl	P0LR(%r4),%r0		/* LR and ASTLVL field, current PCB */
	movl	P0LR(%r3),%r1		/* same, pinned LWP */
	cmpl	%r0,%r1
	bgtru	1f			/* AST(current) >= AST(pinned) */
	extv	$24,$3,%r0,%r0		/* ASTLVL field for current LWP */
	insv	%r0,$24,$3,P0LR(%r3)	/* copy to pinned LWP */
	insv	$4,$24,$3,P0LR(%r4)	/* reset AST for current LWP */
1:
	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
	ldpctx				/* implicitly updates curlwp */
	rei


softint_common:
	mfpr	$PR_IPL,%r1
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	movpsl	-(%sp)			/* add cleanup hook */
	pushab	softint_cleanup
	svpctx

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r6		/* Get curlwp */
	movl	L_CPU(%r6),%r8		/* get cpu_info */
	movl	CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
	movl	L_PCB(%r2),%r3		/* Get pointer to its pcb. */
	movl	%r6,PCB_R6(%r3)		/* move old lwp into new pcb */
	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
#ifdef MULTIPROCESSOR
	/* XXX store-before-store barrier -- see cpu_switchto */
	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
	/* XXX store-before-load barrier -- see cpu_switchto */
#endif

	/*
	 * Now reset the PCB since we have no idea what state it was last in
	 */
	movab	(USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r3),%r0
					/* calculate where KSP should be */
	movl	%r0,KSP(%r3)		/* save it as SP */
	movl	%r0,PCB_FP(%r3)		/* and as the FP too */
	movab	CA_ARGNO(%r0),PCB_AP(%r3) /* update the AP as well */
	movab	softint_process,PCB_PC(%r3) /* and where we will start */
	movl	$PSL_HIGHIPL,PCB_PSL(%r3) /* Needs to be running at IPL_HIGH */

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* set PA of new pcb */
	ldpctx				/* load it */
	rei				/* get off interrupt stack */

SCBENTRY(softclock)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_CLOCK,%r0
	brb	softint_common

SCBENTRY(softbio)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_BIO,%r0
	brb	softint_common

SCBENTRY(softnet)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_NET,%r0
	brb	softint_common

SCBENTRY(softserial)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_SERIAL,%r0
	brb	softint_common

/*
 * Helper routine for cpu_lwp_fork.  It gets invoked by Swtchto.
 * It lets the kernel know the lwp is alive and then calls func(arg)
 * and possibly returns to sret.
 */
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)			/* save func & arg */
	movq	%r0,-(%sp)			/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup)	/* startup the lwp */
	movl	(%sp)+,%r0			/* grab func */
	calls	$1,(%r0)			/* call it with arg */
	ret

/*
 * r1 = newlwp
 * r0 = oldlwp
 */
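/*
 * Swtchto is the low-level context switch used on behalf of
 * cpu_switchto(): entered via jsb with the old lwp in %r0 and the new
 * lwp in %r1, it saves the outgoing context with svpctx, loads the new
 * lwp's PCB (switching kernel stacks as a side effect) and returns the
 * old lwp in %r0 in the new context.
 */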
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any register because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	movl	L_PCB(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r0), %r8		/* get cpu_info of old lwp */
	movl	%r8, L_CPU(%r1)		/* update cpu_info of new lwp */
	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 *
	 * XXX I'm fuzzy on the memory model of VAX.  I would guess
	 * it's TSO like x86 but I can't find a store-before-load
	 * barrier, which is the only one TSO requires explicitly.
	 */
	/* XXX store-before-store barrier */
	movl	%r1,CI_CURLWP(%r8)	/* update ci_curlwp */
	/* XXX store-before-load barrier */
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */

#
# copy/fetch/store routines.
#

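/*
 * copyin()/copyout() rely on pcb_onfault (reachable through the ESP
 * internal register) for fault recovery: a fault on the user address
 * resumes at the "1:" label with the error code already in %r0, and the
 * routine just clears pcb_onfault and returns it.  Because movc3 takes
 * a 16-bit length, copies of 64KB and more are done in 0xfffc-byte
 * chunks via label "5:".
 */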
ENTRY(copyout, 0)
	movl	8(%ap),%r3
	blss	3f		# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f		# kernel space
	movl	8(%ap),%r3
2:	mfpr	$PR_ESP,%r2
	movab	1f,(%r2)	# set pcb_onfault
4:	tstw	14(%ap)		# check if >= 64K
	bneq	5f
	movc3	12(%ap),(%r1),(%r3)
	clrl	%r0
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)		# clear pcb_onfault
	ret
5:	movc3	$0xfffc,(%r1),(%r3)
	subl2	$0xfffc,12(%ap)
	brb	4b

3:	movl	$EFAULT,%r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)	# save current pcb_onfault
	movab	1f,(%r3)	# set pcb_onfault
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1), (%r2)
	clrl	%r0
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)	# restore pcb_onfault
	ret

/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
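/*
 * Both routines return 0 on success, EFAULT if the user address lies in
 * kernel space or an access faults, and ENAMETOOLONG if the string did
 * not fit in the given buffer.  If the "copied" pointer (the fourth
 * argument) is non-NULL, the number of bytes processed is stored
 * through it.
 */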
ENTRY(copyinstr, 0)
	tstl	4(%ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	2f,(%r1)	# set pcb_onfault

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	mfpr	$PR_ESP,%r1
	clrl	(%r1)		# clear pcb_onfault
	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0 # inform about too long string
	brb	0b		# out of chars

/**************************************************************************/

	.align	2

#define	UFETCHSTORE_PROLOGUE						 \
	tstl	4(%ap)			/* uaddr in userspace? */	;\
	blss	1f			/* nope, fault */		;\
	mfpr	$PR_ESP,%r1		/* &pcb_onfault is in ESP */	;\
	movab	2f,(%r1)		/* set pcb_onfault */

#define	UFETCHSTORE_EPILOGUE						 \
	mfpr	$PR_ESP,%r1		/* &pcb_onfault is in ESP */	;\
	clrl	(%r1)			/* pcb_onfault = NULL */

#define	UFETCHSTORE_RETURN						 \
	clrl	%r0			/* return success */		;\
	ret								;\
1:	movl	$EFAULT,%r0						;\
	ret				/* return EFAULT */		;\
2:	UFETCHSTORE_EPILOGUE						;\
	ret				/* error already in %r0 */

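/*
 * The _ufetch_N and _ustore_N routines below all follow the same
 * pattern: check that the user address is below the kernel base, point
 * pcb_onfault at the local "2:" recovery label, do a single move
 * through the user pointer and clear pcb_onfault again.  Label "1:"
 * returns EFAULT for a kernel address; label "2:" is entered from the
 * fault handler with the error code already in %r0.
 */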
/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */
ENTRY(_ufetch_8,0)
	UFETCHSTORE_PROLOGUE
	movb	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */
ENTRY(_ufetch_16,0)
	UFETCHSTORE_PROLOGUE
	movw	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */
ENTRY(_ufetch_32,0)
	UFETCHSTORE_PROLOGUE
	movl	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */
ENTRY(_ustore_8,0)
	UFETCHSTORE_PROLOGUE
	movb	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */
ENTRY(_ustore_16,0)
	UFETCHSTORE_PROLOGUE
	movw	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */
ENTRY(_ustore_32,0)
	UFETCHSTORE_PROLOGUE
	movl	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/**************************************************************************/

	.align	2

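/*
 * Simple spinlock primitives, entered via jsb with the lock address in
 * %r1.  They use the interlocked branch-on-bit instructions: Slock
 * spins until it sets bit 0, Slocktry returns 1 in %r0 if it acquired
 * the lock and 0 otherwise, and Sunlock clears the bit again.
 */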
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

#
# data department
#
	.data

	.globl _C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long 0