Home | History | Annotate | Line # | Download | only in rmi
rmixl_spl.S revision 1.3.32.1
      1  1.3.32.1  skrll /*	$NetBSD: rmixl_spl.S,v 1.3.32.1 2015/09/22 12:05:47 skrll Exp $	*/
      2       1.2   matt 
      3       1.2   matt /*-
      4       1.2   matt  * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
      5       1.2   matt  * All rights reserved.
      6       1.2   matt  *
      7       1.2   matt  * This code is derived from software contributed to The NetBSD Foundation
      8       1.2   matt  * by Matt Thomas <matt (at) 3am-software.com>.
      9       1.2   matt  *
     10       1.2   matt  * Redistribution and use in source and binary forms, with or without
     11       1.2   matt  * modification, are permitted provided that the following conditions
     12       1.2   matt  * are met:
     13       1.2   matt  * 1. Redistributions of source code must retain the above copyright
     14       1.2   matt  *    notice, this list of conditions and the following disclaimer.
     15       1.2   matt  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.2   matt  *    notice, this list of conditions and the following disclaimer in the
     17       1.2   matt  *    documentation and/or other materials provided with the distribution.
     18       1.2   matt  *
     19       1.2   matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20       1.2   matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21       1.2   matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22       1.2   matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23       1.2   matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24       1.2   matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25       1.2   matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26       1.2   matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27       1.2   matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28       1.2   matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29       1.2   matt  * POSSIBILITY OF SUCH DAMAGE.
     30       1.2   matt  */
     31       1.2   matt 
     32       1.2   matt #include "opt_cputype.h"	/* which mips CPU levels do we support? */
     33       1.2   matt 
     34       1.2   matt #include <sys/cdefs.h>
     35       1.2   matt 
     36       1.2   matt #include <mips/asm.h>
     37       1.2   matt #include <mips/cpuregs.h>
     38       1.2   matt 
     39  1.3.32.1  skrll RCSID("$NetBSD: rmixl_spl.S,v 1.3.32.1 2015/09/22 12:05:47 skrll Exp $");
     40       1.2   matt 
     41       1.2   matt #include "assym.h"
     42       1.2   matt 
      43       1.2   matt #define MAP_SCALESHIFT		3	/* log2 of ipl_eimr_map entry size (.dword = 8 bytes) */
      44       1.2   matt #define RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
      45       1.2   matt #define RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)	/* CAUSE IP bits >> 8 == EIRR/EIMR bit positions */
      46       1.3  cliff #define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
      47       1.3  cliff #define RMIXL_EIRR_PRESERVE	(RMIXL_INT_MASK_5 | RMIXL_SOFT_INT_MASK)	/* clock intr + softint bits kept across EIRR writes */
      48       1.2   matt #define RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
      49       1.2   matt #define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)	/* XXX benign duplicate of the identical definition above */
      50       1.2   matt #define RMIXL_COP_0_EIRR	_(9), 6	/* External Interrupt Request Register: cp0 reg 9, sel 6 */
      51       1.2   matt #define RMIXL_COP_0_EIMR	_(9), 7	/* External Interrupt Mask Register: cp0 reg 9, sel 7 */
      52       1.2   matt 
      53       1.2   matt 	.set noreorder	# no assembler reordering; branch delay slots are explicit below
      54       1.2   matt 
      55       1.2   matt /*
      56       1.2   matt  * Array of mask of bits to set in the EIMR when we go to a
      57       1.2   matt  * given hardware interrupt priority level.
      58       1.2   matt  * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant
      59       1.2   matt  * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
      60       1.2   matt  */
      61       1.2   matt 	.data
      62       1.2   matt 	.globl	_C_LABEL(ipl_eimr_map)
      63       1.2   matt 	.type	_C_LABEL(ipl_eimr_map),@object
      64       1.2   matt 	.p2align MAP_SCALESHIFT
      65       1.2   matt _C_LABEL(ipl_eimr_map):
      66       1.2   matt 	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE: both softint bits enabled */
      67       1.2   matt 	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO}: only softint 1 enabled */
      68       1.2   matt 	.dword	0			/* IPL_SOFT{NET,SERIAL}: all masked until vecs established */
      69       1.2   matt 	.dword	0			/* IPL_VM */
      70       1.2   matt 	.dword	0			/* IPL_SCHED */
      71       1.2   matt 	.dword	0			/* IPL_DDB */
      72       1.2   matt 	.dword	0			/* IPL_HIGH: everything masked */
      73       1.2   matt 
      74       1.2   matt 	.text
     74       1.2   matt 	.text
     75       1.2   matt 
     76       1.2   matt /*
     77       1.2   matt  * initialize cp0 interrupt control for this cpu
     78       1.2   matt  * - set STATUS[IE]
     79       1.2   matt  * - clear EIRR and EIMR
     80       1.2   matt  * on return, all interrupts are disabled by EIMR
     81       1.2   matt  *
     82       1.2   matt  * henceforth STATUS[IE] is expected to remain normally set
     83       1.2   matt  * but may be cleared and restored for temporary interrupt disablement
     84       1.2   matt  *
     85       1.2   matt  * call before the first call to spl0 on this cpu
     86       1.2   matt  */
      87       1.2   matt LEAF_NOPROFILE(rmixl_spl_init_cpu)
      88       1.2   matt 	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
      89       1.2   matt 	ori	t0, MIPS_SR_INT_IE		# set IE
      90       1.2   matt 	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
      91       1.3  cliff 	COP0_SYNC				## settle cp0 write hazard
      92       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIMR		##  "       "   "   "  EIMR
      93       1.3  cliff 	COP0_SYNC
      94       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIRR		## clear EIRR
      95       1.3  cliff 	COP0_SYNC
      96       1.2   matt 	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE; EIMR still masks everything
      97       1.3  cliff 	JR_HB_RA				# return with hazard barrier
      98       1.2   matt END(rmixl_spl_init_cpu)
     99       1.2   matt 
    100       1.2   matt /*
    101       1.2   matt  * RMIXL processor interrupt control
    102       1.2   matt  *
    103       1.2   matt  * Used as building blocks for spl(9) kernel interface.
    104       1.2   matt  */
    105       1.3  cliff 
     106       1.2   matt _splraise:
     107       1.2   matt 	/*
     108       1.3  cliff 	 * a0 = EIMR bits requested to be set for this IPL
     109       1.2   matt 	 * a1 = this IPL (IPL_*)
     110       1.2   matt 	 * Can only use a0-a3 and v0-v1
     111       1.3  cliff 	 * old IPL is returned in v0
     112       1.2   matt 	 * Never lowers: if the current IPL is already >= a1, cpl and
     113       1.3  cliff 	 * EIMR are left unchanged.
     114       1.3  cliff 	 */
     115       1.3  cliff 	dmfc0	a2, RMIXL_COP_0_EIMR		# save EIMR
     116       1.3  cliff 	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
     117       1.3  cliff 	COP0_SYNC
     118       1.3  cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## a3 = curlwp->l_cpu (cpu_info)
     119       1.3  cliff 	INT_L	v0, CPU_INFO_CPL(a3)		## get current IPL from cpu_info
     120       1.3  cliff 	sltu	v1, a1, v0			## newipl < curipl
     121       1.3  cliff 	bnez	v1, 1f				## yes, don't change.
     122       1.2   matt 	 nop					## (branch delay slot)
     123       1.2   matt 	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     124       1.2   matt 	dmtc0	a0, RMIXL_COP_0_EIMR		## set new EIMR
     125       1.3  cliff 	JR_HB_RA
     126       1.3  cliff 1:
     127       1.3  cliff 	dmtc0	a2, RMIXL_COP_0_EIMR		## restore saved EIMR
     128       1.3  cliff 	JR_HB_RA
     128       1.2   matt STATIC_LEAF(_splsw_splx)			# a0 = IPL to restore (may lower)
     129       1.2   matt STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
     130       1.2   matt 	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
     131       1.2   matt 	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
     132       1.2   matt 	PTR_ADDU v1, a2				# add to table addr
     133       1.2   matt 	REG_L	v1, (v1)			# load EIMR bits for this IPL
     134       1.3  cliff 
     135       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
     136       1.3  cliff 	COP0_SYNC
     137       1.3  cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
     138       1.2   matt 	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     139       1.2   matt 	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
     140       1.3  cliff 	JR_HB_RA
     141       1.2   matt END(_splsw_splx)
    142       1.2   matt 
     143       1.2   matt STATIC_LEAF(_splsw_spl0)			# drop directly to IPL_NONE
     144       1.2   matt 	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
     145       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
     146       1.3  cliff 	COP0_SYNC
     147       1.3  cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
     148       1.2   matt #if IPL_NONE == 0
     149       1.3  cliff 	INT_S	zero, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     150       1.2   matt #else
     151       1.2   matt #error IPL_NONE != 0
     152       1.2   matt #endif
     153       1.2   matt 	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
     154       1.3  cliff 	JR_HB_RA
     155       1.2   matt END(_splsw_spl0)
    156       1.2   matt 
     157       1.2   matt STATIC_LEAF(_splsw_setsoftintr)		# a0 = CAUSE-style softint bit to post
     158       1.3  cliff 	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
     159       1.3  cliff 	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
     160       1.2   matt 	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
     161       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
     162       1.3  cliff 	COP0_SYNC
     163       1.3  cliff 	dmfc0	v0, RMIXL_COP_0_EIRR		## load EIRR
     164       1.3  cliff 	and	v0, RMIXL_EIRR_PRESERVE		## preserve clock & softints
     165       1.3  cliff 	or	v0, a0				## set new softint bit
     166       1.3  cliff 	dmtc0	v0, RMIXL_COP_0_EIRR		## store EIRR
     167       1.3  cliff 	COP0_SYNC
     168       1.2   matt 	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
     169       1.3  cliff 	JR_HB_RA
     170       1.2   matt END(_splsw_setsoftintr)
    171       1.2   matt 
     172       1.2   matt STATIC_LEAF(_splsw_clrsoftintr)		# a0 = CAUSE-style softint bit to clear
     173       1.3  cliff 	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
     174       1.3  cliff 	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
     175       1.3  cliff 	xor	a0, RMIXL_EIRR_PRESERVE		# clear from preserve mask: a0 = preserve bits minus target
     176       1.2   matt 	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
     177       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
     178       1.3  cliff 	COP0_SYNC
     179       1.3  cliff 	dmfc0	v0, RMIXL_COP_0_EIRR		## load EIRR
     180       1.3  cliff 	and	v0, a0				## apply preserve mask (drops the target softint bit)
     181       1.3  cliff 	dmtc0	v0, RMIXL_COP_0_EIRR		## store EIRR
     182       1.3  cliff 	COP0_SYNC
     183       1.3  cliff 	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
     184       1.3  cliff 	JR_HB_RA
     185       1.2   matt END(_splsw_clrsoftintr)
    186       1.2   matt 
     187       1.2   matt STATIC_LEAF(_splsw_splraise)		# a0 = target IPL; returns old IPL in v0
     188       1.2   matt 	move	a1, a0				# a1 = IPL for _splraise
     189       1.2   matt 	PTR_LA	v1, _C_LABEL(ipl_eimr_map)
     190       1.2   matt 	sll	a2, a0, MAP_SCALESHIFT		# index map by IPL
     191       1.2   matt 	PTR_ADDU v1, a2
     192       1.2   matt 	REG_L	a0, (v1)			# a0 = EIMR bits for this IPL
     193       1.2   matt 	b	_splraise
     194       1.2   matt 	 nop
     195       1.2   matt END(_splsw_splraise)
    196       1.2   matt 
     197       1.2   matt STATIC_LEAF(_splsw_splhigh)		# raise to IPL_HIGH; returns old IPL in v0
     198       1.2   matt STATIC_XLEAF(_splsw_splhigh_noprof)
     199       1.2   matt 	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
     200       1.3  cliff 	COP0_SYNC
     201       1.3  cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info from curlwp
     202       1.3  cliff 	li	a1, IPL_HIGH			##
     203       1.3  cliff 	INT_L	v0, CPU_INFO_CPL(a3)		## old IPL for return value
     204       1.2   matt 	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     205       1.2   matt 						## interrupts remain disabled!
     206       1.2   matt 	j	ra				# return
     207       1.2   matt 	 nop
     208       1.2   matt END(_splsw_splhigh)
    209       1.2   matt 
     210       1.2   matt STATIC_LEAF(_splsw_splddb)		# raise to IPL_DDB via _splraise
     211       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB
     212       1.2   matt 	li	a1, IPL_DDB
     213       1.2   matt 	b	_splraise
     214       1.2   matt 	 nop
     215       1.2   matt END(_splsw_splddb)
    216       1.2   matt 
     217       1.2   matt STATIC_LEAF(_splsw_splsched)		# raise to IPL_SCHED via _splraise
     218       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED
     219       1.2   matt 	li	a1, IPL_SCHED
     220       1.2   matt 	b	_splraise
     221       1.2   matt 	 nop
     222       1.2   matt END(_splsw_splsched)
    223       1.2   matt 
     224       1.2   matt STATIC_LEAF(_splsw_splvm)		# raise to IPL_VM via _splraise
     225       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
     226       1.2   matt 	li	a1, IPL_VM
     227       1.2   matt 	b	_splraise
     228       1.2   matt 	 nop
     229       1.2   matt END(_splsw_splvm)
    230       1.2   matt 
     231       1.2   matt STATIC_LEAF(_splsw_splsoftserial)	# raise to IPL_SOFTSERIAL via _splraise
     232       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL
     233       1.2   matt 	li	a1, IPL_SOFTSERIAL
     234       1.2   matt 	b	_splraise
     235       1.2   matt 	 nop
     236       1.2   matt END(_splsw_splsoftserial)
    237       1.2   matt 
     238       1.2   matt STATIC_LEAF(_splsw_splsoftnet)		# raise to IPL_SOFTNET via _splraise
     239       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET
     240       1.2   matt 	li	a1, IPL_SOFTNET
     241       1.2   matt 	b	_splraise
     242       1.2   matt 	 nop
     243       1.2   matt END(_splsw_splsoftnet)
    244       1.2   matt 
     245       1.2   matt STATIC_LEAF(_splsw_splsoftbio)		# raise to IPL_SOFTBIO via _splraise
     246       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO
     247       1.2   matt 	li	a1, IPL_SOFTBIO
     248       1.2   matt 	b	_splraise
     249       1.2   matt 	 nop
     250       1.2   matt END(_splsw_splsoftbio)
    251       1.2   matt 
     252       1.2   matt STATIC_LEAF(_splsw_splsoftclock)	# raise to IPL_SOFTCLOCK via _splraise
     253       1.2   matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK
     254       1.2   matt 	li	a1, IPL_SOFTCLOCK
     255       1.2   matt 	b	_splraise
     256       1.2   matt 	 nop
     257       1.2   matt END(_splsw_splsoftclock)
    258       1.2   matt 
     259       1.2   matt STATIC_LEAF(_splsw_splintr)		# a0 = &pending mask out-param; returns highest pending IPL in v0
     260       1.2   matt 	dmfc0	ta1, RMIXL_COP_0_EIRR		# get active interrupts
     261       1.2   matt 						# restrict to hard int bits:
     262       1.2   matt 	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 = ta1 & ~RMIXL_SOFT_INT_MASK
     263       1.2   matt 	xor	v1, ta1				#   "       "  (two-insn and-not)
     264       1.2   matt 
     265       1.2   matt 	li	v0, IPL_NONE
     266       1.2   matt 	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
     267       1.2   matt 	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
     268       1.2   matt 	and	v1, ta2				# apply to pending bits
     269       1.2   matt 	beq	v1, zero, 4f			# if nothing pending...
     270       1.2   matt 	 nop					# ... return IPL_NONE
     271       1.2   matt 
     272       1.2   matt 	li	v0, IPL_VM			# ipl=IPL_VM
     273       1.2   matt 1:					# walk up the map while pending bits remain enabled
     274       1.2   matt 	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
     275       1.2   matt 	and	ta2, v1				# any match to pending intrs?
     276       1.2   matt 	beq	ta2, zero, 2f			#  no, return ipl
     277       1.2   matt 	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT	#  point to next entry (delay slot, runs either way)
     278       1.2   matt 	addiu	v0, 1				# ipl++
     279       1.2   matt 	move	v1, ta2				# update highest pending
     280       1.2   matt 	b	1b				# loop
     281       1.2   matt 	 nop
     282       1.2   matt 
     283       1.2   matt 2:
     284       1.2   matt 	/*
     285       1.2   matt 	 * Emulate the CP0_SR 'IM' bits in 'pending'
     286       1.2   matt 	 * - if clock intr is requested, set MIPS_INT_MASK_5
     287       1.2   matt 	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit
     288       1.2   matt 	 *   the RMI evbmips_iointr function will sort through
     289       1.2   matt 	 *   individual EIRR requests
     290       1.2   matt 	 */
     291       1.2   matt 	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
     292       1.2   matt 	and	t1, v1, t2			# save count/compare intr request value
     293       1.2   matt 	nor	t0, zero, t2			# invert the mask
     294       1.2   matt 	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
     295       1.2   matt 	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
     296       1.2   matt 	 li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
     297       1.2   matt 						#  for non-clock hw intrs
     298       1.2   matt 3:
     299       1.2   matt 	or	v1, t1				# combine clock and non-clock-summary
     300       1.2   matt 	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
     301       1.2   matt 4:
     302       1.2   matt 	INT_S	v1, (a0)			# set a (fake) new pending mask
     303       1.2   matt 	j	ra				# and return highest ipl pending
     304       1.2   matt 	 nop
     305       1.2   matt END(_splsw_splintr)
    306       1.2   matt 
     307       1.2   matt STATIC_LEAF(_splsw_splcheck)		# PARANOIA: verify EIMR matches cpl's map entry
     308       1.2   matt #ifdef PARANOIA
     309       1.2   matt 	PTR_L	t0, L_CPU(MIPS_CURLWP)
     310       1.2   matt 	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level
     311       1.2   matt 
     312       1.2   matt 	dmfc0	t0, RMIXL_COP_0_EIMR		# get current EIMR
     313       1.2   matt 
     314       1.2   matt 	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
     315       1.2   matt 	sll	t1, MAP_SCALESHIFT		# shift cpl to array index
     316       1.2   matt 	PTR_ADDU t2, t1
     317       1.2   matt 	REG_L	t3, (t2)			# load value
     318       1.2   matt 1:	bne	t0, t3, 1b			# loop forever if not equal
     319       1.2   matt 	 nop					#  (deliberate hang for debugger attention)
     320       1.2   matt #endif /* PARANOIA */
     321       1.2   matt 	j	ra
     322       1.2   matt 	 nop
     323       1.2   matt END(_splsw_splcheck)
    324       1.2   matt 
     325       1.2   matt 	.rdata
     326       1.2   matt 	.globl _C_LABEL(rmixl_splsw)
     327       1.2   matt _C_LABEL(rmixl_splsw):			# spl(9) function table; entry order is the ABI — do not reorder
     328       1.2   matt         PTR_WORD _C_LABEL(_splsw_splhigh)
     329       1.2   matt         PTR_WORD _C_LABEL(_splsw_splsched)
     330       1.2   matt         PTR_WORD _C_LABEL(_splsw_splvm)
     331       1.2   matt         PTR_WORD _C_LABEL(_splsw_splsoftserial)
     332       1.2   matt         PTR_WORD _C_LABEL(_splsw_splsoftnet)
     333       1.2   matt         PTR_WORD _C_LABEL(_splsw_splsoftbio)
     334       1.2   matt         PTR_WORD _C_LABEL(_splsw_splsoftclock)
     335       1.2   matt         PTR_WORD _C_LABEL(_splsw_splraise)
     336       1.2   matt         PTR_WORD _C_LABEL(_splsw_spl0)
     337       1.2   matt         PTR_WORD _C_LABEL(_splsw_splx)
     338       1.2   matt         PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
     339       1.2   matt         PTR_WORD _C_LABEL(_splsw_splx_noprof)
     340       1.2   matt 	PTR_WORD _C_LABEL(_splsw_setsoftintr)
     341       1.2   matt 	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
     342       1.2   matt 	PTR_WORD _C_LABEL(_splsw_splintr)
     343       1.2   matt 	PTR_WORD _C_LABEL(_splsw_splcheck)
    344