Home | History | Annotate | Line # | Download | only in rmi
rmixl_spl.S revision 1.5.2.1
      1  1.5.2.1  thorpej /*	$NetBSD: rmixl_spl.S,v 1.5.2.1 2020/12/14 14:37:57 thorpej Exp $	*/
      2      1.2     matt 
      3      1.2     matt /*-
      4      1.2     matt  * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
      5      1.2     matt  * All rights reserved.
      6      1.2     matt  *
      7      1.2     matt  * This code is derived from software contributed to The NetBSD Foundation
      8      1.2     matt  * by Matt Thomas <matt (at) 3am-software.com>.
      9      1.2     matt  *
     10      1.2     matt  * Redistribution and use in source and binary forms, with or without
     11      1.2     matt  * modification, are permitted provided that the following conditions
     12      1.2     matt  * are met:
     13      1.2     matt  * 1. Redistributions of source code must retain the above copyright
     14      1.2     matt  *    notice, this list of conditions and the following disclaimer.
     15      1.2     matt  * 2. Redistributions in binary form must reproduce the above copyright
     16      1.2     matt  *    notice, this list of conditions and the following disclaimer in the
     17      1.2     matt  *    documentation and/or other materials provided with the distribution.
     18      1.2     matt  *
     19      1.2     matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20      1.2     matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21      1.2     matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22      1.2     matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23      1.2     matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24      1.2     matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25      1.2     matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26      1.2     matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27      1.2     matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28      1.2     matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29      1.2     matt  * POSSIBILITY OF SUCH DAMAGE.
     30      1.2     matt  */
     31      1.2     matt 
     32      1.2     matt #include "opt_cputype.h"	/* which mips CPU levels do we support? */
     33      1.2     matt 
     34      1.2     matt #include <sys/cdefs.h>
     35      1.2     matt 
     36      1.2     matt #include <mips/asm.h>
     37      1.2     matt #include <mips/cpuregs.h>
     38      1.2     matt 
     39  1.5.2.1  thorpej RCSID("$NetBSD: rmixl_spl.S,v 1.5.2.1 2020/12/14 14:37:57 thorpej Exp $");
     40      1.2     matt 
     41      1.2     matt #include "assym.h"
     42      1.2     matt 
      43      1.2     matt #define MAP_SCALESHIFT		3
/*
 * CAUSE/STATUS-style MIPS interrupt mask bits shifted right by 8 into
 * the bit positions used by the extended interrupt registers (EIRR/EIMR)
 * accessed by the routines below.
 */
      44      1.2     matt #define RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
      45      1.2     matt #define RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
      46      1.3    cliff #define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
      47      1.3    cliff #define RMIXL_EIRR_PRESERVE	(RMIXL_INT_MASK_5 | RMIXL_SOFT_INT_MASK)
      48      1.2     matt #define RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
/*
 * NOTE(review): RMIXL_INT_MASK_5 is already defined above with the
 * identical expansion; this second definition is redundant (harmless,
 * since cpp accepts identical redefinitions) and could be dropped.
 */
      49      1.2     matt #define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
      50      1.2     matt 
/* Delay slots below are explicit -- do not let the assembler reorder. */
      51      1.2     matt 	.set noreorder
     52      1.2     matt 
     53  1.5.2.1  thorpej /*
     54      1.2     matt  * Array of mask of bits to set in the EIMR when we go to a
     55      1.2     matt  * given hardware interrupt priority level.
     56      1.2     matt  * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant
     57      1.2     matt  * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
     58      1.2     matt  */
      59      1.2     matt 	.data
      60      1.2     matt 	.globl	_C_LABEL(ipl_eimr_map)
      61      1.2     matt 	.type	_C_LABEL(ipl_eimr_map),@object
      62      1.2     matt 	.p2align MAP_SCALESHIFT
	/*
	 * One 8-byte (.dword) EIMR image per IPL; the code below indexes
	 * it as ipl_eimr_map + (ipl << MAP_SCALESHIFT).  A zero entry
	 * masks everything until rmixl_vec_establish() sets hard-intr
	 * bits at runtime (see the comment above).
	 */
      63      1.2     matt _C_LABEL(ipl_eimr_map):
      64      1.2     matt 	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
      65      1.2     matt 	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
      66      1.2     matt 	.dword	0			/* IPL_SOFT{NET,SERIAL} */
      67      1.2     matt 	.dword	0			/* IPL_VM */
      68      1.2     matt 	.dword	0			/* IPL_SCHED */
      69      1.2     matt 	.dword	0			/* IPL_DDB */
      70      1.2     matt 	.dword	0			/* IPL_HIGH */
      71      1.2     matt 
      72      1.2     matt 	.text
     73      1.2     matt 
     74      1.2     matt /*
     75      1.2     matt  * initialize cp0 interrupt control for this cpu
     76      1.2     matt  * - set STATUS[IE]
     77      1.2     matt  * - clear EIRR and EIMR
     78      1.2     matt  * on return, all interrupts are disabled by EIMR
     79      1.2     matt  *
     80      1.2     matt  * henceforth STATUS[IE] is expected to remain normally set
     81      1.2     matt  * but may be cleared and restored for temporary interrupt disablement
     82      1.2     matt  *
     83      1.2     matt  * call before the first call to spl0 on this cpu
     84      1.2     matt  */
      85      1.2     matt LEAF_NOPROFILE(rmixl_spl_init_cpu)
	/*
	 * t0 = caller's STATUS with IE forced on; it is written back
	 * last, so this returns with STATUS[IE] set while EIMR still
	 * masks all extended interrupts and EIRR shows none pending.
	 */
      86      1.2     matt 	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
      87      1.2     matt 	ori	t0, MIPS_SR_INT_IE		# set IE
      88      1.2     matt 	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
      89      1.3    cliff 	COP0_SYNC
      90      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		##  "       "   "   "  EIMR
      91      1.3    cliff 	COP0_SYNC
      92      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIRR		## clear EIRR
      93      1.3    cliff 	COP0_SYNC
      94      1.2     matt 	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE
      95      1.3    cliff 	JR_HB_RA
      96      1.2     matt END(rmixl_spl_init_cpu)
     97      1.2     matt 
     98      1.2     matt /*
     99      1.2     matt  * RMIXL processor interrupt control
    100      1.2     matt  *
    101      1.2     matt  * Used as building blocks for spl(9) kernel interface.
    102      1.2     matt  */
    103      1.3    cliff 
     104      1.2     matt _splraise:
     105      1.2     matt 	/*
     106      1.3    cliff 	 * a0 = EIMR bits requested to be set for this IPL
     107      1.2     matt 	 * a1 = this IPL (IPL_*)
     108      1.2     matt 	 * Can only use a0-a3 and v0-v1
     109      1.3    cliff 	 * old IPL is returned in v0
     110      1.2     matt 	 */
	/*
	 * Common tail for the _splsw_spl*() raise entry points below.
	 * The '##'-commented instructions run with EIMR == 0 (all
	 * extended interrupts masked), making the cpl update and the
	 * final EIMR write atomic with respect to interrupts.
	 */
     111      1.5   simonb 	dmfc0	a2, MIPS_COP_0_EIMR		# save EIMR
     112      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
     113      1.3    cliff 	COP0_SYNC
     114      1.3    cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## a3 = curlwp's cpu_info
     115      1.3    cliff 	INT_L	v0, CPU_INFO_CPL(a3)		## get current IPL from cpu_info
     116      1.3    cliff 	sltu	v1, a1, v0			## newipl < curipl
     117      1.3    cliff 	bnez	v1, 1f				## yes, don't change.
     118      1.2     matt 	 nop
     119      1.2     matt 	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     120      1.5   simonb 	dmtc0	a0, MIPS_COP_0_EIMR		## set new EIMR
     121      1.3    cliff 	JR_HB_RA
     122      1.3    cliff 1:
     123      1.5   simonb 	dmtc0	a2, MIPS_COP_0_EIMR		## restore saved EIMR
     124      1.3    cliff 	JR_HB_RA
    125      1.2     matt 
     126      1.2     matt STATIC_LEAF(_splsw_splx)
     127      1.2     matt STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	/*
	 * splx(a0): switch unconditionally to IPL a0 (no comparison with
	 * the current level, unlike _splraise) and install that level's
	 * EIMR image from ipl_eimr_map.
	 */
     128      1.2     matt 	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
     129      1.2     matt 	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
     130      1.2     matt 	PTR_ADDU v1, a2				# add to table addr
     131      1.2     matt 	REG_L	v1, (v1)			# load EIMR bits for this IPL
     132      1.3    cliff 
     133      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
     134      1.3    cliff 	COP0_SYNC
     135      1.3    cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
     136      1.2     matt 	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     137      1.5   simonb 	dmtc0	v1, MIPS_COP_0_EIMR		## set new EIMR
     138      1.3    cliff 	JR_HB_RA
     139      1.2     matt END(_splsw_splx)
    140      1.2     matt 
     141      1.2     matt STATIC_LEAF(_splsw_spl0)
	/* spl0(): drop to IPL_NONE, unmasking whatever ipl_eimr_map[IPL_NONE] allows. */
     142      1.2     matt 	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
     143      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
     144      1.3    cliff 	COP0_SYNC
     145      1.3    cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
     146      1.2     matt #if IPL_NONE == 0
     147      1.3    cliff 	INT_S	zero, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     148      1.2     matt #else
     149      1.2     matt #error IPL_NONE != 0
     150      1.2     matt #endif
     151      1.5   simonb 	dmtc0	v1, MIPS_COP_0_EIMR		## set new EIMR
     152      1.3    cliff 	JR_HB_RA
     153      1.2     matt END(_splsw_spl0)
    154      1.2     matt 
     155      1.2     matt STATIC_LEAF(_splsw_setsoftintr)
	/*
	 * Post a softint: a0 arrives as a MIPS CAUSE soft-int bit and is
	 * shifted into EIRR position.  The read-modify-write of EIRR
	 * keeps only the clock + softint bits (RMIXL_EIRR_PRESERVE);
	 * presumably writing other pending EIRR bits back would ack
	 * hardware interrupts -- confirm against the XLR/XLS PRM.
	 */
     156      1.3    cliff 	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
     157      1.3    cliff 	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
     158      1.5   simonb 	dmfc0	v1, MIPS_COP_0_EIMR		# save EIMR register
     159      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
     160      1.3    cliff 	COP0_SYNC
     161      1.5   simonb 	dmfc0	v0, MIPS_COP_0_EIRR		## load EIRR
     162      1.3    cliff 	and	v0, RMIXL_EIRR_PRESERVE		## preserve clock & softints
     163      1.3    cliff 	or	v0, a0				## set new softint bit
     164      1.5   simonb 	dmtc0	v0, MIPS_COP_0_EIRR		## store EIRR
     165      1.3    cliff 	COP0_SYNC
     166      1.5   simonb 	dmtc0	v1, MIPS_COP_0_EIMR		## restore EIMR
     167      1.3    cliff 	JR_HB_RA
     168      1.2     matt END(_splsw_setsoftintr)
    169      1.2     matt 
     170      1.2     matt STATIC_LEAF(_splsw_clrsoftintr)
	/*
	 * Clear a posted softint.  a0 becomes RMIXL_EIRR_PRESERVE with
	 * the one requested softint bit removed: the xor clears it,
	 * since the (masked) a0 is a subset of the preserve mask, so the
	 * 'and' below drops exactly that bit from EIRR.
	 */
     171      1.3    cliff 	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
     172      1.3    cliff 	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
     173      1.3    cliff 	xor	a0, RMIXL_EIRR_PRESERVE		# clear from preserve mask
     174      1.5   simonb 	dmfc0	v1, MIPS_COP_0_EIMR		# save EIMR register
     175      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
     176      1.3    cliff 	COP0_SYNC
     177      1.5   simonb 	dmfc0	v0, MIPS_COP_0_EIRR		## load EIRR
     178      1.3    cliff 	and	v0, a0				## apply preserve mask
     179      1.5   simonb 	dmtc0	v0, MIPS_COP_0_EIRR		## store EIRR
     180      1.3    cliff 	COP0_SYNC
     181      1.5   simonb 	dmtc0	v1, MIPS_COP_0_EIMR		## restore EIMR
     182      1.3    cliff 	JR_HB_RA
     183      1.2     matt END(_splsw_clrsoftintr)
    184      1.2     matt 
     185      1.2     matt STATIC_LEAF(_splsw_splraise)
	/* splraise(ipl): a1 = requested IPL, a0 = its EIMR image; tail-branch to _splraise. */
     186      1.2     matt 	move	a1, a0
     187      1.2     matt 	PTR_LA	v1, _C_LABEL(ipl_eimr_map)
     188      1.2     matt 	sll	a2, a0, MAP_SCALESHIFT
     189      1.2     matt 	PTR_ADDU v1, a2
     190      1.2     matt 	REG_L	a0, (v1)
     191      1.2     matt 	b	_splraise
     192      1.2     matt 	 nop
     193      1.2     matt END(_splsw_splraise)
    194      1.2     matt 
     195      1.2     matt STATIC_LEAF(_splsw_splhigh)
     196      1.2     matt STATIC_XLEAF(_splsw_splhigh_noprof)
	/*
	 * splhigh(): return old IPL in v0 and leave EIMR == 0, so the
	 * cpu stays fully masked until a later splx()/spl0().
	 */
     197      1.5   simonb 	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
     198      1.3    cliff 	COP0_SYNC
     199      1.3    cliff 	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info from curlwp
     200      1.3    cliff 	li	a1, IPL_HIGH			##
     201      1.3    cliff 	INT_L	v0, CPU_INFO_CPL(a3)		## old IPL for return value
     202      1.2     matt 	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
     203      1.2     matt 						## interrupts remain disabled!
     204      1.2     matt 	j	ra				# return
     205      1.2     matt 	 nop
     206      1.2     matt END(_splsw_splhigh)
    207      1.2     matt 
     208      1.2     matt STATIC_LEAF(_splsw_splddb)
	/* splraise(IPL_DDB): load its EIMR image and join the common _splraise tail. */
     209      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB
     210      1.2     matt 	li	a1, IPL_DDB
     211      1.2     matt 	b	_splraise
     212      1.2     matt 	 nop
     213      1.2     matt END(_splsw_splddb)
    214      1.2     matt 
     215      1.2     matt STATIC_LEAF(_splsw_splsched)
	/* splraise(IPL_SCHED): load its EIMR image and join the common _splraise tail. */
     216      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED
     217      1.2     matt 	li	a1, IPL_SCHED
     218      1.2     matt 	b	_splraise
     219      1.2     matt 	 nop
     220      1.2     matt END(_splsw_splsched)
    221      1.2     matt 
     222      1.2     matt STATIC_LEAF(_splsw_splvm)
	/* splraise(IPL_VM): load its EIMR image and join the common _splraise tail. */
     223      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
     224      1.2     matt 	li	a1, IPL_VM
     225      1.2     matt 	b	_splraise
     226      1.2     matt 	 nop
     227      1.2     matt END(_splsw_splvm)
    228      1.2     matt 
     229      1.2     matt STATIC_LEAF(_splsw_splsoftserial)
	/* splraise(IPL_SOFTSERIAL): load its EIMR image and join the common _splraise tail. */
     230      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL
     231      1.2     matt 	li	a1, IPL_SOFTSERIAL
     232      1.2     matt 	b	_splraise
     233      1.2     matt 	 nop
     234      1.2     matt END(_splsw_splsoftserial)
    235      1.2     matt 
     236      1.2     matt STATIC_LEAF(_splsw_splsoftnet)
	/* splraise(IPL_SOFTNET): load its EIMR image and join the common _splraise tail. */
     237      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET
     238      1.2     matt 	li	a1, IPL_SOFTNET
     239      1.2     matt 	b	_splraise
     240      1.2     matt 	 nop
     241      1.2     matt END(_splsw_splsoftnet)
    242      1.2     matt 
     243      1.2     matt STATIC_LEAF(_splsw_splsoftbio)
	/* splraise(IPL_SOFTBIO): load its EIMR image and join the common _splraise tail. */
     244      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO
     245      1.2     matt 	li	a1, IPL_SOFTBIO
     246      1.2     matt 	b	_splraise
     247      1.2     matt 	 nop
     248      1.2     matt END(_splsw_splsoftbio)
    249      1.2     matt 
     250      1.2     matt STATIC_LEAF(_splsw_splsoftclock)
	/* splraise(IPL_SOFTCLOCK): load its EIMR image and join the common _splraise tail. */
     251      1.2     matt 	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK
     252      1.2     matt 	li	a1, IPL_SOFTCLOCK
     253      1.2     matt 	b	_splraise
     254      1.2     matt 	 nop
     255      1.2     matt END(_splsw_splsoftclock)
    256      1.2     matt 
     257      1.2     matt STATIC_LEAF(_splsw_splintr)
	/*
	 * Find the highest-priority pending-and-enabled hardware
	 * interrupt level.  Returns that IPL in v0 (IPL_NONE if nothing
	 * is pending) and stores a CAUSE-style 'IM' pending mask at the
	 * address in a0.
	 */
     258      1.5   simonb 	dmfc0	ta1, MIPS_COP_0_EIRR		# get active interrupts
     259      1.2     matt 						# restrict to hard int bits:
     260      1.2     matt 	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 = ta1 & ~RMIXL_SOFT_INT_MASK
     261      1.2     matt 	xor	v1, ta1				#   "       "
     262      1.2     matt 
     263      1.2     matt 	li	v0, IPL_NONE
     264      1.2     matt 	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
     265      1.2     matt 	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
     266      1.2     matt 	and	v1, ta2				# apply to pending bits
     267      1.2     matt 	beq	v1, zero, 4f			# if nothing pending...
     268      1.2     matt 	 nop					# ... return IPL_NONE
     269      1.2     matt 
     270      1.2     matt 	li	v0, IPL_VM			# ipl=IPL_VM
     271      1.2     matt 1:
	/* Walk up the table until an IPL's EIMR image masks off every still-pending bit. */
     272      1.2     matt 	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
     273      1.2     matt 	and	ta2, v1				# any match to pending intrs?
     274      1.2     matt 	beq	ta2, zero, 2f			#  no, return ipl
     275      1.2     matt 	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT	#  point to next entry
     276      1.2     matt 	addiu	v0, 1				# ipl++
     277      1.2     matt 	move	v1, ta2				# update highest pending
     278      1.2     matt 	b	1b				# loop
     279      1.2     matt 	 nop
     280      1.2     matt 
     281      1.2     matt 2:
     282      1.2     matt 	/*
     283      1.2     matt 	 * Emulate the CP0_SR 'IM' bits in 'pending'
     284      1.2     matt 	 * - if clock intr is requested, set MIPS_INT_MASK_5
     285      1.2     matt 	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit
     286      1.2     matt 	 *   the RMI evbmips_iointr function will sort through
     287      1.2     matt 	 *   individual EIRR requests
     288      1.2     matt 	 */
     289      1.2     matt 	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
     290      1.2     matt 	and	t1, v1, t2			# save count/compare intr request value
     291      1.2     matt 	nor	t0, zero, t2			# invert the mask
     292      1.2     matt 	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	/*
	 * NOTE(review): the delay slot of this beq executes on both
	 * paths, and both paths converge at '3:', so v1 is set to
	 * RMIXL_INT_MASK_1 whether or not the branch is taken -- the
	 * summary bit is therefore reported even for a clock-only
	 * interrupt.  Confirm this is intended.
	 */
     293      1.2     matt 	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
     294      1.2     matt 	 li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
     295      1.2     matt 						#  for non-clock hw intrs
     296      1.2     matt 3:
     297      1.2     matt 	or	v1, t1				# combine clock and non-clock-summary
     298      1.2     matt 	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
     299      1.2     matt 4:
     300      1.2     matt 	INT_S	v1, (a0)			# set a (fake) new pending mask
     301      1.2     matt 	j	ra				# and return highest ipl pending
     302      1.2     matt 	 nop
     303      1.2     matt END(_splsw_splintr)
    304      1.2     matt 
     305      1.2     matt STATIC_LEAF(_splsw_splcheck)
     306      1.2     matt #ifdef PARANOIA
	/*
	 * Sanity check: the live EIMR must equal ipl_eimr_map[cpl].
	 * On mismatch the cpu hangs in the '1:' loop so the state can be
	 * inspected from a debugger.
	 */
     307      1.2     matt 	PTR_L	t0, L_CPU(MIPS_CURLWP)
     308      1.2     matt 	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level
     309      1.2     matt 
     310      1.5   simonb 	dmfc0	t0, MIPS_COP_0_EIMR		# get current EIMR
     311      1.2     matt 
     312      1.2     matt 	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
     313      1.2     matt 	sll	t1, MAP_SCALESHIFT		# shift cpl to array index
     314      1.2     matt 	PTR_ADDU t2, t1
     315      1.2     matt 	REG_L	t3, (t2)			# load value
     316      1.2     matt 1:	bne	t0, t3, 1b			# loop forever if not equal
     317      1.2     matt 	 nop
     318      1.2     matt #endif /* PARANOIA */
     319      1.2     matt 	j	ra
     320      1.2     matt 	 nop
     321      1.2     matt END(_splsw_splcheck)
    322      1.2     matt 
     323      1.2     matt 	.rdata
     324      1.2     matt 	.globl _C_LABEL(rmixl_splsw)
	/*
	 * spl switch table exported to the MIPS interrupt framework.
	 * NOTE(review): entry order must match the splsw function-
	 * pointer layout the framework expects (struct splsw) -- keep in
	 * sync when adding entries.
	 */
     325      1.2     matt _C_LABEL(rmixl_splsw):
     326      1.2     matt         PTR_WORD _C_LABEL(_splsw_splhigh)
     327      1.2     matt         PTR_WORD _C_LABEL(_splsw_splsched)
     328      1.2     matt         PTR_WORD _C_LABEL(_splsw_splvm)
     329      1.2     matt         PTR_WORD _C_LABEL(_splsw_splsoftserial)
     330      1.2     matt         PTR_WORD _C_LABEL(_splsw_splsoftnet)
     331      1.2     matt         PTR_WORD _C_LABEL(_splsw_splsoftbio)
     332      1.2     matt         PTR_WORD _C_LABEL(_splsw_splsoftclock)
     333      1.2     matt         PTR_WORD _C_LABEL(_splsw_splraise)
     334      1.2     matt         PTR_WORD _C_LABEL(_splsw_spl0)
     335      1.2     matt         PTR_WORD _C_LABEL(_splsw_splx)
     336      1.2     matt         PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
     337      1.2     matt         PTR_WORD _C_LABEL(_splsw_splx_noprof)
     338      1.2     matt 	PTR_WORD _C_LABEL(_splsw_setsoftintr)
     339      1.2     matt 	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
     340      1.2     matt 	PTR_WORD _C_LABEL(_splsw_splintr)
     341      1.2     matt 	PTR_WORD _C_LABEL(_splsw_splcheck)
     342
    342