/*	$NetBSD: rmixl_spl.S,v 1.1.2.1 2010/03/21 21:17:01 cliff Exp $	*/

/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"	/* which mips CPU levels do we support? */

#include <sys/cdefs.h>

#include <machine/param.h>
#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_spl.S,v 1.1.2.1 2010/03/21 21:17:01 cliff Exp $");

#include "assym.h"

#define PARANOIA	/* XXX TMP FIXME */

#define MAP_SCALESHIFT		3
#define RMIXL_SOFT_INT_MASK_0	(MIPS_SOFT_INT_MASK_1 >> 8)
#define RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
#define RMIXL_COP_0_EIRR	_(9), 6
#define RMIXL_COP_0_EIMR	_(9), 7
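
/*
 * Note on the values above: the RMI XLR/XLS per-thread EIRR/EIMR
 * registers (COP0 register 9, selects 6 and 7) carry the eight MIPS
 * IP bits in their low byte, while <mips/cpuregs.h> defines the same
 * masks at CAUSE/STATUS bit positions 15..8; hence the ">> 8" in the
 * RMIXL_* definitions.  For example (illustrative, assuming the usual
 * mips encodings): MIPS_INT_MASK_5 == 0x8000, so RMIXL_INT_MASK_5 ==
 * 0x80.  MAP_SCALESHIFT is log2 of the size of an ipl_eimr_map entry
 * (.dword, 8 bytes), so "ipl << MAP_SCALESHIFT" turns an IPL into a
 * byte offset into that table.
 */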


/*
 * Array of masks giving the bits to set in the EIMR when we go
 * to a given hardware interrupt priority level.
 */
	.rdata
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	~0			/* IPL_NONE */
	.dword	~RMIXL_SOFT_INT_MASK_0	/* IPL_SOFT{CLOCK,BIO} */
	.dword	~RMIXL_SOFT_INT_MASK	/* IPL_SOFT{NET,SERIAL} */
	.dword	RMIXL_INT_MASK_5	/* IPL_VM */
	.dword	0			/* IPL_SCHED */
#if IPL_SCHED != IPL_HIGH
	.dword	0			/* IPL_HIGH */
#endif
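
/*
 * Rough C equivalent of the table above (a sketch for the reader only,
 * assuming the folded IPL numbering implied by the entry comments; the
 * assembly below does the real indexing):
 *
 *	uint64_t ipl_eimr_map[] = {
 *		[IPL_NONE]	= ~0,			  // everything enabled
 *		[IPL_SOFTCLOCK]	= ~RMIXL_SOFT_INT_MASK_0, // == IPL_SOFTBIO
 *		[IPL_SOFTNET]	= ~RMIXL_SOFT_INT_MASK,   // == IPL_SOFTSERIAL
 *		[IPL_VM]	= RMIXL_INT_MASK_5,	  // only the clock
 *		[IPL_SCHED]	= 0,			  // nothing
 *	};
 *
 * Every spl*() entry point below ends up loading ipl_eimr_map[ipl]
 * into the EIMR, so raising the IPL simply shrinks the set of
 * interrupt sources left enabled.
 */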

	.text
/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for the spl(9) kernel interface.
 */
_splraise:
	/*
	 * a0 = EIMR bits to be set for this IPL
	 * a1 = this IPL (IPL_*)
	 * Can only use a0-a3 and v0-v1
	 */
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)		# get current IPL from cpu_info
	sltu	v1, a1, v0			# newipl < curipl
	bnez	v1, 2f				# yes, don't change.
	 mfc0	a2, MIPS_COP_0_STATUS		# load STATUS
	and	a2, ~MIPS_INT_MASK		# clear STATUS[IM]
	sll	v1, a0, 8			# EIMR[7:0] to STATUS[15:8]
	and	v1, MIPS_INT_MASK		#  "        "   "
	or	v1, a2				# new STATUS value
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a0, RMIXL_COP_0_EIMR		## set new EIMR
	mtc0	v1, MIPS_COP_0_STATUS		## set new STATUS
#ifdef PARANOIA
	j	ra
	 nop
#endif /* PARANOIA */
#ifdef PARANOIA
	dmfc0	v0, RMIXL_COP_0_EIMR		# get EIMR
1:	bne	a0, v0, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
2:	j	ra
	 nop
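
/*
 * Roughly what _splraise does, expressed in C (illustrative sketch
 * only; write_eimr()/write_status_im() stand in for the dmtc0/mtc0
 * sequence above and are not real functions):
 *
 *	int
 *	_splraise(uint64_t eimr_bits, int ipl)
 *	{
 *		struct cpu_info *ci = curlwp->l_cpu;
 *		int oldipl = ci->ci_cpl;
 *
 *		if (ipl >= oldipl) {
 *			// everything is blocked while cpl, EIMR and
 *			// STATUS[IM] are updated, so an interrupt can
 *			// never observe a half-switched state
 *			ci->ci_cpl = ipl;
 *			write_eimr(eimr_bits);
 *			write_status_im(eimr_bits << 8);
 *		}
 *		return oldipl;
 *	}
 *
 * The old IPL is left in v0 for the _splsw_spl*() wrappers to return.
 */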

STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_L	a3, L_CPU(MIPS_CURLWP)		# get cpu_info
	INT_L	a2, CPU_INFO_CPL(a3)		# get IPL from cpu_info
	beq	a0, a2, 2f			# if same, nothing to do
	 nop
#ifdef PARANOIA
	sltu	v0, a0, a2			# v0 = a0 < a2
99:	beqz	v0, 99b				# loop forever if false
	 nop
#endif /* PARANOIA */
	#move	a1, zero			# avoid lookup on splx(IPL_NONE)
	#beq	a0, zero, 1f			# skip load
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL
1:
	dmfc0	a2, MIPS_COP_0_STATUS		# load STATUS
	and	a2, ~MIPS_INT_MASK		# clear STATUS[IM]
	sll	v0, v1, 8			# EIMR[7:0] to STATUS[15:8]
	and	v0, MIPS_INT_MASK		#  "        "   "
	or	v0, a2				# new STATUS value
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info (KSEG0)
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	mtc0	v0, MIPS_COP_0_STATUS		## set new STATUS
#ifdef PARANOIA
	j	ra
	 nop
#endif /* PARANOIA */
2:
#ifdef PARANOIA
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	a1, (v1)			# load EIMR bits for this IPL
	dmfc0	v1, RMIXL_COP_0_EIMR		# get EIMR
3:	bne	a1, v1, 3b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splx)
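
/*
 * _splsw_splx() is the "set the IPL to exactly this value" half of the
 * pair; a C sketch of the same logic (illustrative only, using the
 * same hypothetical write_eimr()/write_status_im() helpers):
 *
 *	void
 *	_splsw_splx(int ipl)
 *	{
 *		struct cpu_info *ci = curlwp->l_cpu;
 *
 *		if (ipl != ci->ci_cpl) {
 *			uint64_t eimr_bits = ipl_eimr_map[ipl];
 *			ci->ci_cpl = ipl;	// done with EIMR == 0
 *			write_eimr(eimr_bits);
 *			write_status_im(eimr_bits << 8);
 *		}
 *	}
 *
 * The PARANOIA code turns inconsistencies (splx() used to raise, or an
 * EIMR that disagrees with ipl_eimr_map[cpl]) into an obvious hang by
 * spinning, rather than silently continuing.
 */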

STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	mtc0	zero, MIPS_COP_0_CAUSE		# clear SOFT_INT bits
	dmfc0	a2, MIPS_COP_0_STATUS		# load STATUS
	and	a2, ~MIPS_INT_MASK		# clear STATUS[IM]
	sll	v0, v1, 8			# EIMR[7:0] to STATUS[15:8]
	and	v0, MIPS_INT_MASK		#  "        "   "
	or	v0, MIPS_SR_INT_IE		# set STATUS[IE]
	or	v0, a2				# new STATUS value
	mtc0	zero, MIPS_COP_0_STATUS		## disable all interrupts
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## set ipl to 0
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	mtc0	v0, MIPS_COP_0_STATUS		## set new STATUS
	nop
	nop
	j	ra
	 nop
END(_splsw_spl0)

LEAF_NOPROFILE(rmixl_spln)
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v0, (v1)			# load EIMR bits for this IPL
	j	ra
	 nop
END(rmixl_spln)

STATIC_LEAF(_splsw_setsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		## disable interrupts (2 cycles)
	nop
	nop
	mfc0	v0, MIPS_COP_0_CAUSE		## load cause register
	nop
	or	v0, v0, a0			## set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		## store back
	mtc0	v1, MIPS_COP_0_STATUS		## enable interrupts
	j	ra
	nop
END(_splsw_setsoftintr)

STATIC_LEAF(_splsw_clrsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		## disable interrupts (2 cycles)
	nop
	nop
	mfc0	v0, MIPS_COP_0_CAUSE		## load cause register
	nor	a0, zero, a0			## bitwise inverse of a0
	and	v0, v0, a0			## clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		## store back
	mtc0	v1, MIPS_COP_0_STATUS		## enable interrupts
	j	ra
	nop
END(_splsw_clrsoftintr)
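
/*
 * The two helpers above post and retire software interrupts by editing
 * CAUSE[IP] directly, with STATUS cleared around the read-modify-write
 * so it cannot be torn by an interrupt.  Roughly (sketch only; "cause"
 * stands for the COP0 cause register):
 *
 *	void _splsw_setsoftintr(uint32_t mask) { cause |=  mask; }
 *	void _splsw_clrsoftintr(uint32_t mask) { cause &= ~mask; }
 *
 * The mask argument is presumably a MIPS_SOFT_INT_MASK_* value, i.e.
 * already in CAUSE bit positions rather than the ">> 8" EIRR positions
 * used elsewhere in this file.
 */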

STATIC_LEAF(_splsw_splraise)
	move	a1, a0
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)
	sll	a2, a0, MAP_SCALESHIFT
	PTR_ADDU v1, a2
	REG_L	a0, (v1)
	b	_splraise
	 nop
END(_splsw_splraise)

STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)		# get current IPL from cpu_info
	li	a1, IPL_HIGH			#
	beq	v0, a1, 1f			# don't do anything if IPL_HIGH
	mfc0	v1, MIPS_COP_0_STATUS		# load STATUS
	move	a2, zero			# clear for EIMR
	and	a0, v1, MIPS_INT_MASK		# select all interrupts
	xor	a0, v1				# clear STATUS[IM]
	mtc0	zero, MIPS_COP_0_STATUS		## disable all interrupts
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a2, RMIXL_COP_0_EIMR		## set new EIMR
	mtc0	a0, MIPS_COP_0_STATUS		## set new STATUS
	 nop					# XXXXX
#ifdef PARANOIA
	j	ra				# return
	 nop
#endif /* PARANOIA */
1:
#ifdef PARANOIA
	dmfc0	v1, RMIXL_COP_0_EIMR		# load EIMR
2:	bnez	v1, 2b				# loop forever if not 0.
	 nop
#endif /* PARANOIA */
	j	ra				## return
	 nop
END(_splsw_splhigh)

STATIC_LEAF(_splsw_splsched)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED
	li	a1, IPL_SCHED
	b	_splraise
	 nop
END(_splsw_splsched)

STATIC_LEAF(_splsw_splvm)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	li	a1, IPL_VM
	b	_splraise
	 nop
END(_splsw_splvm)

STATIC_LEAF(_splsw_splsoftserial)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL
	li	a1, IPL_SOFTSERIAL
	b	_splraise
	 nop
END(_splsw_splsoftserial)

STATIC_LEAF(_splsw_splsoftnet)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET
	li	a1, IPL_SOFTNET
	b	_splraise
	 nop
END(_splsw_splsoftnet)

STATIC_LEAF(_splsw_splsoftbio)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO
	li	a1, IPL_SOFTBIO
	b	_splraise
	 nop
END(_splsw_splsoftbio)

STATIC_LEAF(_splsw_splsoftclock)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK
	li	a1, IPL_SOFTCLOCK
	b	_splraise
	 nop
END(_splsw_splsoftclock)

STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, RMIXL_COP_0_EIRR		# get active interrupts
						# restrict to hard int bits:
	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 &= ~RMIXL_SOFT_INT_MASK
	xor	v1, ta1				#   "      "

	li	v0, IPL_NONE			# return IPL_NONE ...
	beq	v1, zero, 2f			# ... if nothing pending
	 nop

	li	v0, IPL_VM			# start at IPL_VM
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
						#  ta2 has 'enabled' ints
	and	v1, ta2				# apply to pending bits

1:
	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl in v0
	and	ta2, v1				# any match to pending intrs?
	beq	ta2, zero, 2f			#  no, return ipl

	PTR_ADDU ta3, 1 << MAP_SCALESHIFT	# point to next entry
	addiu	v0, 1				#  increase ipl by 1
	move	v1, ta2				# reduce down pending intrs
	b	1b				# and check them
	 nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit
	 *   the RMI evbmips_iointr function will sort through
	 *   individual EIRR requests
	 */
	beq	v1, zero, 4f			# skip ahead if nothing pending
	 li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
	and	t1, v1, t2			# save count/compare intr request value
	nor	t0, zero, t2			# invert the mask
	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
	 nop
	li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
						#  for non-clock hw intrs
3:
	or	v1, t1				# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)			# set a (fake) new pending mask
	j	ra				# and return highest ipl pending
	 nop
END(_splsw_splintr)
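
/*
 * A C rendering of the scan above (illustrative sketch only;
 * read_eirr() and summarize() are stand-ins for the assembly, not
 * real functions):
 *
 *	int
 *	_splsw_splintr(uint32_t *pending)	// a0: where to store fake IM bits
 *	{
 *		uint64_t pend = read_eirr() & ~RMIXL_SOFT_INT_MASK;
 *		int ipl = IPL_NONE;
 *
 *		if (pend != 0) {
 *			// walk up from IPL_VM until that IPL would mask
 *			// every interrupt that is still pending
 *			ipl = IPL_VM;
 *			pend &= ipl_eimr_map[IPL_VM - 1];
 *			while ((pend & ipl_eimr_map[ipl]) != 0) {
 *				pend &= ipl_eimr_map[ipl];
 *				ipl++;
 *			}
 *		}
 *		*pending = summarize(pend);	// MASK_5 and/or MASK_1 summary
 *		return ipl;
 *	}
 */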

STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level

	dmfc0	t0, RMIXL_COP_0_EIMR		# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT		# shift cpl to array offset
	PTR_ADDU t2, t1
	REG_L	t3, (t2)			# load value
1:	bne	t0, t3, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splcheck)

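/*
 * Jump table handed to the MI mips code: the order of these entries is
 * significant and presumably must match the MI "struct splsw" layout
 * (a struct of function pointers), so it should not be reordered
 * independently of that definition.
 */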
	.rdata
	.globl _C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)