rmixl_spl.S revision 1.1.2.3 1 1.1.2.3 cliff /* $NetBSD: rmixl_spl.S,v 1.1.2.3 2010/05/21 23:35:21 cliff Exp $ */
2 1.1.2.1 cliff
3 1.1.2.1 cliff /*-
4 1.1.2.1 cliff * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
5 1.1.2.1 cliff * All rights reserved.
6 1.1.2.1 cliff *
7 1.1.2.1 cliff * This code is derived from software contributed to The NetBSD Foundation
8 1.1.2.1 cliff * by Matt Thomas <matt (at) 3am-software.com>.
9 1.1.2.1 cliff *
10 1.1.2.1 cliff * Redistribution and use in source and binary forms, with or without
11 1.1.2.1 cliff * modification, are permitted provided that the following conditions
12 1.1.2.1 cliff * are met:
13 1.1.2.1 cliff * 1. Redistributions of source code must retain the above copyright
14 1.1.2.1 cliff * notice, this list of conditions and the following disclaimer.
15 1.1.2.1 cliff * 2. Redistributions in binary form must reproduce the above copyright
16 1.1.2.1 cliff * notice, this list of conditions and the following disclaimer in the
17 1.1.2.1 cliff * documentation and/or other materials provided with the distribution.
18 1.1.2.1 cliff *
19 1.1.2.1 cliff * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1.2.1 cliff * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1.2.1 cliff * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1.2.1 cliff * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1.2.1 cliff * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1.2.1 cliff * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1.2.1 cliff * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1.2.1 cliff * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1.2.1 cliff * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1.2.1 cliff * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1.2.1 cliff * POSSIBILITY OF SUCH DAMAGE.
30 1.1.2.1 cliff */
31 1.1.2.1 cliff
32 1.1.2.1 cliff #include "opt_cputype.h" /* which mips CPU levels do we support? */
33 1.1.2.1 cliff
34 1.1.2.1 cliff #include <sys/cdefs.h>
35 1.1.2.1 cliff
36 1.1.2.1 cliff #include <machine/param.h>
37 1.1.2.1 cliff #include <mips/asm.h>
38 1.1.2.1 cliff #include <mips/cpuregs.h>
39 1.1.2.1 cliff
40 1.1.2.3 cliff RCSID("$NetBSD: rmixl_spl.S,v 1.1.2.3 2010/05/21 23:35:21 cliff Exp $");
41 1.1.2.1 cliff
42 1.1.2.1 cliff #include "assym.h"
43 1.1.2.1 cliff
44 1.1.2.1 cliff
/*
 * Local constants:
 *   MAP_SCALESHIFT	- log2 of the ipl_eimr_map entry size (8 bytes),
 *			  so an IPL becomes a table byte-offset via one shift
 *   RMIXL_*_MASK*	- the CP0 STATUS/CAUSE interrupt mask bits expressed
 *			  in EIRR/EIMR bit positions (STATUS[IM] >> 8)
 *   RMIXL_COP_0_EIRR/EIMR - RMI extended interrupt request/mask registers,
 *			  COP0 register 9, selects 6 and 7
 */
#define	MAP_SCALESHIFT		3
#define	RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define	RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define	RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define	RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
#define	RMIXL_COP_0_EIRR	_(9), 6
#define	RMIXL_COP_0_EIMR	_(9), 7
52 1.1.2.1 cliff
53 1.1.2.1 cliff
/*
 * Array of mask of bits to set in the EIMR when we go to a
 * given hardware interrupt priority level.
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant.
 * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
 * Entries are 8 bytes (1 << MAP_SCALESHIFT) wide; the spl code indexes
 * this table with (ipl << MAP_SCALESHIFT).
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */
72 1.1.2.1 cliff
	.text
/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for spl(9) kernel interface.
 */

/*
 * _splraise: common tail for the splXXX() entry points below.
 *
 * In:	a0 = EIMR bits to be set for this IPL
 *	a1 = this IPL (IPL_*)
 * Out:	v0 = previous IPL (from cpu_info::ci_cpl)
 * Can only use a0-a3 and v0-v1.
 * Never lowers the IPL: if the new IPL is below the current one the
 * call is a no-op.  Lines commented with "##" execute with all
 * interrupts disabled so cpl, EIMR and STATUS stay consistent.
 */
_splraise:
	PTR_L	a3, L_CPU(MIPS_CURLWP)	# a3 = curcpu()
	INT_L	v0, CPU_INFO_CPL(a3)	# get current IPL from cpu_info
	sltu	v1, a1, v0		# newipl < curipl?
	bnez	v1, 2f			# yes, don't change.
	mfc0	a2, MIPS_COP_0_STATUS	# load STATUS
	and	a2, ~MIPS_INT_MASK	# clear STATUS[IM]
	sll	v1, a0, 8		# EIMR[7:0] to STATUS[15:8]
	and	v1, MIPS_INT_MASK	#  "    "    "
	or	v1, a2			# new STATUS value
	mtc0	zero, MIPS_COP_0_STATUS	## disable all ints in STATUS
	INT_S	a1, CPU_INFO_CPL(a3)	## save IPL in cpu_info
	dmtc0	a0, RMIXL_COP_0_EIMR	## set new EIMR
	mtc0	v1, MIPS_COP_0_STATUS	## set new STATUS
#ifdef PARANOIA
	j	ra
	nop
#endif /* PARANOIA */
#ifdef PARANOIA
	/*
	 * NOTE(review): this check appears unreachable -- the raise path
	 * above returns under PARANOIA and the no-change path branches
	 * straight to 2f.  Presumably vestigial; confirm before relying
	 * on it.
	 */
	dmfc0	v0, RMIXL_COP_0_EIMR	# get EIMR
1:	bne	a0, v0, 1b		# loop forever if not equal
	nop
#endif /* PARANOIA */
2:	j	ra
	nop
109 1.1.2.1 cliff
/*
 * _splsw_splx: set the IPL to a0 (typically lowering it), loading the
 * matching EIMR enable set from ipl_eimr_map and mirroring EIMR[7:0]
 * into STATUS[15:8].  If the IPL is unchanged this is a no-op.
 * Lines commented with "##" execute with all interrupts disabled.
 */
STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)	# does not get mcount hooks
	PTR_L	a3, L_CPU(MIPS_CURLWP)	# get cpu_info
	INT_L	a2, CPU_INFO_CPL(a3)	# get IPL from cpu_info
	beq	a0, a2, 2f		# if same, nothing to do
	nop
#ifdef PARANOIA
	sltu	v0, a0, a2		# v0 = a0 < a2 (splx may only lower)
99:	beqz	v0, 99b			# loop forever if false
	nop
#endif /* PARANOIA */
#move	a1, zero			# avoid lookup on splx(IPL_NONE)
#beq	a0, zero, 1f			# skip load
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # get address of table
	sll	a2, a0, MAP_SCALESHIFT	# convert IPL to array offset
	PTR_ADDU v1, a2			# add to table addr
	REG_L	v1, (v1)		# load EIMR bits for this IPL
1:
	dmfc0	a2, MIPS_COP_0_STATUS	# load STATUS
	and	a2, ~MIPS_INT_MASK	# clear STATUS[IM]
	sll	v0, v1, 8		# EIMR[7:0] to STATUS[15:8]
	and	v0, MIPS_INT_MASK	#  "    "    "
	or	v0, a2			# new STATUS value
	dmtc0	zero, RMIXL_COP_0_EIMR	## disable all interrupts
	INT_S	a0, CPU_INFO_CPL(a3)	## save IPL in cpu_info (KSEG0)
	dmtc0	v1, RMIXL_COP_0_EIMR	## set new EIMR
	mtc0	v0, MIPS_COP_0_STATUS	## set new STATUS
#ifdef PARANOIA
	j	ra
	nop
#endif /* PARANOIA */
2:
#ifdef PARANOIA
	/* no-change path: verify EIMR matches the table entry for a0 */
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # get address of table
	sll	a2, a0, MAP_SCALESHIFT	# convert IPL to array offset
	PTR_ADDU v1, a2			# add to table addr
	REG_L	a1, (v1)		# load EIMR bits for this IPL
	dmfc0	v1, RMIXL_COP_0_EIMR	# get EIMR
3:	bne	a1, v1, 3b		# loop forever if not equal
	nop
#endif /* PARANOIA */
	j	ra
	nop
END(_splsw_splx)
154 1.1.2.1 cliff
/*
 * _splsw_spl0: drop the IPL all the way to IPL_NONE.
 * Clears the (writable) soft interrupt bits in CAUSE, installs the
 * IPL_NONE EIMR enable set, and sets STATUS[IE] with EIMR[7:0]
 * mirrored into STATUS[15:8].
 * Lines commented with "##" execute with all interrupts disabled.
 */
STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	mtc0	zero, MIPS_COP_0_CAUSE	# clear SOFT_INT bits
	dmfc0	a2, MIPS_COP_0_STATUS	# load STATUS
	and	a2, ~MIPS_INT_MASK	# clear STATUS[IM]
	sll	v0, v1, 8		# EIMR[7:0] to STATUS[15:8]
	and	v0, MIPS_INT_MASK	#  "    "    "
	or	v0, MIPS_SR_INT_IE	# set STATUS[IE]
	or	v0, a2			# new STATUS value
	mtc0	zero, MIPS_COP_0_STATUS	## disable all interrupts
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)	## set ipl to 0
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, RMIXL_COP_0_EIMR	## set new EIMR
	mtc0	v0, MIPS_COP_0_STATUS	## set new STATUS
	nop				# presumably a CP0 write hazard
	nop				#  settle -- confirm against core docs
	j	ra
	nop
END(_splsw_spl0)
178 1.1.2.1 cliff
/*
 * rmixl_spln(ipl): return (in v0) the EIMR enable bits recorded in
 * ipl_eimr_map[] for the IPL given in a0.  Clobbers a2 and v1.
 */
LEAF_NOPROFILE(rmixl_spln)
	sll	a2, a0, MAP_SCALESHIFT	# a2 = ipl * table entry size
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = table base
	PTR_ADDU v1, a2			# v1 = &ipl_eimr_map[ipl]
	REG_L	v0, (v1)		# v0 = EIMR bits for this IPL
	j	ra
	nop
END(rmixl_spln)
187 1.1.2.1 cliff
/*
 * _splsw_setsoftintr: post the soft interrupt bit(s) given in a0
 * (CAUSE-register format) with a read-modify-write of CAUSE.
 * Interrupts are disabled around the sequence to keep it atomic;
 * "##" marks the instructions inside that window.
 */
STATIC_LEAF(_splsw_setsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS	# save status register
	mtc0	zero, MIPS_COP_0_STATUS	## disable interrupts (2 cycles)
	nop				## CP0 write hazard padding
	nop				##  "
	mfc0	v0, MIPS_COP_0_CAUSE	## load cause register
	nop
	or	v0, v0, a0		## set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE	## store back
	mtc0	v1, MIPS_COP_0_STATUS	## restore STATUS (re-enable ints)
	j	ra
	nop
END(_splsw_setsoftintr)
201 1.1.2.1 cliff
/*
 * _splsw_clrsoftintr: clear the soft interrupt bit(s) given in a0
 * (CAUSE-register format) with a read-modify-write of CAUSE.
 * Interrupts are disabled around the sequence to keep it atomic;
 * "##" marks the instructions inside that window.
 */
STATIC_LEAF(_splsw_clrsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS	# save status register
	mtc0	zero, MIPS_COP_0_STATUS	## disable interrupts (2 cycles)
	nop				## CP0 write hazard padding
	nop				##  "
	mfc0	v0, MIPS_COP_0_CAUSE	## load cause register
	nor	a0, zero, a0		## a0 = ~a0
	and	v0, v0, a0		## clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE	## store back
	mtc0	v1, MIPS_COP_0_STATUS	## restore STATUS (re-enable ints)
	j	ra
	nop
END(_splsw_clrsoftintr)
215 1.1.2.1 cliff
/*
 * splraise(ipl): raise to an arbitrary IPL given in a0.
 * Translates the IPL into its EIMR enable set (left in a0) and the raw
 * level (a1), then branches to the shared _splraise tail, which
 * returns the previous IPL in v0.
 */
STATIC_LEAF(_splsw_splraise)
	sll	a2, a0, MAP_SCALESHIFT	# a2 = ipl * table entry size
	move	a1, a0			# a1 = requested IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = table base
	PTR_ADDU v1, a2			# v1 = &ipl_eimr_map[ipl]
	REG_L	a0, (v1)		# a0 = EIMR bits for this IPL
	b	_splraise		# shared raise path
	nop
END(_splsw_splraise)
225 1.1.2.1 cliff
/*
 * _splsw_splhigh: raise the IPL to IPL_HIGH, masking everything
 * (EIMR = 0 and STATUS[IM] = 0).  Returns the previous IPL in v0.
 * If already at IPL_HIGH nothing is changed.
 * Lines commented with "##" execute with all interrupts disabled.
 */
STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)	# get current IPL from cpu_info
	li	a1, IPL_HIGH		#
	beq	v0, a1, 1f		# don't do anything if IPL_HIGH
	mfc0	v1, MIPS_COP_0_STATUS	# load STATUS
	move	a2, zero		# a2 = 0, the new (fully masked) EIMR
	and	a0, v1, MIPS_INT_MASK	# select all interrupts
	xor	a0, v1			# clear STATUS[IM]
	mtc0	zero, MIPS_COP_0_STATUS	## disable all interrupts
	INT_S	a1, CPU_INFO_CPL(a3)	## save IPL in cpu_info
	dmtc0	a2, RMIXL_COP_0_EIMR	## set new EIMR (everything masked)
	mtc0	a0, MIPS_COP_0_STATUS	## set new STATUS
	nop				# XXXXX
#ifdef PARANOIA
	j	ra			# return
	nop
#endif /* PARANOIA */
1:
#ifdef PARANOIA
	/* already-at-IPL_HIGH path: EIMR must already be 0 */
	dmfc0	v1, RMIXL_COP_0_EIMR	# load EIMR
2:	bnez	v1, 2b			# loop forever if not 0.
	nop
#endif /* PARANOIA */
	j	ra			## return
	nop
END(_splsw_splhigh)
254 1.1.2.1 cliff
/* splddb(): raise IPL to IPL_DDB; previous IPL comes back in v0. */
STATIC_LEAF(_splsw_splddb)
	li	a1, IPL_DDB		# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_DDB(v1)	# a0 = enable bits for IPL_DDB
	b	_splraise		# shared raise tail
	nop
END(_splsw_splddb)
261 1.1.2.3 cliff
/* splsched(): raise IPL to IPL_SCHED; previous IPL comes back in v0. */
STATIC_LEAF(_splsw_splsched)
	li	a1, IPL_SCHED		# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_SCHED(v1)	# a0 = enable bits for IPL_SCHED
	b	_splraise		# shared raise tail
	nop
END(_splsw_splsched)
268 1.1.2.1 cliff
/* splvm(): raise IPL to IPL_VM; previous IPL comes back in v0. */
STATIC_LEAF(_splsw_splvm)
	li	a1, IPL_VM		# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_VM(v1)	# a0 = enable bits for IPL_VM
	b	_splraise		# shared raise tail
	nop
END(_splsw_splvm)
275 1.1.2.1 cliff
/* splsoftserial(): raise IPL to IPL_SOFTSERIAL; previous IPL in v0. */
STATIC_LEAF(_splsw_splsoftserial)
	li	a1, IPL_SOFTSERIAL	# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_SOFTSERIAL(v1) # a0 = enable bits for IPL_SOFTSERIAL
	b	_splraise		# shared raise tail
	nop
END(_splsw_splsoftserial)
282 1.1.2.1 cliff
/* splsoftnet(): raise IPL to IPL_SOFTNET; previous IPL comes back in v0. */
STATIC_LEAF(_splsw_splsoftnet)
	li	a1, IPL_SOFTNET		# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_SOFTNET(v1)	# a0 = enable bits for IPL_SOFTNET
	b	_splraise		# shared raise tail
	nop
END(_splsw_splsoftnet)
289 1.1.2.1 cliff
/* splsoftbio(): raise IPL to IPL_SOFTBIO; previous IPL comes back in v0. */
STATIC_LEAF(_splsw_splsoftbio)
	li	a1, IPL_SOFTBIO		# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_SOFTBIO(v1)	# a0 = enable bits for IPL_SOFTBIO
	b	_splraise		# shared raise tail
	nop
END(_splsw_splsoftbio)
296 1.1.2.1 cliff
/* splsoftclock(): raise IPL to IPL_SOFTCLOCK; previous IPL in v0. */
STATIC_LEAF(_splsw_splsoftclock)
	li	a1, IPL_SOFTCLOCK	# a1 = target IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map) # v1 = EIMR enable table
	REG_L	a0, 8*IPL_SOFTCLOCK(v1)	# a0 = enable bits for IPL_SOFTCLOCK
	b	_splraise		# shared raise tail
	nop
END(_splsw_splsoftclock)
303 1.1.2.1 cliff
/*
 * _splsw_splintr: scan the pending (EIRR) hard interrupts against the
 * per-IPL enable sets and determine the highest IPL with work pending.
 * In:	a0 = pointer to a word receiving a synthesized CP0-STATUS-style
 *	     'IM' pending mask for the caller
 * Out:	v0 = highest pending IPL (IPL_NONE if nothing is pending)
 */
STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, RMIXL_COP_0_EIRR	# get active interrupts
					# restrict to hard int bits:
	and	v1, ta1, RMIXL_SOFT_INT_MASK # v1 = ta1 & ~RMIXL_SOFT_INT_MASK
	xor	v1, ta1			#  "    " (and+xor == andnot)

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)		# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2			# apply to pending bits
	beq	v1, zero, 4f		# if nothing pending...
	nop				# ... return IPL_NONE

	/*
	 * Climb the table starting at IPL_VM: as long as the pending set
	 * still intersects the enable set of the next level, that level
	 * cannot mask it, so the pending IPL is higher.
	 */
	li	v0, IPL_VM		# ipl = IPL_VM
1:
	REG_L	ta2, (ta3)		# load 'enabled' bits for ipl
	and	ta2, v1			# any match to pending intrs?
	beq	ta2, zero, 2f		# no, return ipl
	PTR_ADDI ta3, 1 << MAP_SCALESHIFT # point to next entry
	addiu	v0, 1			# ipl++
	move	v1, ta2			# update highest pending
	b	1b			# loop
					# NOTE(review): no nop after this
					# "b", unlike every other branch in
					# this file -- presumably the
					# assembler's delay-slot handling
					# makes this safe; confirm.
2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit
	 *   the RMI evbmips_iointr function will sort through
	 *   individial EIRR requests
	 */
	li	t2, RMIXL_INT_MASK_5	# load RMIXL_INT_MASK_5
	and	t1, v1, t2		# save count/compare intr request value
	nor	t0, zero, t2		# invert the mask
	and	v1, t0			# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f		# no non-clock intrs? skip ahead
	li	v1, RMIXL_INT_MASK_1	# use INT_MASK_1 as 'summary' bit
					# for non-clock hw intrs
3:
	or	v1, t1			# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT	# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)		# set a (fake) new pending mask
	j	ra			# and return highest ipl pending
	nop
END(_splsw_splintr)
350 1.1.2.1 cliff
/*
 * _splsw_splcheck: PARANOIA-only consistency check.
 * Verifies that the current EIMR equals ipl_eimr_map[cpl]; on a
 * mismatch it spins forever so the inconsistency can be caught in a
 * debugger.  Compiles to a bare "j ra" without PARANOIA.
 */
STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)	# get current priority level

	dmfc0	t0, RMIXL_COP_0_EIMR	# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT	# shift cpl to array index
	PTR_ADDU t2, t1
	REG_L	t3, (t2)		# load value
1:	bne	t0, t3, 1b		# loop forever if not equal
	nop
#endif /* PARANOIA */
	j	ra
	nop
END(_splsw_splcheck)
368 1.1.2.1 cliff
/*
 * rmixl_splsw: the spl switch -- function-pointer table exported to
 * the mips interrupt machdep code.
 * NOTE(review): entry order presumably must match 'struct splsw';
 * verify against the mips intr headers before reordering.  There is
 * no _splsw_splddb entry here even though it is defined above --
 * apparently it is referenced by other means.
 */
	.rdata
	.globl	_C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)
388