/*	$NetBSD: rmixl_spl.S,v 1.1.2.4 2010/05/28 22:14:53 cliff Exp $	*/
2
/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt (at) 3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31
#include "opt_cputype.h"	/* which mips CPU levels do we support? */

#include <sys/cdefs.h>

#include <machine/param.h>
#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_spl.S,v 1.1.2.4 2010/05/28 22:14:53 cliff Exp $");

#include "assym.h"	/* assembler-visible struct offsets (L_CPU, CPU_INFO_CPL, ...) */
43
44
/*
 * The RMI XLR/XLS interrupt controller supplements the classic MIPS
 * CAUSE[IP]/STATUS[IM] scheme with the 64-bit EIRR (request) and EIMR
 * (mask) registers, reached as CP0 register 9, selects 6 and 7.
 * The classic soft/hard interrupt bits of CAUSE/STATUS appear in
 * EIRR/EIMR shifted right by 8 (i.e. the soft int bits land at bit 0).
 */
#define	MAP_SCALESHIFT		3		/* ipl_eimr_map entries are 8 bytes */
#define	RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define	RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define	RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define	RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)	/* count/compare (clock) intr */
#define	RMIXL_COP_0_EIRR	_(9), 6		/* extended interrupt request reg */
#define	RMIXL_COP_0_EIMR	_(9), 7		/* extended interrupt mask reg */
52
53
/*
 * ipl_eimr_map[ipl]: the EIMR value (set of interrupts left enabled) for
 * each hardware interrupt priority level.  Indexed by IPL, one 8-byte
 * entry per level (scaled by 1 << MAP_SCALESHIFT).
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant.
 * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish
 * (the entries from IPL_SOFT{NET,SERIAL} up start out empty here).
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */
73 .text
74
/*
 * void rmixl_spl_init_cpu(void);
 *
 * initialize cp0 interrupt control for this cpu
 * - set STATUS[IE]
 * - clear EIRR and EIMR
 * on return, all interrupts are disabled by EIMR
 *
 * henceforth STATUS[IE] is expected to remain normally set
 * but may be cleared and restored for temporary interrupt disablement
 *
 * call before the first call to spl0 on this cpu
 *
 * ("##" comments throughout this file mark instructions that execute
 * while interrupts are masked.)
 */
LEAF_NOPROFILE(rmixl_spl_init_cpu)
	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
	ori	t0, MIPS_SR_INT_IE		# set IE in the saved copy
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all ints in EIMR
	dmtc0	zero, RMIXL_COP_0_EIRR		## clear EIRR (no pending requests)
	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE; EIMR still masks all
	j	ra
	nop					# branch delay slot
END(rmixl_spl_init_cpu)
96
/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for spl(9) kernel interface.
 */

/*
 * _splraise: common tail for the splraise-style entry points below.
 *
 * a0 = EIMR bits to be set for this IPL (i.e. ipl_eimr_map[a1])
 * a1 = this IPL (IPL_*)
 * returns v0 = previous IPL (loaded from cpu_info before any change)
 * Can only use a0-a3 and v0-v1 (reached by a plain branch, no frame).
 *
 * The IPL is only ever raised: if the current IPL is already >= a1,
 * nothing is changed.
 */
_splraise:
	PTR_L	a3, L_CPU(MIPS_CURLWP)		# a3 = curlwp's cpu_info
	INT_L	v0, CPU_INFO_CPL(a3)		# get current IPL from cpu_info
	sltu	v1, a1, v0			# newipl < curipl?
	bnez	v1, 2f				# yes, don't change.
	nop					# branch delay slot
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a0, RMIXL_COP_0_EIMR		## set new EIMR
#ifdef PARANOIA
	j	ra
	nop
#endif /* PARANOIA */
#ifdef PARANOIA
	/*
	 * NOTE(review): this verification block is unreachable in every
	 * configuration -- with PARANOIA the unconditional "j ra" above
	 * returns first, and without PARANOIA neither block is compiled.
	 * It would also clobber the v0 return value if it ever ran.
	 * Presumably the preceding conditional was meant to be
	 * "#ifndef PARANOIA"; confirm against upstream history.
	 */
	dmfc0	v0, RMIXL_COP_0_EIMR		# get EIMR
1:	bne	a0, v0, 1b			# loop forever if not equal
	nop
#endif /* PARANOIA */
2:	j	ra
	nop
127
/*
 * void _splsw_splx(int a0);
 *
 * Lower the current IPL to a0 and install the matching EIMR value from
 * ipl_eimr_map.  No-op when the IPL is unchanged.  Under PARANOIA,
 * hangs (loops forever) if asked to raise, or if the live EIMR does not
 * match the table on the no-change path.
 */
STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_L	a3, L_CPU(MIPS_CURLWP)		# get cpu_info
	INT_L	a2, CPU_INFO_CPL(a3)		# get IPL from cpu_info
	beq	a0, a2, 2f			# if same, nothing to do
	nop					# branch delay slot
#ifdef PARANOIA
	sltu	v0, a0, a2			# v0 = a0 < a2 (must be lowering)
99:	beqz	v0, 99b				# loop forever if false
	nop
#endif /* PARANOIA */
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL
1:						# NOTE(review): label appears unused
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
#ifdef PARANOIA
	j	ra				# PARANOIA: return here; the check
	nop					#  below serves the no-change path
#endif /* PARANOIA */
2:
#ifdef PARANOIA
	/* verify the live EIMR matches ipl_eimr_map[a0]; hang if not */
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	a1, (v1)			# load EIMR bits for this IPL
	dmfc0	v1, RMIXL_COP_0_EIMR		# get EIMR
3:	bne	a1, v1, 3b			# loop forever if not equal
	nop
#endif /* PARANOIA */
	j	ra
	nop
END(_splsw_splx)
164
/*
 * void _splsw_spl0(void);
 *
 * Drop to IPL_NONE: store cpl = 0 and install ipl_eimr_map[IPL_NONE]
 * (the softint bits) into EIMR.  All interrupts are masked around the
 * cpl/EIMR update.
 */
STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## set ipl to 0
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	j	ra
	nop
END(_splsw_spl0)
178
/*
 * rmixl_spln(int a0): pure lookup helper.
 * Returns ipl_eimr_map[a0] (the EIMR bits for the given IPL) in v0
 * without changing any interrupt state.
 */
LEAF_NOPROFILE(rmixl_spln)
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v0, (v1)			# load EIMR bits for this IPL
	j	ra
	nop
END(rmixl_spln)
187
/*
 * void _splsw_setsoftintr(a0);
 *
 * Post the soft interrupt bit(s) in a0 by OR-ing them into CAUSE.
 * EIMR is zeroed around the read-modify-write so the update cannot be
 * torn by an interrupt, then restored to its saved value.
 */
STATIC_LEAF(_splsw_setsoftintr)
	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	mfc0	v0, MIPS_COP_0_CAUSE		## load cause register
	or	v0, v0, a0			## set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		## store back
	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
	j	ra
	nop
END(_splsw_setsoftintr)
198
/*
 * void _splsw_clrsoftintr(a0);
 *
 * Clear the soft interrupt bit(s) in a0 from CAUSE.  As in
 * _splsw_setsoftintr, EIMR is zeroed around the read-modify-write,
 * then restored.
 */
STATIC_LEAF(_splsw_clrsoftintr)
	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	mfc0	v0, MIPS_COP_0_CAUSE		## load cause register
	nor	a0, zero, a0			## a0 = ~a0
	and	v0, v0, a0			## clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		## store back
	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
	j	ra
	nop
END(_splsw_clrsoftintr)
210
/*
 * _splsw_splraise(int a0): raise to IPL a0.
 * Loads ipl_eimr_map[a0] and tail-branches to _splraise, which returns
 * the previous IPL in v0.
 */
STATIC_LEAF(_splsw_splraise)
	move	a1, a0				# a1 = new IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	a0, (v1)			# a0 = EIMR bits for this IPL
	b	_splraise			# common tail; returns via ra
	nop
END(_splsw_splraise)
220
/*
 * _splsw_splhigh(void): raise to IPL_HIGH; previous IPL returned in v0.
 *
 * EIMR is zeroed and left that way, so every interrupt stays masked
 * until the next splx/spl0 lowers the level.  Under PARANOIA, when the
 * cpu is already at IPL_HIGH, hang if EIMR is not 0.
 */
STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)		# get current IPL from cpu_info
	li	a1, IPL_HIGH			#
	beq	v0, a1, 1f			# don't do anything if IPL_HIGH
	nop					# branch delay slot
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
						## interrupts remain disabled!
#ifdef PARANOIA
	j	ra				# return
	nop
#endif /* PARANOIA */
1:
#ifdef PARANOIA
	/* already at IPL_HIGH: verify EIMR is 0; hang here if not */
	dmfc0	v1, RMIXL_COP_0_EIMR		# load EIMR
2:	bnez	v1, 2b				# loop forever if not 0.
	nop
#endif /* PARANOIA */
	j	ra				## return
	nop
END(_splsw_splhigh)
244
/* raise to IPL_DDB via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splddb)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB	# EIMR bits for IPL_DDB
	li	a1, IPL_DDB
	b	_splraise
	nop
END(_splsw_splddb)
251
/* raise to IPL_SCHED via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splsched)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED	# EIMR bits for IPL_SCHED
	li	a1, IPL_SCHED
	b	_splraise
	nop
END(_splsw_splsched)
258
/* raise to IPL_VM via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splvm)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM	# EIMR bits for IPL_VM
	li	a1, IPL_VM
	b	_splraise
	nop
END(_splsw_splvm)
265
/* raise to IPL_SOFTSERIAL via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splsoftserial)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL	# EIMR bits for IPL_SOFTSERIAL
	li	a1, IPL_SOFTSERIAL
	b	_splraise
	nop
END(_splsw_splsoftserial)
272
/* raise to IPL_SOFTNET via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splsoftnet)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET	# EIMR bits for IPL_SOFTNET
	li	a1, IPL_SOFTNET
	b	_splraise
	nop
END(_splsw_splsoftnet)
279
/* raise to IPL_SOFTBIO via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splsoftbio)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO	# EIMR bits for IPL_SOFTBIO
	li	a1, IPL_SOFTBIO
	b	_splraise
	nop
END(_splsw_splsoftbio)
286
/* raise to IPL_SOFTCLOCK via _splraise; previous IPL returned in v0 */
STATIC_LEAF(_splsw_splsoftclock)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK	# EIMR bits for IPL_SOFTCLOCK
	li	a1, IPL_SOFTCLOCK
	b	_splraise
	nop
END(_splsw_splsoftclock)
293
/*
 * _splsw_splintr(a0 = pointer to pending-mask word):
 *
 * Determine the highest IPL at which a pending hard interrupt should be
 * dispatched.  Returns that IPL in v0 (IPL_NONE if nothing relevant is
 * pending) and stores at (a0) a fake CAUSE[IM]-style pending mask for
 * the generic MIPS interrupt code (see the emulation comment below).
 */
STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, RMIXL_COP_0_EIRR		# get active interrupts
	# restrict to hard int bits (and+xor clears the soft bits):
	and	v1, ta1, RMIXL_SOFT_INT_MASK	# v1 = ta1 & RMIXL_SOFT_INT_MASK
	xor	v1, ta1				# v1 = ta1 & ~RMIXL_SOFT_INT_MASK

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2				# apply to pending bits
	beq	v1, zero, 4f			# if nothing pending...
	nop					#  ... return IPL_NONE

	# scan upward: while the pending bits are still enabled at the
	# next higher IPL, that level does not block them, so keep raising.
	li	v0, IPL_VM			# ipl=IPL_VM
1:
	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
	and	ta2, v1				# any match to pending intrs?
	beq	ta2, zero, 2f			# no, return ipl
	PTR_ADDI ta3, 1 << MAP_SCALESHIFT	# (delay slot) point to next entry
	addiu	v0, 1				# ipl++
	move	v1, ta2				# update highest pending
	b	1b				# loop
	nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit
	 *   the RMI evbmips_iointr function will sort through
	 *   individual EIRR requests
	 */
	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
	and	t1, v1, t2			# save count/compare intr request value
	nor	t0, zero, t2			# invert the mask
	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
	li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
						# for non-clock hw intrs
	/*
	 * NOTE(review): the li above sits in the beq delay slot, so it
	 * executes even when the branch is taken (clock-only case),
	 * apparently setting the non-clock summary bit spuriously.
	 * Confirm intent / upstream fix before relying on the stored mask.
	 */
3:
	or	v1, t1				# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)			# set a (fake) new pending mask
	j	ra				# and return highest ipl pending
	nop
END(_splsw_splintr)
341
/*
 * void _splsw_splcheck(void);
 *
 * PARANOIA-only consistency check: hang (loop forever) if the live EIMR
 * does not match ipl_eimr_map[current cpl].  Compiles to a bare return
 * in non-PARANOIA kernels.
 */
STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level

	dmfc0	t0, RMIXL_COP_0_EIMR		# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT		# shift cpl to array offset
	PTR_ADDU t2, t1
	REG_L	t3, (t2)			# load expected EIMR value
1:	bne	t0, t3, 1b			# loop forever if not equal
	nop
#endif /* PARANOIA */
	j	ra
	nop
END(_splsw_splcheck)
359
	.rdata
	.globl	_C_LABEL(rmixl_splsw)
/*
 * rmixl_splsw: spl switch vector of the entry points above, handed to
 * the generic MIPS spl(9) machinery.
 * NOTE(review): entry order must match the splsw structure consumed by
 * the MIPS interrupt code -- verify against its definition when editing.
 */
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)
379