/* $NetBSD: rmixl_spl.S,v 1.2 2011/02/20 07:48:37 matt Exp $ */
2 1.2 matt
3 1.2 matt /*-
4 1.2 matt * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
5 1.2 matt * All rights reserved.
6 1.2 matt *
7 1.2 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.2 matt * by Matt Thomas <matt (at) 3am-software.com>.
9 1.2 matt *
10 1.2 matt * Redistribution and use in source and binary forms, with or without
11 1.2 matt * modification, are permitted provided that the following conditions
12 1.2 matt * are met:
13 1.2 matt * 1. Redistributions of source code must retain the above copyright
14 1.2 matt * notice, this list of conditions and the following disclaimer.
15 1.2 matt * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 matt * notice, this list of conditions and the following disclaimer in the
17 1.2 matt * documentation and/or other materials provided with the distribution.
18 1.2 matt *
19 1.2 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 matt * POSSIBILITY OF SUCH DAMAGE.
30 1.2 matt */
31 1.2 matt
32 1.2 matt #include "opt_cputype.h" /* which mips CPU levels do we support? */
33 1.2 matt
34 1.2 matt #include <sys/cdefs.h>
35 1.2 matt
36 1.2 matt #include <machine/param.h>
37 1.2 matt #include <mips/asm.h>
38 1.2 matt #include <mips/cpuregs.h>
39 1.2 matt
40 1.2 matt RCSID("$NetBSD: rmixl_spl.S,v 1.2 2011/02/20 07:48:37 matt Exp $");
41 1.2 matt
42 1.2 matt #include "assym.h"
43 1.2 matt
44 1.2 matt
/* ipl_eimr_map entries are 64-bit dwords: byte offset = IPL << MAP_SCALESHIFT */
#define	MAP_SCALESHIFT		3
/*
 * EIRR/EIMR bit positions are the CP0 CAUSE/STATUS interrupt bits
 * shifted right by 8 (the RMI extended regs start at bit 0, not bit 8).
 */
#define	RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define	RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define	RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define	RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
/* RMI extended interrupt request/mask registers: CP0 register 9, selects 6/7 */
#define	RMIXL_COP_0_EIRR	_(9), 6
#define	RMIXL_COP_0_EIMR	_(9), 7

	.set	noreorder		/* branch delay slots are scheduled by hand */
54 1.2 matt
/*
 * Array of masks of bits to set in the EIMR when we go to a
 * given hardware interrupt priority level; indexed by IPL.
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant.
 * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */
	.text

/*
 * void rmixl_spl_init_cpu(void)
 *
 * Initialize cp0 interrupt control for this cpu:
 * - set STATUS[IE]
 * - clear EIRR and EIMR
 * On return, all interrupts are disabled by EIMR.
 *
 * Henceforth STATUS[IE] is expected to remain normally set
 * but may be cleared and restored for temporary interrupt disablement.
 *
 * Call before the first call to spl0 on this cpu.
 * Clobbers: t0.
 */
LEAF_NOPROFILE(rmixl_spl_init_cpu)
	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
	ori	t0, MIPS_SR_INT_IE		# set IE for later restore
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	dmtc0	zero, RMIXL_COP_0_EIMR		## "    "    "   "  EIMR
	dmtc0	zero, RMIXL_COP_0_EIRR		## clear EIRR
	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE
	j	ra
	 nop					# branch delay slot
END(rmixl_spl_init_cpu)
97 1.2 matt
/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for spl(9) kernel interface.
 *
 * _splraise: common tail for all spl-raise entry points.
 *	a0 = EIMR bits to be set for this IPL
 *	a1 = this IPL (IPL_*)
 * Returns previous IPL in v0.  Can only use a0-a3 and v0-v1.
 *
 * FIX: the early "j ra" was guarded by #ifdef PARANOIA, which made the
 * PARANOIA verification loop below unreachable whenever PARANOIA was
 * defined.  The guard is inverted to #ifndef so the non-paranoid build
 * returns early and the paranoid build falls through into the check.
 */
_splraise:
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)	# get current IPL from cpu_info
	sltu	v1, a1, v0		# newipl < curipl
	bnez	v1, 2f			# yes, don't change.
	 nop
	dmtc0	zero, RMIXL_COP_0_EIMR	## disable all interrupts
	PTR_L	a3, L_CPU(MIPS_CURLWP)	## reload L_CPU in case we were
					## preempted and moved...
	INT_S	a1, CPU_INFO_CPL(a3)	## save IPL in cpu_info
	dmtc0	a0, RMIXL_COP_0_EIMR	## set new EIMR
#ifndef PARANOIA
	j	ra
	 nop
#endif /* !PARANOIA */
#ifdef PARANOIA
	dmfc0	v0, RMIXL_COP_0_EIMR	# get EIMR
1:	bne	a0, v0, 1b		# loop forever if not equal
	 nop
#endif /* PARANOIA */
2:	j	ra
	 nop
130 1.2 matt
/*
 * _splsw_splx(a0 = new IPL): lower (never raise) the current IPL.
 * Loads the EIMR enable mask for the new IPL from ipl_eimr_map,
 * stores the new cpl, and programs EIMR with interrupts masked
 * around the update.
 *
 * FIX: the early "j ra" was guarded by #ifdef PARANOIA, which made the
 * PARANOIA verification at label 2 unreachable on the lowering path when
 * PARANOIA was defined.  Inverted to #ifndef: the paranoid build now
 * falls through into the EIMR-vs-cpl consistency check.
 */
STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_L	a3, L_CPU(MIPS_CURLWP)		# get cpu_info
	INT_L	a2, CPU_INFO_CPL(a3)		# get IPL from cpu_info
	beq	a0, a2, 2f			# if same, nothing to do
	 nop
#ifdef PARANOIA
	sltu	v0, a0, a2			# v0 = a0 < a2
99:	beqz	v0, 99b				# loop forever if false
	 nop					# (splx must only lower)
#endif /* PARANOIA */
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL
1:
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
#ifndef PARANOIA
	j	ra
	 nop
#endif /* !PARANOIA */
2:
#ifdef PARANOIA
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	a1, (v1)			# load EIMR bits for this IPL
	dmfc0	v1, RMIXL_COP_0_EIMR		# get EIMR
3:	bne	a1, v1, 3b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splx)
167 1.2 matt
/*
 * _splsw_spl0: drop to IPL_NONE.
 * Stores cpl = 0 and programs EIMR with the IPL_NONE enable mask,
 * with all interrupts masked around the update.
 */
STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## set ipl to 0
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	j	ra
	 nop
END(_splsw_spl0)
181 1.2 matt
/*
 * rmixl_spln(a0 = IPL): return (in v0) the EIMR enable mask for the
 * given IPL from ipl_eimr_map, without changing the current IPL.
 * Clobbers: a2, v1.
 */
LEAF_NOPROFILE(rmixl_spln)
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v0, (v1)			# load EIMR bits for this IPL
	j	ra
	 nop
END(rmixl_spln)
190 1.2 matt
/*
 * _splsw_setsoftintr(a0 = CAUSE soft-int bit(s)):
 * set soft interrupt request bits in the CAUSE register.
 * EIMR is zeroed around the read-modify-write of CAUSE so no interrupt
 * can intervene, then restored.
 */
STATIC_LEAF(_splsw_setsoftintr)
	dmfc0	v1, RMIXL_COP_0_EIMR	# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR	## disable all interrupts
	mfc0	v0, MIPS_COP_0_CAUSE	## load cause register
	or	v0, v0, a0		## set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE	## store back
	dmtc0	v1, RMIXL_COP_0_EIMR	## restore EIMR
	j	ra
	 nop
END(_splsw_setsoftintr)
201 1.2 matt
/*
 * _splsw_clrsoftintr(a0 = CAUSE soft-int bit(s)):
 * clear soft interrupt request bits in the CAUSE register.
 * EIMR is zeroed around the read-modify-write of CAUSE so no interrupt
 * can intervene, then restored.
 */
STATIC_LEAF(_splsw_clrsoftintr)
	dmfc0	v1, RMIXL_COP_0_EIMR	# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR	## disable all interrupts
	mfc0	v0, MIPS_COP_0_CAUSE	## load cause register
	nor	a0, zero, a0		## a0 = ~a0
	and	v0, v0, a0		## clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE	## store back
	dmtc0	v1, RMIXL_COP_0_EIMR	## restore EIMR
	j	ra
	 nop
END(_splsw_clrsoftintr)
213 1.2 matt
/*
 * _splsw_splraise(a0 = IPL): raise to an arbitrary IPL.
 * Looks up the EIMR mask for the IPL and tail-branches to the common
 * _splraise path; previous IPL is returned in v0 by _splraise.
 */
STATIC_LEAF(_splsw_splraise)
	move	a1, a0				# a1 = requested IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# table of per-IPL EIMR masks
	sll	a2, a0, MAP_SCALESHIFT		# scale IPL to dword offset
	PTR_ADDU v1, a2
	REG_L	a0, (v1)			# a0 = EIMR bits for this IPL
	b	_splraise			# common raise path
	 nop
END(_splsw_splraise)
223 1.2 matt
/*
 * _splsw_splhigh: raise to IPL_HIGH.  Returns previous IPL in v0.
 * At IPL_HIGH the EIMR is left all-zero: interrupts remain disabled.
 *
 * FIX: the early "j ra" was guarded by #ifdef PARANOIA, which made the
 * PARANOIA check (EIMR must be 0 at IPL_HIGH) unreachable on the raising
 * path when PARANOIA was defined.  Inverted to #ifndef so the paranoid
 * build falls through into the check.
 */
STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)	# get current IPL from cpu_info
	li	a1, IPL_HIGH		#
	beq	v0, a1, 1f		# don't do anything if IPL_HIGH
	 nop
	dmtc0	zero, RMIXL_COP_0_EIMR	## disable all interrupts
	PTR_L	a3, L_CPU(MIPS_CURLWP)	## reload L_CPU in case we were
					## preempted and moved...
	INT_S	a1, CPU_INFO_CPL(a3)	## save IPL in cpu_info
					## interrupts remain disabled!
#ifndef PARANOIA
	j	ra			# return
	 nop
#endif /* !PARANOIA */
1:
#ifdef PARANOIA
	dmfc0	v1, RMIXL_COP_0_EIMR	# load EIMR
2:	bnez	v1, 2b			# loop forever if not 0.
	 nop
#endif /* PARANOIA */
	j	ra			## return
	 nop
END(_splsw_splhigh)
249 1.2 matt
/* Raise to IPL_DDB via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splddb)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB	# EIMR mask for IPL_DDB
	li	a1, IPL_DDB
	b	_splraise
	 nop
END(_splsw_splddb)
256 1.2 matt
/* Raise to IPL_SCHED via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splsched)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED # EIMR mask for IPL_SCHED
	li	a1, IPL_SCHED
	b	_splraise
	 nop
END(_splsw_splsched)
263 1.2 matt
/* Raise to IPL_VM via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splvm)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM	# EIMR mask for IPL_VM
	li	a1, IPL_VM
	b	_splraise
	 nop
END(_splsw_splvm)
270 1.2 matt
/* Raise to IPL_SOFTSERIAL via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splsoftserial)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL # EIMR mask
	li	a1, IPL_SOFTSERIAL
	b	_splraise
	 nop
END(_splsw_splsoftserial)
277 1.2 matt
/* Raise to IPL_SOFTNET via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splsoftnet)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET # EIMR mask
	li	a1, IPL_SOFTNET
	b	_splraise
	 nop
END(_splsw_splsoftnet)
284 1.2 matt
/* Raise to IPL_SOFTBIO via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splsoftbio)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO # EIMR mask
	li	a1, IPL_SOFTBIO
	b	_splraise
	 nop
END(_splsw_splsoftbio)
291 1.2 matt
/* Raise to IPL_SOFTCLOCK via the common _splraise path; returns old IPL in v0. */
STATIC_LEAF(_splsw_splsoftclock)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK # EIMR mask
	li	a1, IPL_SOFTCLOCK
	b	_splraise
	 nop
END(_splsw_splsoftclock)
298 1.2 matt
/*
 * _splsw_splintr(a0 = &pending):
 * Compute the highest IPL that has a pending, enabled hardware interrupt.
 * Returns that IPL in v0 (IPL_NONE if nothing pending) and stores an
 * emulated CP0_SR 'IM'-style pending mask at *a0 for the generic
 * MIPS interrupt dispatch code.
 */
STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, RMIXL_COP_0_EIRR		# get active interrupts
	# restrict to hard int bits (strip the two softint bits):
	and	v1, ta1, RMIXL_SOFT_INT_MASK	# v1 = ta1 & RMIXL_SOFT_INT_MASK
	xor	v1, ta1				# v1 = ta1 & ~RMIXL_SOFT_INT_MASK

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)		# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2			# apply to pending bits
	beq	v1, zero, 4f		# if nothing pending...
	 nop				# ... return IPL_NONE

	li	v0, IPL_VM		# ipl=IPL_VM
1:
	/* walk up the IPLs while pending bits remain enabled at that level */
	REG_L	ta2, (ta3)		# load 'enabled' bits for ipl
	and	ta2, v1			# any match to pending intrs?
	beq	ta2, zero, 2f		# no, return ipl
	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT # (delay slot) point to next entry
	addiu	v0, 1			# ipl++
	move	v1, ta2			# update highest pending
	b	1b			# loop
	 nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit;
	 *   the RMI evbmips_iointr function will sort through
	 *   individual EIRR requests
	 *
	 * NOTE(review): the "li v1, RMIXL_INT_MASK_1" below sits in the
	 * delay slot of the beq, so it executes even when the branch is
	 * taken and both paths land at 3: — i.e. the summary bit appears
	 * to be set even when only the clock interrupt is pending.
	 * Presumably a spurious summary dispatch is harmless; verify intent.
	 */
	li	t2, RMIXL_INT_MASK_5	# load RMIXL_INT_MASK_5
	and	t1, v1, t2		# save count/compare intr request value
	nor	t0, zero, t2		# invert the mask
	and	v1, t0			# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f		# no non-clock intrs? skip ahead
	 li	v1, RMIXL_INT_MASK_1	# (delay slot) use INT_MASK_1 as
					# 'summary' bit for non-clock hw intrs
3:
	or	v1, t1			# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT	# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)		# set a (fake) new pending mask
	j	ra			# and return highest ipl pending
	 nop
END(_splsw_splintr)
346 1.2 matt
/*
 * _splsw_splcheck: debugging aid.
 * With PARANOIA, verify that the current EIMR contents match the
 * ipl_eimr_map entry for the current cpl; spin forever on mismatch
 * so the inconsistency is caught.  No-op otherwise.
 * Clobbers (PARANOIA only): t0-t3.
 */
STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)	# get current priority level

	dmfc0	t0, RMIXL_COP_0_EIMR	# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT	# shift cpl to array index
	PTR_ADDU t2, t1
	REG_L	t3, (t2)		# load expected EIMR value
1:	bne	t0, t3, 1b		# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splcheck)
364 1.2 matt
/*
 * rmixl_splsw: table of spl backend entry points handed to the MIPS
 * common code.  NOTE(review): entry order must match the generic
 * splsw structure layout — confirm against <mips/intr.h> before
 * reordering.
 */
	.rdata
	.globl	_C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)
384