/* $NetBSD: frame.h,v 1.20.8.1 2014/02/15 16:18:36 matt Exp $ */

/*
 * Copyright (c) 1994-1997 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * frame.h
 *
 * Stack frame structures
 *
 * Created : 30/09/94
 */

#ifndef _ARM32_FRAME_H_
#define _ARM32_FRAME_H_

#include <arm/frame.h>          /* Common ARM stack frames */

#ifndef _LOCORE

/*
 * Switch frame.
 *
 * Should be a multiple of 8 bytes for dumpsys.
 */

struct switchframe {
        u_int sf_r4;
        u_int sf_r5;
        u_int sf_r6;
        u_int sf_r7;
        u_int sf_sp;
        u_int sf_pc;
};

/*
 * Stack frame. Used during stack traces (db_trace.c)
 */
struct frame {
        u_int fr_fp;
        u_int fr_sp;
        u_int fr_lr;
        u_int fr_pc;
};
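
/*
 * Illustrative only (not compiled into the kernel): under the APCS the
 * saved frame pointer addresses the fr_pc slot, so the struct above
 * starts three words below it and a naive backtrace could walk the
 * chain as sketched here.  The function name and output format are
 * assumptions of this sketch; db_trace.c does the real work with
 * validity checks on every frame pointer.
 *
 *      void
 *      backtrace(u_int fp)
 *      {
 *              while (fp != 0) {
 *                      struct frame *f = (struct frame *)(fp - 12);
 *                      printf("fp=%08x pc=%08x lr=%08x\n",
 *                          f->fr_fp, f->fr_pc, f->fr_lr);
 *                      fp = f->fr_fp;
 *              }
 *      }
 */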

#ifdef _KERNEL
void validate_trapframe(trapframe_t *, int);
#endif /* _KERNEL */

#else /* _LOCORE */

#include "opt_compat_netbsd.h"
#include "opt_execfmt.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_arm_debug.h"
#include "opt_cputypes.h"

#include <arm/locore.h>

/*
 * This macro is used by DO_AST_AND_RESTORE_ALIGNMENT_FAULTS to process
 * any pending softints.
 */
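
/*
 * B_CF_CONTROL and BL_CF_CONTROL branch to, respectively call, the
 * cf_control entry of the cpufuncs table whose address is in rX
 * (i.e. cpu_control()), using bx/blx for interworking where the
 * architecture provides them.  They are used by the alignment-fault
 * macros below.
 */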
#ifdef _ARM_ARCH_4T
#define B_CF_CONTROL(rX) ;\
        ldr ip, [rX, #CF_CONTROL] /* get function addr */ ;\
        bx ip /* branch to cpu_control */
#else
#define B_CF_CONTROL(rX) ;\
        ldr pc, [rX, #CF_CONTROL] /* branch to cpu_control */
#endif
#ifdef _ARM_ARCH_5T
#define BL_CF_CONTROL(rX) ;\
        ldr ip, [rX, #CF_CONTROL] /* get function addr */ ;\
        blx ip /* call cpu_control */
#else
#define BL_CF_CONTROL(rX) ;\
        mov lr, pc ;\
        ldr pc, [rX, #CF_CONTROL] /* call cpu_control */
#endif
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
#define DO_PENDING_SOFTINTS \
        ldr r0, [r4, #CI_INTR_DEPTH]/* Get current intr depth */ ;\
        cmp r0, #0 /* Test for 0. */ ;\
        bne 10f /* skip softints if != 0 */ ;\
        ldr r0, [r4, #CI_CPL] /* Get current priority level */;\
        ldr r1, [r4, #CI_SOFTINTS] /* Get pending softint mask */ ;\
        lsrs r0, r1, r0 /* shift mask by cpl */ ;\
        blne _C_LABEL(dosoftints) /* dosoftints(void) */ ;\
10:
#else
#define DO_PENDING_SOFTINTS /* nothing */
#endif
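
/*
 * In rough C terms, DO_PENDING_SOFTINTS does the following (a sketch
 * only; the ci_* field names behind the CI_* assym offsets are assumed
 * here), with ci being the cpu_info that r4 points at:
 *
 *      if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) != 0)
 *              dosoftints();
 */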

#ifdef MULTIPROCESSOR
#define KERNEL_LOCK \
        mov r0, #1 ;\
        mov r1, #0 ;\
        bl _C_LABEL(_kernel_lock)

#define KERNEL_UNLOCK \
        mov r0, #1 ;\
        mov r1, #0 ;\
        mov r2, #0 ;\
        bl _C_LABEL(_kernel_unlock)
#else
#define KERNEL_LOCK /* nothing */
#define KERNEL_UNLOCK /* nothing */
#endif

#ifdef _ARM_ARCH_6
#define GET_CPSR(rb) /* nothing */
#define CPSID_I(ra,rb) cpsid i
#define CPSIE_I(ra,rb) cpsie i
#else
#define GET_CPSR(rb) \
        mrs rb, cpsr /* fetch CPSR */

#define CPSID_I(ra,rb) \
        orr ra, rb, #(IF32_bits) ;\
        msr cpsr_c, ra /* Disable interrupts */

#define CPSIE_I(ra,rb) \
        bic ra, rb, #(IF32_bits) ;\
        msr cpsr_c, ra /* Restore interrupts */
#endif

/*
 * AST_ALIGNMENT_FAULT_LOCALS and ENABLE_ALIGNMENT_FAULTS
 * These are used in order to support dynamic enabling/disabling of
 * alignment faults when executing old a.out ARM binaries.
 *
 * Note that when ENABLE_ALIGNMENT_FAULTS finishes, r4 will contain a
 * pointer to the cpu's cpu_info.  DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
 * relies on r4 being preserved.
 */
#ifdef EXEC_AOUT
#define AST_ALIGNMENT_FAULT_LOCALS \
.Laflt_cpufuncs: ;\
        .word _C_LABEL(cpufuncs)

/*
 * This macro must be invoked following PUSHFRAMEINSVC or PUSHFRAME at
 * the top of interrupt/exception handlers.
 *
 * When invoked, r0 *must* contain the value of SPSR on the current
 * trap/interrupt frame. This is always the case if ENABLE_ALIGNMENT_FAULTS
 * is invoked immediately after PUSHFRAMEINSVC or PUSHFRAME.
 */
#define ENABLE_ALIGNMENT_FAULTS \
        and r7, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\
        teq r7, #(PSR_USR32_MODE) ;\
        GET_CURCPU(r4) /* r4 = cpuinfo */ ;\
        bne 1f /* Not USR mode skip AFLT */ ;\
        ldr r1, [r4, #CI_CURLWP] /* get curlwp from cpu_info */ ;\
        ldr r1, [r1, #L_MD_FLAGS] /* Fetch l_md.md_flags */ ;\
        tst r1, #MDLWP_NOALIGNFLT ;\
        beq 1f /* AFLTs already enabled */ ;\
        ldr r2, .Laflt_cpufuncs ;\
        ldr r1, [r4, #CI_CTRL] /* Fetch control register */ ;\
        mov r0, #-1 ;\
        BL_CF_CONTROL(r2) /* Enable alignment faults */ ;\
1:      KERNEL_LOCK

/*
 * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or
 * PULLFRAME at the end of interrupt/exception handlers.  We know that
 * r4 points to cpu_info since that is what ENABLE_ALIGNMENT_FAULTS did
 * for us.  (An illustrative handler skeleton follows the end of this
 * EXEC_AOUT block.)
 */
#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
        DO_PENDING_SOFTINTS ;\
        GET_CPSR(r5) /* save CPSR */ ;\
        CPSID_I(r1, r5) /* Disable interrupts */ ;\
        teq r7, #(PSR_USR32_MODE) /* Returning to USR mode? */ ;\
        bne 3f /* Nope, get out now */ ;\
1:      ldr r1, [r4, #CI_ASTPENDING] /* Pending AST? */ ;\
        teq r1, #0x00000000 ;\
        bne 2f /* Yup. Go deal with it */ ;\
        ldr r1, [r4, #CI_CURLWP] /* get curlwp from cpu_info */ ;\
        ldr r0, [r1, #L_MD_FLAGS] /* get md_flags from lwp */ ;\
        tst r0, #MDLWP_NOALIGNFLT ;\
        beq 3f /* Keep AFLTs enabled */ ;\
        ldr r1, [r4, #CI_CTRL] /* Fetch control register */ ;\
        ldr r2, .Laflt_cpufuncs ;\
        mov r0, #-1 ;\
        bic r1, r1, #CPU_CONTROL_AFLT_ENABLE /* Disable AFLTs */ ;\
        adr lr, 3f ;\
        B_CF_CONTROL(r2) /* Set new CTRL reg value */ ;\
        /* NOTREACHED */ \
2:      mov r1, #0x00000000 ;\
        str r1, [r4, #CI_ASTPENDING] /* Clear astpending */ ;\
        CPSIE_I(r5, r5) /* Restore interrupts */ ;\
        mov r0, sp ;\
        bl _C_LABEL(ast) /* ast(frame) */ ;\
        CPSID_I(r0, r5) /* Disable interrupts */ ;\
        b 1b /* Back around again */ ;\
3:      KERNEL_UNLOCK

#else /* !EXEC_AOUT */

#define AST_ALIGNMENT_FAULT_LOCALS

#define ENABLE_ALIGNMENT_FAULTS \
        and r7, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\
        GET_CURCPU(r4) /* r4 = cpuinfo */ ;\
        KERNEL_LOCK

#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
        DO_PENDING_SOFTINTS ;\
        GET_CPSR(r5) /* save CPSR */ ;\
        CPSID_I(r1, r5) /* Disable interrupts */ ;\
        teq r7, #(PSR_USR32_MODE) ;\
        bne 2f /* Nope, get out now */ ;\
1:      ldr r1, [r4, #CI_ASTPENDING] /* Pending AST? */ ;\
        teq r1, #0x00000000 ;\
        beq 2f /* Nope. Just bail */ ;\
        mov r1, #0x00000000 ;\
        str r1, [r4, #CI_ASTPENDING] /* Clear astpending */ ;\
        CPSIE_I(r5, r5) /* Restore interrupts */ ;\
        mov r0, sp ;\
        bl _C_LABEL(ast) /* ast(frame) */ ;\
        CPSID_I(r0, r5) /* Disable interrupts */ ;\
        b 1b ;\
2:      KERNEL_UNLOCK /* unlock the kernel */
#endif /* EXEC_AOUT */
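
/*
 * Illustratively (a sketch only, not a definition the kernel uses), an
 * exception handler that is entered in a non-SVC32 mode brackets its
 * work with these macros roughly as follows.  The handler name and the
 * ASENTRY_NP/END bookkeeping are assumptions of the sketch;
 * PUSHFRAMEINSVC and PULLFRAMEFROMSVCANDEXIT are defined later in this
 * file.
 *
 *      ASENTRY_NP(some_handler)
 *              PUSHFRAMEINSVC                  @ build trapframe on SVC stack
 *              ENABLE_ALIGNMENT_FAULTS         @ r0 still holds the SPSR
 *              ... dispatch the exception ...
 *              DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
 *              PULLFRAMEFROMSVCANDEXIT
 *      END(some_handler)
 */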

#ifndef _ARM_ARCH_6
#ifdef ARM_LOCK_CAS_DEBUG
#define LOCK_CAS_DEBUG_LOCALS \
.L_lock_cas_restart: ;\
        .word _C_LABEL(_lock_cas_restart)

#if defined(__ARMEB__)
#define LOCK_CAS_DEBUG_COUNT_RESTART \
        ble 99f ;\
        ldr r0, .L_lock_cas_restart ;\
        ldmia r0, {r1-r2} /* load ev_count */ ;\
        adds r2, r2, #1 /* 64-bit incr (lo) */ ;\
        adc r1, r1, #0 /* 64-bit incr (hi) */ ;\
        stmia r0, {r1-r2} /* store ev_count */
#else /* __ARMEB__ */
#define LOCK_CAS_DEBUG_COUNT_RESTART \
        ble 99f ;\
        ldr r0, .L_lock_cas_restart ;\
        ldmia r0, {r1-r2} /* load ev_count */ ;\
        adds r1, r1, #1 /* 64-bit incr (lo) */ ;\
        adc r2, r2, #0 /* 64-bit incr (hi) */ ;\
        stmia r0, {r1-r2} /* store ev_count */
#endif /* __ARMEB__ */
#else /* ARM_LOCK_CAS_DEBUG */
#define LOCK_CAS_DEBUG_LOCALS /* nothing */
#define LOCK_CAS_DEBUG_COUNT_RESTART /* nothing */
#endif /* ARM_LOCK_CAS_DEBUG */

#define LOCK_CAS_CHECK_LOCALS \
.L_lock_cas: ;\
        .word _C_LABEL(_lock_cas) ;\
.L_lock_cas_end: ;\
        .word _C_LABEL(_lock_cas_end) ;\
        LOCK_CAS_DEBUG_LOCALS

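/*
 * LOCK_CAS_CHECK implements the restartable-atomic-sequence handling for
 * the pre-v6 _lock_cas: if the interrupted PC saved in the trapframe is
 * in SVC32 mode and lies strictly inside _lock_cas.._lock_cas_end, it is
 * wound back to the start of _lock_cas so the sequence reruns atomically.
 */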
#define LOCK_CAS_CHECK \
        ldr r0, [sp] /* get saved PSR */ ;\
        and r0, r0, #(PSR_MODE) /* check for SVC32 mode */ ;\
        teq r0, #(PSR_SVC32_MODE) ;\
        bne 99f /* nope, get out now */ ;\
        ldr r0, [sp, #(TF_PC)] ;\
        ldr r1, .L_lock_cas_end ;\
        cmp r0, r1 ;\
        bge 99f ;\
        ldr r1, .L_lock_cas ;\
        cmp r0, r1 ;\
        strgt r1, [sp, #(TF_PC)] ;\
        LOCK_CAS_DEBUG_COUNT_RESTART ;\
99:

#else
#define LOCK_CAS_CHECK /* nothing */
#define LOCK_CAS_CHECK_LOCALS /* nothing */
#endif

/*
 * ASM macros for pushing and pulling trapframes from the stack
 *
 * These macros are used to handle the trapframe structure defined in
 * <arm/frame.h>.
 */

/*
 * PUSHFRAME - macro to push a trap frame on the stack in the current mode
 * Since the current mode is used, the SVC lr field is not defined.
 */

#ifdef CPU_SA110
/*
 * NOTE: r13 and r14 are stored separately as a work around for the
 * SA110 rev 2 STM^ bug
 */
#define PUSHUSERREGS \
        stmia sp, {r0-r12}; /* Push the user mode registers */ \
        add r0, sp, #(TF_USR_SP-TF_R0); /* Adjust the stack pointer */ \
        stmia r0, {r13-r14}^ /* Push the user mode registers */
#else
#define PUSHUSERREGS \
        stmia sp, {r0-r14}^ /* Push the user mode registers */
#endif

#define PUSHFRAME \
        str lr, [sp, #-4]!; /* Push the return address */ \
        sub sp, sp, #(TF_PC-TF_R0); /* Adjust the stack pointer */ \
        PUSHUSERREGS; /* Push the user mode registers */ \
        mov r0, r0; /* NOP for previous instruction */ \
        mrs r0, spsr; /* Get the SPSR */ \
        str r0, [sp, #-TF_R0]! /* Push the SPSR on the stack */
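
/*
 * As an illustration only: a handler that is entered already in SVC32
 * mode (the SWI handler, for instance) pairs PUSHFRAME with PULLFRAME
 * (defined further down) along these lines.  The handler name and the
 * ASENTRY_NP/END bookkeeping are assumptions of the sketch.
 *
 *      ASENTRY_NP(some_svc_handler)
 *              PUSHFRAME
 *              ENABLE_ALIGNMENT_FAULTS         @ r0 holds the SPSR pushed above
 *              ... do the work ...
 *              DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
 *              PULLFRAME
 *              movs    pc, lr                  @ return from the exception
 *      END(some_svc_handler)
 */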

/*
 * Push a minimal trapframe so we can dispatch an interrupt from the
 * idle loop. The only reason the idle loop wakes up is to dispatch
 * interrupts so why take the overhead of a full exception when we can
 * do something minimal.
 */
#define PUSHIDLEFRAME \
        str lr, [sp, #-4]!; /* save SVC32 lr */ \
        str r6, [sp, #(TF_R6-TF_PC)]!; /* save callee-saved r6 */ \
        str r4, [sp, #(TF_R4-TF_R6)]!; /* save callee-saved r4 */ \
        mrs r0, cpsr; /* Get the CPSR */ \
        str r0, [sp, #(-TF_R4)]! /* Push the CPSR on the stack */

/*
 * Push a trapframe to be used by cpu_switchto
 */
#define PUSHSWITCHFRAME(rX) \
        mov ip, sp; \
        sub sp, sp, #(TRAPFRAMESIZE-TF_R12); /* Adjust the stack pointer */ \
        push {r4-r11}; /* Push the callee saved registers */ \
        sub sp, sp, #TF_R4; /* reserve rest of trapframe */ \
        str ip, [sp, #TF_SVC_SP]; \
        str lr, [sp, #TF_SVC_LR]; \
        str lr, [sp, #TF_PC]; \
        mrs rX, cpsr; /* Get the CPSR */ \
        str rX, [sp, #TF_SPSR] /* save in trapframe */

#define PUSHSWITCHFRAME1 \
        mov ip, sp; \
        sub sp, sp, #(TRAPFRAMESIZE-TF_R8); /* Adjust the stack pointer */ \
        push {r4-r7}; /* Push some of the callee saved registers */ \
        sub sp, sp, #TF_R4; /* reserve rest of trapframe */ \
        str ip, [sp, #TF_SVC_SP]; \
        str lr, [sp, #TF_SVC_LR]; \
        str lr, [sp, #TF_PC]

#if defined(_ARM_ARCH_DWORD_OK) && __ARM_EABI__
#define PUSHSWITCHFRAME2 \
        strd r10, [sp, #TF_R10]; /* save r10 & r11 */ \
        strd r8, [sp, #TF_R8]; /* save r8 & r9 */ \
        mrs r0, cpsr; /* Get the CPSR */ \
        str r0, [sp, #TF_SPSR] /* save in trapframe */
#else
#define PUSHSWITCHFRAME2 \
        add r0, sp, #TF_R8; /* get ptr to r8 and above */ \
        stmia r0, {r8-r11}; /* save rest of registers */ \
        mrs r0, cpsr; /* Get the CPSR */ \
        str r0, [sp, #TF_SPSR] /* save in trapframe */
#endif

/*
 * PULLFRAME - macro to pull a trap frame from the stack in the current mode
 * Since the current mode is used, the SVC lr field is ignored.
 */

#define PULLFRAME \
        ldr r0, [sp], #TF_R0; /* Pop the SPSR from stack */ \
        msr spsr_fsxc, r0; \
        ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
        mov r0, r0; /* NOP for previous instruction */ \
        add sp, sp, #(TF_PC-TF_R0); /* Adjust the stack pointer */ \
        ldr lr, [sp], #0x0004 /* Pop the return address */

#define PULLIDLEFRAME \
        add sp, sp, #TF_R4; /* Adjust the stack pointer */ \
        ldr r4, [sp], #(TF_R6-TF_R4); /* restore callee-saved r4 */ \
        ldr r6, [sp], #(TF_PC-TF_R6); /* restore callee-saved r6 */ \
        ldr lr, [sp], #4 /* Pop the return address */

/*
 * Pop a trapframe to be used by cpu_switchto (don't touch r0 & r1).
 */
#define PULLSWITCHFRAME \
        add sp, sp, #TF_R4; /* Adjust the stack pointer */ \
        pop {r4-r11}; /* pop the callee saved registers */ \
        add sp, sp, #(TF_PC-TF_R12); /* Adjust the stack pointer */ \
        ldr lr, [sp], #4; /* pop the return address */

/*
 * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
 * This should only be used if the processor is not currently in SVC32
 * mode. The processor mode is switched to SVC mode and the trap frame is
 * stored. The SVC lr field is used to store the previous value of
 * lr in SVC mode.
 *
 * NOTE: r13 and r14 are stored separately as a work around for the
 * SA110 rev 2 STM^ bug
 */

#ifdef _ARM_ARCH_6
#define SET_CPSR_MODE(tmp, mode) \
        cps #(mode)
#else
#define SET_CPSR_MODE(tmp, mode) \
        mrs tmp, cpsr; /* Get the CPSR */ \
        bic tmp, tmp, #(PSR_MODE); /* Fix for SVC mode */ \
        orr tmp, tmp, #(mode); \
        msr cpsr_c, tmp /* Punch into SVC mode */
#endif

#define PUSHFRAMEINSVC \
        stmdb sp, {r0-r3}; /* Save 4 registers */ \
        mov r0, lr; /* Save xxx32 r14 */ \
        mov r1, sp; /* Save xxx32 sp */ \
        mrs r3, spsr; /* Save xxx32 spsr */ \
        SET_CPSR_MODE(r2, PSR_SVC32_MODE); \
        bic r2, sp, #7; /* Align new SVC sp */ \
        str r0, [r2, #-4]!; /* Push return address */ \
        stmdb r2!, {sp, lr}; /* Push SVC sp, lr */ \
        mov sp, r2; /* Keep stack aligned */ \
        msr spsr_fsxc, r3; /* Restore correct spsr */ \
        ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
        sub sp, sp, #(TF_SVC_SP-TF_R0); /* Adjust the stack pointer */ \
        PUSHUSERREGS; /* Push the user mode registers */ \
        mov r0, r0; /* NOP for previous instruction */ \
        mrs r0, spsr; /* Get the SPSR */ \
        str r0, [sp, #-TF_R0]! /* Push the SPSR onto the stack */

/*
 * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
 * in SVC32 mode and restore the saved processor mode and PC.
 * This should be used when the SVC lr register needs to be restored on
 * exit.
 */

#define PULLFRAMEFROMSVCANDEXIT \
        ldr r0, [sp], #0x0008; /* Pop the SPSR from stack */ \
        msr spsr_fsxc, r0; /* restore SPSR */ \
        ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
        mov r0, r0; /* NOP for previous instruction */ \
        add sp, sp, #(TF_SVC_SP-TF_R0); /* Adjust the stack pointer */ \
        ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */

#endif /* _LOCORE */

#endif /* _ARM32_FRAME_H_ */