/*	$NetBSD: frame.h,v 1.48 2020/08/14 16:18:36 skrll Exp $	*/

/*
 * Copyright (c) 1994-1997 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * frame.h
 *
 * Stack frame structures
 *
 * Created      : 30/09/94
 */

#ifndef _ARM32_FRAME_H_
#define _ARM32_FRAME_H_

#include <arm/frame.h>		/* Common ARM stack frames */

#ifndef _LOCORE

/*
 * Switch frame.
 *
 * Should be a multiple of 8 bytes for dumpsys.
 */

struct switchframe {
	u_int	sf_r4;
	u_int	sf_r5;
	u_int	sf_r6;
	u_int	sf_r7;
	u_int	sf_sp;
	u_int	sf_pc;
};
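
/*
 * Illustrative sketch (not part of this header): cpu_lwp_fork()-style
 * code seeds a switchframe just below the new lwp's trapframe, so that
 * the first cpu_switchto() to the lwp "returns" into the fork
 * trampoline.  The authoritative field usage lives in
 * sys/arch/arm/arm32/vm_machdep.c; treat the names below as assumptions.
 *
 *	struct switchframe *sf = (struct switchframe *)tf - 1;
 *
 *	sf->sf_r4 = (u_int)func;	(the trampoline calls func(arg))
 *	sf->sf_r5 = (u_int)arg;
 *	sf->sf_sp = (u_int)tf;
 *	sf->sf_pc = (u_int)lwp_trampoline;
 */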

/*
 * System stack frames.
 */

struct clockframe {
	struct trapframe cf_tf;
};

/*
 * Stack frame.  Used during stack traces (db_trace.c).
 */
struct frame {
	u_int	fr_fp;
	u_int	fr_sp;
	u_int	fr_lr;
	u_int	fr_pc;
};

#ifdef _KERNEL
void	validate_trapframe(trapframe_t *, int);
#endif /* _KERNEL */

#else /* _LOCORE */

#include "opt_compat_netbsd.h"
#include "opt_execfmt.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_arm_debug.h"
#include "opt_cputypes.h"
#include "opt_dtrace.h"

#include <arm/locore.h>

/*
 * This macro is used by DO_AST_AND_RESTORE_ALIGNMENT_FAULTS to process
 * any pending softints.
 */
#ifdef _ARM_ARCH_4T
#define	B_CF_CONTROL(rX)						;\
	ldr	ip, [rX, #CF_CONTROL]	/* get function addr */		;\
	bx	ip			/* branch to cpu_control */
#else
#define	B_CF_CONTROL(rX)						;\
	ldr	pc, [rX, #CF_CONTROL]	/* branch to cpu_control */
#endif
#ifdef _ARM_ARCH_5T
#define	BL_CF_CONTROL(rX)						;\
	ldr	ip, [rX, #CF_CONTROL]	/* get function addr */		;\
	blx	ip			/* call cpu_control */
#else
#define	BL_CF_CONTROL(rX)						;\
	mov	lr, pc							;\
	ldr	pc, [rX, #CF_CONTROL]	/* call cpu_control */
#endif
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
#define	DO_PENDING_SOFTINTS						\
	ldr	r0, [r4, #CI_INTR_DEPTH] /* Get current intr depth */	;\
	cmp	r0, #0			/* Test for 0. */		;\
	bne	10f			/* skip softints if != 0 */	;\
	ldr	r0, [r4, #CI_CPL]	/* Get current priority level */;\
	ldr	r1, [r4, #CI_SOFTINTS]	/* Get pending softint mask */	;\
	lsrs	r0, r1, r0		/* shift mask by cpl */		;\
	blne	_C_LABEL(dosoftints)	/* dosoftints(void) */		;\
10:
#else
#define	DO_PENDING_SOFTINTS		/* nothing */
#endif

#ifdef _ARM_ARCH_6
#define	GET_CPSR(rb)			/* nothing */
#define	CPSID_I(ra,rb)			cpsid	i
#define	CPSIE_I(ra,rb)			cpsie	i
#else
#define	GET_CPSR(rb)							\
	mrs	rb, cpsr		/* fetch CPSR */

#define	CPSID_I(ra,rb)							\
	orr	ra, rb, #(IF32_bits)					;\
	msr	cpsr_c, ra		/* Disable interrupts */

#define	CPSIE_I(ra,rb)							\
	bic	ra, rb, #(IF32_bits)					;\
	msr	cpsr_c, ra		/* Restore interrupts */
#endif
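
/*
 * Usage sketch (illustrative): on pre-v6 CPUs "rb" holds the CPSR
 * captured by GET_CPSR and must be preserved across the critical
 * section, while "ra" is scratch.  On ARMv6+ GET_CPSR is a no-op and
 * the cps instructions ignore both arguments.
 *
 *	GET_CPSR(r6)		@ r6 = current CPSR (pre-v6 only)
 *	CPSID_I(r1, r6)		@ mask IRQ/FIQ
 *	... critical section ...
 *	CPSIE_I(r1, r6)		@ unmask, from the state saved in r6
 */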

#define	DO_PENDING_AST(lbl)						;\
1:	ldr	r1, [r5, #L_MD_ASTPENDING] /* Pending AST? */		;\
	tst	r1, #1							;\
	beq	lbl			/* Nope.  Just bail */		;\
	bic	r0, r1, #1		/* clear AST */			;\
	str	r0, [r5, #L_MD_ASTPENDING]				;\
	CPSIE_I(r6, r6)			/* Restore interrupts */	;\
	mov	r0, sp							;\
	bl	_C_LABEL(ast)		/* ast(frame) */		;\
	CPSID_I(r0, r6)			/* Disable interrupts */	;\
	b	1b			/* test again */

/*
 * AST_ALIGNMENT_FAULT_LOCALS and ENABLE_ALIGNMENT_FAULTS
 * These are used to support dynamic enabling/disabling of
 * alignment faults when executing old a.out ARM binaries.
 *
 * Note that when ENABLE_ALIGNMENT_FAULTS finishes, r4 will contain
 * curcpu() and r5 will contain curlwp.  DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
 * relies on r4 and r5 being preserved.
 */
#ifdef EXEC_AOUT
#define	AST_ALIGNMENT_FAULT_LOCALS					\
.Laflt_cpufuncs:							;\
	.word	_C_LABEL(cpufuncs)

/*
 * This macro must be invoked following PUSHFRAMEINSVC or PUSHFRAME at
 * the top of interrupt/exception handlers.
 *
 * When invoked, r0 *must* contain the value of SPSR on the current
 * trap/interrupt frame. This is always the case if ENABLE_ALIGNMENT_FAULTS
 * is invoked immediately after PUSHFRAMEINSVC or PUSHFRAME.
 */
#define	ENABLE_ALIGNMENT_FAULTS						\
	and	r7, r0, #(PSR_MODE)	/* Test for USR32 mode */	;\
	cmp	r7, #(PSR_USR32_MODE)					;\
	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */	;\
	bne	1f			/* Not USR mode; skip AFLT */	;\
	ldr	r1, [r5, #L_MD_FLAGS]	/* Fetch l_md.md_flags */	;\
	tst	r1, #MDLWP_NOALIGNFLT					;\
	beq	1f			/* AFLTs already enabled */	;\
	ldr	r2, .Laflt_cpufuncs					;\
	ldr	r1, [r4, #CI_CTRL]	/* Fetch control register */	;\
	mov	r0, #-1							;\
	BL_CF_CONTROL(r2)		/* Enable alignment faults */	;\
1:	/* done */
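
/*
 * Usage sketch (illustrative; the arm32 exception handlers are the
 * authoritative users): the pair brackets a handler body, relying on
 * PUSHFRAMEINSVC/PUSHFRAME leaving the saved SPSR in r0, and on
 * r4/r5/r7 surviving the handler.
 *
 *	PUSHFRAMEINSVC			@ build trapframe; r0 = SPSR
 *	ENABLE_ALIGNMENT_FAULTS		@ r4 = curcpu(), r5 = curlwp
 *	... call the C handler ...
 *	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
 *	PULLFRAMEFROMSVCANDEXIT
 */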

/*
 * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or
 * PULLFRAME at the end of interrupt/exception handlers.  We know that
 * r4 points to curcpu() and r5 points to curlwp since that is what
 * ENABLE_ALIGNMENT_FAULTS did for us.
 */
#define	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS				\
	DO_PENDING_SOFTINTS						;\
	GET_CPSR(r6)			/* save CPSR */			;\
	CPSID_I(r1, r6)			/* Disable interrupts */	;\
	cmp	r7, #(PSR_USR32_MODE)	/* Returning to USR mode? */	;\
	bne	3f			/* Nope, get out now */		;\
	DO_PENDING_AST(2f)		/* Pending AST? */		;\
2:	ldr	r1, [r4, #CI_CURLWP]	/* get curlwp from cpu_info */	;\
	ldr	r0, [r1, #L_MD_FLAGS]	/* get md_flags from lwp */	;\
	tst	r0, #MDLWP_NOALIGNFLT					;\
	beq	3f			/* Keep AFLTs enabled */	;\
	ldr	r1, [r4, #CI_CTRL]	/* Fetch control register */	;\
	ldr	r2, .Laflt_cpufuncs					;\
	mov	r0, #-1							;\
	bic	r1, r1, #CPU_CONTROL_AFLT_ENABLE  /* Disable AFLTs */	;\
	BL_CF_CONTROL(r2)		/* Set new CTRL reg value */	;\
3:	/* done */

#else	/* !EXEC_AOUT */

#define	AST_ALIGNMENT_FAULT_LOCALS

#define	ENABLE_ALIGNMENT_FAULTS						\
	and	r7, r0, #(PSR_MODE)	/* Test for USR32 mode */	;\
	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */

#define	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS				\
	DO_PENDING_SOFTINTS						;\
	GET_CPSR(r6)			/* save CPSR */			;\
	CPSID_I(r1, r6)			/* Disable interrupts */	;\
	cmp	r7, #(PSR_USR32_MODE)					;\
	bne	2f			/* Nope, get out now */		;\
	DO_PENDING_AST(2f)		/* Pending AST? */		;\
2:	/* done */
#endif /* EXEC_AOUT */
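
/*
 * Descriptive note (not in the original source): on pre-ARMv6 CPUs,
 * which lack ldrex/strex, _lock_cas() is implemented as a restartable
 * atomic sequence.  LOCK_CAS_CHECK below runs on the exception-return
 * path; if the interrupted PC falls inside [_lock_cas, _lock_cas_end)
 * the saved PC is rewound to _lock_cas, so the sequence re-executes
 * from the top instead of resuming mid-update.
 */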

#ifndef _ARM_ARCH_6
#ifdef ARM_LOCK_CAS_DEBUG
#define	LOCK_CAS_DEBUG_LOCALS						\
.L_lock_cas_restart:							;\
	.word	_C_LABEL(_lock_cas_restart)

#if defined(__ARMEB__)
#define	LOCK_CAS_DEBUG_COUNT_RESTART					\
	ble	99f							;\
	ldr	r0, .L_lock_cas_restart					;\
	ldmia	r0, {r1-r2}		/* load ev_count */		;\
	adds	r2, r2, #1		/* 64-bit incr (lo) */		;\
	adc	r1, r1, #0		/* 64-bit incr (hi) */		;\
	stmia	r0, {r1-r2}		/* store ev_count */
#else /* __ARMEB__ */
#define	LOCK_CAS_DEBUG_COUNT_RESTART					\
	ble	99f							;\
	ldr	r0, .L_lock_cas_restart					;\
	ldmia	r0, {r1-r2}		/* load ev_count */		;\
	adds	r1, r1, #1		/* 64-bit incr (lo) */		;\
	adc	r2, r2, #0		/* 64-bit incr (hi) */		;\
	stmia	r0, {r1-r2}		/* store ev_count */
#endif /* __ARMEB__ */
#else /* ARM_LOCK_CAS_DEBUG */
#define	LOCK_CAS_DEBUG_LOCALS		/* nothing */
#define	LOCK_CAS_DEBUG_COUNT_RESTART	/* nothing */
#endif /* ARM_LOCK_CAS_DEBUG */

#define	LOCK_CAS_CHECK_LOCALS						\
.L_lock_cas:								;\
	.word	_C_LABEL(_lock_cas)					;\
.L_lock_cas_end:							;\
	.word	_C_LABEL(_lock_cas_end)					;\
LOCK_CAS_DEBUG_LOCALS

#define	LOCK_CAS_CHECK							\
	ldr	r0, [sp]		/* get saved PSR */		;\
	and	r0, r0, #(PSR_MODE)	/* check for SVC32 mode */	;\
	cmp	r0, #(PSR_SVC32_MODE)					;\
	bne	99f			/* nope, get out now */		;\
	ldr	r0, [sp, #(TF_PC)]					;\
	ldr	r1, .L_lock_cas_end					;\
	cmp	r0, r1							;\
	bge	99f							;\
	ldr	r1, .L_lock_cas						;\
	cmp	r0, r1							;\
	strgt	r1, [sp, #(TF_PC)]	/* restart _lock_cas */		;\
	LOCK_CAS_DEBUG_COUNT_RESTART					;\
99:

#else
#define	LOCK_CAS_CHECK			/* nothing */
#define	LOCK_CAS_CHECK_LOCALS		/* nothing */
#endif

/*
 * ASM macros for pushing and pulling trapframes from the stack
 *
 * These macros are used to handle the trapframe structure defined above.
 */

/*
 * PUSHFRAME - macro to push a trap frame on the stack in the current mode
 * Since the current mode is used, the SVC lr field is not defined.
 */

#ifdef CPU_SA110
/*
 * NOTE: r13 and r14 are stored separately as a work around for the
 * SA110 rev 2 STM^ bug
 */
#define	PUSHUSERREGS							   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(TF_USR_SP-TF_R0); /* Adjust the stack pointer */ \
	stmia	r0, {r13-r14}^		/* Push the user mode sp and lr */
#else
#define	PUSHUSERREGS							   \
	stmia	sp, {r0-r14}^		/* Push the user mode registers */
#endif

#define	PUSHFRAME							   \
	str	lr, [sp, #-4]!;		/* Push the return address */	   \
	sub	sp, sp, #(TF_PC-TF_R0);	/* Adjust the stack pointer */	   \
	PUSHUSERREGS;			/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	mrs	r0, spsr;		/* Get the SPSR */		   \
	str	r0, [sp, #-TF_R0]!	/* Push the SPSR on the stack */
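
/*
 * Usage sketch (illustrative): a trap that arrives already in SVC32
 * mode, such as the SWI entry, can be bracketed with the current-mode
 * pair.
 *
 *	PUSHFRAME			@ build trapframe; r0 = SPSR
 *	... dispatch to C code ...
 *	PULLFRAME			@ unwind the trapframe
 *	movs	pc, lr			@ return from the exception
 */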

/*
 * Push a minimal trapframe so we can dispatch an interrupt from the
 * idle loop.  The only reason the idle loop wakes up is to dispatch
 * interrupts, so why pay the cost of a full exception frame when we
 * can do something minimal.
 */
#define	PUSHIDLEFRAME							   \
	str	lr, [sp, #-4]!;		/* save SVC32 lr */		   \
	str	r6, [sp, #(TF_R6-TF_PC)]!; /* save callee-saved r6 */	   \
	str	r4, [sp, #(TF_R4-TF_R6)]!; /* save callee-saved r4 */	   \
	mrs	r0, cpsr;		/* Get the CPSR */		   \
	str	r0, [sp, #(-TF_R4)]!	/* Push the CPSR on the stack */

/*
 * Push a trapframe to be used by cpu_switchto
 */
#define	PUSHSWITCHFRAME(rX)						   \
	mov	ip, sp;							   \
	sub	sp, sp, #(TRAPFRAMESIZE-TF_R12); /* Adjust the stack pointer */ \
	push	{r4-r11};		/* Push the callee saved registers */ \
	sub	sp, sp, #TF_R4;		/* reserve rest of trapframe */	   \
	str	ip, [sp, #TF_SVC_SP];					   \
	str	lr, [sp, #TF_SVC_LR];					   \
	str	lr, [sp, #TF_PC];					   \
	mrs	rX, cpsr;		/* Get the CPSR */		   \
	str	rX, [sp, #TF_SPSR]	/* save in trapframe */

#define	PUSHSWITCHFRAME1						   \
	mov	ip, sp;							   \
	sub	sp, sp, #(TRAPFRAMESIZE-TF_R8); /* Adjust the stack pointer */ \
	push	{r4-r7};		/* Push some of the callee saved registers */ \
	sub	sp, sp, #TF_R4;		/* reserve rest of trapframe */	   \
	str	ip, [sp, #TF_SVC_SP];					   \
	str	lr, [sp, #TF_SVC_LR];					   \
	str	lr, [sp, #TF_PC]

#if defined(_ARM_ARCH_DWORD_OK) && __ARM_EABI__
#define	PUSHSWITCHFRAME2						   \
	strd	r10, [sp, #TF_R10];	/* save r10 & r11 */		   \
	strd	r8, [sp, #TF_R8];	/* save r8 & r9 */		   \
	mrs	r0, cpsr;		/* Get the CPSR */		   \
	str	r0, [sp, #TF_SPSR]	/* save in trapframe */
#else
#define	PUSHSWITCHFRAME2						   \
	add	r0, sp, #TF_R8;		/* get ptr to r8 and above */	   \
	stmia	r0, {r8-r11};		/* save rest of registers */	   \
	mrs	r0, cpsr;		/* Get the CPSR */		   \
	str	r0, [sp, #TF_SPSR]	/* save in trapframe */
#endif

/*
 * PULLFRAME - macro to pull a trap frame from the stack in the current mode
 * Since the current mode is used, the SVC lr field is ignored.
 */

#define	PULLFRAME							   \
	ldr	r0, [sp], #TF_R0;	/* Pop the SPSR from stack */	   \
	msr	spsr_fsxc, r0;						   \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(TF_PC-TF_R0);	/* Adjust the stack pointer */	   \
	ldr	lr, [sp], #4		/* Pop the return address */

#define	PULLIDLEFRAME							   \
	add	sp, sp, #TF_R4;		/* Adjust the stack pointer */	   \
	ldr	r4, [sp], #(TF_R6-TF_R4); /* restore callee-saved r4 */	   \
	ldr	r6, [sp], #(TF_PC-TF_R6); /* restore callee-saved r6 */	   \
	ldr	lr, [sp], #4		/* Pop the return address */

/*
 * Pop a trapframe to be used by cpu_switchto (don't touch r0 & r1).
 */
#define	PULLSWITCHFRAME							   \
	add	sp, sp, #TF_R4;		/* Adjust the stack pointer */	   \
	pop	{r4-r11};		/* pop the callee saved registers */ \
	add	sp, sp, #(TF_PC-TF_R12); /* Adjust the stack pointer */	   \
	ldr	lr, [sp], #4;		/* pop the return address */
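
/*
 * Usage sketch (illustrative; cpu_switchto in the arm32 locore is the
 * real consumer): a switch frame is pushed on the outgoing lwp's stack
 * and popped from the incoming lwp's stack.
 *
 *	PUSHSWITCHFRAME(r3)		@ save outgoing context
 *	... switch sp to the new lwp's saved sp ...
 *	PULLSWITCHFRAME			@ restore incoming context
 *	mov	pc, lr			@ resume the incoming lwp
 */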

/*
 * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
 * This should only be used if the processor is not currently in SVC32
 * mode.  The processor mode is switched to SVC mode and the trap frame is
 * stored.  The SVC lr field is used to store the previous value of
 * lr in SVC mode.
 *
 * NOTE: r13 and r14 are stored separately as a work around for the
 * SA110 rev 2 STM^ bug
 */

#ifdef _ARM_ARCH_6
#define	SET_CPSR_MODE(tmp, mode)					   \
	cps	#(mode)
#else
#define	SET_CPSR_MODE(tmp, mode)					   \
	mrs	tmp, cpsr;		/* Get the CPSR */		   \
	bic	tmp, tmp, #(PSR_MODE);	/* Fix for SVC mode */		   \
	orr	tmp, tmp, #(mode);					   \
	msr	cpsr_c, tmp		/* Punch into SVC mode */
#endif

#define	PUSHXXXREGSANDSWITCH						   \
	stmdb	sp, {r0-r3};		/* Save 4 registers */		   \
	mov	r0, lr;			/* Save xxx32 r14 */		   \
	mov	r1, sp;			/* Save xxx32 sp */		   \
	mrs	r3, spsr;		/* Save xxx32 spsr */		   \
	SET_CPSR_MODE(r2, PSR_SVC32_MODE)

#ifdef KDTRACE_HOOKS
#define	PUSHDTRACEGAP							   \
	and	r2, r3, #(PSR_MODE);					   \
	cmp	r2, #(PSR_SVC32_MODE);	/* were we in SVC mode? */	   \
	mov	r2, sp;							   \
	subeq	r2, r2, #(4 * 16);	/* if so, leave a gap for dtrace */
#else
#define	PUSHDTRACEGAP							   \
	mov	r2, sp
#endif

#define	PUSHTRAPFRAME(rX)						   \
	bic	r2, rX, #7;		/* Align new SVC sp */		   \
	str	r0, [r2, #-4]!;		/* Push return address */	   \
	stmdb	r2!, {sp, lr};		/* Push SVC sp, lr */		   \
	mov	sp, r2;			/* Keep stack aligned */	   \
	msr	spsr_fsxc, r3;		/* Restore correct spsr */	   \
	ldmdb	r1, {r0-r3};		/* Restore 4 regs from xxx mode */ \
	sub	sp, sp, #(TF_SVC_SP-TF_R0); /* Adjust the stack pointer */ \
	PUSHUSERREGS;			/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	mrs	r0, spsr;		/* Get the SPSR */		   \
	str	r0, [sp, #-TF_R0]!	/* Push the SPSR onto the stack */

#define	PUSHFRAMEINSVC							   \
	PUSHXXXREGSANDSWITCH;						   \
	PUSHTRAPFRAME(sp)

/*
 * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
 * in SVC32 mode and restore the saved processor mode and PC.
 * This should be used when the SVC lr register needs to be restored on
 * exit.
 */

#define	PULLFRAMEFROMSVCANDEXIT						   \
	ldr	r0, [sp], #TF_R0;	/* Pop the SPSR from stack */	   \
	msr	spsr_fsxc, r0;		/* restore SPSR */		   \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(TF_SVC_SP-TF_R0); /* Adjust the stack pointer */ \
	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */

#endif /* _LOCORE */

#endif /* _ARM32_FRAME_H_ */