/*	$NetBSD: profile.h,v 1.18 2018/01/24 09:04:45 skrll Exp $	*/

/*
 * Copyright (c) 2001 Ben Harris
 * Copyright (c) 1995-1996 Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
/* Signature of the C-level profiling hook consumed by <sys/gmon.h>. */
#define _MCOUNT_DECL void _mcount

/*
 * Cannot implement mcount in C as GCC will trash the ip register when it
 * pushes a trapframe. Pity we cannot insert assembly before the function
 * prologue.
 */

/* Assembler name of the stub the compiler's -pg instrumentation calls. */
#define MCOUNT_ASM_NAME "__mcount"
/* Suffix appended to the _mcount branch target (empty here: direct call). */
#define PLTSYM

#if !defined(__ARM_EABI__)
/*
 * Old-ABI variant.  May run on CPUs where the PC word can carry PSR/flag
 * bits (26-bit mode), so the captured addresses are masked before being
 * handed to _mcount().
 */
#define MCOUNT \
	__asm(".text"); \
	__asm(".align 0"); \
	__asm(".arm"); \
	__asm(".type " MCOUNT_ASM_NAME ",%function"); \
	__asm(".global " MCOUNT_ASM_NAME); \
	__asm(MCOUNT_ASM_NAME ":"); \
	/* \
	 * Preserve registers that are trashed during mcount \
	 */ \
	__asm("push {r0-r3, ip, lr}"); \
	/* Check what mode we're in. EQ => 32, NE => 26 */ \
	__asm("teq r0, r0"); \
	__asm("teq pc, r15"); \
	/* \
	 * find the return address for mcount, \
	 * and the return address for mcount's caller. \
	 * \
	 * frompcindex = pc pushed by call into self. \
	 * (In 26-bit mode, bic strips the flag bits sharing the PC word.) \
	 */ \
	__asm("moveq r0, ip"); \
	__asm("bicne r0, ip, #0xfc000003"); \
	/* \
	 * selfpc = pc pushed by mcount call \
	 */ \
	__asm("moveq r1, lr"); \
	__asm("bicne r1, lr, #0xfc000003"); \
	/* \
	 * Call the real mcount code \
	 */ \
	__asm("bl " ___STRING(_C_LABEL(_mcount)) PLTSYM); \
	/* \
	 * Restore registers that were trashed during mcount. \
	 * The first pop reloads r0-r3 and leaves the saved ip word in lr; \
	 * the final pop loads the saved lr into pc, returning to the \
	 * instrumented function. \
	 */ \
	__asm("pop {r0-r3, lr}"); \
	__asm("pop {pc}"); \
	__asm(".size " MCOUNT_ASM_NAME ", .-" MCOUNT_ASM_NAME);
#elif defined(__ARM_DWARF_EH__)
/*
 * EABI variant built with DWARF-based exception handling: addresses are
 * plain 32-bit values, and CFI directives describe the six saved words so
 * an unwinder can step through the profiling stub.
 */
#define MCOUNT \
	__asm(".text"); \
	__asm(".align 0"); \
	__asm(".arm"); \
	__asm(".type " MCOUNT_ASM_NAME ",%function"); \
	__asm(".global " MCOUNT_ASM_NAME); \
	__asm(MCOUNT_ASM_NAME ":"); \
	__asm(".cfi_startproc"); \
	/* \
	 * Preserve registers that are trashed during mcount \
	 */ \
	__asm("push {r0-r3, ip, lr}"); \
	/* CFA sits 24 bytes (6 words) above sp; record each saved reg \
	 * (DWARF regno: 14=lr, 12=ip, 3..0=r3..r0). */ \
	__asm(".cfi_def_cfa_offset 24"); \
	__asm(".cfi_offset 14, -4"); \
	__asm(".cfi_offset 12, -8"); \
	__asm(".cfi_offset 3, -12"); \
	__asm(".cfi_offset 2, -16"); \
	__asm(".cfi_offset 1, -20"); \
	__asm(".cfi_offset 0, -24"); \
	/* \
	 * find the return address for mcount, \
	 * and the return address for mcount's caller. \
	 * \
	 * frompcindex = pc pushed by call into self. \
	 */ \
	__asm("mov r0, ip"); \
	/* \
	 * selfpc = pc pushed by mcount call \
	 */ \
	__asm("mov r1, lr"); \
	/* \
	 * Call the real mcount code \
	 */ \
	__asm("bl " ___STRING(_C_LABEL(_mcount)) PLTSYM); \
	/* \
	 * Restore registers that were trashed during mcount. \
	 * pop {r0-r3, lr} leaves the saved ip word in lr; pop {pc} \
	 * returns via the saved lr. \
	 */ \
	__asm("pop {r0-r3, lr}"); \
	__asm("pop {pc}"); \
	__asm(".cfi_endproc"); \
	__asm(".size " MCOUNT_ASM_NAME ", .-" MCOUNT_ASM_NAME);
#else
/*
 * Default EABI variant: emits ARM EHABI unwind directives \
 * (.fnstart/.save/.fnend) in addition to the CFI annotations.
 */
#define MCOUNT \
	__asm(".text"); \
	__asm(".align 0"); \
	__asm(".arm"); \
	__asm(".type " MCOUNT_ASM_NAME ",%function"); \
	__asm(".global " MCOUNT_ASM_NAME); \
	__asm(MCOUNT_ASM_NAME ":"); \
	__asm(".fnstart"); \
	__asm(".cfi_startproc"); \
	/* \
	 * Preserve registers that are trashed during mcount \
	 */ \
	__asm("push {r0-r3, ip, lr}"); \
	__asm(".save {r0-r3, lr}"); \
	/* CFA sits 24 bytes (6 words) above sp; record each saved reg \
	 * (DWARF regno: 14=lr, 12=ip, 3..0=r3..r0). */ \
	__asm(".cfi_def_cfa_offset 24"); \
	__asm(".cfi_offset 14, -4"); \
	__asm(".cfi_offset 12, -8"); \
	__asm(".cfi_offset 3, -12"); \
	__asm(".cfi_offset 2, -16"); \
	__asm(".cfi_offset 1, -20"); \
	__asm(".cfi_offset 0, -24"); \
	/* \
	 * find the return address for mcount, \
	 * and the return address for mcount's caller. \
	 * \
	 * frompcindex = pc pushed by call into self. \
	 */ \
	__asm("mov r0, ip"); \
	/* \
	 * selfpc = pc pushed by mcount call \
	 */ \
	__asm("mov r1, lr"); \
	/* \
	 * Call the real mcount code \
	 */ \
	__asm("bl " ___STRING(_C_LABEL(_mcount)) PLTSYM); \
	/* \
	 * Restore registers that were trashed during mcount. \
	 * pop {r0-r3, lr} leaves the saved ip word in lr; pop {pc} \
	 * returns via the saved lr. \
	 */ \
	__asm("pop {r0-r3, lr}"); \
	__asm("pop {pc}"); \
	__asm(".cfi_endproc"); \
	__asm(".fnend"); \
	__asm(".size " MCOUNT_ASM_NAME ", .-" MCOUNT_ASM_NAME);
#endif

#ifdef _KERNEL
#include <arm/cpufunc.h>
/*
 * splhigh() and splx() are heavyweight, and call mcount(). Therefore
 * we disabled interrupts (IRQ, but not FIQ) directly on the CPU.
 *
 * We're lucky that the CPSR and 's' both happen to be 'int's.
 */
/* 0x0080 is the CPSR I (IRQ-disable) bit. */
#define MCOUNT_ENTER s = __set_cpsr_c(0x0080, 0x0080); /* kill IRQ */
#define MCOUNT_EXIT __set_cpsr_c(0xffffffff, s); /* restore old value */
#endif /* _KERNEL */