/*	$NetBSD: profile.h,v 1.10 2013/08/16 21:42:48 matt Exp $	*/

/*
 * Copyright (c) 2001 Ben Harris
 * Copyright (c) 1995-1996 Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define _MCOUNT_DECL void _mcount

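/*
 * _MCOUNT_DECL supplies the return type and name that the machine-
 * independent gmon mcount.c pastes in front of its argument list, so the
 * profiler entry point comes out roughly as the following sketch
 * (argument types as used by the shared mcount.c):
 *
 *	void _mcount(u_long frompc, u_long selfpc)
 *	{
 *		... record the frompc -> selfpc arc ...
 *	}
 */
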
/*
 * Cannot implement mcount in C as GCC will trash the ip register when it
 * pushes a trapframe. Pity we cannot insert assembly before the function
 * prologue.
 */

#define MCOUNT_ASM_NAME "__mcount"
#ifdef PIC
#define PLTSYM "(PLT)"
#endif

#ifndef PLTSYM
#define PLTSYM
#endif
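
/*
 * PLTSYM only decorates the call emitted by MCOUNT below: with PIC
 * defined the wrapper calls "bl _mcount(PLT)", routing the branch through
 * the PLT; otherwise it is a plain "bl _mcount".
 */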

#if !defined(__thumb__) && !defined(__ARM_EABI__)
#define MCOUNT \
	__asm(".text"); \
	__asm(".align 0"); \
	__asm(".type " MCOUNT_ASM_NAME ",%function"); \
	__asm(".global " MCOUNT_ASM_NAME); \
	__asm(MCOUNT_ASM_NAME ":"); \
	__asm(".cfi_startproc"); \
	__asm(".fnstart"); \
	/* \
	 * Preserve registers that are trashed during mcount \
	 */ \
	__asm("push {r0-r3, ip, lr}"); \
	__asm(".save {r0-r3, ip, lr}"); \
	__asm(".cfi_def_cfa_offset 24"); \
	__asm(".cfi_offset 14, -4"); \
	__asm(".cfi_offset 12, -8"); \
	__asm(".cfi_offset 3, -12"); \
	__asm(".cfi_offset 2, -16"); \
	__asm(".cfi_offset 1, -20"); \
	__asm(".cfi_offset 0, -24"); \
	/* Check what mode we're in. EQ => 32, NE => 26 */ \
	__asm("teq r0, r0"); \
	__asm("teq pc, r15"); \
	/* \
	 * find the return address for mcount, \
	 * and the return address for mcount's caller. \
	 * \
	 * frompcindex = pc pushed by call into self. \
	 */ \
	__asm("moveq r0, ip"); \
	__asm("bicne r0, ip, #0xfc000003"); \
	/* \
	 * selfpc = pc pushed by mcount call \
	 */ \
	__asm("moveq r1, lr"); \
	__asm("bicne r1, lr, #0xfc000003"); \
	/* \
	 * Call the real mcount code \
	 */ \
	__asm("bl " ___STRING(_C_LABEL(_mcount)) PLTSYM); \
	/* \
	 * Restore registers that were trashed during mcount \
	 */ \
	__asm("pop {r0-r3, lr, pc}"); \
	__asm(".cfi_endproc"); \
	__asm(".fnend"); \
	__asm(".size " MCOUNT_ASM_NAME ", .-" MCOUNT_ASM_NAME);
#else
#define MCOUNT \
	__asm(".text"); \
	__asm(".align 0"); \
	__asm(".type " MCOUNT_ASM_NAME ",%function"); \
	__asm(".global " MCOUNT_ASM_NAME); \
	__asm(".arm"); \
	__asm(MCOUNT_ASM_NAME ":"); \
	__asm(".fnstart"); \
	__asm(".cfi_startproc"); \
	/* \
	 * Preserve registers that are trashed during mcount \
	 */ \
	__asm("push {r0-r4, ip, lr}"); \
	__asm(".save {r0-r4, ip, lr}"); \
	__asm(".cfi_def_cfa_offset 28"); \
	__asm(".cfi_offset 14, -4"); \
	__asm(".cfi_offset 12, -8"); \
	__asm(".cfi_offset 4, -12"); \
	__asm(".cfi_offset 3, -16"); \
	__asm(".cfi_offset 2, -20"); \
	__asm(".cfi_offset 1, -24"); \
	__asm(".cfi_offset 0, -28"); \
	/* \
	 * find the return address for mcount, \
	 * and the return address for mcount's caller. \
	 * \
	 * frompcindex = pc pushed by call into self. \
	 */ \
	__asm("mov r0, ip"); \
	/* \
	 * selfpc = pc pushed by mcount call \
	 */ \
	__asm("mov r1, lr"); \
	/* \
	 * Call the real mcount code \
	 */ \
	__asm("bl " ___STRING(_C_LABEL(_mcount)) PLTSYM); \
	/* \
	 * Restore registers that were trashed during mcount \
	 */ \
	__asm("pop {r0-r4, lr, pc}"); \
	__asm(".cfi_endproc"); \
	__asm(".fnend"); \
	__asm(".size " MCOUNT_ASM_NAME ", .-" MCOUNT_ASM_NAME);
#endif
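
/*
 * Sketch of how the wrapper above is reached, assuming the usual NetBSD
 * arrangement: the shared gmon mcount.c expands MCOUNT at file scope so
 * the assembly is emitted into its object file, and the compiler's
 * profiling hook in each instrumented function looks roughly like
 *
 *	foo:	mov	ip, lr		@ caller's return address (frompc)
 *		bl	__mcount	@ lr now points back into foo (selfpc)
 *		...			@ normal prologue follows
 *
 * The wrapper therefore recovers frompc from ip and selfpc from lr
 * (masking the PSR bits on 26-bit CPUs), calls _mcount(), and restores
 * the caller's registers before returning.
 */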

#ifdef _KERNEL
#ifdef __PROG26
extern int int_off_save(void);
extern void int_restore(int);
#define MCOUNT_ENTER (s = int_off_save())
#define MCOUNT_EXIT int_restore(s)
#else
#include <arm/cpufunc.h>
/*
 * splhigh() and splx() are heavyweight, and call mcount(). Therefore
 * we disable interrupts (IRQ, but not FIQ) directly on the CPU.
 *
 * We're lucky that the CPSR and 's' both happen to be 'int's.
 */
#define MCOUNT_ENTER s = __set_cpsr_c(0x0080, 0x0080); /* kill IRQ */
#define MCOUNT_EXIT __set_cpsr_c(0xffffffff, s); /* restore old value */
#endif /* !acorn26 */
#endif /* _KERNEL */
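
/*
 * Usage sketch for the kernel case, assuming the usual shape of the
 * machine-independent mcount(): it declares a local "int s;" (which is
 * why both macros refer to 's') and brackets the arc-table update with
 *
 *	MCOUNT_ENTER;	(IRQs masked, previous PSR/CPSR value kept in s)
 *	...
 *	MCOUNT_EXIT;	(previous interrupt state restored)
 */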