/*	$NetBSD: profile.h,v 1.7.6.2 2007/12/03 18:34:42 ad Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif

#include <machine/atomic.h>

#define	_MCOUNT_DECL	void _mcount

#define	EPROL_EXPORT	__asm(".globl _eprol")

#ifdef PIC
#define	__MCPLT	"@PLT"
#else
#define	__MCPLT
#endif
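
/*
 * Illustrative note (not in the original sources): the MI gmon code
 * declares its profiling handler through _MCOUNT_DECL, roughly as
 *
 *	_MCOUNT_DECL(u_long frompc, u_long selfpc)
 *
 * which the definition above expands to
 *
 *	void _mcount(u_long frompc, u_long selfpc)
 *
 * EPROL_EXPORT makes the _eprol label global so the profiling startup
 * code can mark where profiled text begins, and __MCPLT routes the
 * call in the MCOUNT stub below through the PLT when building PIC
 * objects, so the stub also works in shared libraries.
 */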

#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"		\
"	.type __mcount,@function\n"		\
"__mcount:				\n"		\
"	pushq	%rbp			\n"		\
"	movq	%rsp,%rbp		\n"		\
"	subq	$56,%rsp		\n"		\
"	movq	%rdi,0(%rsp)		\n"		\
"	movq	%rsi,8(%rsp)		\n"		\
"	movq	%rdx,16(%rsp)		\n"		\
"	movq	%rcx,24(%rsp)		\n"		\
"	movq	%r8,32(%rsp)		\n"		\
"	movq	%r9,40(%rsp)		\n"		\
"	movq	%rax,48(%rsp)		\n"		\
"	movq	0(%rbp),%r11		\n"		\
"	movq	8(%r11),%rdi		\n"		\
"	movq	8(%rbp),%rsi		\n"		\
"	call	_mcount"__MCPLT "	\n"		\
"	movq	0(%rsp),%rdi		\n"		\
"	movq	8(%rsp),%rsi		\n"		\
"	movq	16(%rsp),%rdx		\n"		\
"	movq	24(%rsp),%rcx		\n"		\
"	movq	32(%rsp),%r8		\n"		\
"	movq	40(%rsp),%r9		\n"		\
"	movq	48(%rsp),%rax		\n"		\
"	leave				\n"		\
"	ret				\n"		\
"	.size __mcount,.-__mcount");

#ifdef _KERNEL
#ifdef MULTIPROCESSOR
__cpu_simple_lock_t __mcount_lock;

static inline void
MCOUNT_ENTER_MP(void)
{
	while (x86_atomic_testset_b(&__mcount_lock, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		while (__mcount_lock == __SIMPLELOCK_LOCKED)
			;
	}
	__insn_barrier();
}
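
/*
 * Note (added): this is a test-and-test-and-set spinlock; waiters
 * poll the lock with plain reads and retry the atomic swap only once
 * it looks free, keeping the line cached while spinning.  A minimal
 * sketch of the intended pairing, assuming an mcount-like caller:
 *
 *	MCOUNT_ENTER_MP();	serialize updates to the gmon buffers
 *	...record the arc...
 *	MCOUNT_EXIT_MP();
 */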

static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define	MCOUNT_ENTER_MP()
#define	MCOUNT_EXIT_MP()
#endif

#ifdef XEN
static inline void
mcount_disable_intr(void)
{
	/* works because __cli is a macro */
	__cli();
}

static inline u_long
mcount_read_psl(void)
{
	return (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask);
}

static inline void
mcount_write_psl(u_long psl)
{
	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = psl;
	x86_lfence();
	/* XXX can't call hypervisor_force_callback() because we're in mcount */
}
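
/*
 * Note (added): under Xen the guest cannot touch the real PSL, so the
 * "PSL" read and written above is the per-VCPU event-channel upcall
 * mask; setting it masks event delivery much as clearing RFLAGS.IF
 * masks hardware interrupts in the native variants below.  Only
 * vcpu_info[0], the boot VCPU, is handled here.
 */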

#else /* XEN */
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	/* 64-bit RFLAGS needs pushfq/popq; pushfl/popl are 32-bit only. */
	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

#endif /* XEN */
#define	MCOUNT_ENTER	\
	s = (int)mcount_read_psl(); \
	mcount_disable_intr(); \
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT	\
	MCOUNT_EXIT_MP(); \
	mcount_write_psl(s);

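/*
 * Illustrative sketch (assumed caller, modelled on the MI mcount):
 * MCOUNT_ENTER expects a local variable `s' for the saved PSL,
 * roughly
 *
 *	void _mcount(u_long frompc, u_long selfpc)
 *	{
 *		int s;
 *		...
 *		MCOUNT_ENTER;		save PSL, mask interrupts, lock
 *		...record frompc -> selfpc...
 *		MCOUNT_EXIT;		unlock, restore saved PSL
 *	}
 */
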
#endif /* _KERNEL */