/*	$NetBSD: pthread_md.h,v 1.10 2007/09/24 12:19:40 skrll Exp $	*/

/*-
 * Copyright (c) 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LIB_PTHREAD_I386_MD_H
#define _LIB_PTHREAD_I386_MD_H

#include <sys/ucontext.h>
#include <ucontext.h>

/*
 * Userlevel context-switch hooks.  pthread__i386_init() points these at
 * either the plain x87 ("_s87") or the FXSAVE ("_xmm") implementations
 * below, depending on the FPU save format in use.
 */
extern int	(*_md_getcontext_u)(ucontext_t *);
extern int	(*_md_setcontext_u)(const ucontext_t *);
extern int	(*_md_swapcontext_u)(ucontext_t *, const ucontext_t *);

#define	_getcontext_u(uc)		(*_md_getcontext_u)((uc))
#define	_setcontext_u(uc)		(*_md_setcontext_u)((uc))
#define	_swapcontext_u(ouc, nuc)	(*_md_swapcontext_u)((ouc), (nuc))

/* x87-only and FXSAVE variants of the context-switch primitives. */
int	_getcontext_u_s87(ucontext_t *);
int	_setcontext_u_s87(const ucontext_t *);
int	_swapcontext_u_s87(ucontext_t *, const ucontext_t *);
int	_getcontext_u_xmm(ucontext_t *);
int	_setcontext_u_xmm(const ucontext_t *);
int	_swapcontext_u_xmm(ucontext_t *, const ucontext_t *);

/* Machine-dependent startup hook; selects the variants above. */
void	pthread__i386_init(void);

#define	PTHREAD_MD_INIT		pthread__i386_init();
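
/*
 * Illustrative sketch only (not the code from pthread_i386.c):
 * pthread__i386_init() is expected to bind the _md_*context_u hooks to
 * either the "_s87" or the "_xmm" implementations, depending on whether
 * the kernel saves FPU state with FXSAVE.  The "machdep.osfxsr" sysctl
 * used below is an assumption about how that could be detected.
 */
#if 0
#include <sys/sysctl.h>

void
pthread__i386_init(void)
{
	int osfxsr = 0;
	size_t len = sizeof(osfxsr);

	if (sysctlbyname("machdep.osfxsr", &osfxsr, &len, NULL, 0) == 0 &&
	    osfxsr != 0) {
		/* Kernel uses FXSAVE: SSE-aware save/restore. */
		_md_getcontext_u = _getcontext_u_xmm;
		_md_setcontext_u = _setcontext_u_xmm;
		_md_swapcontext_u = _swapcontext_u_xmm;
	} else {
		/* Plain x87 save/restore. */
		_md_getcontext_u = _getcontext_u_s87;
		_md_setcontext_u = _setcontext_u_s87;
		_md_swapcontext_u = _swapcontext_u_s87;
	}
}
#endif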

static inline long
pthread__sp(void)
{
	long	ret;
	__asm("movl %%esp, %0" : "=g" (ret));

	return ret;
}

#define	pthread__uc_sp(ucp)	((ucp)->uc_mcontext.__gregs[_REG_UESP])
#define	pthread__uc_pc(ucp)	((ucp)->uc_mcontext.__gregs[_REG_EIP])

/*
 * Set initial, sane values for registers whose values aren't just
 * "don't care".
 *
 * We use the current context instead of a guessed one because we cannot
 * assume how the GDT entries are ordered: what is true on i386 is not
 * true anymore on amd64.
 */
#define	_INITCONTEXT_U_MD(ucp)						\
	do {								\
		ucontext_t ucur;					\
		(void)getcontext(&ucur);				\
		(ucp)->uc_mcontext.__gregs[_REG_GS] =			\
		    ucur.uc_mcontext.__gregs[_REG_GS],			\
		(ucp)->uc_mcontext.__gregs[_REG_FS] =			\
		    ucur.uc_mcontext.__gregs[_REG_FS],			\
		(ucp)->uc_mcontext.__gregs[_REG_ES] =			\
		    ucur.uc_mcontext.__gregs[_REG_ES],			\
		(ucp)->uc_mcontext.__gregs[_REG_DS] =			\
		    ucur.uc_mcontext.__gregs[_REG_DS],			\
		(ucp)->uc_mcontext.__gregs[_REG_CS] =			\
		    ucur.uc_mcontext.__gregs[_REG_CS],			\
		(ucp)->uc_mcontext.__gregs[_REG_SS] =			\
		    ucur.uc_mcontext.__gregs[_REG_SS],			\
		(ucp)->uc_mcontext.__gregs[_REG_EFL] =			\
		    ucur.uc_mcontext.__gregs[_REG_EFL];			\
	} while (/*CONSTCOND*/0);
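
/*
 * Usage sketch (illustrative only): when a context is built for a new
 * thread, the segment registers and flags are seeded from the running
 * context first, and the stack and instruction pointers are then pointed
 * at the new thread's stack and entry function.  "stacktop" and "entry"
 * below are placeholder names, not identifiers from this library.
 */
#if 0
	ucontext_t uc;

	_INITCONTEXT_U_MD(&uc);
	pthread__uc_sp(&uc) = (__greg_t)stacktop;
	pthread__uc_pc(&uc) = (__greg_t)entry;
#endif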

/*
 * Usable stack space below the ucontext_t.
 * See comment in pthread_switch.S about STACK_SWITCH.
 */
#define	STACKSPACE	32	/* room for 8 integer values */

/*
 * Conversions between struct reg and struct mcontext. Used by
 * libpthread_dbg.
 */

#define	PTHREAD_UCONTEXT_TO_REG(reg, uc) do {				\
	(reg)->r_gs = (uc)->uc_mcontext.__gregs[_REG_GS];		\
	(reg)->r_fs = (uc)->uc_mcontext.__gregs[_REG_FS];		\
	(reg)->r_es = (uc)->uc_mcontext.__gregs[_REG_ES];		\
	(reg)->r_ds = (uc)->uc_mcontext.__gregs[_REG_DS];		\
	(reg)->r_edi = (uc)->uc_mcontext.__gregs[_REG_EDI];		\
	(reg)->r_esi = (uc)->uc_mcontext.__gregs[_REG_ESI];		\
	(reg)->r_ebp = (uc)->uc_mcontext.__gregs[_REG_EBP];		\
	(reg)->r_ebx = (uc)->uc_mcontext.__gregs[_REG_EBX];		\
	(reg)->r_edx = (uc)->uc_mcontext.__gregs[_REG_EDX];		\
	(reg)->r_ecx = (uc)->uc_mcontext.__gregs[_REG_ECX];		\
	(reg)->r_eax = (uc)->uc_mcontext.__gregs[_REG_EAX];		\
	(reg)->r_eip = (uc)->uc_mcontext.__gregs[_REG_EIP];		\
	(reg)->r_cs = (uc)->uc_mcontext.__gregs[_REG_CS];		\
	(reg)->r_eflags = (uc)->uc_mcontext.__gregs[_REG_EFL];		\
	(reg)->r_esp = (uc)->uc_mcontext.__gregs[_REG_UESP];		\
	(reg)->r_ss = (uc)->uc_mcontext.__gregs[_REG_SS];		\
	} while (/*CONSTCOND*/0)

#define	PTHREAD_REG_TO_UCONTEXT(uc, reg) do {				\
	(uc)->uc_mcontext.__gregs[_REG_GS] = (reg)->r_gs;		\
	(uc)->uc_mcontext.__gregs[_REG_FS] = (reg)->r_fs;		\
	(uc)->uc_mcontext.__gregs[_REG_ES] = (reg)->r_es;		\
	(uc)->uc_mcontext.__gregs[_REG_DS] = (reg)->r_ds;		\
	(uc)->uc_mcontext.__gregs[_REG_EDI] = (reg)->r_edi;		\
	(uc)->uc_mcontext.__gregs[_REG_ESI] = (reg)->r_esi;		\
	(uc)->uc_mcontext.__gregs[_REG_EBP] = (reg)->r_ebp;		\
	(uc)->uc_mcontext.__gregs[_REG_EBX] = (reg)->r_ebx;		\
	(uc)->uc_mcontext.__gregs[_REG_EDX] = (reg)->r_edx;		\
	(uc)->uc_mcontext.__gregs[_REG_ECX] = (reg)->r_ecx;		\
	(uc)->uc_mcontext.__gregs[_REG_EAX] = (reg)->r_eax;		\
	(uc)->uc_mcontext.__gregs[_REG_EIP] = (reg)->r_eip;		\
	(uc)->uc_mcontext.__gregs[_REG_CS] = (reg)->r_cs;		\
	(uc)->uc_mcontext.__gregs[_REG_EFL] = (reg)->r_eflags;		\
	(uc)->uc_mcontext.__gregs[_REG_UESP] = (reg)->r_esp;		\
	(uc)->uc_mcontext.__gregs[_REG_SS] = (reg)->r_ss;		\
	/*LINTED precision loss */					\
	(uc)->uc_flags = ((uc)->uc_flags | _UC_CPU) & ~_UC_USER;	\
	} while (/*CONSTCOND*/0)
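
/*
 * Usage sketch (illustrative only): a debugger built on libpthread_dbg
 * would fetch a thread's saved ucontext_t, convert it to the ptrace-style
 * struct reg to inspect or modify it, and convert it back before the
 * thread resumes.
 */
#if 0
#include <machine/reg.h>

	struct reg r;

	PTHREAD_UCONTEXT_TO_REG(&r, &uc);	/* examine/modify r.r_eip, ... */
	PTHREAD_REG_TO_UCONTEXT(&uc, &r);	/* write the changes back */
#endif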

#define	PTHREAD_UCONTEXT_TO_FPREG(freg, uc)				\
	(void)memcpy((freg)->__data,					\
	    (uc)->uc_mcontext.__fpregs.__fp_reg_set.__fpchip_state.__fp_state, \
	    sizeof(struct fpreg))

#define	PTHREAD_FPREG_TO_UCONTEXT(uc, freg) do {			\
	(void)memcpy(							\
	    (uc)->uc_mcontext.__fpregs.__fp_reg_set.__fpchip_state.__fp_state, \
	    (freg)->__data, sizeof(struct fpreg));			\
	/*LINTED precision loss */					\
	(uc)->uc_flags = ((uc)->uc_flags | _UC_FPU) & ~_UC_USER;	\
	} while (/*CONSTCOND*/0)

#define	PTHREAD_UCONTEXT_XREG_FLAG	_UC_FXSAVE

#define	PTHREAD_UCONTEXT_TO_XREG(xreg, uc)				\
	(void)memcpy((xreg),						\
	    (uc)->uc_mcontext.__fpregs.__fp_reg_set.__fp_xmm_state.__fp_xmm, \
	    sizeof(struct xmmregs))

#define	PTHREAD_XREG_TO_UCONTEXT(uc, xreg) do {				\
	(void)memcpy(							\
	    (uc)->uc_mcontext.__fpregs.__fp_reg_set.__fp_xmm_state.__fp_xmm, \
	    (xreg),							\
	    sizeof(struct xmmregs));					\
	(uc)->uc_flags = ((uc)->uc_flags | _UC_FXSAVE) & ~_UC_USER;	\
	} while (/*CONSTCOND*/0)

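/*
 * "rep; nop" encodes the PAUSE instruction, a spin-wait hint that lets a
 * hyperthreaded sibling make progress while this thread busy-waits; the
 * "memory" clobber keeps the compiler from caching values across the pause.
 */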
#define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
#define	PTHREAD__HAVE_ATOMIC

#endif	/* _LIB_PTHREAD_I386_MD_H */