/*	$NetBSD: pthread_md.h,v 1.10.8.2 2008/04/28 20:23:03 martin Exp $	*/
2
3 /*-
4 * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Adapted for x86_64 by fvdl (at) NetBSD.org
32 */
33
34 #ifndef _LIB_PTHREAD_X86_64_MD_H
35 #define _LIB_PTHREAD_X86_64_MD_H
36
37 #include <sys/ucontext.h>
38
/*
 * Return the caller's current stack pointer (%rsp) as a long.
 */
static inline long
pthread__sp(void)
{
	long sp;

	__asm("movq %%rsp, %0" : "=r" (sp));
	return sp;
}
47
/*
 * Accessors for the saved stack pointer and program counter of a
 * ucontext_t, as lvalues into its general-register array.
 */
#define pthread__uc_sp(ucp) ((ucp)->uc_mcontext.__gregs[_REG_URSP])
#define pthread__uc_pc(ucp) ((ucp)->uc_mcontext.__gregs[_REG_RIP])
50
51 /*
52 * Set initial, sane values for registers whose values aren't just
53 * "don't care".
54 * 0x23 is GSEL(GUDATA_SEL, SEL_UPL), and
55 * 0x1b is GSEL(GUCODE_SEL, SEL_UPL).
56 * 0x202 is PSL_USERSET.
57 */
/*
 * Initialize the machine-dependent registers of a fresh ucontext:
 * flat user segment selectors plus a sane rflags value (see the
 * selector-value comment above).
 *
 * NOTE(review): this macro expands to a comma expression that already
 * ends in a semicolon, so `_INITCONTEXT_U_MD(ucp);` yields an extra
 * empty statement and the macro cannot sit in an unbraced if/else —
 * verify call sites before changing it.
 */
#define _INITCONTEXT_U_MD(ucp) \
	(ucp)->uc_mcontext.__gregs[_REG_GS] = 0x23, /* GUDATA_SEL */ \
	(ucp)->uc_mcontext.__gregs[_REG_FS] = 0x23, \
	(ucp)->uc_mcontext.__gregs[_REG_ES] = 0x23, \
	(ucp)->uc_mcontext.__gregs[_REG_DS] = 0x23, \
	(ucp)->uc_mcontext.__gregs[_REG_CS] = 0x1b, /* GUCODE_SEL */ \
	(ucp)->uc_mcontext.__gregs[_REG_SS] = 0x23, \
	(ucp)->uc_mcontext.__gregs[_REG_RFL] = 0x202; /* PSL_USERSET */
66
/*
 * Usable stack space below the ucontext_t.
 * See comment in pthread_switch.S about STACK_SWITCH.
 */
#define STACKSPACE	64	/* room for 8 long values */
72
73 /*
74 * Conversions between struct reg and struct mcontext. Used by
75 * libpthread_dbg.
76 */
77
/*
 * Copy the general registers out of a ucontext_t into a struct reg
 * (used by libpthread_dbg).
 *
 * Wrapped in do/while(0) so the macro behaves as a single statement
 * (the previous form carried its own trailing semicolon, which broke
 * unbraced if/else call sites), and the `reg` argument is
 * parenthesized against operator-precedence surprises — matching the
 * style of PTHREAD_REG_TO_UCONTEXT below.
 */
#define PTHREAD_UCONTEXT_TO_REG(reg, uc) do { \
	(void)memcpy((reg), (uc)->uc_mcontext.__gregs, \
	    _NGREG * sizeof(long)); \
} while (/*CONSTCOND*/0)
80
/*
 * Copy a struct reg back into a ucontext_t's general registers and
 * flag the context as carrying valid CPU state (used by
 * libpthread_dbg).  Setting _UC_CPU and clearing _UC_USER tells
 * consumers the register area is authoritative.
 *
 * The `reg` argument is now parenthesized, and the memcpy result is
 * cast to void for consistency with the FPREG macros below.
 */
#define PTHREAD_REG_TO_UCONTEXT(uc, reg) do { \
	(void)memcpy((uc)->uc_mcontext.__gregs, (reg), \
	    _NGREG * sizeof(long)); \
	(uc)->uc_flags = ((uc)->uc_flags | _UC_CPU) & ~_UC_USER; \
} while (/*CONSTCOND*/0)
85
86
/*
 * Copy the FPU state out of a ucontext_t into a struct fpreg
 * (FXSAVE-format area; used by libpthread_dbg).
 */
#define PTHREAD_UCONTEXT_TO_FPREG(freg, uc) \
	(void)memcpy(&(freg)->fxstate, \
	(uc)->uc_mcontext.__fpregs, sizeof(struct fpreg))
90
/*
 * Copy a struct fpreg back into a ucontext_t's FPU area and flag the
 * context as carrying valid FPU state (_UC_FPU set, _UC_USER
 * cleared).  Counterpart of PTHREAD_UCONTEXT_TO_FPREG.
 */
#define PTHREAD_FPREG_TO_UCONTEXT(uc, freg) do { \
	(void)memcpy( \
	(uc)->uc_mcontext.__fpregs, \
	&(freg)->fxstate, sizeof(struct fpreg)); \
	/*LINTED precision loss */ \
	(uc)->uc_flags = ((uc)->uc_flags | _UC_FPU) & ~_UC_USER; \
} while (/*CONSTCOND*/0)
98
/*
 * Spin-wait hint for SMT siblings: "rep; nop" encodes the x86 PAUSE
 * instruction.  The "memory" clobber keeps the compiler from caching
 * values across iterations of a spin loop.
 */
#define pthread__smt_pause() __asm __volatile("rep; nop" ::: "memory")
100
/*
 * Don't need additional memory barriers: the atomic ops below are
 * expected to act as full barriers on this architecture.
 */
#define PTHREAD__ATOMIC_IS_MEMBAR
103
/*
 * Atomic compare-and-swap on a pointer-sized word.  If *ptr == old,
 * store new; always return the previous value of *ptr (so the caller
 * checks success with `ret == old`).
 *
 * The LOCK prefix makes the operation atomic across CPUs; the
 * "memory" clobber is required because PTHREAD__ATOMIC_IS_MEMBAR is
 * defined above — callers rely on this CAS acting as a full barrier,
 * so the compiler must not reorder other memory accesses around it.
 */
static inline void *
_atomic_cas_ptr(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *cast = ptr;
	void *ret;

	__asm __volatile ("lock; cmpxchgq %2, %1"
		: "=a" (ret), "=m" (*cast)
		: "r" (new), "m" (*cast), "0" (old)
		: "memory");

	return ret;
}
116
/*
 * Non-interlocked compare-and-swap on a pointer-sized word: same
 * contract as _atomic_cas_ptr (returns the previous value of *ptr,
 * stores new only if it equaled old), but without the LOCK prefix —
 * for use where interlocking is not required.
 */
static inline void *
_atomic_cas_ptr_ni(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *word = ptr;
	void *prev;

	__asm __volatile ("cmpxchgq %2, %1"
		: "=a" (prev), "=m" (*word)
		: "r" (new), "m" (*word), "0" (old));

	return prev;
}
129
130 #endif /* _LIB_PTHREAD_X86_64_MD_H */
131