/* pthread_md.h revision 1.17.2.1 (NetBSD, arch: i386) */
      1 /*	$NetBSD: pthread_md.h,v 1.17.2.1 2011/02/08 16:19:02 bouyer Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifndef _LIB_PTHREAD_I386_MD_H
     33 #define _LIB_PTHREAD_I386_MD_H
     34 
     35 #include <sys/ucontext.h>
     36 #include <ucontext.h>
     37 
/*
 * Return (a sample of) the caller's stack pointer.  Used by the
 * library to locate the current thread's stack.
 *
 * NOTE(review): the asm statement has no inputs and is not volatile,
 * so the compiler may CSE/hoist repeated calls within one function;
 * callers apparently need only an approximate SP value — confirm.
 */
static inline unsigned long
pthread__sp(void)
{
	unsigned long ret;
	/* "=g": any general register or memory slot may receive %esp. */
	__asm("movl %%esp, %0" : "=g" (ret));

	return ret;
}
     46 
/* Stack-pointer slot of a ucontext: i386 keeps the user SP in _REG_UESP. */
#define pthread__uc_sp(ucp) ((ucp)->uc_mcontext.__gregs[_REG_UESP])
     48 
/*
 * Set initial, sane values for registers whose values aren't just
 * "don't care": copy the segment registers and EFLAGS from the
 * current context as obtained by getcontext().
 *
 * We use the current context instead of a guessed one because we cannot
 * assume how the GDT entries are ordered:  what is true on i386 is not
 * true anymore on amd64.
 *
 * No semicolon after the do/while(0) wrapper: the caller supplies it,
 * so the macro behaves as a single statement and is safe inside an
 * unbraced if/else.
 */
#define _INITCONTEXT_U_MD(ucp)						\
	do {								\
		ucontext_t ucur;					\
		(void)getcontext(&ucur);				\
		(ucp)->uc_mcontext.__gregs[_REG_GS] =			\
		    ucur.uc_mcontext.__gregs[_REG_GS];			\
		(ucp)->uc_mcontext.__gregs[_REG_FS] =			\
		    ucur.uc_mcontext.__gregs[_REG_FS];			\
		(ucp)->uc_mcontext.__gregs[_REG_ES] =			\
		    ucur.uc_mcontext.__gregs[_REG_ES];			\
		(ucp)->uc_mcontext.__gregs[_REG_DS] =			\
		    ucur.uc_mcontext.__gregs[_REG_DS];			\
		(ucp)->uc_mcontext.__gregs[_REG_CS] =			\
		    ucur.uc_mcontext.__gregs[_REG_CS];			\
		(ucp)->uc_mcontext.__gregs[_REG_SS] =			\
		    ucur.uc_mcontext.__gregs[_REG_SS];			\
		(ucp)->uc_mcontext.__gregs[_REG_EFL] =			\
		    ucur.uc_mcontext.__gregs[_REG_EFL];			\
	} while (/*CONSTCOND*/0)
     76 
/*
 * Spin-wait hint: "rep; nop" encodes the x86 PAUSE instruction, which
 * lets SMT/HyperThreaded CPUs yield pipeline resources while busy-
 * waiting.  The "memory" clobber forces the compiler to reload the
 * spun-on variable on each iteration.
 */
#define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
/* Thread-register (%gs-based) fast path is disabled on this port. */
/*	#define	PTHREAD__HAVE_THREADREG	*/

/* x86 locked RMW instructions are full barriers; no extra membars needed. */
#define	PTHREAD__ATOMIC_IS_MEMBAR
     82 
/*
 * Return the calling thread's pthread_t by reading the word at %gs:0.
 *
 * NOTE(review): assumes the per-thread segment pointed at by %gs has
 * the thread's self pointer at offset 0 — confirm against the TCB
 * layout set up by the LWP private-area code.
 *
 * __const__: the value never changes for a given thread, so the
 * compiler is free to CSE repeated calls.
 */
static inline pthread_t
#ifdef __GNUC__
__attribute__ ((__const__))
#endif
pthread__threadreg_get(void)
{
	pthread_t self;

	__asm volatile("movl %%gs:0, %0"
		: "=r" (self)
		:);

	return self;
}
     97 
/*
 * Atomic compare-and-swap on a pointer-sized word.
 *
 * If *ptr == old, store new into *ptr.  Returns the previous contents
 * of *ptr (CMPXCHG always leaves that in %eax); the swap occurred iff
 * the return value equals old.  The LOCK prefix makes the operation
 * atomic across CPUs and acts as a full memory barrier
 * (cf. PTHREAD__ATOMIC_IS_MEMBAR above).
 */
static inline void *
_atomic_cas_ptr(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *cast = ptr;
	void *ret;

	/* "0" (old): old is preloaded into %eax, the comparand. */
	__asm __volatile ("lock; cmpxchgl %2, %1"
		: "=a" (ret), "=m" (*cast)
		: "r" (new), "m" (*cast), "0" (old));

	return ret;
}
    110 
/*
 * Non-interlocked ("_ni") variant of _atomic_cas_ptr: same semantics,
 * but without the LOCK prefix, so the exchange is atomic only with
 * respect to the current CPU — NOTE(review): presumably for paths
 * where cross-CPU atomicity is not required; confirm with callers.
 * Returns the previous contents of *ptr.
 */
static inline void *
_atomic_cas_ptr_ni(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *cast = ptr;
	void *ret;

	__asm __volatile ("cmpxchgl %2, %1"
		: "=a" (ret), "=m" (*cast)
		: "r" (new), "m" (*cast), "0" (old));

	return ret;
}
    123 
    124 #endif /* _LIB_PTHREAD_I386_MD_H */
    125