/* pthread_int.h, revision 1.70.6.2 (NetBSD libpthread internal header) */
      1  1.70.6.2  ad /*	$NetBSD: pthread_int.h,v 1.70.6.2 2008/06/28 10:29:38 ad Exp $	*/
      2  1.70.6.2  ad 
      3  1.70.6.2  ad /*-
      4  1.70.6.2  ad  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  1.70.6.2  ad  * All rights reserved.
      6  1.70.6.2  ad  *
      7  1.70.6.2  ad  * This code is derived from software contributed to The NetBSD Foundation
      8  1.70.6.2  ad  * by Nathan J. Williams and Andrew Doran.
      9  1.70.6.2  ad  *
     10  1.70.6.2  ad  * Redistribution and use in source and binary forms, with or without
     11  1.70.6.2  ad  * modification, are permitted provided that the following conditions
     12  1.70.6.2  ad  * are met:
     13  1.70.6.2  ad  * 1. Redistributions of source code must retain the above copyright
     14  1.70.6.2  ad  *    notice, this list of conditions and the following disclaimer.
     15  1.70.6.2  ad  * 2. Redistributions in binary form must reproduce the above copyright
     16  1.70.6.2  ad  *    notice, this list of conditions and the following disclaimer in the
     17  1.70.6.2  ad  *    documentation and/or other materials provided with the distribution.
     18  1.70.6.2  ad  *
     19  1.70.6.2  ad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  1.70.6.2  ad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  1.70.6.2  ad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  1.70.6.2  ad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  1.70.6.2  ad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  1.70.6.2  ad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  1.70.6.2  ad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  1.70.6.2  ad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  1.70.6.2  ad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  1.70.6.2  ad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  1.70.6.2  ad  * POSSIBILITY OF SUCH DAMAGE.
     30  1.70.6.2  ad  */
     31  1.70.6.2  ad 
     32  1.70.6.2  ad /*
     33  1.70.6.2  ad  * NOTE: when changing anything in this file, please ensure that
     34  1.70.6.2  ad  * libpthread_dbg still compiles.
     35  1.70.6.2  ad  */
     36  1.70.6.2  ad 
     37  1.70.6.2  ad #ifndef _LIB_PTHREAD_INT_H
     38  1.70.6.2  ad #define _LIB_PTHREAD_INT_H
     39  1.70.6.2  ad 
     40  1.70.6.2  ad /* #define PTHREAD__DEBUG */
     41  1.70.6.2  ad #define ERRORCHECK
     42  1.70.6.2  ad 
     43  1.70.6.2  ad #include "pthread_types.h"
     44  1.70.6.2  ad #include "pthread_queue.h"
     45  1.70.6.2  ad #include "pthread_md.h"
     46  1.70.6.2  ad 
     47  1.70.6.2  ad /* Need to use libc-private names for atomic operations. */
     48  1.70.6.2  ad #include "../../common/lib/libc/atomic/atomic_op_namespace.h"
     49  1.70.6.2  ad 
     50  1.70.6.2  ad #include <sys/atomic.h>
     51  1.70.6.2  ad #include <sys/tree.h>
     52  1.70.6.2  ad 
     53  1.70.6.2  ad #include <lwp.h>
     54  1.70.6.2  ad #include <signal.h>
     55  1.70.6.2  ad 
     56  1.70.6.2  ad #ifdef __GNUC__
     57  1.70.6.2  ad #define	PTHREAD_HIDE	__attribute__ ((visibility("hidden")))
     58  1.70.6.2  ad #else
     59  1.70.6.2  ad #define	PTHREAD_HIDE	/* nothing */
     60  1.70.6.2  ad #endif
     61  1.70.6.2  ad 
     62  1.70.6.2  ad #define PTHREAD_KEYS_MAX 	256
     63  1.70.6.2  ad #define	PTHREAD__UNPARK_MAX	32
     64  1.70.6.2  ad 
/*
 * Cancellation cleanup handler record, pushed/popped on the per-thread
 * pt_cleanup_stack.  The size of this structure needs to be no larger
 * than struct __pthread_cleanup_store, defined in pthread.h.
 */
struct pt_clean_t {
	PTQ_ENTRY(pt_clean_t)	ptc_next;	/* Link on pt_cleanup_stack */
	void	(*ptc_cleanup)(void *);		/* Cleanup handler */
	void	*ptc_arg;			/* Argument passed to handler */
};
     74  1.70.6.2  ad 
/* Private data for pthread_attr_t */
struct pthread_attr_private {
	char ptap_name[PTHREAD_MAX_NAMELEN_NP];	/* Thread name format string */
	void *ptap_namearg;			/* Argument for the name format */
	void *ptap_stackaddr;			/* User-supplied stack, if any */
	size_t ptap_stacksize;			/* Requested stack size */
	size_t ptap_guardsize;			/* Requested guard area size */
	struct sched_param ptap_sp;		/* Scheduling parameters */
	int ptap_policy;			/* Scheduling policy */
};
     85  1.70.6.2  ad 
/*
 * Table of spin lock operations; a copy is cached in each thread's
 * pt_lockops (to avoid PIC overhead) and a global pointer is exported
 * as pthread__lock_ops below.
 */
struct pthread_lock_ops {
	void	(*plo_init)(__cpu_simple_lock_t *);	/* Initialize lock */
	int	(*plo_try)(__cpu_simple_lock_t *);	/* Try-acquire, no spin */
	void	(*plo_unlock)(__cpu_simple_lock_t *);	/* Release lock */
	void	(*plo_lock)(__cpu_simple_lock_t *);	/* Acquire lock */
};
     92  1.70.6.2  ad 
/*
 * Per-thread control block.  NOTE: libpthread_dbg also compiles
 * against this layout (see note at top of file) — do not reorder,
 * remove, or re-align members without updating it.
 */
struct	__pthread_st {
	pthread_t	pt_self;	/* Must be first. */
	unsigned int	pt_magic;	/* Magic number (PT_MAGIC / PT_DEAD) */
	int		pt_state;	/* running, blocked, etc. (PT_STATE_*) */
	pthread_mutex_t	pt_lock;	/* lock on state */
	int		pt_flags;	/* see PT_FLAG_* below */
	int		pt_cancel;	/* Deferred cancellation */
	int		pt_errno;	/* Thread-specific errno. */
	stack_t		pt_stack;	/* Our stack */
	void		*pt_exitval;	/* Read by pthread_join() */
	char		*pt_name;	/* Thread's name, set by the app. */
	int		pt_willpark;	/* About to park */
	lwpid_t		pt_unpark;	/* Unpark this when parking */
	struct pthread_lock_ops pt_lockops;/* Cached to avoid PIC overhead */
	pthread_mutex_t	*pt_droplock;	/* Drop this lock if cancelled */
	pthread_cond_t	pt_joiners;	/* Threads waiting to join. */

	/* Threads to defer waking, usually until pthread_mutex_unlock(). */
	lwpid_t		pt_waiters[PTHREAD__UNPARK_MAX];
	size_t		pt_nwaiters;	/* Valid entries in pt_waiters[] */

	/* Stack of cancellation cleanup handlers and their arguments */
	PTQ_HEAD(, pt_clean_t)	pt_cleanup_stack;

	/* LWP ID and entry on the list of all threads. */
	lwpid_t		pt_lid;
	RB_ENTRY(__pthread_st) pt_alltree;
	PTQ_ENTRY(__pthread_st) pt_allq;
	PTQ_ENTRY(__pthread_st)	pt_deadq;

	/*
	 * General synchronization data.  We try to align, as threads
	 * on other CPUs will access this data frequently.
	 */
	int		pt_dummy1 __aligned(128);	/* Cache-line pad */
	struct lwpctl 	*pt_lwpctl;	/* Kernel/user comms area */
	volatile int	pt_blocking;	/* Blocking in userspace */
	volatile int	pt_rwlocked;	/* Handed rwlock successfully */
	volatile int	pt_signalled;	/* Received pthread_cond_signal() */
	volatile int	pt_mutexwait;	/* Waiting to acquire mutex */
	void * volatile pt_mutexnext;	/* Next thread in chain */
	void * volatile	pt_sleepobj;	/* Object slept on */
	PTQ_ENTRY(__pthread_st) pt_sleep;	/* Entry on sleep queue */
	void		(*pt_early)(void *);	/* NOTE(review): presumably an
						 * early-wakeup hook; confirm
						 * against pthread.c */
	int		pt_dummy2 __aligned(128);	/* Cache-line pad */

	/* Thread-specific data.  Large so it sits close to the end. */
	int		pt_havespecific;	/* Nonzero: pt_specific[] in use */
	void		*pt_specific[PTHREAD_KEYS_MAX];

	/*
	 * Context for thread creation.  At the end as it's cached
	 * and then only ever passed to _lwp_create().
	 */
	ucontext_t	pt_uc;
};
    149  1.70.6.2  ad 
/* Thread states (pt_state) */
#define PT_STATE_RUNNING	1
#define PT_STATE_ZOMBIE		5
#define PT_STATE_DEAD		6

/* Flag values (pt_flags) */

#define PT_FLAG_DETACHED	0x0001	/* Thread is detached */
#define PT_FLAG_CS_DISABLED	0x0004	/* Cancellation disabled */
#define PT_FLAG_CS_ASYNC	0x0008  /* Cancellation is async */
#define PT_FLAG_CS_PENDING	0x0010	/* Cancellation is pending */
#define PT_FLAG_SCOPE_SYSTEM	0x0040	/* System contention scope */
#define PT_FLAG_EXPLICIT_SCHED	0x0080	/* Not inheriting sched attrs */
#define PT_FLAG_SUSPENDED	0x0100	/* In the suspended queue */

/* pt_magic values for live and destroyed threads */
#define PT_MAGIC	0x11110001
#define PT_DEAD		0xDEAD0001

/* Attribute magic values (pthread_attr_t validity check) */
#define PT_ATTR_MAGIC	0x22220002
#define PT_ATTR_DEAD	0xDEAD0002
    170  1.70.6.2  ad 
    171  1.70.6.2  ad extern int	pthread__stacksize_lg;
    172  1.70.6.2  ad extern size_t	pthread__stacksize;
    173  1.70.6.2  ad extern vaddr_t	pthread__stackmask;
    174  1.70.6.2  ad extern vaddr_t	pthread__threadmask;
    175  1.70.6.2  ad extern int	pthread__nspins;
    176  1.70.6.2  ad extern int	pthread__concurrency;
    177  1.70.6.2  ad extern int 	pthread__osrev;
    178  1.70.6.2  ad extern int 	pthread__unpark_max;
    179  1.70.6.2  ad 
    180  1.70.6.2  ad /* Flag to be used in a ucontext_t's uc_flags indicating that
    181  1.70.6.2  ad  * the saved register state is "user" state only, not full
    182  1.70.6.2  ad  * trap state.
    183  1.70.6.2  ad  */
    184  1.70.6.2  ad #define _UC_USER_BIT		30
    185  1.70.6.2  ad #define _UC_USER		(1LU << _UC_USER_BIT)
    186  1.70.6.2  ad 
    187  1.70.6.2  ad /* Utility functions */
    188  1.70.6.2  ad void	pthread__unpark_all(pthread_queue_t *, pthread_t, pthread_mutex_t *)
    189  1.70.6.2  ad     PTHREAD_HIDE;
    190  1.70.6.2  ad void	pthread__unpark(pthread_queue_t *, pthread_t, pthread_mutex_t *)
    191  1.70.6.2  ad     PTHREAD_HIDE;
    192  1.70.6.2  ad int	pthread__park(pthread_t, pthread_mutex_t *, pthread_queue_t *,
    193  1.70.6.2  ad 		      const struct timespec *, int, const void *)
    194  1.70.6.2  ad 		      PTHREAD_HIDE;
    195  1.70.6.2  ad pthread_mutex_t *pthread__hashlock(volatile const void *) PTHREAD_HIDE;
    196  1.70.6.2  ad 
    197  1.70.6.2  ad /* Internal locking primitives */
    198  1.70.6.2  ad void	pthread__lockprim_init(void) PTHREAD_HIDE;
    199  1.70.6.2  ad void	pthread_lockinit(pthread_spin_t *) PTHREAD_HIDE;
    200  1.70.6.2  ad 
    201  1.70.6.2  ad static inline void pthread__spinlock(pthread_t, pthread_spin_t *)
    202  1.70.6.2  ad     __attribute__((__always_inline__));
    203  1.70.6.2  ad static inline void
    204  1.70.6.2  ad pthread__spinlock(pthread_t self, pthread_spin_t *lock)
    205  1.70.6.2  ad {
    206  1.70.6.2  ad 	if (__predict_true((*self->pt_lockops.plo_try)(lock)))
    207  1.70.6.2  ad 		return;
    208  1.70.6.2  ad 	(*self->pt_lockops.plo_lock)(lock);
    209  1.70.6.2  ad }
    210  1.70.6.2  ad 
    211  1.70.6.2  ad static inline int pthread__spintrylock(pthread_t, pthread_spin_t *)
    212  1.70.6.2  ad     __attribute__((__always_inline__));
    213  1.70.6.2  ad static inline int
    214  1.70.6.2  ad pthread__spintrylock(pthread_t self, pthread_spin_t *lock)
    215  1.70.6.2  ad {
    216  1.70.6.2  ad 	return (*self->pt_lockops.plo_try)(lock);
    217  1.70.6.2  ad }
    218  1.70.6.2  ad 
    219  1.70.6.2  ad static inline void pthread__spinunlock(pthread_t, pthread_spin_t *)
    220  1.70.6.2  ad     __attribute__((__always_inline__));
    221  1.70.6.2  ad static inline void
    222  1.70.6.2  ad pthread__spinunlock(pthread_t self, pthread_spin_t *lock)
    223  1.70.6.2  ad {
    224  1.70.6.2  ad 	(*self->pt_lockops.plo_unlock)(lock);
    225  1.70.6.2  ad }
    226  1.70.6.2  ad 
    227  1.70.6.2  ad extern const struct pthread_lock_ops *pthread__lock_ops;
    228  1.70.6.2  ad 
    229  1.70.6.2  ad int	pthread__simple_locked_p(__cpu_simple_lock_t *) PTHREAD_HIDE;
    230  1.70.6.2  ad #define	pthread__simple_lock_init(alp)	(*pthread__lock_ops->plo_init)(alp)
    231  1.70.6.2  ad #define	pthread__simple_lock_try(alp)	(*pthread__lock_ops->plo_try)(alp)
    232  1.70.6.2  ad #define	pthread__simple_unlock(alp)	(*pthread__lock_ops->plo_unlock)(alp)
    233  1.70.6.2  ad 
    234  1.70.6.2  ad #ifndef _getcontext_u
    235  1.70.6.2  ad int	_getcontext_u(ucontext_t *) PTHREAD_HIDE;
    236  1.70.6.2  ad #endif
    237  1.70.6.2  ad #ifndef _setcontext_u
    238  1.70.6.2  ad int	_setcontext_u(const ucontext_t *) PTHREAD_HIDE;
    239  1.70.6.2  ad #endif
    240  1.70.6.2  ad #ifndef _swapcontext_u
    241  1.70.6.2  ad int	_swapcontext_u(ucontext_t *, const ucontext_t *) PTHREAD_HIDE;
    242  1.70.6.2  ad #endif
    243  1.70.6.2  ad 
    244  1.70.6.2  ad void	pthread__testcancel(pthread_t) PTHREAD_HIDE;
    245  1.70.6.2  ad int	pthread__find(pthread_t) PTHREAD_HIDE;
    246  1.70.6.2  ad 
    247  1.70.6.2  ad #ifndef PTHREAD_MD_INIT
    248  1.70.6.2  ad #define PTHREAD_MD_INIT
    249  1.70.6.2  ad #endif
    250  1.70.6.2  ad 
    251  1.70.6.2  ad #ifndef _INITCONTEXT_U_MD
    252  1.70.6.2  ad #define _INITCONTEXT_U_MD(ucp)
    253  1.70.6.2  ad #endif
    254  1.70.6.2  ad 
    255  1.70.6.2  ad #define _INITCONTEXT_U(ucp) do {					\
    256  1.70.6.2  ad 	(ucp)->uc_flags = _UC_CPU | _UC_STACK;				\
    257  1.70.6.2  ad 	_INITCONTEXT_U_MD(ucp)						\
    258  1.70.6.2  ad 	} while (/*CONSTCOND*/0)
    259  1.70.6.2  ad 
    260  1.70.6.2  ad /* Stack location of pointer to a particular thread */
    261  1.70.6.2  ad #define pthread__id(sp) \
    262  1.70.6.2  ad 	((pthread_t) (((vaddr_t)(sp)) & pthread__threadmask))
    263  1.70.6.2  ad 
    264  1.70.6.2  ad #ifdef PTHREAD__HAVE_THREADREG
    265  1.70.6.2  ad #define	pthread__self()		pthread__threadreg_get()
    266  1.70.6.2  ad #else
    267  1.70.6.2  ad #define pthread__self() 	(pthread__id(pthread__sp()))
    268  1.70.6.2  ad #endif
    269  1.70.6.2  ad 
/* Report an unreachable code path via pthread__assertfunc(). */
#define pthread__abort()						\
	pthread__assertfunc(__FILE__, __LINE__, __func__, "unreachable")

/* Internal consistency check; reports via pthread__assertfunc(). */
#define pthread__assert(e) do {						\
	if (__predict_false(!(e)))					\
       	       pthread__assertfunc(__FILE__, __LINE__, __func__, #e);	\
        } while (/*CONSTCOND*/0)

/*
 * Application-error check: if 'e' is false, report 'msg' via
 * pthread__errorfunc() and return 'err' from the CALLING function.
 * Note the hidden 'return' — only usable inside functions whose
 * return type matches 'err'.
 */
#define pthread__error(err, msg, e) do {				\
	if (__predict_false(!(e))) {					\
       	       pthread__errorfunc(__FILE__, __LINE__, __func__, msg);	\
	       return (err);						\
	} 								\
        } while (/*CONSTCOND*/0)
    284  1.70.6.2  ad 
    285  1.70.6.2  ad void	pthread__destroy_tsd(pthread_t) PTHREAD_HIDE;
    286  1.70.6.2  ad void	pthread__assertfunc(const char *, int, const char *, const char *)
    287  1.70.6.2  ad 			    PTHREAD_HIDE;
    288  1.70.6.2  ad void	pthread__errorfunc(const char *, int, const char *, const char *)
    289  1.70.6.2  ad 			   PTHREAD_HIDE;
    290  1.70.6.2  ad char	*pthread__getenv(const char *) PTHREAD_HIDE;
    291  1.70.6.2  ad void	pthread__cancelled(void) PTHREAD_HIDE;
    292  1.70.6.2  ad void	pthread__mutex_deferwake(pthread_t, pthread_mutex_t *) PTHREAD_HIDE;
    293  1.70.6.2  ad int	pthread__checkpri(int) PTHREAD_HIDE;
    294  1.70.6.2  ad 
    295  1.70.6.2  ad #ifndef pthread__smt_pause
    296  1.70.6.2  ad #define	pthread__smt_pause()	/* nothing */
    297  1.70.6.2  ad #endif
    298  1.70.6.2  ad 
    299  1.70.6.2  ad /*
    300  1.70.6.2  ad  * Bits in the owner field of the lock that indicate lock state.  If the
    301  1.70.6.2  ad  * WRITE_LOCKED bit is clear, then the owner field is actually a count of
    302  1.70.6.2  ad  * the number of readers.
    303  1.70.6.2  ad  */
    304  1.70.6.2  ad #define	RW_HAS_WAITERS		0x01	/* lock has waiters */
    305  1.70.6.2  ad #define	RW_WRITE_WANTED		0x02	/* >= 1 waiter is a writer */
    306  1.70.6.2  ad #define	RW_WRITE_LOCKED		0x04	/* lock is currently write locked */
    307  1.70.6.2  ad #define	RW_UNUSED		0x08	/* currently unused */
    308  1.70.6.2  ad 
    309  1.70.6.2  ad #define	RW_FLAGMASK		0x0f
    310  1.70.6.2  ad 
    311  1.70.6.2  ad #define	RW_READ_COUNT_SHIFT	4
    312  1.70.6.2  ad #define	RW_READ_INCR		(1 << RW_READ_COUNT_SHIFT)
    313  1.70.6.2  ad #define	RW_THREAD		((uintptr_t)-RW_READ_INCR)
    314  1.70.6.2  ad #define	RW_OWNER(rw)		((rw)->rw_owner & RW_THREAD)
    315  1.70.6.2  ad #define	RW_COUNT(rw)		((rw)->rw_owner & RW_THREAD)
    316  1.70.6.2  ad #define	RW_FLAGS(rw)		((rw)->rw_owner & ~RW_THREAD)
    317  1.70.6.2  ad 
    318  1.70.6.2  ad #endif /* _LIB_PTHREAD_INT_H */
    319