/*	$NetBSD: pthread_int.h,v 1.4 2003/01/25 00:43:38 nathanw Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LIB_PTHREAD_INT_H
#define _LIB_PTHREAD_INT_H

#define PTHREAD__DEBUG
#define ERRORCHECK

#include "pthread_types.h"
#include "pthread_queue.h"
#include "pthread_debug.h"
#include "pthread_md.h"

#include <sa.h>
#include <signal.h>

/*
 * The size of this structure must be no larger than that of struct
 * __pthread_cleanup_store, defined in pthread.h.
 */
struct pt_clean_t {
	PTQ_ENTRY(pt_clean_t)	ptc_next;
	void	(*ptc_cleanup)(void *);
	void	*ptc_arg;
};
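
/*
 * Illustrative sketch (not part of the original header): one way the
 * size constraint noted above could be enforced at compile time, using
 * the classic negative-array-size trick.  It assumes <pthread.h> is in
 * scope so that struct __pthread_cleanup_store is visible; the typedef
 * name is hypothetical.
 */
#if 0
#include <pthread.h>

typedef char pthread__cleanup_size_check[
    sizeof(struct pt_clean_t) <= sizeof(struct __pthread_cleanup_store) ?
    1 : -1];
#endif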

struct pt_alarm_t {
	PTQ_ENTRY(pt_alarm_t)	pta_next;
	pthread_spin_t	pta_lock;
	const struct timespec	*pta_time;
	void	(*pta_func)(void *);
	void	*pta_arg;
	int	pta_fired;
};

struct	pthread_st {
	unsigned int	pt_magic;
	/* Identifier, for debugging and for preventing recycling. */
	int		pt_num;

	int	pt_type;	/* normal, upcall, or idle */
	int	pt_state;	/* running, blocked, etc. */
	pthread_spin_t pt_statelock;	/* lock on pt_state */
	int	pt_flags;	/* see PT_FLAG_* below */
	int	pt_cancel;	/* Deferred cancellation */
	int	pt_spinlocks;	/* Number of spinlocks held. */
	int	pt_blockedlwp;	/* LWP/SA number when blocked */

	int	pt_errno;	/* Thread-specific errno. */

	/* Entry on the run queue */
	PTQ_ENTRY(pthread_st)	pt_runq;
	/* Entry on the list of all threads */
	PTQ_ENTRY(pthread_st)	pt_allq;
	/* Entry on the sleep queue (xxx should be same as run queue?) */
	PTQ_ENTRY(pthread_st)	pt_sleep;
	/* Object we're sleeping on */
	void	*pt_sleepobj;
	/* Queue we're sleeping on */
	struct pthread_queue_t		*pt_sleepq;
	/* Lock protecting that queue */
	pthread_spin_t		*pt_sleeplock;

	stack_t		pt_stack;	/* Our stack */
	ucontext_t	*pt_uc;		/* Saved context when we're stopped */

	sigset_t	pt_sigmask;	/* Signals we won't take. */
	sigset_t	pt_siglist;	/* Signals pending for us. */
	sigset_t	pt_sigblocked;	/* Signals delivered while blocked. */
	pthread_spin_t	pt_siglock;	/* Lock on above */

	void *		pt_exitval;	/* Read by pthread_join() */

	/* Stack of cancellation cleanup handlers and their arguments */
	PTQ_HEAD(, pt_clean_t)	pt_cleanup_stack;

	/* Other threads trying to pthread_join() us. */
	struct pthread_queue_t	pt_joiners;
	/* Lock for above, and for changing pt_state to ZOMBIE or DEAD,
	 * and for setting the DETACHED flag.
	 */
	pthread_spin_t	pt_join_lock;

	/* Thread we were going to switch to before we were preempted
	 * ourselves. Will be used by the upcall that's continuing us.
	 */
	pthread_t	pt_switchto;
	ucontext_t*	pt_switchtouc;

	/* The context we saved in pthread__locked_switch but which
	 * was trashed when we were preempted before switching stacks.
	 */
	ucontext_t*	pt_sleepuc;

	/* Threads that are preempted with spinlocks held will be
	 * continued until they unlock their spinlock. When they do
	 * so, they should jump ship to the thread pointed to by
	 * pt_next.
	 */
	pthread_t	pt_next;

	/* The upcall that is continuing this thread */
	pthread_t	pt_parent;

	/* A queue lock that this thread held while trying to
	 * context switch to another thread.
	 */
	pthread_spin_t*	pt_heldlock;

	/* Thread-specific data */
	void*		pt_specific[PTHREAD_KEYS_MAX];

#ifdef PTHREAD__DEBUG
	int	blocks;
	int	preempts;
	int	rescheds;
#endif
};

struct pthread_lock_ops {
	void	(*plo_init)(__cpu_simple_lock_t *);
	int	(*plo_try)(__cpu_simple_lock_t *);
	void	(*plo_unlock)(__cpu_simple_lock_t *);
};

/* Thread types */
#define PT_THREAD_NORMAL	1
#define PT_THREAD_UPCALL	2
#define PT_THREAD_IDLE		3

/* Thread states */
#define PT_STATE_RUNNING	1
#define PT_STATE_RUNNABLE	2
#define PT_STATE_BLOCKED_SYS	3	/* Blocked in the kernel */
#define PT_STATE_BLOCKED_QUEUE	4	/* Blocked on a sleep queue */
#define PT_STATE_ZOMBIE		5
#define PT_STATE_DEAD		6
#define PT_STATE_RECYCLABLE	7

/* Flag values */

#define PT_FLAG_DETACHED	0x0001	/* Thread is detached */
#define PT_FLAG_IDLED		0x0002
#define PT_FLAG_CS_DISABLED	0x0004	/* Cancellation disabled */
#define PT_FLAG_CS_ASYNC	0x0008	/* Cancellation is async */
#define PT_FLAG_CS_PENDING	0x0010	/* Cancellation is pending */
#define PT_FLAG_SIGDEFERRED	0x0020	/* There are signals to take */

#define PT_MAGIC	0x11110001
#define PT_DEAD		0xDEAD0001

#define PT_ATTR_MAGIC	0x22220002
#define PT_ATTR_DEAD	0xDEAD0002

#define PT_STACKSIZE	(1<<18)
#define PT_STACKMASK	(PT_STACKSIZE-1)

#define PT_UPCALLSTACKS	16

#define PT_ALARMTIMER_MAGIC	0x88880010
#define PT_RRTIMER_MAGIC	0x88880020
#define NIDLETHREADS	4
#define IDLESPINS	1000

/* Flag to be used in a ucontext_t's uc_flags indicating that
 * the saved register state is "user" state only, not full
 * trap state.
 */
#define _UC_USER_BIT		30
#define _UC_USER		(1LU << _UC_USER_BIT)
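
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper showing how the _UC_USER flag distinguishes a context saved by
 * _getcontext_u()/_swapcontext_u() from a full trap-state context
 * supplied by the kernel.
 */
#if 0
static int
pthread__uc_is_user(const ucontext_t *ucp)
{

	/* Nonzero if only "user" register state was saved. */
	return (ucp->uc_flags & _UC_USER) != 0;
}
#endif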

void	pthread_init(void)  __attribute__ ((__constructor__));

/* Utility functions */

/* Set up/clean up a thread's basic state. */
void	pthread__initthread(pthread_t self, pthread_t t);

/* Go do something else. Don't go back on the run queue */
void	pthread__block(pthread_t self, pthread_spin_t* queuelock);
/* Put a thread back on the run queue */
void	pthread__sched(pthread_t self, pthread_t thread);
void	pthread__sched_idle(pthread_t self, pthread_t thread);
void	pthread__sched_idle2(pthread_t self);

void	pthread__sched_bulk(pthread_t self, pthread_t qhead);

void	pthread__idle(void);

/* Get the next thread */
pthread_t pthread__next(pthread_t self);

int	pthread__stackalloc(pthread_t *t);
void	pthread__initmain(pthread_t *t);

void	pthread__sa_start(void);
void	pthread__sa_recycle(pthread_t old, pthread_t new);

/* Alarm code */
void	pthread__alarm_init(void);
void	pthread__alarm_add(pthread_t, struct pt_alarm_t *,
    const struct timespec *, void (*)(void *), void *);
void	pthread__alarm_del(pthread_t, struct pt_alarm_t *);
int	pthread__alarm_fired(struct pt_alarm_t *);
void	pthread__alarm_process(pthread_t self, void *arg);
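
/*
 * Illustrative sketch (not part of the original header): roughly how a
 * timed wait might use the alarm interface.  The callback, its argument,
 * and the function names are made up for illustration; the real callers
 * live in the condition-variable and sleep code.
 */
#if 0
static void
example_alarm_cb(void *arg)
{

	/* Note for the sleeper that the timeout fired. */
	*(int *)arg = 1;
}

static void
example_timedsleep(pthread_t self, const struct timespec *abstime)
{
	struct pt_alarm_t alarm;
	int expired = 0;

	pthread__alarm_add(self, &alarm, abstime, example_alarm_cb, &expired);
	/* ... block here until woken or until the alarm fires ... */
	pthread__alarm_del(self, &alarm);
	if (pthread__alarm_fired(&alarm) || expired) {
		/* Handle the timeout case, e.g. return ETIMEDOUT. */
	}
}
#endif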

/* Internal locking primitives */
void	pthread__lockprim_init(void);
void	pthread_lockinit(pthread_spin_t *lock);
void	pthread_spinlock(pthread_t thread, pthread_spin_t *lock);
int	pthread_spintrylock(pthread_t thread, pthread_spin_t *lock);
void	pthread_spinunlock(pthread_t thread, pthread_spin_t *lock);

extern const struct pthread_lock_ops *pthread__lock_ops;

#define	pthread__simple_lock_init(alp)	(*pthread__lock_ops->plo_init)(alp)
#define	pthread__simple_lock_try(alp)	(*pthread__lock_ops->plo_try)(alp)
#define	pthread__simple_unlock(alp)	(*pthread__lock_ops->plo_unlock)(alp)
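
/*
 * Illustrative sketch (not part of the original header): a minimal
 * spin-acquire loop over the machine-dependent lock ops, assuming
 * plo_try returns nonzero on success.  The real pthread_spinlock()
 * additionally tracks pt_spinlocks (see struct pthread_st above).
 */
#if 0
static void
example_spin_acquire(__cpu_simple_lock_t *alp)
{

	/* Busy-wait until the try succeeds. */
	while (pthread__simple_lock_try(alp) == 0)
		continue;
}
#endif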

#ifndef _getcontext_u
int	_getcontext_u(ucontext_t *);
#endif
#ifndef _setcontext_u
int	_setcontext_u(const ucontext_t *);
#endif
#ifndef _swapcontext_u
int	_swapcontext_u(ucontext_t *, const ucontext_t *);
#endif

void	pthread__testcancel(pthread_t self);
int	pthread__find(pthread_t self, pthread_t target);

#ifndef PTHREAD_MD_INIT
#define PTHREAD_MD_INIT
#endif

#ifndef _INITCONTEXT_U_MD
#define _INITCONTEXT_U_MD(ucp)
#endif

#define _INITCONTEXT_U(ucp) do {					\
	(ucp)->uc_flags = _UC_CPU | _UC_STACK;				\
	_INITCONTEXT_U_MD(ucp)						\
	} while (/*CONSTCOND*/0)
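
/*
 * Illustrative sketch (not part of the original header): roughly how
 * _INITCONTEXT_U might be used when preparing a context for a new
 * thread.  The helper name is hypothetical and the makecontext()
 * argument passing is deliberately simplified; the real setup in
 * pthread_create() may differ.
 */
#if 0
#include <ucontext.h>

static void
example_makecontext(pthread_t t, void (*func)(void *), void *arg)
{

	_INITCONTEXT_U(t->pt_uc);
	t->pt_uc->uc_stack = t->pt_stack;
	t->pt_uc->uc_link = NULL;
	makecontext(t->pt_uc, (void (*)(void))func, 1, arg);
}
#endif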

#ifdef __PTHREAD_SIGNAL_PRIVATE

/*
 * Macros for converting from ucontext to sigcontext and vice-versa.
 * Note that going from sigcontext->ucontext is only safe for a
 * sigcontext that was first created from a ucontext.
 *
 * Arch-specific code can override this, if necessary.  It may also
 * be necessary for arch-specific code to include extra info along with
 * the sigcontext.
 */
#ifndef PTHREAD_SIGCONTEXT_EXTRA
#define	PTHREAD_SIGCONTEXT_EXTRA
#endif

struct pthread__sigcontext {
	struct sigcontext	psc_context;
	PTHREAD_SIGCONTEXT_EXTRA
};

#ifndef PTHREAD_UCONTEXT_TO_SIGCONTEXT
#define	PTHREAD_UCONTEXT_TO_SIGCONTEXT(mask, uc, psc)			\
do {									\
	(uc)->uc_sigmask = *(mask);					\
	/*								\
	 * XXX We may want to check for _UC_USER here, do a		\
	 * XXX _INITCONTEXT_U_MD(), and clear _UC_USER on such		\
	 * XXX contexts before converting to a sigcontext, thus	\
	 * XXX allowing signal handlers to modify the non-_UC_USER	\
	 * XXX registers.  Hazy territory; ignore it for now.		\
	 */								\
	_UCONTEXT_TO_SIGCONTEXT((uc), &(psc)->psc_context);		\
} while (/*CONSTCOND*/0)

#define	PTHREAD_SIGCONTEXT_TO_UCONTEXT(psc, uc)				\
do {									\
	_SIGCONTEXT_TO_UCONTEXT(&(psc)->psc_context, (uc));		\
	(uc)->uc_flags &= ~_UC_SIGMASK;					\
} while (/*CONSTCOND*/0)
#else
void	pthread__ucontext_to_sigcontext(const sigset_t *, ucontext_t *,
	    struct pthread__sigcontext *);
void	pthread__sigcontext_to_ucontext(const struct pthread__sigcontext *,
	    ucontext_t *);
#endif /* PTHREAD_UCONTEXT_TO_SIGCONTEXT */
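
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * round trip as signal-delivery code might perform it, assuming the
 * default macro versions above (an arch override would call the
 * pthread__ucontext_to_sigcontext() functions instead).
 */
#if 0
static void
example_deliver(pthread_t t, const sigset_t *mask)
{
	struct pthread__sigcontext psc;

	/* Build a sigcontext from the thread's saved ucontext. */
	PTHREAD_UCONTEXT_TO_SIGCONTEXT(mask, t->pt_uc, &psc);
	/* ... run the handler with &psc.psc_context ... */
	/* Convert back; only sigcontexts created this way may safely
	 * be converted back to ucontexts (see the note above). */
	PTHREAD_SIGCONTEXT_TO_UCONTEXT(&psc, t->pt_uc);
}
#endif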

#endif /* __PTHREAD_SIGNAL_PRIVATE */

/* Stack location of pointer to a particular thread */
#define pthread__id(sp) \
	((pthread_t) (((vaddr_t)(sp)) & ~PT_STACKMASK))

#define pthread__self() (pthread__id(pthread__sp()))
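
/*
 * Worked example (not part of the original header): thread stacks are
 * PT_STACKSIZE (256 KB) regions aligned to PT_STACKSIZE, with the
 * thread structure stored at the aligned base, so clearing the low bits
 * of any stack address inside the thread recovers the thread pointer.
 * With a 32-bit vaddr_t:
 *
 *	sp              = 0x2057f9a4
 *	~PT_STACKMASK   = 0xfffc0000
 *	sp & ~mask      = 0x20540000  == (vaddr_t)pthread__self()
 */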

/* These three routines are defined in processor-specific code. */
void	pthread__upcall_switch(pthread_t self, pthread_t next);
void	pthread__switch(pthread_t self, pthread_t next);
void	pthread__locked_switch(pthread_t self, pthread_t next,
    pthread_spin_t *lock);

void	pthread__signal_init(void);

void	pthread__signal(pthread_t self, pthread_t t, int sig, int code);
void	pthread__deliver_signal(pthread_t self, pthread_t t, int sig, int code);
void	pthread__signal_deferred(pthread_t self, pthread_t t);

void	pthread__destroy_tsd(pthread_t self);


#endif /* _LIB_PTHREAD_INT_H */