      1  1.40.2.5        ad /*	$NetBSD: kern_lwp.c,v 1.40.2.5 2006/11/18 21:39:22 ad Exp $	*/
      2       1.2   thorpej 
      3       1.2   thorpej /*-
      4  1.40.2.2        ad  * Copyright (c) 2001, 2006 The NetBSD Foundation, Inc.
      5       1.2   thorpej  * All rights reserved.
      6       1.2   thorpej  *
      7       1.2   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8  1.40.2.2        ad  * by Nathan J. Williams, and Andrew Doran.
      9       1.2   thorpej  *
     10       1.2   thorpej  * Redistribution and use in source and binary forms, with or without
     11       1.2   thorpej  * modification, are permitted provided that the following conditions
     12       1.2   thorpej  * are met:
     13       1.2   thorpej  * 1. Redistributions of source code must retain the above copyright
     14       1.2   thorpej  *    notice, this list of conditions and the following disclaimer.
     15       1.2   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.2   thorpej  *    notice, this list of conditions and the following disclaimer in the
     17       1.2   thorpej  *    documentation and/or other materials provided with the distribution.
     18       1.2   thorpej  * 3. All advertising materials mentioning features or use of this software
     19       1.2   thorpej  *    must display the following acknowledgement:
     20       1.2   thorpej  *        This product includes software developed by the NetBSD
     21       1.2   thorpej  *        Foundation, Inc. and its contributors.
     22       1.2   thorpej  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23       1.2   thorpej  *    contributors may be used to endorse or promote products derived
     24       1.2   thorpej  *    from this software without specific prior written permission.
     25       1.2   thorpej  *
     26       1.2   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27       1.2   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28       1.2   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29       1.2   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30       1.2   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31       1.2   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32       1.2   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33       1.2   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34       1.2   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35       1.2   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36       1.2   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     37       1.2   thorpej  */
     38       1.9     lukem 
     39  1.40.2.4        ad /*
     40  1.40.2.4        ad  * Overview
     41  1.40.2.4        ad  *
     42  1.40.2.4        ad  *	Lightweight processes (LWPs) are the basic unit (or thread) of
     43  1.40.2.4        ad  *	execution within the kernel.  The core state of an LWP is described
     44  1.40.2.4        ad  *	by "struct lwp".
     45  1.40.2.4        ad  *
      46  1.40.2.4        ad  *	Each LWP is contained within a process (described by "struct proc").
      47  1.40.2.4        ad  *	Every process contains at least one LWP, but may contain more.  The
      48  1.40.2.4        ad  *	process describes attributes shared among all of its LWPs such as a
      49  1.40.2.4        ad  *	private address space, global execution state (stopped, active,
      50  1.40.2.4        ad  *	zombie, ...), signal disposition and so on.  On a multiprocessor
      51  1.40.2.4        ad  *	machine, multiple LWPs may be executing in the kernel simultaneously.
     52  1.40.2.4        ad  *
     53  1.40.2.4        ad  *	Note that LWPs differ from kernel threads (kthreads) in that kernel
     54  1.40.2.4        ad  *	threads are distinct processes (system processes) with no user space
     55  1.40.2.4        ad  *	component, which themselves may contain one or more LWPs.
     56  1.40.2.4        ad  *
     57  1.40.2.4        ad  * Execution states
     58  1.40.2.4        ad  *
     59  1.40.2.4        ad  *	At any given time, an LWP has overall state that is described by
     60  1.40.2.4        ad  *	lwp::l_stat.  The states are broken into two sets below.  The first
     61  1.40.2.4        ad  *	set is guaranteed to represent the absolute, current state of the
     62  1.40.2.4        ad  *	LWP:
     63  1.40.2.4        ad  *
     64  1.40.2.4        ad  * 	LSONPROC
     65  1.40.2.4        ad  *
     66  1.40.2.4        ad  * 		On processor: the LWP is executing on a CPU, either in the
     67  1.40.2.4        ad  * 		kernel or in user space.
     68  1.40.2.4        ad  *
     69  1.40.2.4        ad  * 	LSRUN
     70  1.40.2.4        ad  *
     71  1.40.2.4        ad  * 		Runnable: the LWP is parked on a run queue, and may soon be
      72  1.40.2.4        ad  * 		chosen to run by an idle processor, or by a processor that
      73  1.40.2.4        ad  * 		has been asked to preempt a currently running but lower
     74  1.40.2.4        ad  * 		priority LWP.  If the LWP is not swapped in (L_INMEM == 0)
     75  1.40.2.4        ad  *		then the LWP is not on a run queue, but may be soon.
     76  1.40.2.4        ad  *
     77  1.40.2.4        ad  * 	LSIDL
     78  1.40.2.4        ad  *
     79  1.40.2.4        ad  * 		Idle: the LWP has been created but has not yet executed.
     80  1.40.2.4        ad  * 		Whoever created the new LWP can be expected to set it to
     81  1.40.2.4        ad  * 		another state shortly.
     82  1.40.2.4        ad  *
     83  1.40.2.4        ad  * 	LSZOMB
     84  1.40.2.4        ad  *
     85  1.40.2.4        ad  * 		Zombie: the LWP has exited, released all of its resources
     86  1.40.2.4        ad  * 		and can execute no further.  It will persist until 'reaped'
     87  1.40.2.4        ad  * 		by another LWP or process via the _lwp_wait() or wait()
     88  1.40.2.4        ad  * 		system calls.
     89  1.40.2.4        ad  *
     90  1.40.2.4        ad  * 	LSSUSPENDED:
     91  1.40.2.4        ad  *
     92  1.40.2.4        ad  * 		Suspended: the LWP has had its execution suspended by
     93  1.40.2.4        ad  *		another LWP in the same process using the _lwp_suspend()
     94  1.40.2.4        ad  *		system call.  User-level LWPs also enter the suspended
     95  1.40.2.4        ad  *		state when the system is shutting down.
     96  1.40.2.4        ad  *
      97  1.40.2.4        ad  *	The second set represents a "statement of intent" on behalf of the
     98  1.40.2.4        ad  *	LWP.  The LWP may in fact be executing on a processor, may be
     99  1.40.2.4        ad  *	sleeping, idle, or on a run queue. It is expected to take the
    100  1.40.2.4        ad  *	necessary action to stop executing or become "running" again within
    101  1.40.2.4        ad  *	a short timeframe.
    102  1.40.2.4        ad  *
    103  1.40.2.4        ad  * 	LSDEAD:
    104  1.40.2.4        ad  *
    105  1.40.2.4        ad  * 		Dead: the LWP has released most of its resources and is
    106  1.40.2.4        ad  * 		about to switch away into oblivion.  When it switches away,
    107  1.40.2.4        ad  * 		its few remaining resources will be collected and the LWP
    108  1.40.2.4        ad  * 		will enter the LSZOMB (zombie) state.
    109  1.40.2.4        ad  *
    110  1.40.2.4        ad  * 	LSSLEEP:
    111  1.40.2.4        ad  *
    112  1.40.2.4        ad  * 		Sleeping: the LWP has entered itself onto a sleep queue, and
    113  1.40.2.4        ad  * 		will switch away shortly to allow other LWPs to run on the
    114  1.40.2.4        ad  * 		CPU.
    115  1.40.2.4        ad  *
    116  1.40.2.4        ad  * 	LSSTOP:
    117  1.40.2.4        ad  *
    118  1.40.2.4        ad  * 		Stopped: the LWP has been stopped as a result of a job
    119  1.40.2.4        ad  * 		control signal, or as a result of the ptrace() interface.
    120  1.40.2.4        ad  * 		Stopped LWPs may run briefly within the kernel to handle
    121  1.40.2.4        ad  * 		signals that they receive, but will not return to user space
    122  1.40.2.4        ad  * 		until their process' state is changed away from stopped.
     123  1.40.2.4        ad  * 		Individual LWPs within a process cannot be stopped
     124  1.40.2.4        ad  * 		selectively: all actions that can stop or continue LWPs
    125  1.40.2.4        ad  * 		occur at the process level.
    126  1.40.2.4        ad  *
    127  1.40.2.4        ad  * State transitions
    128  1.40.2.4        ad  *
    129  1.40.2.4        ad  *	Note that the LSSTOP and LSSUSPENDED states may only be set
    130  1.40.2.4        ad  *	when returning to user space in userret(), or when sleeping
     131  1.40.2.4        ad  *	interruptibly.  Before setting those states, we try to ensure
    132  1.40.2.4        ad  *	that the LWPs will release all kernel locks that they hold,
    133  1.40.2.4        ad  *	and at a minimum try to ensure that the LWP can be set runnable
    134  1.40.2.4        ad  *	again by a signal.
    135  1.40.2.4        ad  *
    136  1.40.2.4        ad  *	LWPs may transition states in the following ways:
    137  1.40.2.4        ad  *
    138  1.40.2.4        ad  *	 IDL -------> SUSPENDED		DEAD -------> ZOMBIE
    139  1.40.2.4        ad  *		    > RUN
    140  1.40.2.4        ad  *
    141  1.40.2.4        ad  *	 RUN -------> ONPROC		ONPROC -----> RUN
    142  1.40.2.4        ad  *	            > STOPPED			    > SLEEP
    143  1.40.2.4        ad  *	            > SUSPENDED			    > STOPPED
    144  1.40.2.4        ad  *						    > SUSPENDED
    145  1.40.2.4        ad  *						    > DEAD
    146  1.40.2.4        ad  *
    147  1.40.2.4        ad  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    148  1.40.2.4        ad  *	            > SLEEP			    > SLEEP
    149  1.40.2.4        ad  *
    150  1.40.2.4        ad  *	 SLEEP -----> ONPROC
    151  1.40.2.4        ad  *		    > RUN
    152  1.40.2.4        ad  *		    > STOPPED
    153  1.40.2.4        ad  *		    > SUSPENDED
    154  1.40.2.4        ad  *
    155  1.40.2.4        ad  * Locking
    156  1.40.2.4        ad  *
    157  1.40.2.4        ad  *	The majority of fields in 'struct lwp' are covered by a single,
    158  1.40.2.4        ad  *	general spin mutex pointed to by lwp::l_mutex.  The locks covering
    159  1.40.2.4        ad  *	each field are documented in sys/lwp.h.
    160  1.40.2.4        ad  *
    161  1.40.2.4        ad  *	State transitions must be made with the LWP's general lock held.  In
    162  1.40.2.4        ad  *	a multiprocessor kernel, state transitions may cause the LWP's lock
    163  1.40.2.4        ad  *	pointer to change.  On uniprocessor kernels, most scheduler and
    164  1.40.2.4        ad  *	synchronisation objects such as sleep queues and LWPs are protected
    165  1.40.2.4        ad  *	by only one mutex (sched_mutex).  In this case, LWPs' lock pointers
    166  1.40.2.4        ad  *	will never change and will always reference sched_mutex.
    167  1.40.2.4        ad  *
    168  1.40.2.4        ad  *	Manipulation of the general lock is not performed directly, but
    169  1.40.2.4        ad  *	through calls to lwp_lock(), lwp_relock() and similar.
    170  1.40.2.4        ad  *
    171  1.40.2.4        ad  *	States and their associated locks:
    172  1.40.2.4        ad  *
    173  1.40.2.4        ad  *	LSIDL, LSDEAD, LSZOMB
    174  1.40.2.4        ad  *
    175  1.40.2.4        ad  *		Always covered by lwp_mutex (the idle mutex).
    176  1.40.2.4        ad  *
    177  1.40.2.4        ad  *	LSONPROC, LSRUN:
    178  1.40.2.4        ad  *
    179  1.40.2.4        ad  *		Always covered by sched_mutex, which protects the run queues
    180  1.40.2.4        ad  *		and other miscellaneous items.  If the scheduler is changed
    181  1.40.2.4        ad  *		to use per-CPU run queues, this may become a per-CPU mutex.
    182  1.40.2.4        ad  *
    183  1.40.2.4        ad  *	LSSLEEP:
    184  1.40.2.4        ad  *
    185  1.40.2.4        ad  *		Covered by a mutex associated with the sleep queue that the
    186  1.40.2.4        ad  *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
    187  1.40.2.4        ad  *
    188  1.40.2.4        ad  *	LSSTOP, LSSUSPENDED:
    189  1.40.2.4        ad  *
    190  1.40.2.4        ad  *		If the LWP was previously sleeping (l_wchan != NULL), then
    191  1.40.2.4        ad  *		l_mutex references the sleep queue mutex.  If the LWP was
    192  1.40.2.4        ad  *		runnable or on the CPU when halted, or has been removed from
    193  1.40.2.4        ad  *		the sleep queue since halted, then the mutex is lwp_mutex.
    194  1.40.2.4        ad  *
    195  1.40.2.4        ad  *	The lock order for the various mutexes is as follows:
    196  1.40.2.4        ad  *
    197  1.40.2.4        ad  *		sleepq_t::sq_mutex -> lwp_mutex -> sched_mutex
    198  1.40.2.4        ad  *
     199  1.40.2.4        ad  *	Each process has a scheduler state mutex (proc::p_smutex), and a
    200  1.40.2.4        ad  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    201  1.40.2.4        ad  *	so on.  When an LWP is to be entered into or removed from one of the
     202  1.40.2.4        ad  *	following states, p_smutex must be held and the process wide counters
    203  1.40.2.4        ad  *	adjusted:
    204  1.40.2.4        ad  *
    205  1.40.2.4        ad  *		LSIDL, LSDEAD, LSZOMB, LSSTOP, LSSUSPENDED
    206  1.40.2.4        ad  *
    207  1.40.2.4        ad  *	Note that an LWP is considered running or likely to run soon if in
    208  1.40.2.4        ad  *	one of the following states.  This affects the value of p_nrlwps:
    209  1.40.2.4        ad  *
    210  1.40.2.4        ad  *		LSRUN, LSONPROC, LSSLEEP
    211  1.40.2.4        ad  *
    212  1.40.2.4        ad  *	p_smutex does not need to be held when transitioning among these
    213  1.40.2.4        ad  *	three states.
    214  1.40.2.4        ad  */
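
/*
 * Illustrative sketch (not part of the interface): waking a sleeping LWP
 * "t" in process "p" under the locking protocol above, assuming only the
 * functions documented in this file:
 *
 *	mutex_enter(&p->p_smutex);	// covers the LWP state counters
 *	lwp_lock(t);			// takes t->l_mutex, whichever
 *					// mutex that currently is
 *	if (t->l_stat == LSSLEEP)
 *		setrunnable(t);		// transitions t; releases t's lock
 *	else
 *		lwp_unlock(t);
 *	mutex_exit(&p->p_smutex);
 */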
    215  1.40.2.4        ad 
    216       1.9     lukem #include <sys/cdefs.h>
    217  1.40.2.5        ad __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.40.2.5 2006/11/18 21:39:22 ad Exp $");
    218       1.8    martin 
    219       1.8    martin #include "opt_multiprocessor.h"
    220  1.40.2.4        ad #include "opt_lockdebug.h"
    221       1.2   thorpej 
    222  1.40.2.5        ad #define _LWP_API_PRIVATE
    223  1.40.2.5        ad 
    224       1.2   thorpej #include <sys/param.h>
    225       1.2   thorpej #include <sys/systm.h>
    226       1.2   thorpej #include <sys/pool.h>
    227       1.2   thorpej #include <sys/proc.h>
    228  1.40.2.4        ad #include <sys/sa.h>
    229       1.2   thorpej #include <sys/syscallargs.h>
    230      1.37        ad #include <sys/kauth.h>
    231  1.40.2.2        ad #include <sys/sleepq.h>
    232  1.40.2.2        ad #include <sys/lockdebug.h>
    233       1.2   thorpej 
    234       1.2   thorpej #include <uvm/uvm_extern.h>
    235       1.2   thorpej 
    236  1.40.2.1        ad struct lwplist	alllwp;
    237  1.40.2.1        ad kmutex_t	alllwp_mutex;
    238  1.40.2.2        ad kmutex_t	lwp_mutex;
    239       1.2   thorpej 
    240  1.40.2.5        ad POOL_INIT(lwp_pool, sizeof(struct lwp), 16, 0, 0, "lwppl",
    241  1.40.2.5        ad     &pool_allocator_nointr);
    242  1.40.2.5        ad POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    243  1.40.2.5        ad     &pool_allocator_nointr);
    244  1.40.2.5        ad 
    245  1.40.2.5        ad static specificdata_domain_t lwp_specificdata_domain;
    246  1.40.2.5        ad 
    247       1.2   thorpej #define LWP_DEBUG
    248       1.2   thorpej 
    249       1.2   thorpej #ifdef LWP_DEBUG
    250       1.2   thorpej int lwp_debug = 0;
    251       1.2   thorpej #define DPRINTF(x) if (lwp_debug) printf x
    252       1.2   thorpej #else
    253       1.2   thorpej #define DPRINTF(x)
    254       1.2   thorpej #endif
    255       1.2   thorpej 
    256  1.40.2.5        ad void
    257  1.40.2.5        ad lwpinit(void)
    258  1.40.2.5        ad {
    259  1.40.2.5        ad 
    260  1.40.2.5        ad 	lwp_specificdata_domain = specificdata_domain_create();
    261  1.40.2.5        ad 	KASSERT(lwp_specificdata_domain != NULL);
    262  1.40.2.5        ad }
    263  1.40.2.5        ad 
    264  1.40.2.2        ad /*
    265  1.40.2.2        ad  * Set an LWP halted or suspended.
    266  1.40.2.2        ad  *
    267  1.40.2.2        ad  * Must be called with p_smutex held, and the LWP locked.  Will unlock the
    268  1.40.2.2        ad  * LWP before return.
    269  1.40.2.2        ad  */
    270       1.2   thorpej int
    271  1.40.2.2        ad lwp_halt(struct lwp *curl, struct lwp *t, int state)
    272       1.2   thorpej {
    273  1.40.2.4        ad 	int error, want;
    274       1.2   thorpej 
    275  1.40.2.4        ad 	LOCK_ASSERT(mutex_owned(&t->l_proc->p_smutex)); /* XXXAD what now? */
    276  1.40.2.2        ad 	LOCK_ASSERT(lwp_locked(t, NULL));
    277       1.2   thorpej 
    278  1.40.2.2        ad 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    279      1.17      manu 
    280  1.40.2.2        ad 	/*
    281  1.40.2.2        ad 	 * If the current LWP has been told to exit, we must not suspend anyone
    282  1.40.2.2        ad 	 * else or deadlock could occur.  We won't return to userspace.
    283  1.40.2.2        ad 	 */
     284  1.40.2.2        ad 	if ((curl->l_flag & (L_WEXIT | L_WCORE)) != 0) {
     284  1.40.2.2        ad 		lwp_unlock(t);
     285  1.40.2.2        ad 		return (EDEADLK);
     285  1.40.2.2        ad 	}
    286       1.2   thorpej 
    287  1.40.2.2        ad 	error = 0;
    288      1.17      manu 
    289  1.40.2.4        ad 	want = (state == LSSUSPENDED ? L_WSUSPEND : 0);
    290  1.40.2.4        ad 
    291  1.40.2.2        ad 	switch (t->l_stat) {
    292  1.40.2.2        ad 	case LSRUN:
    293  1.40.2.2        ad 	case LSONPROC:
    294  1.40.2.4        ad 		t->l_flag |= want;
    295  1.40.2.4        ad 		signotify(t);
    296  1.40.2.2        ad 		break;
    297  1.40.2.4        ad 
    298  1.40.2.2        ad 	case LSSLEEP:
     299  1.40.2.4        ad 		t->l_flag |= want;
    300  1.40.2.4        ad 
    301  1.40.2.4        ad 		/*
    302  1.40.2.4        ad 		 * Kick the LWP and try to get it to the kernel boundary
    303  1.40.2.4        ad 		 * so that it will release any locks that it holds.
    304  1.40.2.4        ad 		 * setrunnable() will release the lock.
    305  1.40.2.4        ad 		 */
    306  1.40.2.4        ad 		signotify(t);
    307  1.40.2.4        ad 		setrunnable(t);
    308  1.40.2.4        ad 		return 0;
    309  1.40.2.4        ad 
    310  1.40.2.2        ad 	case LSSUSPENDED:
    311  1.40.2.2        ad 	case LSSTOP:
    312  1.40.2.4        ad 		t->l_flag |= want;
    313  1.40.2.2        ad 		break;
    314  1.40.2.4        ad 
    315  1.40.2.2        ad 	case LSIDL:
    316  1.40.2.2        ad 	case LSZOMB:
    317  1.40.2.4        ad 	case LSDEAD:
    318  1.40.2.2        ad 		error = EINTR; /* It's what Solaris does..... */
    319  1.40.2.2        ad 		break;
    320       1.2   thorpej 	}
    321       1.2   thorpej 
    322  1.40.2.4        ad 	lwp_unlock(t);
    323       1.2   thorpej 
    324  1.40.2.2        ad 	return (error);
    325       1.2   thorpej }
    326       1.2   thorpej 
    327  1.40.2.2        ad /*
    328  1.40.2.2        ad  * Restart a suspended LWP.
    329  1.40.2.2        ad  *
    330  1.40.2.2        ad  * Must be called with p_smutex held, and the LWP locked.  Will unlock the
    331  1.40.2.2        ad  * LWP before return.
    332  1.40.2.2        ad  */
    333       1.2   thorpej void
    334       1.2   thorpej lwp_continue(struct lwp *l)
    335       1.2   thorpej {
    336       1.2   thorpej 
    337  1.40.2.2        ad 	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
    338  1.40.2.4        ad 	LOCK_ASSERT(lwp_locked(l, NULL));
    339  1.40.2.2        ad 
    340       1.2   thorpej 	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
    341       1.2   thorpej 	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
    342       1.2   thorpej 	    l->l_wchan));
    343       1.2   thorpej 
    344  1.40.2.4        ad 	/* If rebooting or not suspended, then just bail out. */
    345  1.40.2.4        ad 	if ((l->l_flag & L_WREBOOT) != 0) {
    346  1.40.2.2        ad 		lwp_unlock(l);
    347       1.2   thorpej 		return;
    348  1.40.2.2        ad 	}
    349       1.2   thorpej 
    350  1.40.2.4        ad 	l->l_flag &= ~L_WSUSPEND;
    351  1.40.2.4        ad 
    352  1.40.2.4        ad 	if (l->l_stat != LSSUSPENDED) {
    353  1.40.2.2        ad 		lwp_unlock(l);
    354  1.40.2.4        ad 		return;
    355       1.2   thorpej 	}
    356  1.40.2.4        ad 
    357  1.40.2.4        ad 	/* setrunnable() will release the lock. */
    358  1.40.2.4        ad 	setrunnable(l);
    359       1.2   thorpej }
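
/*
 * Example (hypothetical, for illustration only): suspend and later resume
 * a target LWP "t" on behalf of LWP "l" in the same process, following
 * the contracts above: p_smutex held and the target locked on entry, and
 * both lwp_halt() and lwp_continue() unlock the target before returning.
 */
static int
example_lwp_suspend(struct lwp *l, struct lwp *t)
{
	struct proc *p = t->l_proc;
	int error;

	mutex_enter(&p->p_smutex);
	lwp_lock(t);
	error = lwp_halt(l, t, LSSUSPENDED);	/* unlocks t */
	mutex_exit(&p->p_smutex);

	return error;
}

static void
example_lwp_resume(struct lwp *t)
{
	struct proc *p = t->l_proc;

	mutex_enter(&p->p_smutex);
	lwp_lock(t);
	lwp_continue(t);			/* unlocks t */
	mutex_exit(&p->p_smutex);
}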
    360       1.2   thorpej 
    361  1.40.2.2        ad /*
    362  1.40.2.2        ad  * Wait for an LWP within the current process to exit.  If 'lid' is
    363  1.40.2.2        ad  * non-zero, we are waiting for a specific LWP.
    364  1.40.2.2        ad  *
    365  1.40.2.2        ad  * Must be called with p->p_smutex held.
    366  1.40.2.2        ad  */
    367       1.2   thorpej int
    368       1.2   thorpej lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
    369       1.2   thorpej {
    370       1.2   thorpej 	struct proc *p = l->l_proc;
    371  1.40.2.2        ad 	struct lwp *l2;
    372      1.19  jdolecek 	int nfound, error, wpri;
    373      1.18  jdolecek 	static const char waitstr1[] = "lwpwait";
    374      1.18  jdolecek 	static const char waitstr2[] = "lwpwait2";
    375       1.2   thorpej 
    376       1.2   thorpej 	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
    377       1.2   thorpej 	    p->p_pid, l->l_lid, lid));
    378       1.2   thorpej 
    379  1.40.2.2        ad 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    380       1.2   thorpej 
    381  1.40.2.2        ad 	/*
    382  1.40.2.2        ad 	 * Check for deadlock:
    383  1.40.2.2        ad 	 *
    384  1.40.2.2        ad 	 * 1) If all other LWPs are waiting for exits or suspended.
    385  1.40.2.2        ad 	 * 2) If we are trying to wait on ourself.
    386  1.40.2.2        ad 	 *
    387  1.40.2.2        ad 	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
    388  1.40.2.2        ad 	 * waits, not any-LWP waits) and detect that sort of deadlock, but
    389  1.40.2.2        ad 	 * we don't have a good place to store the lwp that is being waited
    390  1.40.2.2        ad 	 * for. wchan is already filled with &p->p_nlwps, and putting the
    391  1.40.2.2        ad 	 * lwp address in there for deadlock tracing would require exiting
    392  1.40.2.2        ad 	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
    393  1.40.2.2        ad 	 * get threads sleeping on any LWP exiting.
    394  1.40.2.2        ad 	 */
    395  1.40.2.2        ad 	if (lwp_lastlive(p->p_nlwpwait) || lid == l->l_lid)
    396  1.40.2.2        ad 		return (EDEADLK);
    397  1.40.2.2        ad 
    398  1.40.2.2        ad 	p->p_nlwpwait++;
    399  1.40.2.2        ad 	wpri = PWAIT;
    400  1.40.2.2        ad 	if ((flags & LWPWAIT_EXITCONTROL) == 0)
    401  1.40.2.2        ad 		wpri |= PCATCH;
    402       1.2   thorpej  loop:
    403       1.2   thorpej 	nfound = 0;
    404       1.2   thorpej 	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    405       1.2   thorpej 		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
    406       1.2   thorpej 		    ((lid != 0) && (lid != l2->l_lid)))
    407       1.2   thorpej 			continue;
    408       1.2   thorpej 		nfound++;
    409  1.40.2.2        ad 		if (l2->l_stat != LSZOMB)
    410  1.40.2.2        ad 			continue;
    411       1.2   thorpej 
    412  1.40.2.2        ad 		if (departed)
    413  1.40.2.2        ad 			*departed = l2->l_lid;
    414  1.40.2.2        ad 
    415  1.40.2.2        ad 		LIST_REMOVE(l2, l_sibling);
    416  1.40.2.2        ad 		p->p_nlwps--;
    417  1.40.2.2        ad 		p->p_nzlwps--;
    418  1.40.2.2        ad 		p->p_nlwpwait--;
    419  1.40.2.2        ad 		/* XXX decrement limits */
    420  1.40.2.2        ad 		pool_put(&lwp_pool, l2);
    421  1.40.2.2        ad 		return (0);
    422       1.2   thorpej 	}
    423       1.2   thorpej 
    424  1.40.2.2        ad 	if (nfound == 0) {
    425  1.40.2.2        ad 		p->p_nlwpwait--;
    426       1.2   thorpej 		return (ESRCH);
    427  1.40.2.2        ad 	}
    428       1.2   thorpej 
    429  1.40.2.2        ad 	if ((error = mtsleep(&p->p_nlwps, wpri,
    430  1.40.2.2        ad 	    (lid != 0) ? waitstr1 : waitstr2, 0, &p->p_smutex)) != 0)
    431       1.2   thorpej 		return (error);
    432       1.2   thorpej 
    433       1.2   thorpej 	goto loop;
    434       1.2   thorpej }
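
/*
 * Example (hypothetical): reap one specific LWP.  lwp_wait1() requires
 * p_smutex held; on success the zombie has already been freed and
 * "departed" holds its LWP ID.
 */
static int
example_lwp_reap(struct lwp *l, lwpid_t lid)
{
	struct proc *p = l->l_proc;
	lwpid_t departed;
	int error;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, lid, &departed, 0);
	mutex_exit(&p->p_smutex);

	return error;
}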
    435       1.2   thorpej 
    436  1.40.2.2        ad /*
    437  1.40.2.2        ad  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    438  1.40.2.2        ad  * The new LWP is created in state LSIDL and must be set running,
    439  1.40.2.2        ad  * suspended, or stopped by the caller.
    440  1.40.2.2        ad  */
    441       1.2   thorpej int
    442       1.2   thorpej newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    443       1.2   thorpej     int flags, void *stack, size_t stacksize,
    444       1.2   thorpej     void (*func)(void *), void *arg, struct lwp **rnewlwpp)
    445       1.2   thorpej {
    446       1.2   thorpej 	struct lwp *l2;
    447       1.2   thorpej 
    448       1.2   thorpej 	l2 = pool_get(&lwp_pool, PR_WAITOK);
    449       1.2   thorpej 
    450       1.2   thorpej 	l2->l_stat = LSIDL;
    451       1.2   thorpej 	l2->l_forw = l2->l_back = NULL;
    452       1.2   thorpej 	l2->l_proc = p2;
    453  1.40.2.4        ad 	l2->l_refcnt = 1;
    454       1.2   thorpej 
    455  1.40.2.5        ad 	lwp_initspecific(l2);
    456  1.40.2.5        ad 
    457       1.2   thorpej 	memset(&l2->l_startzero, 0,
    458       1.2   thorpej 	       (unsigned) ((caddr_t)&l2->l_endzero -
    459       1.2   thorpej 			   (caddr_t)&l2->l_startzero));
    460  1.40.2.4        ad 
    461  1.40.2.4        ad 	/* The copy here is unlocked, but is unlikely to pose a problem. */
    462       1.2   thorpej 	memcpy(&l2->l_startcopy, &l1->l_startcopy,
    463       1.2   thorpej 	       (unsigned) ((caddr_t)&l2->l_endcopy -
    464       1.2   thorpej 			   (caddr_t)&l2->l_startcopy));
    465       1.2   thorpej 
    466       1.2   thorpej #if !defined(MULTIPROCESSOR)
    467       1.2   thorpej 	/*
    468       1.2   thorpej 	 * In the single-processor case, all processes will always run
    469       1.2   thorpej 	 * on the same CPU.  So, initialize the child's CPU to the parent's
    470       1.2   thorpej 	 * now.  In the multiprocessor case, the child's CPU will be
    471       1.2   thorpej 	 * initialized in the low-level context switch code when the
    472       1.2   thorpej 	 * process runs.
    473       1.2   thorpej 	 */
    474       1.5      matt 	KASSERT(l1->l_cpu != NULL);
    475       1.2   thorpej 	l2->l_cpu = l1->l_cpu;
    476       1.2   thorpej #else
    477       1.2   thorpej 	/*
     478  1.40.2.4        ad 	 * Zero the child's CPU pointer so we don't get trash.
    479       1.2   thorpej 	 */
    480       1.2   thorpej 	l2->l_cpu = NULL;
    481       1.2   thorpej #endif /* ! MULTIPROCESSOR */
    482       1.2   thorpej 
    483  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    484  1.40.2.4        ad 	l2->l_mutex = &lwp_mutex;
    485  1.40.2.4        ad #else
    486  1.40.2.4        ad 	l2->l_mutex = &sched_mutex;
    487  1.40.2.4        ad #endif
    488  1.40.2.4        ad 
    489       1.2   thorpej 	l2->l_flag = inmem ? L_INMEM : 0;
    490       1.2   thorpej 	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;
    491       1.2   thorpej 
    492  1.40.2.2        ad 	if (p2->p_flag & P_SYSTEM) {
    493  1.40.2.2        ad 		/*
    494  1.40.2.2        ad 		 * Mark it as a system process and not a candidate for
    495  1.40.2.2        ad 		 * swapping.
    496  1.40.2.2        ad 		 */
    497  1.40.2.2        ad 		l2->l_flag |= L_SYSTEM | L_INMEM;
    498  1.40.2.2        ad 	}
    499  1.40.2.2        ad 
    500      1.37        ad 	lwp_update_creds(l2);
    501       1.2   thorpej 	callout_init(&l2->l_tsleep_ch);
    502  1.40.2.1        ad 	l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
    503  1.40.2.4        ad 	l2->l_syncobj = &sched_syncobj;
    504       1.2   thorpej 
    505       1.2   thorpej 	if (rnewlwpp != NULL)
    506       1.2   thorpej 		*rnewlwpp = l2;
    507       1.2   thorpej 
    508      1.36      yamt 	l2->l_addr = UAREA_TO_USER(uaddr);
    509       1.2   thorpej 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
    510       1.2   thorpej 	    (arg != NULL) ? arg : l2);
    511       1.2   thorpej 
    512  1.40.2.2        ad 	mutex_enter(&p2->p_smutex);
    513  1.40.2.3        ad 
    514  1.40.2.3        ad 	if ((p2->p_flag & P_SA) == 0) {
    515  1.40.2.3        ad 		l2->l_sigpend = &l2->l_sigstore.ss_pend;
    516  1.40.2.3        ad 		l2->l_sigmask = &l2->l_sigstore.ss_mask;
    517  1.40.2.3        ad 		l2->l_sigstk = &l2->l_sigstore.ss_stk;
     518  1.40.2.3        ad 		*l2->l_sigmask = *l1->l_sigmask;
    519  1.40.2.3        ad 		CIRCLEQ_INIT(&l2->l_sigpend->sp_info);
    520  1.40.2.3        ad 		sigemptyset(&l2->l_sigpend->sp_set);
    521  1.40.2.3        ad 	} else {
    522  1.40.2.3        ad 		l2->l_sigpend = &p2->p_sigstore.ss_pend;
    523  1.40.2.3        ad 		l2->l_sigmask = &p2->p_sigstore.ss_mask;
    524  1.40.2.3        ad 		l2->l_sigstk = &p2->p_sigstore.ss_stk;
    525  1.40.2.3        ad 	}
    526  1.40.2.3        ad 
    527       1.2   thorpej 	l2->l_lid = ++p2->p_nlwpid;
    528       1.2   thorpej 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    529       1.2   thorpej 	p2->p_nlwps++;
    530  1.40.2.3        ad 
    531  1.40.2.2        ad 	mutex_exit(&p2->p_smutex);
    532       1.2   thorpej 
    533  1.40.2.1        ad 	mutex_enter(&alllwp_mutex);
    534       1.2   thorpej 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    535  1.40.2.1        ad 	mutex_exit(&alllwp_mutex);
    536       1.2   thorpej 
    537      1.16      manu 	if (p2->p_emul->e_lwp_fork)
    538      1.16      manu 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    539      1.16      manu 
    540       1.2   thorpej 	return (0);
    541       1.2   thorpej }
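
/*
 * Example (hypothetical): create a new LWP in the current process and
 * leave it suspended.  As noted above, the new LWP is born LSIDL and it
 * is the caller's job to choose the first transition; the u-area
 * ("uaddr", "inmem") is assumed to have been allocated by the caller,
 * since that step is machine- and caller-specific.
 */
static int
example_lwp_create(struct lwp *l1, vaddr_t uaddr, boolean_t inmem,
    void (*func)(void *), void *arg)
{
	struct proc *p = l1->l_proc;
	struct lwp *l2;
	int error;

	error = newlwp(l1, p, uaddr, inmem, 0, NULL, 0, func, arg, &l2);
	if (error != 0)
		return error;

	/* LSIDL -> LSSUSPENDED: p_smutex and the LWP's lock are required. */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	l2->l_stat = LSSUSPENDED;
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	return 0;
}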
    542       1.2   thorpej 
    543       1.2   thorpej /*
     544  1.40.2.2        ad  * Exit an LWP.  This will call cpu_exit, which will call cpu_switch,
    545  1.40.2.2        ad  * so this can only be used meaningfully if you're willing to switch away.
    546       1.2   thorpej  * Calling with l!=curlwp would be weird.
    547       1.2   thorpej  */
    548  1.40.2.4        ad int
    549  1.40.2.4        ad lwp_exit(struct lwp *l, int checksigs)
    550       1.2   thorpej {
    551       1.2   thorpej 	struct proc *p = l->l_proc;
    552       1.2   thorpej 
    553       1.2   thorpej 	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
    554  1.40.2.2        ad 	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
    555       1.2   thorpej 
    556  1.40.2.4        ad 	mutex_enter(&p->p_smutex);
    557  1.40.2.4        ad 
    558  1.40.2.4        ad 	/*
    559  1.40.2.4        ad 	 * If we've got pending signals that we haven't processed yet, make
    560  1.40.2.4        ad 	 * sure that we take them before exiting.
    561  1.40.2.4        ad 	 */
    562  1.40.2.4        ad 	if (checksigs && sigispending(l)) {
    563  1.40.2.4        ad 		mutex_exit(&p->p_smutex);
    564  1.40.2.4        ad 		return ERESTART;
    565  1.40.2.4        ad 	}
    566  1.40.2.4        ad 
    567      1.16      manu 	if (p->p_emul->e_lwp_exit)
    568      1.16      manu 		(*p->p_emul->e_lwp_exit)(l);
    569      1.16      manu 
    570       1.2   thorpej 	/*
    571  1.40.2.2        ad 	 * If we are the last live LWP in a process, we need to exit the
    572  1.40.2.2        ad 	 * entire process.  We do so with an exit status of zero, because
    573  1.40.2.2        ad 	 * it's a "controlled" exit, and because that's what Solaris does.
    574  1.40.2.2        ad 	 *
    575  1.40.2.2        ad 	 * We are not quite a zombie yet, but for accounting purposes we
    576  1.40.2.2        ad 	 * must increment the count of zombies here.
    577  1.40.2.5        ad 	 *
    578  1.40.2.5        ad 	 * Note: the last LWP's specificdata will be deleted here.
    579       1.2   thorpej 	 */
    580  1.40.2.2        ad 	p->p_nzlwps++;
    581  1.40.2.4        ad 	if (p->p_nlwps - p->p_nzlwps == 0) {
    582       1.2   thorpej 		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
    583       1.2   thorpej 		    p->p_pid, l->l_lid));
    584       1.2   thorpej 		exit1(l, 0);
    585      1.19  jdolecek 		/* NOTREACHED */
    586       1.2   thorpej 	}
    587       1.2   thorpej 
    588  1.40.2.5        ad 	/* Delete the specificdata while it's still safe to sleep. */
    589  1.40.2.5        ad 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
    590  1.40.2.5        ad 
    591  1.40.2.5        ad 	/*
    592  1.40.2.5        ad 	 * Release our cached credentials and collate accounting flags.
    593  1.40.2.5        ad 	 */
    594  1.40.2.5        ad 	kauth_cred_free(l->l_cred);
    595  1.40.2.5        ad 	mutex_enter(&p->p_mutex);
    596  1.40.2.5        ad 	p->p_acflag |= l->l_acflag;
    597  1.40.2.5        ad 	mutex_exit(&p->p_mutex);
    598  1.40.2.5        ad 
    599  1.40.2.2        ad 	lwp_lock(l);
    600  1.40.2.2        ad 	if ((l->l_flag & L_DETACHED) != 0) {
    601  1.40.2.2        ad 		LIST_REMOVE(l, l_sibling);
    602  1.40.2.2        ad 		p->p_nlwps--;
    603  1.40.2.2        ad 		curlwp = NULL;
    604  1.40.2.2        ad 		l->l_proc = NULL;
    605  1.40.2.2        ad 	}
    606  1.40.2.2        ad 	l->l_stat = LSDEAD;
    607  1.40.2.4        ad 	lwp_unlock_to(l, &lwp_mutex);
    608  1.40.2.4        ad 
    609  1.40.2.4        ad 	if ((p->p_flag & P_SA) == 0) {
    610  1.40.2.4        ad 		/*
     611  1.40.2.4        ad 		 * Clear any private, pending signals.   XXX We may lose
    612  1.40.2.4        ad 		 * process-wide signals that we didn't want to take.
    613  1.40.2.4        ad 		 */
    614  1.40.2.3        ad 		sigclear(l->l_sigpend, NULL);
    615  1.40.2.4        ad 	}
    616  1.40.2.4        ad 
    617  1.40.2.2        ad 	mutex_exit(&p->p_smutex);
    618  1.40.2.1        ad 
    619  1.40.2.2        ad 	/*
     620  1.40.2.4        ad 	 * Remove the LWP from the global list.  It was marked dead above,
     621  1.40.2.4        ad 	 * so nothing should be able to find or update it past this
     622  1.40.2.4        ad 	 * point.
    623  1.40.2.4        ad 	 */
    624  1.40.2.4        ad 	mutex_enter(&alllwp_mutex);
    625  1.40.2.4        ad 	LIST_REMOVE(l, l_list);
    626  1.40.2.4        ad 	mutex_exit(&alllwp_mutex);
    627  1.40.2.4        ad 
    628  1.40.2.4        ad 	/*
    629  1.40.2.2        ad 	 * Verify that we hold no locks other than the kernel mutex, and
    630  1.40.2.4        ad 	 * release our turnstile.  We should no longer sleep past this
    631  1.40.2.4        ad 	 * point.
    632  1.40.2.2        ad 	 */
    633  1.40.2.4        ad 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
    634  1.40.2.2        ad 	pool_cache_put(&turnstile_cache, l->l_ts);
    635      1.37        ad 
    636  1.40.2.2        ad 	/*
    637  1.40.2.2        ad 	 * Free MD LWP resources
    638  1.40.2.2        ad 	 */
    639      1.19  jdolecek #ifndef __NO_CPU_LWP_FREE
    640      1.19  jdolecek 	cpu_lwp_free(l, 0);
    641      1.19  jdolecek #endif
    642      1.31      yamt 	pmap_deactivate(l);
    643      1.31      yamt 
    644  1.40.2.2        ad 	/*
    645  1.40.2.2        ad 	 * Release the kernel lock, and switch away into oblivion.
    646  1.40.2.2        ad 	 */
    647  1.40.2.4        ad 	(void)KERNEL_UNLOCK(0, l);	/* XXXSMP assert count == 1 */
    648      1.19  jdolecek 	cpu_exit(l);
    649  1.40.2.4        ad 
    650  1.40.2.4        ad 	/* NOTREACHED */
    651  1.40.2.4        ad 	return 0;
    652       1.2   thorpej }
    653       1.2   thorpej 
    654      1.19  jdolecek /*
    655  1.40.2.4        ad  * We are called from cpu_exit() once it is safe to schedule the dead LWP's
    656  1.40.2.4        ad  * resources to be freed (i.e., once we've switched to the idle PCB for the
    657  1.40.2.4        ad  * current CPU).
    658  1.40.2.4        ad  *
    659  1.40.2.4        ad  * NOTE: One must be careful with locking in this routine.  It's called from
    660  1.40.2.4        ad  * a critical section in machine-dependent code.
    661      1.19  jdolecek  */
    662       1.2   thorpej void
    663       1.2   thorpej lwp_exit2(struct lwp *l)
    664       1.2   thorpej {
    665  1.40.2.4        ad 	struct proc *p;
    666  1.40.2.4        ad 	u_int refcnt;
    667       1.2   thorpej 
    668  1.40.2.4        ad 	/*
    669  1.40.2.4        ad 	 * If someone holds a reference on the LWP, let them clean us up.
    670  1.40.2.4        ad 	 */
    671  1.40.2.4        ad 	lwp_lock(l);
    672  1.40.2.4        ad 	refcnt = --l->l_refcnt;
    673  1.40.2.4        ad 	lwp_unlock(l);
    674  1.40.2.4        ad 	if (refcnt != 0)
    675  1.40.2.4        ad 		return;
    676  1.40.2.4        ad 
    677  1.40.2.4        ad 	KASSERT(l->l_stat == LSDEAD);
    678  1.40.2.4        ad 	KERNEL_LOCK(1, NULL);
    679  1.40.2.2        ad 
    680      1.19  jdolecek 	/*
    681      1.19  jdolecek 	 * Free the VM resources we're still holding on to.
    682      1.19  jdolecek 	 */
    683      1.19  jdolecek 	uvm_lwp_exit(l);
    684      1.19  jdolecek 
    685  1.40.2.4        ad 	p = l->l_proc;
    686  1.40.2.4        ad 
    687  1.40.2.4        ad 	if ((l->l_flag & L_DETACHED) != 0) {
    688  1.40.2.4        ad 		/*
    689  1.40.2.4        ad 		 * Nobody waits for detached LWPs.
    690  1.40.2.4        ad 		 */
    691      1.19  jdolecek 		pool_put(&lwp_pool, l);
    692  1.40.2.4        ad 		(void)KERNEL_UNLOCK(1, NULL);
    693  1.40.2.4        ad 
    694  1.40.2.4        ad 		/*
    695  1.40.2.4        ad 		 * If this is the last LWP in the process, wake up the
    696  1.40.2.4        ad 		 * parent so that it can reap us.
    697  1.40.2.4        ad 		 */
    698  1.40.2.4        ad 		mb_read();
    699  1.40.2.4        ad 		if (p->p_nlwps == 0) {
    700  1.40.2.4        ad 			KASSERT(p->p_stat == SDEAD);
    701  1.40.2.4        ad 			p->p_stat = SZOMB;
    702  1.40.2.4        ad 			mb_write();
    703  1.40.2.4        ad 
    704  1.40.2.4        ad 			/* XXXSMP too much locking */
    705  1.40.2.4        ad 			mutex_enter(&proclist_mutex);
    706  1.40.2.4        ad 			mutex_enter(&proc_stop_mutex);
    707  1.40.2.4        ad 			p = p->p_pptr;
    708  1.40.2.4        ad 			p->p_nstopchild++;
    709  1.40.2.4        ad 			cv_broadcast(&p->p_waitcv);
    710  1.40.2.4        ad 			mutex_exit(&proc_stop_mutex);
    711  1.40.2.4        ad 			mutex_exit(&proclist_mutex);
    712  1.40.2.4        ad 		}
    713      1.19  jdolecek 	} else {
    714  1.40.2.4        ad 		(void)KERNEL_UNLOCK(1, NULL);
    715  1.40.2.2        ad 		l->l_stat = LSZOMB;
    716  1.40.2.3        ad 		mb_write();
    717  1.40.2.4        ad 		mutex_enter(&p->p_smutex);
    718  1.40.2.4        ad 		wakeup(&p->p_nlwps);
    719  1.40.2.4        ad 		mutex_exit(&p->p_smutex);
    720      1.19  jdolecek 	}
    721       1.2   thorpej }
    722       1.2   thorpej 
    723       1.2   thorpej /*
     724       1.2   thorpej  * Pick an LWP to represent the process for those operations which
     725       1.2   thorpej  * want information about a "process" that is actually associated
     726       1.2   thorpej  * with an LWP.
    727  1.40.2.2        ad  *
    728  1.40.2.2        ad  * Must be called with p->p_smutex held, and will return the LWP locked.
    729  1.40.2.2        ad  * If 'locking' is false, no locking or lock checks are performed.  This
    730  1.40.2.2        ad  * is intended for use by DDB.
    731       1.2   thorpej  */
    732       1.2   thorpej struct lwp *
    733  1.40.2.2        ad proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
    734       1.2   thorpej {
    735       1.2   thorpej 	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
    736      1.27      matt 	struct lwp *signalled;
    737  1.40.2.2        ad 	int cnt;
    738  1.40.2.2        ad 
    739  1.40.2.5        ad 	if (locking) {
    740  1.40.2.2        ad 		LOCK_ASSERT(mutex_owned(&p->p_smutex));
    741  1.40.2.5        ad 	}
    742       1.2   thorpej 
    743       1.2   thorpej 	/* Trivial case: only one LWP */
    744  1.40.2.2        ad 	if (p->p_nlwps == 1) {
    745  1.40.2.2        ad 		l = LIST_FIRST(&p->p_lwps);
    746  1.40.2.2        ad 		if (nrlwps)
     747  1.40.2.2        ad 			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
    748  1.40.2.3        ad 		if (locking)
    749  1.40.2.3        ad 			lwp_lock(l);
    750  1.40.2.2        ad 		return l;
    751  1.40.2.2        ad 	}
    752       1.2   thorpej 
    753  1.40.2.2        ad 	cnt = 0;
    754       1.2   thorpej 	switch (p->p_stat) {
    755       1.2   thorpej 	case SSTOP:
    756       1.2   thorpej 	case SACTIVE:
    757       1.2   thorpej 		/* Pick the most live LWP */
    758       1.2   thorpej 		onproc = running = sleeping = stopped = suspended = NULL;
    759      1.27      matt 		signalled = NULL;
    760       1.2   thorpej 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    761  1.40.2.2        ad 			if (locking)
    762  1.40.2.2        ad 				lwp_lock(l);
    763      1.27      matt 			if (l->l_lid == p->p_sigctx.ps_lwp)
    764      1.27      matt 				signalled = l;
    765       1.2   thorpej 			switch (l->l_stat) {
    766       1.2   thorpej 			case LSONPROC:
    767       1.2   thorpej 				onproc = l;
    768  1.40.2.2        ad 				cnt++;
    769       1.2   thorpej 				break;
    770       1.2   thorpej 			case LSRUN:
    771       1.2   thorpej 				running = l;
    772  1.40.2.2        ad 				cnt++;
    773       1.2   thorpej 				break;
    774       1.2   thorpej 			case LSSLEEP:
    775       1.2   thorpej 				sleeping = l;
    776       1.2   thorpej 				break;
    777       1.2   thorpej 			case LSSTOP:
    778       1.2   thorpej 				stopped = l;
    779       1.2   thorpej 				break;
    780       1.2   thorpej 			case LSSUSPENDED:
    781       1.2   thorpej 				suspended = l;
    782       1.2   thorpej 				break;
    783       1.2   thorpej 			}
    784  1.40.2.2        ad 			if (locking)
    785  1.40.2.2        ad 				lwp_unlock(l);
    786       1.2   thorpej 		}
    787  1.40.2.2        ad 		if (nrlwps)
    788  1.40.2.2        ad 			*nrlwps = cnt;
    789      1.27      matt 		if (signalled)
    790  1.40.2.2        ad 			l = signalled;
    791  1.40.2.2        ad 		else if (onproc)
    792  1.40.2.2        ad 			l = onproc;
    793  1.40.2.2        ad 		else if (running)
    794  1.40.2.2        ad 			l = running;
    795  1.40.2.2        ad 		else if (sleeping)
    796  1.40.2.2        ad 			l = sleeping;
    797  1.40.2.2        ad 		else if (stopped)
    798  1.40.2.2        ad 			l = stopped;
    799  1.40.2.2        ad 		else if (suspended)
    800  1.40.2.2        ad 			l = suspended;
    801  1.40.2.2        ad 		else
    802  1.40.2.2        ad 			break;
    803  1.40.2.2        ad 		if (locking)
    804  1.40.2.2        ad 			lwp_lock(l);
    805  1.40.2.2        ad 		return l;
    806       1.2   thorpej 	case SZOMB:
    807       1.2   thorpej 		/* Doesn't really matter... */
    808  1.40.2.2        ad 		if (nrlwps)
    809  1.40.2.2        ad 			*nrlwps = 0;
    810  1.40.2.2        ad 		l = LIST_FIRST(&p->p_lwps);
    811  1.40.2.2        ad 		if (locking)
    812  1.40.2.2        ad 			lwp_lock(l);
    813  1.40.2.2        ad 		return l;
    814       1.2   thorpej #ifdef DIAGNOSTIC
    815       1.2   thorpej 	case SIDL:
    816  1.40.2.2        ad 		if (locking)
    817  1.40.2.2        ad 			mutex_exit(&p->p_smutex);
    818       1.2   thorpej 		/* We have more than one LWP and we're in SIDL?
    819       1.2   thorpej 		 * How'd that happen?
    820       1.2   thorpej 		 */
    821  1.40.2.2        ad 		panic("Too many LWPs in SIDL process %d (%s)",
    822  1.40.2.2        ad 		    p->p_pid, p->p_comm);
    823       1.2   thorpej 	default:
    824  1.40.2.2        ad 		if (locking)
    825  1.40.2.2        ad 			mutex_exit(&p->p_smutex);
    826       1.2   thorpej 		panic("Process %d (%s) in unknown state %d",
    827       1.2   thorpej 		    p->p_pid, p->p_comm, p->p_stat);
    828       1.2   thorpej #endif
    829       1.2   thorpej 	}
    830       1.2   thorpej 
    831  1.40.2.2        ad 	if (locking)
    832  1.40.2.2        ad 		mutex_exit(&p->p_smutex);
    833       1.2   thorpej 	panic("proc_representative_lwp: couldn't find a lwp for process"
    834       1.2   thorpej 		" %d (%s)", p->p_pid, p->p_comm);
    835       1.2   thorpej 	/* NOTREACHED */
    836       1.2   thorpej 	return NULL;
    837       1.2   thorpej }
    838      1.37        ad 
    839      1.37        ad /*
     840  1.40.2.2        ad  * Look up a live LWP within the specified process, and return it locked.
    841  1.40.2.2        ad  *
    842  1.40.2.2        ad  * Must be called with p->p_smutex held.
    843  1.40.2.2        ad  */
    844  1.40.2.2        ad struct lwp *
    845  1.40.2.2        ad lwp_byid(struct proc *p, int id)
    846  1.40.2.2        ad {
    847  1.40.2.2        ad 	struct lwp *l;
    848  1.40.2.2        ad 
    849  1.40.2.2        ad 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    850  1.40.2.2        ad 
    851  1.40.2.2        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    852  1.40.2.2        ad 		if (l->l_lid == id)
    853  1.40.2.2        ad 			break;
    854  1.40.2.2        ad 	}
    855  1.40.2.2        ad 
    856  1.40.2.2        ad 	if (l != NULL) {
    857  1.40.2.2        ad 		lwp_lock(l);
    858  1.40.2.2        ad 		if (l->l_stat == LSIDL || l->l_stat == LSZOMB ||
    859  1.40.2.2        ad 		    l->l_stat == LSDEAD) {
    860  1.40.2.2        ad 			lwp_unlock(l);
    861  1.40.2.2        ad 			l = NULL;
    862  1.40.2.2        ad 		}
    863  1.40.2.2        ad 	}
    864  1.40.2.2        ad 
    865  1.40.2.2        ad 	return l;
    866  1.40.2.2        ad }
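
/*
 * Example (sketch): look up an LWP by ID and read a field.  lwp_byid()
 * returns the LWP locked, so the caller must unlock it when done.
 */
static lwpid_t
example_lwp_lookup(struct proc *p, int id)
{
	struct lwp *t;
	lwpid_t lid = 0;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_byid(p, id)) != NULL) {
		lid = t->l_lid;
		lwp_unlock(t);
	}
	mutex_exit(&p->p_smutex);

	return lid;
}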
    867  1.40.2.2        ad 
    868  1.40.2.2        ad /*
    869      1.37        ad  * Update an LWP's cached credentials to mirror the process' master copy.
    870      1.37        ad  *
    871      1.37        ad  * This happens early in the syscall path, on user trap, and on LWP
    872      1.37        ad  * creation.  A long-running LWP can also voluntarily choose to update
    873      1.37        ad  * it's credentials by calling this routine.  This may be called from
     874      1.37        ad  * its credentials by calling this routine.  This may be called from
    875      1.37        ad  */
    876      1.37        ad void
    877      1.37        ad lwp_update_creds(struct lwp *l)
    878      1.37        ad {
    879      1.37        ad 	kauth_cred_t oc;
    880      1.37        ad 	struct proc *p;
    881      1.37        ad 
    882      1.37        ad 	p = l->l_proc;
    883      1.37        ad 	oc = l->l_cred;
    884      1.37        ad 
    885  1.40.2.4        ad 	mutex_enter(&p->p_mutex);
    886      1.37        ad 	kauth_cred_hold(p->p_cred);
    887      1.37        ad 	l->l_cred = p->p_cred;
    888  1.40.2.4        ad 	mutex_exit(&p->p_mutex);
    889      1.37        ad 	if (oc != NULL)
    890      1.37        ad 		kauth_cred_free(oc);
    891      1.37        ad }
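
/*
 * Example (sketch): the check-before-call pattern described above, as
 * used by LWP_CACHE_CREDS(): refresh the cached credentials only when
 * the process' master copy has changed.
 */
static void
example_cache_creds(struct lwp *l)
{

	/* Unlocked test; lwp_update_creds() takes p_mutex to copy. */
	if (__predict_false(l->l_cred != l->l_proc->p_cred))
		lwp_update_creds(l);
}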
    892  1.40.2.2        ad 
    893  1.40.2.2        ad /*
    894  1.40.2.2        ad  * Verify that an LWP is locked, and optionally verify that the lock matches
    895  1.40.2.2        ad  * one we specify.
    896  1.40.2.2        ad  */
    897  1.40.2.2        ad int
    898  1.40.2.2        ad lwp_locked(struct lwp *l, kmutex_t *mtx)
    899  1.40.2.2        ad {
    900  1.40.2.3        ad 	kmutex_t *cur = l->l_mutex;
    901  1.40.2.2        ad 
    902  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    903  1.40.2.3        ad 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
    904  1.40.2.3        ad #else
    905  1.40.2.4        ad 	return mutex_owned(cur);
    906  1.40.2.3        ad #endif
    907  1.40.2.2        ad }
    908  1.40.2.2        ad 
    909  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    910  1.40.2.2        ad /*
     911  1.40.2.3        ad  * Lock an LWP, retrying if its lock pointer changes while we wait.
    912  1.40.2.2        ad  */
    913  1.40.2.2        ad void
    914  1.40.2.4        ad lwp_lock_retry(struct lwp *l, kmutex_t *old)
    915  1.40.2.2        ad {
    916  1.40.2.3        ad 
     917  1.40.2.3        ad 	do {
    918  1.40.2.4        ad 		mutex_exit(old);
    919  1.40.2.4        ad 		old = l->l_mutex;
    920  1.40.2.4        ad 		mutex_enter(old);
    921  1.40.2.3        ad 
    922  1.40.2.3        ad 		/*
    923  1.40.2.3        ad 		 * mutex_enter() will have posted a read barrier.  Re-test
    924  1.40.2.3        ad 		 * l->l_mutex.  If it has changed, we need to try again.
    925  1.40.2.3        ad 		 */
    926  1.40.2.4        ad 	} while (__predict_false(l->l_mutex != old));
    927  1.40.2.2        ad }
    928  1.40.2.3        ad #endif
    929  1.40.2.3        ad 
    930  1.40.2.3        ad /*
    931  1.40.2.4        ad  * Lend a new mutex to an LWP.  The old mutex must be held.
    932  1.40.2.3        ad  */
    933  1.40.2.3        ad void
    934  1.40.2.3        ad lwp_setlock(struct lwp *l, kmutex_t *new)
    935  1.40.2.3        ad {
    936  1.40.2.4        ad 
    937  1.40.2.2        ad 	LOCK_ASSERT(mutex_owned(l->l_mutex));
    938  1.40.2.2        ad 
    939  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    940  1.40.2.3        ad 	mb_write();
    941  1.40.2.3        ad 	l->l_mutex = new;
    942  1.40.2.4        ad #else
    943  1.40.2.4        ad 	(void)new;
    944  1.40.2.3        ad #endif
    945  1.40.2.2        ad }
    946  1.40.2.2        ad 
    947  1.40.2.2        ad /*
    948  1.40.2.3        ad  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
    949  1.40.2.3        ad  * must be held.
    950  1.40.2.3        ad  */
    951  1.40.2.3        ad void
    952  1.40.2.4        ad lwp_unlock_to(struct lwp *l, kmutex_t *new)
    953  1.40.2.3        ad {
    954  1.40.2.3        ad 	kmutex_t *old;
    955  1.40.2.3        ad 
    956  1.40.2.3        ad 	LOCK_ASSERT(mutex_owned(l->l_mutex));
    957  1.40.2.3        ad 
    958  1.40.2.3        ad 	old = l->l_mutex;
    959  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    960  1.40.2.3        ad 	mb_write();
    961  1.40.2.3        ad 	l->l_mutex = new;
    962  1.40.2.4        ad #else
    963  1.40.2.4        ad 	(void)new;
    964  1.40.2.3        ad #endif
    965  1.40.2.3        ad 	mutex_exit(old);
    966  1.40.2.3        ad }
    967  1.40.2.3        ad 
    968  1.40.2.3        ad /*
     969  1.40.2.3        ad  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
    970  1.40.2.3        ad  * locked.
    971  1.40.2.2        ad  */
    972  1.40.2.2        ad void
    973  1.40.2.3        ad lwp_relock(struct lwp *l, kmutex_t *new)
    974  1.40.2.2        ad {
    975  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    976  1.40.2.4        ad 	kmutex_t *old;
    977  1.40.2.4        ad #endif
    978  1.40.2.2        ad 
    979  1.40.2.2        ad 	LOCK_ASSERT(mutex_owned(l->l_mutex));
    980  1.40.2.2        ad 
    981  1.40.2.4        ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    982  1.40.2.4        ad 	old = l->l_mutex;
    983  1.40.2.4        ad 	if (old != new) {
    984  1.40.2.4        ad 		mutex_enter(new);
    985  1.40.2.4        ad 		l->l_mutex = new;
    986  1.40.2.4        ad 		mutex_exit(old);
    987  1.40.2.4        ad 	}
    988  1.40.2.4        ad #else
    989  1.40.2.4        ad 	(void)new;
    990  1.40.2.3        ad #endif
    991  1.40.2.2        ad }
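
/*
 * Example (sketch): lending an LWP's lock, as a sleep queue might when
 * enqueueing an LWP.  Per the lock order above, the sleep queue mutex is
 * taken before the LWP's own mutex; lwp_unlock_to() then publishes the
 * new mutex and drops the old one in one step, so the LWP is never
 * observably unlocked.
 */
static void
example_lend_lock(struct lwp *l, kmutex_t *sq_mutex)
{

	mutex_enter(sq_mutex);		/* new covering mutex, held first */
	lwp_lock(l);			/* the LWP's current mutex */
	/* ... place l on the queue protected by sq_mutex ... */
	lwp_unlock_to(l, sq_mutex);	/* l is now covered by sq_mutex */
	mutex_exit(sq_mutex);
}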
    992  1.40.2.2        ad 
    993  1.40.2.2        ad /*
    994  1.40.2.2        ad  * Handle exceptions for mi_userret().  Called if L_USERRET is set.
    995  1.40.2.2        ad  */
    996  1.40.2.2        ad void
    997  1.40.2.2        ad lwp_userret(struct lwp *l)
    998  1.40.2.2        ad {
    999  1.40.2.2        ad 	struct proc *p;
   1000  1.40.2.4        ad 	int sig;
   1001  1.40.2.2        ad 
   1002  1.40.2.2        ad 	p = l->l_proc;
   1003  1.40.2.2        ad 
   1004  1.40.2.4        ad 	do {
   1005  1.40.2.4        ad 		/* Process pending signals first. */
   1006  1.40.2.4        ad 		if ((l->l_flag & L_PENDSIG) != 0) {
   1007  1.40.2.4        ad 			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() below */
   1008  1.40.2.4        ad 			mutex_enter(&p->p_smutex);
   1009  1.40.2.4        ad 			while ((sig = issignal(l)) != 0)
   1010  1.40.2.4        ad 				postsig(sig);
   1011  1.40.2.4        ad 			mutex_exit(&p->p_smutex);
   1012  1.40.2.4        ad 			(void)KERNEL_UNLOCK(0, l);	/* XXXSMP */
   1013  1.40.2.4        ad 		}
   1014  1.40.2.2        ad 
   1015  1.40.2.4        ad 		/* Core-dump or suspend pending. */
   1016  1.40.2.4        ad 		if ((l->l_flag & L_WSUSPEND) != 0) {
   1017  1.40.2.4        ad 			/*
   1018  1.40.2.4        ad 			 * Suspend ourselves, so that the kernel stack and
   1019  1.40.2.4        ad 			 * therefore the userland registers saved in the
   1020  1.40.2.4        ad 			 * trapframe are around for coredump() to write them
   1021  1.40.2.4        ad 			 * out.  We issue a wakeup() on p->p_nrlwps so that
   1022  1.40.2.4        ad 			 * sigexit() will write the core file out once all
   1023  1.40.2.4        ad 			 * other LWPs are suspended.
   1024  1.40.2.4        ad 			 */
   1025  1.40.2.4        ad 			mutex_enter(&p->p_smutex);
   1026  1.40.2.4        ad 			lwp_lock(l);
   1027  1.40.2.4        ad 			lwp_relock(l, &lwp_mutex);
   1028  1.40.2.4        ad 			p->p_nrlwps--;
   1029  1.40.2.4        ad 			wakeup(&p->p_nrlwps);
   1030  1.40.2.4        ad 			l->l_stat = LSSUSPENDED;
   1031  1.40.2.4        ad 			mutex_exit(&p->p_smutex);
   1032  1.40.2.4        ad 			mi_switch(l, NULL);
   1033  1.40.2.4        ad 			lwp_lock(l);
   1034  1.40.2.4        ad 		}
   1035  1.40.2.2        ad 
   1036  1.40.2.4        ad 		/* Process is exiting. */
   1037  1.40.2.4        ad 		if ((l->l_flag & L_WEXIT) != 0) {
   1038  1.40.2.4        ad 			KERNEL_LOCK(1, l);
   1039  1.40.2.4        ad 			(void)lwp_exit(l, 0);
   1040  1.40.2.4        ad 			KASSERT(0);
   1041  1.40.2.4        ad 			/* NOTREACHED */
   1042  1.40.2.4        ad 		}
   1043  1.40.2.4        ad 	} while ((l->l_flag & L_USERRET) != 0);
   1044  1.40.2.2        ad }
   1045  1.40.2.2        ad 
   1046  1.40.2.2        ad /*
    1047  1.40.2.2        ad  * Return non-zero if this is the last live LWP in the process.  Called when
   1048  1.40.2.2        ad  * exiting, dumping core, waiting for other LWPs to exit, etc.  Accepts a
   1049  1.40.2.2        ad  * 'bias' value for deadlock detection.
   1050  1.40.2.2        ad  *
   1051  1.40.2.2        ad  * Must be called with p->p_smutex held.
   1052  1.40.2.2        ad  */
   1053  1.40.2.2        ad int
   1054  1.40.2.2        ad lwp_lastlive(int bias)
   1055  1.40.2.2        ad {
   1056  1.40.2.2        ad 	struct lwp *l = curlwp;
   1057  1.40.2.2        ad 	struct proc *p = l->l_proc;
   1058  1.40.2.2        ad 
   1059  1.40.2.2        ad 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
   1060  1.40.2.2        ad 	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSTOP);
   1061  1.40.2.2        ad 
   1062  1.40.2.2        ad 	return p->p_nrlwps - bias - (l->l_stat == LSONPROC) == 0;
   1063  1.40.2.2        ad }
   1064  1.40.2.4        ad 
   1065  1.40.2.4        ad /*
   1066  1.40.2.4        ad  * Add one reference to an LWP.  This will prevent the LWP from
   1067  1.40.2.4        ad  * transitioning from the LSDEAD state into LSZOMB, and thus keep
   1068  1.40.2.4        ad  * the lwp structure and PCB around to inspect.
   1069  1.40.2.4        ad  */
   1070  1.40.2.4        ad void
   1071  1.40.2.4        ad lwp_addref(struct lwp *l)
   1072  1.40.2.4        ad {
   1073  1.40.2.4        ad 
   1074  1.40.2.4        ad 	LOCK_ASSERT(lwp_locked(l, NULL));
   1075  1.40.2.4        ad 	KASSERT(l->l_stat != LSZOMB);
   1076  1.40.2.4        ad 	KASSERT(l->l_refcnt != 0);
   1077  1.40.2.4        ad 
   1078  1.40.2.4        ad 	l->l_refcnt++;
   1079  1.40.2.4        ad }
   1080  1.40.2.4        ad 
   1081  1.40.2.4        ad /*
   1082  1.40.2.4        ad  * Remove one reference to an LWP.  If this is the last reference,
   1083  1.40.2.4        ad  * then we must finalize the LWP's death.
   1084  1.40.2.4        ad  */
   1085  1.40.2.4        ad void
   1086  1.40.2.4        ad lwp_delref(struct lwp *l)
   1087  1.40.2.4        ad {
   1088  1.40.2.4        ad 
   1089  1.40.2.4        ad 	lwp_exit2(l);
   1090  1.40.2.4        ad }
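
/*
 * Example (sketch): pin an LWP for inspection, so that it cannot move
 * from LSDEAD to LSZOMB and have its PCB freed while we examine it.
 */
static void
example_lwp_inspect(struct lwp *l)
{

	lwp_lock(l);
	lwp_addref(l);		/* hold the lwp structure and PCB in place */
	lwp_unlock(l);

	/* ... inspect l, e.g. l->l_addr, without it being reaped ... */

	lwp_delref(l);		/* may finalize the LWP's death */
}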
   1091  1.40.2.5        ad 
   1092  1.40.2.5        ad /*
   1093  1.40.2.5        ad  * lwp_specific_key_create --
   1094  1.40.2.5        ad  *	Create a key for subsystem lwp-specific data.
   1095  1.40.2.5        ad  */
   1096  1.40.2.5        ad int
   1097  1.40.2.5        ad lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1098  1.40.2.5        ad {
   1099  1.40.2.5        ad 
   1100  1.40.2.5        ad 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
   1101  1.40.2.5        ad }
   1102  1.40.2.5        ad 
   1103  1.40.2.5        ad /*
   1104  1.40.2.5        ad  * lwp_specific_key_delete --
   1105  1.40.2.5        ad  *	Delete a key for subsystem lwp-specific data.
   1106  1.40.2.5        ad  */
   1107  1.40.2.5        ad void
   1108  1.40.2.5        ad lwp_specific_key_delete(specificdata_key_t key)
   1109  1.40.2.5        ad {
   1110  1.40.2.5        ad 
   1111  1.40.2.5        ad 	specificdata_key_delete(lwp_specificdata_domain, key);
   1112  1.40.2.5        ad }
   1113  1.40.2.5        ad 
   1114  1.40.2.5        ad /*
   1115  1.40.2.5        ad  * lwp_initspecific --
   1116  1.40.2.5        ad  *	Initialize an LWP's specificdata container.
   1117  1.40.2.5        ad  */
   1118  1.40.2.5        ad void
   1119  1.40.2.5        ad lwp_initspecific(struct lwp *l)
   1120  1.40.2.5        ad {
   1121  1.40.2.5        ad 	int error;
   1122  1.40.2.5        ad 
   1123  1.40.2.5        ad 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
   1124  1.40.2.5        ad 	KASSERT(error == 0);
   1125  1.40.2.5        ad }
   1126  1.40.2.5        ad 
   1127  1.40.2.5        ad /*
   1128  1.40.2.5        ad  * lwp_finispecific --
   1129  1.40.2.5        ad  *	Finalize an LWP's specificdata container.
   1130  1.40.2.5        ad  */
   1131  1.40.2.5        ad void
   1132  1.40.2.5        ad lwp_finispecific(struct lwp *l)
   1133  1.40.2.5        ad {
   1134  1.40.2.5        ad 
   1135  1.40.2.5        ad 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
   1136  1.40.2.5        ad }
   1137  1.40.2.5        ad 
   1138  1.40.2.5        ad /*
   1139  1.40.2.5        ad  * lwp_getspecific --
   1140  1.40.2.5        ad  *	Return lwp-specific data corresponding to the specified key.
   1141  1.40.2.5        ad  *
   1142  1.40.2.5        ad  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
   1143  1.40.2.5        ad  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
   1144  1.40.2.5        ad  *	LWP's specifc data, care must be taken to ensure that doing so
   1145  1.40.2.5        ad  *	would not cause internal data structure inconsistency (i.e. caller
   1146  1.40.2.5        ad  *	can guarantee that the target LWP is not inside an lwp_getspecific()
   1147  1.40.2.5        ad  *	or lwp_setspecific() call).
   1148  1.40.2.5        ad  */
   1149  1.40.2.5        ad void *
   1150  1.40.2.5        ad lwp_getspecific(specificdata_key_t key)
   1151  1.40.2.5        ad {
   1152  1.40.2.5        ad 
   1153  1.40.2.5        ad 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1154  1.40.2.5        ad 						  &curlwp->l_specdataref, key));
   1155  1.40.2.5        ad }
   1156  1.40.2.5        ad 
   1157  1.40.2.5        ad void *
   1158  1.40.2.5        ad _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
   1159  1.40.2.5        ad {
   1160  1.40.2.5        ad 
   1161  1.40.2.5        ad 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1162  1.40.2.5        ad 						  &l->l_specdataref, key));
   1163  1.40.2.5        ad }
   1164  1.40.2.5        ad 
   1165  1.40.2.5        ad /*
   1166  1.40.2.5        ad  * lwp_setspecific --
   1167  1.40.2.5        ad  *	Set lwp-specific data corresponding to the specified key.
   1168  1.40.2.5        ad  */
   1169  1.40.2.5        ad void
   1170  1.40.2.5        ad lwp_setspecific(specificdata_key_t key, void *data)
   1171  1.40.2.5        ad {
   1172  1.40.2.5        ad 
   1173  1.40.2.5        ad 	specificdata_setspecific(lwp_specificdata_domain,
   1174  1.40.2.5        ad 				 &curlwp->l_specdataref, key, data);
   1175  1.40.2.5        ad }
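
/*
 * Example (sketch): typical use of the lwp-specific data interface by a
 * hypothetical subsystem.  The key is created once at initialization;
 * thereafter each LWP touches only its own slot, so no locking is needed.
 */
static specificdata_key_t example_key;

static void
example_dtor(void *data)
{

	/* Release "data" here when an LWP still carrying it is destroyed. */
}

static int
example_specific_init(void)
{

	return lwp_specific_key_create(&example_key, example_dtor);
}

static void
example_specific_use(void *data)
{

	lwp_setspecific(example_key, data);
	KASSERT(lwp_getspecific(example_key) == data);
}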
   1176