kern_lwp.c revision 1.138
      1  1.138    darran /*	$NetBSD: kern_lwp.c,v 1.138 2010/02/21 02:11:40 darran Exp $	*/
      2    1.2   thorpej 
      3    1.2   thorpej /*-
      4  1.127        ad  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
      5    1.2   thorpej  * All rights reserved.
      6    1.2   thorpej  *
      7    1.2   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8   1.52        ad  * by Nathan J. Williams, and Andrew Doran.
      9    1.2   thorpej  *
     10    1.2   thorpej  * Redistribution and use in source and binary forms, with or without
     11    1.2   thorpej  * modification, are permitted provided that the following conditions
     12    1.2   thorpej  * are met:
     13    1.2   thorpej  * 1. Redistributions of source code must retain the above copyright
     14    1.2   thorpej  *    notice, this list of conditions and the following disclaimer.
     15    1.2   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     16    1.2   thorpej  *    notice, this list of conditions and the following disclaimer in the
     17    1.2   thorpej  *    documentation and/or other materials provided with the distribution.
     18    1.2   thorpej  *
     19    1.2   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20    1.2   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21    1.2   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22    1.2   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23    1.2   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24    1.2   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25    1.2   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26    1.2   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27    1.2   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28    1.2   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29    1.2   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     30    1.2   thorpej  */
     31    1.9     lukem 
     32   1.52        ad /*
     33   1.52        ad  * Overview
     34   1.52        ad  *
     35   1.66        ad  *	Lightweight processes (LWPs) are the basic unit or thread of
     36   1.52        ad  *	execution within the kernel.  The core state of an LWP is described
     37   1.66        ad  *	by "struct lwp", also known as lwp_t.
     38   1.52        ad  *
     39   1.52        ad  *	Each LWP is contained within a process (described by "struct proc").
     40   1.52        ad  *	Every process contains at least one LWP, but may contain more.  The
     41   1.52        ad  *	process describes attributes shared among all of its LWPs such as a
     42   1.52        ad  *	private address space, global execution state (stopped, active,
     43   1.52        ad  *	zombie, ...), signal disposition and so on.  On a multiprocessor
     44   1.66        ad  *	machine, multiple LWPs may be executing concurrently in the kernel.
     45   1.52        ad  *
     46   1.52        ad  * Execution states
     47   1.52        ad  *
     48   1.52        ad  *	At any given time, an LWP has overall state that is described by
     49   1.52        ad  *	lwp::l_stat.  The states are broken into two sets below.  The first
     50   1.52        ad  *	set is guaranteed to represent the absolute, current state of the
     51   1.52        ad  *	LWP:
     52  1.101     rmind  *
     53  1.101     rmind  *	LSONPROC
     54  1.101     rmind  *
     55  1.101     rmind  *		On processor: the LWP is executing on a CPU, either in the
     56  1.101     rmind  *		kernel or in user space.
     57  1.101     rmind  *
     58  1.101     rmind  *	LSRUN
     59  1.101     rmind  *
     60  1.101     rmind  *		Runnable: the LWP is parked on a run queue, and may soon be
     61  1.101     rmind  *		chosen to run by an idle processor, or by a processor that
     62  1.101     rmind  *		has been asked to preempt a currently running but lower
     63  1.134     rmind  *		priority LWP.
     64  1.101     rmind  *
     65  1.101     rmind  *	LSIDL
     66  1.101     rmind  *
     67  1.101     rmind  *		Idle: the LWP has been created but has not yet executed,
     68   1.66        ad  *		or it has ceased executing a unit of work and is waiting
     69   1.66        ad  *		to be started again.
     70  1.101     rmind  *
     71  1.101     rmind  *	LSSUSPENDED
     72  1.101     rmind  *
     73  1.101     rmind  *		Suspended: the LWP has had its execution suspended by
     74   1.52        ad  *		another LWP in the same process using the _lwp_suspend()
     75   1.52        ad  *		system call.  User-level LWPs also enter the suspended
     76   1.52        ad  *		state when the system is shutting down.
     77   1.52        ad  *
     78   1.52        ad  *	The second set represents a "statement of intent" on behalf of
     79   1.52        ad  *	the LWP.  The LWP may in fact be executing on a processor, or may
     80   1.66        ad  *	be sleeping or idle.  It is expected to take the necessary action to
     81  1.101     rmind  *	stop executing or become "running" again within a short timeframe.
     82  1.115        ad  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
     83  1.101     rmind  *	Importantly, it indicates that its state is tied to a CPU.
     84  1.101     rmind  *
     85  1.101     rmind  *	LSZOMB:
     86  1.101     rmind  *
     87  1.101     rmind  *		Dead or dying: the LWP has released most of its resources
     88  1.129        ad  *		and is about to switch away into oblivion, or has already
     89   1.66        ad  *		switched away.  When it switches away, its few remaining
     90   1.66        ad  *		resources can be collected.
     91  1.101     rmind  *
     92  1.101     rmind  *	LSSLEEP:
     93  1.101     rmind  *
     94  1.101     rmind  *		Sleeping: the LWP has entered itself onto a sleep queue, and
     95  1.101     rmind  *		has switched away or will switch away shortly to allow other
     96   1.66        ad  *		LWPs to run on the CPU.
     97  1.101     rmind  *
     98  1.101     rmind  *	LSSTOP:
     99  1.101     rmind  *
    100  1.101     rmind  *		Stopped: the LWP has been stopped as a result of a job
    101  1.101     rmind  *		control signal, or as a result of the ptrace() interface.
    102  1.101     rmind  *
    103  1.101     rmind  *		Stopped LWPs may run briefly within the kernel to handle
    104  1.101     rmind  *		signals that they receive, but will not return to user space
    105  1.101     rmind  *		until their process' state is changed away from stopped.
    106  1.101     rmind  *
    107  1.101     rmind  *		Single LWPs within a process can not be set stopped
    108  1.101     rmind  *		selectively: all actions that can stop or continue LWPs
    109  1.101     rmind  *		occur at the process level.
    110  1.101     rmind  *
    111   1.52        ad  * State transitions
    112   1.52        ad  *
    113   1.66        ad  *	Note that the LSSTOP state may only be set when returning to
    114   1.66        ad  *	user space in userret(), or when sleeping interruptibly.  The
    115   1.66        ad  *	LSSUSPENDED state may only be set in userret().  Before setting
    116   1.66        ad  *	those states, we try to ensure that the LWPs will release all
    117   1.66        ad  *	locks that they hold, and at a minimum try to ensure that the
    118   1.66        ad  *	LWP can be set runnable again by a signal.
    119   1.52        ad  *
    120   1.52        ad  *	LWPs may transition states in the following ways:
    121   1.52        ad  *
    122   1.52        ad  *	 RUN -------> ONPROC		ONPROC -----> RUN
    123  1.129        ad  *		    				    > SLEEP
    124  1.129        ad  *		    				    > STOPPED
    125   1.52        ad  *						    > SUSPENDED
    126   1.52        ad  *						    > ZOMB
    127  1.129        ad  *						    > IDL (special cases)
    128   1.52        ad  *
    129   1.52        ad  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    130  1.129        ad  *	            > SLEEP
    131   1.52        ad  *
    132   1.52        ad  *	 SLEEP -----> ONPROC		IDL --------> RUN
    133  1.101     rmind  *		    > RUN			    > SUSPENDED
    134  1.101     rmind  *		    > STOPPED			    > STOPPED
    135  1.129        ad  *						    > ONPROC (special cases)
    136   1.52        ad  *
    137  1.129        ad  *	Some state transitions are only possible with kernel threads (e.g.
    138  1.129        ad  *	ONPROC -> IDL) and happen under tightly controlled circumstances
    139  1.129        ad  *	free of unwanted side effects.
    140   1.66        ad  *
    141  1.114     rmind  * Migration
    142  1.114     rmind  *
    143  1.114     rmind  *	Migration of threads from one CPU to another can be performed
    144  1.114     rmind  *	internally by the scheduler via the sched_takecpu() or
    145  1.114     rmind  *	sched_catchlwp() functions.  The universal lwp_migrate() function
    146  1.114     rmind  *	should be used for any other cases.  Subsystems in the kernel must
    147  1.114     rmind  *	be aware that an LWP's CPU may change while it is not locked.
    148  1.114     rmind  *
    149   1.52        ad  * Locking
    150   1.52        ad  *
    151   1.52        ad  *	The majority of fields in 'struct lwp' are covered by a single,
    152   1.66        ad  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
    153   1.52        ad  *	each field are documented in sys/lwp.h.
    154   1.52        ad  *
    155   1.66        ad  *	State transitions must be made with the LWP's general lock held,
    156  1.101     rmind  *	and may cause the LWP's lock pointer to change. Manipulation of
    157   1.66        ad  *	the general lock is not performed directly, but through calls to
    158   1.66        ad  *	lwp_lock(), lwp_relock() and similar.
    159   1.52        ad  *
    160   1.52        ad  *	States and their associated locks:
    161   1.52        ad  *
    162   1.74     rmind  *	LSONPROC, LSZOMB:
    163   1.52        ad  *
    164   1.64      yamt  *		Always covered by spc_lwplock, which protects running LWPs.
    165  1.129        ad  *		This is a per-CPU lock and matches lwp::l_cpu.
    166   1.52        ad  *
    167   1.74     rmind  *	LSIDL, LSRUN:
    168   1.52        ad  *
    169   1.64      yamt  *		Always covered by spc_mutex, which protects the run queues.
    170  1.129        ad  *		This is a per-CPU lock and matches lwp::l_cpu.
    171   1.52        ad  *
    172   1.52        ad  *	LSSLEEP:
    173   1.52        ad  *
    174   1.66        ad  *		Covered by a lock associated with the sleep queue that the
    175  1.129        ad  *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
    176   1.52        ad  *
    177   1.52        ad  *	LSSTOP, LSSUSPENDED:
    178  1.101     rmind  *
    179   1.52        ad  *		If the LWP was previously sleeping (l_wchan != NULL), then
    180   1.66        ad  *		l_mutex references the sleep queue lock.  If the LWP was
    181   1.52        ad  *		runnable or on the CPU when halted, or has been removed from
    182   1.66        ad  *		the sleep queue since halted, then the lock is spc_lwplock.
    183   1.52        ad  *
    184   1.52        ad  *	The lock order is as follows:
    185   1.52        ad  *
    186   1.64      yamt  *		spc::spc_lwplock ->
    187  1.112        ad  *		    sleeptab::st_mutex ->
    188   1.64      yamt  *			tschain_t::tc_mutex ->
    189   1.64      yamt  *			    spc::spc_mutex
    190   1.52        ad  *
    191  1.103        ad  *	Each process has a scheduler state lock (proc::p_lock), and a
    192   1.52        ad  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    193   1.52        ad  *	so on.  When an LWP is to be entered into or removed from one of the
    194  1.103        ad  *	following states, p_lock must be held and the process wide counters
    195   1.52        ad  *	adjusted:
    196   1.52        ad  *
    197   1.52        ad  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    198   1.52        ad  *
    199  1.129        ad  *	(But not always for kernel threads.  There are some special cases
    200  1.129        ad  *	as mentioned above.  See kern_softint.c.)
    201  1.129        ad  *
    202   1.52        ad  *	Note that an LWP is considered running or likely to run soon if in
    203   1.52        ad  *	one of the following states.  This affects the value of p_nrlwps:
    204   1.52        ad  *
    205   1.52        ad  *		LSRUN, LSONPROC, LSSLEEP
    206   1.52        ad  *
    207  1.103        ad  *	p_lock does not need to be held when transitioning among these
    208  1.129        ad  *	three states, hence p_lock is rarely taken for state transitions.
    209   1.52        ad  */
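/*
 * As a concrete illustration of the locking rules above, a minimal
 * sketch (hypothetical code, not taken from any real caller) of
 * safely inspecting an LWP's state.  lwp_lock() follows l_mutex to
 * whichever lock currently covers the LWP, so l_stat is stable until
 * the matching lwp_unlock():
 *
 *	void
 *	example_report(struct lwp *l)
 *	{
 *		lwp_lock(l);
 *		if (l->l_stat == LSONPROC)
 *			printf("lwp %d is on a CPU\n", (int)l->l_lid);
 *		lwp_unlock(l);
 *	}
 */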
    210   1.52        ad 
    211    1.9     lukem #include <sys/cdefs.h>
    212  1.138    darran __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.138 2010/02/21 02:11:40 darran Exp $");
    213    1.8    martin 
    214   1.84      yamt #include "opt_ddb.h"
    215   1.52        ad #include "opt_lockdebug.h"
    216  1.124  wrstuden #include "opt_sa.h"
    217    1.2   thorpej 
    218   1.47   hannken #define _LWP_API_PRIVATE
    219   1.47   hannken 
    220    1.2   thorpej #include <sys/param.h>
    221    1.2   thorpej #include <sys/systm.h>
    222   1.64      yamt #include <sys/cpu.h>
    223    1.2   thorpej #include <sys/pool.h>
    224    1.2   thorpej #include <sys/proc.h>
    225  1.124  wrstuden #include <sys/sa.h>
    226  1.124  wrstuden #include <sys/savar.h>
    227    1.2   thorpej #include <sys/syscallargs.h>
    228   1.57       dsl #include <sys/syscall_stats.h>
    229   1.37        ad #include <sys/kauth.h>
    230   1.52        ad #include <sys/sleepq.h>
    231   1.52        ad #include <sys/lockdebug.h>
    232   1.52        ad #include <sys/kmem.h>
    233   1.91     rmind #include <sys/pset.h>
    234   1.75        ad #include <sys/intr.h>
    235   1.78        ad #include <sys/lwpctl.h>
    236   1.81        ad #include <sys/atomic.h>
    237  1.131        ad #include <sys/filedesc.h>
    238    1.2   thorpej 
    239  1.138    darran #ifdef KDTRACE_HOOKS
    240  1.138    darran #include <sys/dtrace_bsd.h>
    241  1.138    darran #endif
    242  1.138    darran 
    243    1.2   thorpej #include <uvm/uvm_extern.h>
    244   1.80     skrll #include <uvm/uvm_object.h>
    245    1.2   thorpej 
    246   1.77      matt struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);
    247   1.52        ad 
    248  1.133     pooka struct pool lwp_uc_pool;
    249   1.41   thorpej 
    250   1.87        ad static pool_cache_t lwp_cache;
    251   1.41   thorpej static specificdata_domain_t lwp_specificdata_domain;
    252   1.41   thorpej 
    253   1.41   thorpej void
    254   1.41   thorpej lwpinit(void)
    255   1.41   thorpej {
    256   1.41   thorpej 
    257  1.133     pooka 	pool_init(&lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    258  1.133     pooka 	    &pool_allocator_nointr, IPL_NONE);
    259   1.41   thorpej 	lwp_specificdata_domain = specificdata_domain_create();
    260   1.41   thorpej 	KASSERT(lwp_specificdata_domain != NULL);
    261   1.52        ad 	lwp_sys_init();
    262   1.87        ad 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
    263   1.87        ad 	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
    264   1.41   thorpej }
    265   1.41   thorpej 
    266   1.52        ad /*
    267   1.52        ad  * Set an LWP suspended.
    268   1.52        ad  *
    269  1.103        ad  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    270   1.52        ad  * LWP before return.
    271   1.52        ad  */
    272    1.2   thorpej int
    273   1.52        ad lwp_suspend(struct lwp *curl, struct lwp *t)
    274    1.2   thorpej {
    275   1.52        ad 	int error;
    276    1.2   thorpej 
    277  1.103        ad 	KASSERT(mutex_owned(t->l_proc->p_lock));
    278   1.63        ad 	KASSERT(lwp_locked(t, NULL));
    279   1.33       chs 
    280   1.52        ad 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    281    1.2   thorpej 
    282   1.52        ad 	/*
    283   1.52        ad 	 * If the current LWP has been told to exit, we must not suspend anyone
    284   1.52        ad 	 * else or deadlock could occur.  We won't return to userspace.
    285    1.2   thorpej 	 */
    286  1.109     rmind 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
    287   1.52        ad 		lwp_unlock(t);
    288   1.52        ad 		return (EDEADLK);
    289    1.2   thorpej 	}
    290    1.2   thorpej 
    291   1.52        ad 	error = 0;
    292    1.2   thorpej 
    293   1.52        ad 	switch (t->l_stat) {
    294   1.52        ad 	case LSRUN:
    295   1.52        ad 	case LSONPROC:
    296   1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    297   1.52        ad 		lwp_need_userret(t);
    298   1.52        ad 		lwp_unlock(t);
    299   1.52        ad 		break;
    300    1.2   thorpej 
    301   1.52        ad 	case LSSLEEP:
    302   1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    303    1.2   thorpej 
    304    1.2   thorpej 		/*
    305   1.52        ad 		 * Kick the LWP and try to get it to the kernel boundary
    306   1.52        ad 		 * so that it will release any locks that it holds.
    307   1.52        ad 		 * setrunnable() will release the lock.
    308    1.2   thorpej 		 */
    309   1.56     pavel 		if ((t->l_flag & LW_SINTR) != 0)
    310   1.52        ad 			setrunnable(t);
    311   1.52        ad 		else
    312   1.52        ad 			lwp_unlock(t);
    313   1.52        ad 		break;
    314    1.2   thorpej 
    315   1.52        ad 	case LSSUSPENDED:
    316   1.52        ad 		lwp_unlock(t);
    317   1.52        ad 		break;
    318   1.17      manu 
    319   1.52        ad 	case LSSTOP:
    320   1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    321   1.52        ad 		setrunnable(t);
    322   1.52        ad 		break;
    323    1.2   thorpej 
    324   1.52        ad 	case LSIDL:
    325   1.52        ad 	case LSZOMB:
    326   1.52        ad 		error = EINTR; /* It's what Solaris does..... */
    327   1.52        ad 		lwp_unlock(t);
    328   1.52        ad 		break;
    329    1.2   thorpej 	}
    330    1.2   thorpej 
    331   1.69     rmind 	return (error);
    332    1.2   thorpej }
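/*
 * A usage sketch of the contract above, modelled on the _lwp_suspend()
 * system call path (example_suspend() itself is hypothetical).  Note
 * that lwp_suspend() drops the target's lock for us:
 *
 *	int
 *	example_suspend(struct proc *p, lwpid_t lid)
 *	{
 *		struct lwp *t;
 *		int error;
 *
 *		mutex_enter(p->p_lock);
 *		if ((t = lwp_find(p, lid)) == NULL) {
 *			mutex_exit(p->p_lock);
 *			return ESRCH;
 *		}
 *		lwp_lock(t);
 *		error = lwp_suspend(curlwp, t);
 *		mutex_exit(p->p_lock);
 *		return error;
 *	}
 */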
    333    1.2   thorpej 
    334   1.52        ad /*
    335   1.52        ad  * Restart a suspended LWP.
    336   1.52        ad  *
    337  1.103        ad  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    338   1.52        ad  * LWP before return.
    339   1.52        ad  */
    340    1.2   thorpej void
    341    1.2   thorpej lwp_continue(struct lwp *l)
    342    1.2   thorpej {
    343    1.2   thorpej 
    344  1.103        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
    345   1.63        ad 	KASSERT(lwp_locked(l, NULL));
    346   1.52        ad 
    347   1.52        ad 	/* If rebooting or not suspended, then just bail out. */
    348   1.56     pavel 	if ((l->l_flag & LW_WREBOOT) != 0) {
    349   1.52        ad 		lwp_unlock(l);
    350    1.2   thorpej 		return;
    351   1.10      fvdl 	}
    352    1.2   thorpej 
    353   1.56     pavel 	l->l_flag &= ~LW_WSUSPEND;
    354    1.2   thorpej 
    355   1.52        ad 	if (l->l_stat != LSSUSPENDED) {
    356   1.52        ad 		lwp_unlock(l);
    357   1.52        ad 		return;
    358    1.2   thorpej 	}
    359    1.2   thorpej 
    360   1.52        ad 	/* setrunnable() will release the lock. */
    361   1.52        ad 	setrunnable(l);
    362    1.2   thorpej }
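/*
 * Usage sketch, mirroring the suspend example above (hypothetical
 * caller).  lwp_continue() likewise drops the LWP's lock, via either
 * setrunnable() or lwp_unlock():
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) != NULL) {
 *		lwp_lock(t);
 *		lwp_continue(t);
 *	}
 *	mutex_exit(p->p_lock);
 */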
    363    1.2   thorpej 
    364   1.52        ad /*
    365   1.52        ad  * Wait for an LWP within the current process to exit.  If 'lid' is
    366   1.52        ad  * non-zero, we are waiting for a specific LWP.
    367   1.52        ad  *
    368  1.103        ad  * Must be called with p->p_lock held.
    369   1.52        ad  */
    370    1.2   thorpej int
    371    1.2   thorpej lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
    372    1.2   thorpej {
    373    1.2   thorpej 	struct proc *p = l->l_proc;
    374   1.52        ad 	struct lwp *l2;
    375   1.52        ad 	int nfound, error;
    376   1.63        ad 	lwpid_t curlid;
    377   1.63        ad 	bool exiting;
    378    1.2   thorpej 
    379  1.103        ad 	KASSERT(mutex_owned(p->p_lock));
    380   1.52        ad 
    381   1.52        ad 	p->p_nlwpwait++;
    382   1.63        ad 	l->l_waitingfor = lid;
    383   1.63        ad 	curlid = l->l_lid;
    384   1.63        ad 	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
    385   1.52        ad 
    386   1.52        ad 	for (;;) {
    387   1.52        ad 		/*
    388   1.52        ad 		 * Avoid a race between exit1() and sigexit(): if the
    389   1.52        ad 		 * process is dumping core, then we need to bail out: call
    390   1.52        ad 		 * into lwp_userret() where we will be suspended until the
    391   1.52        ad 		 * deed is done.
    392   1.52        ad 		 */
    393   1.52        ad 		if ((p->p_sflag & PS_WCORE) != 0) {
    394  1.103        ad 			mutex_exit(p->p_lock);
    395   1.52        ad 			lwp_userret(l);
    396   1.52        ad #ifdef DIAGNOSTIC
    397   1.52        ad 			panic("lwp_wait1");
    398   1.52        ad #endif
    399   1.52        ad 			/* NOTREACHED */
    400   1.52        ad 		}
    401   1.52        ad 
    402   1.52        ad 		/*
    403   1.52        ad 		 * First off, drain any detached LWP that is waiting to be
    404   1.52        ad 		 * reaped.
    405   1.52        ad 		 */
    406   1.52        ad 		while ((l2 = p->p_zomblwp) != NULL) {
    407   1.52        ad 			p->p_zomblwp = NULL;
    408   1.63        ad 			lwp_free(l2, false, false);/* releases proc mutex */
    409  1.103        ad 			mutex_enter(p->p_lock);
    410   1.52        ad 		}
    411   1.52        ad 
    412   1.52        ad 		/*
    413   1.52        ad 		 * Now look for an LWP to collect.  If the whole process is
    414   1.52        ad 		 * exiting, count detached LWPs as eligible to be collected,
    415   1.52        ad 		 * but don't drain them here.
    416   1.52        ad 		 */
    417   1.52        ad 		nfound = 0;
    418   1.63        ad 		error = 0;
    419   1.52        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    420   1.63        ad 			/*
    421   1.63        ad 			 * If a specific wait and the target is waiting on
    422   1.63        ad 			 * us, then avoid deadlock.  This also traps LWPs
    423   1.63        ad 			 * that try to wait on themselves.
    424   1.63        ad 			 *
    425   1.63        ad 			 * Note that this does not handle more complicated
    426   1.63        ad 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
    427   1.63        ad 			 * can still be killed so it is not a major problem.
    428   1.63        ad 			 */
    429   1.63        ad 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
    430   1.63        ad 				error = EDEADLK;
    431   1.63        ad 				break;
    432   1.63        ad 			}
    433   1.63        ad 			if (l2 == l)
    434   1.52        ad 				continue;
    435   1.52        ad 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    436   1.63        ad 				nfound += exiting;
    437   1.63        ad 				continue;
    438   1.63        ad 			}
    439   1.63        ad 			if (lid != 0) {
    440   1.63        ad 				if (l2->l_lid != lid)
    441   1.63        ad 					continue;
    442   1.63        ad 				/*
    443   1.63        ad 				 * Mark this LWP as the first waiter, if there
    444   1.63        ad 				 * is no other.
    445   1.63        ad 				 */
    446   1.63        ad 				if (l2->l_waiter == 0)
    447   1.63        ad 					l2->l_waiter = curlid;
    448   1.63        ad 			} else if (l2->l_waiter != 0) {
    449   1.63        ad 				/*
    450   1.63        ad 				 * It already has a waiter - so don't
    451   1.63        ad 				 * collect it.  If the waiter doesn't
    452   1.63        ad 				 * grab it we'll get another chance
    453   1.63        ad 				 * later.
    454   1.63        ad 				 */
    455   1.63        ad 				nfound++;
    456   1.52        ad 				continue;
    457   1.52        ad 			}
    458   1.52        ad 			nfound++;
    459    1.2   thorpej 
    460   1.52        ad 			/* No need to lock the LWP in order to see LSZOMB. */
    461   1.52        ad 			if (l2->l_stat != LSZOMB)
    462   1.52        ad 				continue;
    463    1.2   thorpej 
    464   1.63        ad 			/*
    465   1.63        ad 			 * We're no longer waiting.  Reset the "first waiter"
    466   1.63        ad 			 * pointer on the target, in case it was us.
    467   1.63        ad 			 */
    468   1.63        ad 			l->l_waitingfor = 0;
    469   1.63        ad 			l2->l_waiter = 0;
    470   1.63        ad 			p->p_nlwpwait--;
    471    1.2   thorpej 			if (departed)
    472    1.2   thorpej 				*departed = l2->l_lid;
    473   1.75        ad 			sched_lwp_collect(l2);
    474   1.63        ad 
    475   1.63        ad 			/* lwp_free() releases the proc lock. */
    476   1.63        ad 			lwp_free(l2, false, false);
    477  1.103        ad 			mutex_enter(p->p_lock);
    478   1.52        ad 			return 0;
    479   1.52        ad 		}
    480    1.2   thorpej 
    481   1.63        ad 		if (error != 0)
    482   1.63        ad 			break;
    483   1.52        ad 		if (nfound == 0) {
    484   1.52        ad 			error = ESRCH;
    485   1.52        ad 			break;
    486   1.52        ad 		}
    487   1.63        ad 
    488   1.63        ad 		/*
    489   1.63        ad 		 * The kernel is careful to ensure that it can not deadlock
    490   1.63        ad 		 * when exiting - just keep waiting.
    491   1.63        ad 		 */
    492   1.63        ad 		if (exiting) {
    493   1.52        ad 			KASSERT(p->p_nlwps > 1);
    494  1.103        ad 			cv_wait(&p->p_lwpcv, p->p_lock);
    495   1.52        ad 			continue;
    496   1.52        ad 		}
    497   1.63        ad 
    498   1.63        ad 		/*
    499   1.63        ad 		 * If all other LWPs are waiting for exits or suspends
    500   1.63        ad 		 * and the supply of zombies and potential zombies is
    501   1.63        ad 		 * exhausted, then we are about to deadlock.
    502   1.63        ad 		 *
    503   1.63        ad 		 * If the process is exiting (and this LWP is not the one
    504   1.63        ad 		 * that is coordinating the exit) then bail out now.
    505   1.63        ad 		 */
    506   1.52        ad 		if ((p->p_sflag & PS_WEXIT) != 0 ||
    507   1.63        ad 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
    508   1.52        ad 			error = EDEADLK;
    509   1.52        ad 			break;
    510    1.2   thorpej 		}
    511   1.63        ad 
    512   1.63        ad 		/*
    513   1.63        ad 		 * Sit around and wait for something to happen.  We'll be
    514   1.63        ad 		 * awoken if any of the conditions examined change: if an
    515   1.63        ad 		 * LWP exits, is collected, or is detached.
    516   1.63        ad 		 */
    517  1.103        ad 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
    518   1.52        ad 			break;
    519    1.2   thorpej 	}
    520    1.2   thorpej 
    521   1.63        ad 	/*
    522   1.63        ad 	 * We didn't find any LWPs to collect, we may have received a
    523   1.63        ad 	 * signal, or some other condition has caused us to bail out.
    524   1.63        ad 	 *
    525   1.63        ad 	 * If waiting on a specific LWP, clear the waiters marker: some
    526   1.63        ad 	 * other LWP may want it.  Then, kick all the remaining waiters
    527   1.63        ad 	 * so that they can re-check for zombies and for deadlock.
    528   1.63        ad 	 */
    529   1.63        ad 	if (lid != 0) {
    530   1.63        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    531   1.63        ad 			if (l2->l_lid == lid) {
    532   1.63        ad 				if (l2->l_waiter == curlid)
    533   1.63        ad 					l2->l_waiter = 0;
    534   1.63        ad 				break;
    535   1.63        ad 			}
    536   1.63        ad 		}
    537   1.63        ad 	}
    538   1.52        ad 	p->p_nlwpwait--;
    539   1.63        ad 	l->l_waitingfor = 0;
    540   1.63        ad 	cv_broadcast(&p->p_lwpcv);
    541   1.63        ad 
    542   1.52        ad 	return error;
    543    1.2   thorpej }
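/*
 * A caller's sketch, modelled on the _lwp_wait() system call path
 * (variable names are illustrative).  p_lock is held across the call
 * and is still held when lwp_wait1() returns:
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait1(curlwp, lid, &departed, 0);
 *	mutex_exit(p->p_lock);
 */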
    544    1.2   thorpej 
    545   1.52        ad /*
    546   1.52        ad  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    547   1.52        ad  * The new LWP is created in state LSIDL and must be set running,
    548   1.52        ad  * suspended, or stopped by the caller.
    549   1.52        ad  */
    550    1.2   thorpej int
    551  1.134     rmind lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
    552   1.75        ad 	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
    553   1.75        ad 	   lwp_t **rnewlwpp, int sclass)
    554    1.2   thorpej {
    555   1.52        ad 	struct lwp *l2, *isfree;
    556   1.52        ad 	turnstile_t *ts;
    557    1.2   thorpej 
    558  1.107        ad 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
    559  1.107        ad 
    560   1.52        ad 	/*
    561   1.52        ad 	 * First off, reap any detached LWP waiting to be collected.
    562   1.52        ad 	 * We can re-use its LWP structure and turnstile.
    563   1.52        ad 	 */
    564   1.52        ad 	isfree = NULL;
    565   1.52        ad 	if (p2->p_zomblwp != NULL) {
    566  1.103        ad 		mutex_enter(p2->p_lock);
    567   1.52        ad 		if ((isfree = p2->p_zomblwp) != NULL) {
    568   1.52        ad 			p2->p_zomblwp = NULL;
    569   1.63        ad 			lwp_free(isfree, true, false);/* releases proc mutex */
    570   1.52        ad 		} else
    571  1.103        ad 			mutex_exit(p2->p_lock);
    572   1.52        ad 	}
    573   1.52        ad 	if (isfree == NULL) {
    574   1.87        ad 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
    575   1.52        ad 		memset(l2, 0, sizeof(*l2));
    576   1.76        ad 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
    577   1.60      yamt 		SLIST_INIT(&l2->l_pi_lenders);
    578   1.52        ad 	} else {
    579   1.52        ad 		l2 = isfree;
    580   1.52        ad 		ts = l2->l_ts;
    581   1.75        ad 		KASSERT(l2->l_inheritedprio == -1);
    582   1.60      yamt 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
    583   1.52        ad 		memset(l2, 0, sizeof(*l2));
    584   1.52        ad 		l2->l_ts = ts;
    585   1.52        ad 	}
    586    1.2   thorpej 
    587    1.2   thorpej 	l2->l_stat = LSIDL;
    588    1.2   thorpej 	l2->l_proc = p2;
    589   1.52        ad 	l2->l_refcnt = 1;
    590   1.75        ad 	l2->l_class = sclass;
    591  1.116        ad 
    592  1.116        ad 	/*
    593  1.116        ad 	 * If vfork(), we want the LWP to run fast and on the same CPU
    594  1.116        ad 	 * as its parent, so that it can reuse the VM context and cache
    595  1.116        ad 	 * footprint on the local CPU.
    596  1.116        ad 	 */
    597  1.116        ad 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
    598   1.82        ad 	l2->l_kpribase = PRI_KERNEL;
    599   1.52        ad 	l2->l_priority = l1->l_priority;
    600   1.75        ad 	l2->l_inheritedprio = -1;
    601  1.134     rmind 	l2->l_flag = 0;
    602   1.88        ad 	l2->l_pflag = LP_MPSAFE;
    603  1.131        ad 	TAILQ_INIT(&l2->l_ld_locks);
    604  1.131        ad 
    605  1.131        ad 	/*
    606  1.131        ad 	 * If not the first LWP in the process, grab a reference to the
    607  1.131        ad 	 * descriptor table.
    608  1.131        ad 	 */
    609   1.97        ad 	l2->l_fd = p2->p_fd;
    610  1.131        ad 	if (p2->p_nlwps != 0) {
    611  1.131        ad 		KASSERT(l1->l_proc == p2);
    612  1.136     rmind 		fd_hold(l2);
    613  1.131        ad 	} else {
    614  1.131        ad 		KASSERT(l1->l_proc != p2);
    615  1.131        ad 	}
    616   1.41   thorpej 
    617   1.56     pavel 	if (p2->p_flag & PK_SYSTEM) {
    618  1.134     rmind 		/* Mark it as a system LWP. */
    619   1.56     pavel 		l2->l_flag |= LW_SYSTEM;
    620   1.52        ad 	}
    621    1.2   thorpej 
    622  1.107        ad 	kpreempt_disable();
    623  1.107        ad 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
    624  1.107        ad 	l2->l_cpu = l1->l_cpu;
    625  1.107        ad 	kpreempt_enable();
    626  1.107        ad 
    627  1.138    darran #ifdef KDTRACE_HOOKS
    628  1.138    darran 	kdtrace_thread_ctor(NULL, l2);
    629  1.138    darran #endif
    630   1.73     rmind 	lwp_initspecific(l2);
    631   1.75        ad 	sched_lwp_fork(l1, l2);
    632   1.37        ad 	lwp_update_creds(l2);
    633   1.70        ad 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
    634   1.70        ad 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
    635   1.52        ad 	cv_init(&l2->l_sigcv, "sigwait");
    636   1.52        ad 	l2->l_syncobj = &sched_syncobj;
    637    1.2   thorpej 
    638    1.2   thorpej 	if (rnewlwpp != NULL)
    639    1.2   thorpej 		*rnewlwpp = l2;
    640    1.2   thorpej 
    641  1.137     rmind 	uvm_lwp_setuarea(l2, uaddr);
    642    1.2   thorpej 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
    643    1.2   thorpej 	    (arg != NULL) ? arg : l2);
    644    1.2   thorpej 
    645  1.103        ad 	mutex_enter(p2->p_lock);
    646   1.52        ad 
    647   1.52        ad 	if ((flags & LWP_DETACHED) != 0) {
    648   1.52        ad 		l2->l_prflag = LPR_DETACHED;
    649   1.52        ad 		p2->p_ndlwps++;
    650   1.52        ad 	} else
    651   1.52        ad 		l2->l_prflag = 0;
    652   1.52        ad 
    653   1.52        ad 	l2->l_sigmask = l1->l_sigmask;
    654   1.52        ad 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
    655   1.52        ad 	sigemptyset(&l2->l_sigpend.sp_set);
    656   1.52        ad 
    657   1.53      yamt 	p2->p_nlwpid++;
    658   1.53      yamt 	if (p2->p_nlwpid == 0)
    659   1.53      yamt 		p2->p_nlwpid++;
    660   1.53      yamt 	l2->l_lid = p2->p_nlwpid;
    661    1.2   thorpej 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    662    1.2   thorpej 	p2->p_nlwps++;
    663    1.2   thorpej 
    664   1.91     rmind 	if ((p2->p_flag & PK_SYSTEM) == 0) {
    665   1.91     rmind 		/* Inherit an affinity */
    666  1.122     rmind 		if (l1->l_flag & LW_AFFINITY) {
    667  1.128     rmind 			/*
    668  1.128     rmind 			 * Note that we hold the state lock while inheriting
    669  1.128     rmind 			 * the affinity to avoid race with sched_setaffinity().
    670  1.128     rmind 			 */
    671  1.128     rmind 			lwp_lock(l1);
    672  1.122     rmind 			if (l1->l_flag & LW_AFFINITY) {
    673  1.122     rmind 				kcpuset_use(l1->l_affinity);
    674  1.122     rmind 				l2->l_affinity = l1->l_affinity;
    675  1.122     rmind 				l2->l_flag |= LW_AFFINITY;
    676  1.122     rmind 			}
    677  1.128     rmind 			lwp_unlock(l1);
    678  1.117  christos 		}
    679  1.128     rmind 		lwp_lock(l2);
    680  1.128     rmind 		/* Inherit a processor-set */
    681  1.128     rmind 		l2->l_psid = l1->l_psid;
    682   1.91     rmind 		/* Look for a CPU to start */
    683   1.91     rmind 		l2->l_cpu = sched_takecpu(l2);
    684   1.91     rmind 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
    685   1.91     rmind 	}
    686  1.128     rmind 	mutex_exit(p2->p_lock);
    687  1.128     rmind 
    688  1.128     rmind 	mutex_enter(proc_lock);
    689  1.128     rmind 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    690  1.128     rmind 	mutex_exit(proc_lock);
    691   1.91     rmind 
    692   1.57       dsl 	SYSCALL_TIME_LWP_INIT(l2);
    693   1.57       dsl 
    694   1.16      manu 	if (p2->p_emul->e_lwp_fork)
    695   1.16      manu 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    696   1.16      manu 
    697    1.2   thorpej 	return (0);
    698    1.2   thorpej }
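/*
 * A caller's sketch for the contract above, loosely following the
 * kthread_create() path.  uvm_uarea_alloc() supplying the uarea,
 * SCHED_OTHER as the class, and example_func() are assumptions of
 * this example, not fixed requirements.  setrunnable() releases the
 * new LWP's lock:
 *
 *	vaddr_t uaddr;
 *	lwp_t *l;
 *	int error;
 *
 *	uaddr = uvm_uarea_alloc();
 *	error = lwp_create(curlwp, p, uaddr, LWP_DETACHED, NULL, 0,
 *	    example_func, NULL, &l, SCHED_OTHER);
 *	if (error == 0) {
 *		mutex_enter(p->p_lock);
 *		lwp_lock(l);
 *		setrunnable(l);
 *		mutex_exit(p->p_lock);
 *	}
 */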
    699    1.2   thorpej 
    700    1.2   thorpej /*
    701   1.64      yamt  * Called by MD code when a new LWP begins execution.  Must be called
    702   1.64      yamt  * with the previous LWP locked (so at splsched), or if there is no
    703   1.64      yamt  * previous LWP, at splsched.
    704   1.64      yamt  */
    705   1.64      yamt void
    706   1.64      yamt lwp_startup(struct lwp *prev, struct lwp *new)
    707   1.64      yamt {
    708   1.64      yamt 
    709  1.107        ad 	KASSERT(kpreempt_disabled());
    710   1.64      yamt 	if (prev != NULL) {
    711   1.81        ad 		 * Normalize the count of spin-mutexes; it was
    712   1.81        ad 		 * increased in mi_switch().  Clear the context switch
    713   1.81        ad 		 * state: the switch is finished for the previous LWP.
    714   1.81        ad 		 * context switch - it is finished for previous LWP.
    715   1.81        ad 		 */
    716   1.81        ad 		curcpu()->ci_mtx_count++;
    717   1.81        ad 		membar_exit();
    718   1.81        ad 		prev->l_ctxswtch = 0;
    719   1.64      yamt 	}
    720  1.107        ad 	KPREEMPT_DISABLE(new);
    721  1.107        ad 	spl0();
    722  1.105        ad 	pmap_activate(new);
    723   1.64      yamt 	LOCKDEBUG_BARRIER(NULL, 0);
    724  1.107        ad 	KPREEMPT_ENABLE(new);
    725   1.65        ad 	if ((new->l_pflag & LP_MPSAFE) == 0) {
    726   1.65        ad 		KERNEL_LOCK(1, new);
    727   1.65        ad 	}
    728   1.64      yamt }
    729   1.64      yamt 
    730   1.64      yamt /*
    731   1.65        ad  * Exit an LWP.
    732    1.2   thorpej  */
    733    1.2   thorpej void
    734    1.2   thorpej lwp_exit(struct lwp *l)
    735    1.2   thorpej {
    736    1.2   thorpej 	struct proc *p = l->l_proc;
    737   1.52        ad 	struct lwp *l2;
    738   1.65        ad 	bool current;
    739   1.65        ad 
    740   1.65        ad 	current = (l == curlwp);
    741    1.2   thorpej 
    742  1.114     rmind 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
    743  1.131        ad 	KASSERT(p == curproc);
    744    1.2   thorpej 
    745   1.52        ad 	/*
    746   1.52        ad 	 * Verify that we hold no locks other than the kernel lock.
    747   1.52        ad 	 */
    748   1.52        ad 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
    749   1.16      manu 
    750    1.2   thorpej 	/*
    751   1.52        ad 	 * If we are the last live LWP in a process, we need to exit the
    752   1.52        ad 	 * entire process.  We do so with an exit status of zero, because
    753   1.52        ad 	 * it's a "controlled" exit, and because that's what Solaris does.
    754   1.52        ad 	 *
    755   1.52        ad 	 * We are not quite a zombie yet, but for accounting purposes we
    756   1.52        ad 	 * must increment the count of zombies here.
    757   1.45   thorpej 	 *
    758   1.45   thorpej 	 * Note: the last LWP's specificdata will be deleted here.
    759    1.2   thorpej 	 */
    760  1.103        ad 	mutex_enter(p->p_lock);
    761   1.52        ad 	if (p->p_nlwps - p->p_nzlwps == 1) {
    762   1.65        ad 		KASSERT(current == true);
    763   1.88        ad 		/* XXXSMP kernel_lock not held */
    764    1.2   thorpej 		exit1(l, 0);
    765   1.19  jdolecek 		/* NOTREACHED */
    766    1.2   thorpej 	}
    767   1.52        ad 	p->p_nzlwps++;
    768  1.103        ad 	mutex_exit(p->p_lock);
    769   1.52        ad 
    770   1.52        ad 	if (p->p_emul->e_lwp_exit)
    771   1.52        ad 		(*p->p_emul->e_lwp_exit)(l);
    772    1.2   thorpej 
    773  1.131        ad 	/* Drop filedesc reference. */
    774  1.131        ad 	fd_free();
    775  1.131        ad 
    776   1.45   thorpej 	/* Delete the specificdata while it's still safe to sleep. */
    777   1.45   thorpej 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
    778   1.45   thorpej 
    779   1.52        ad 	/*
    780   1.52        ad 	 * Release our cached credentials.
    781   1.52        ad 	 */
    782   1.37        ad 	kauth_cred_free(l->l_cred);
    783   1.70        ad 	callout_destroy(&l->l_timeout_ch);
    784   1.65        ad 
    785   1.65        ad 	/*
    786   1.52        ad 	 * Remove the LWP from the global list.
    787   1.52        ad 	 */
    788  1.102        ad 	mutex_enter(proc_lock);
    789   1.52        ad 	LIST_REMOVE(l, l_list);
    790  1.102        ad 	mutex_exit(proc_lock);
    791   1.19  jdolecek 
    792   1.52        ad 	/*
    793   1.52        ad 	 * Get rid of all references to the LWP that others (e.g. procfs)
    794   1.52        ad 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
    795   1.52        ad 	 * mark it waiting for collection in the proc structure.  Note that
    796   1.52        ad 	 * before we can do that, we need to free any other dead, detached
    797   1.52        ad 	 * LWP waiting to meet its maker.
    798   1.52        ad 	 */
    799  1.103        ad 	mutex_enter(p->p_lock);
    800   1.52        ad 	lwp_drainrefs(l);
    801   1.31      yamt 
    802   1.52        ad 	if ((l->l_prflag & LPR_DETACHED) != 0) {
    803   1.52        ad 		while ((l2 = p->p_zomblwp) != NULL) {
    804   1.52        ad 			p->p_zomblwp = NULL;
    805   1.63        ad 			lwp_free(l2, false, false);/* releases proc mutex */
    806  1.103        ad 			mutex_enter(p->p_lock);
    807   1.72        ad 			l->l_refcnt++;
    808   1.72        ad 			lwp_drainrefs(l);
    809   1.52        ad 		}
    810   1.52        ad 		p->p_zomblwp = l;
    811   1.52        ad 	}
    812   1.31      yamt 
    813   1.52        ad 	/*
    814   1.52        ad 	 * If we find a pending signal for the process and we have been
    815   1.52        ad 	 * asked to check for signals, then we lose: arrange to have
    816   1.52        ad 	 * all other LWPs in the process check for signals.
    817   1.52        ad 	 */
    818   1.56     pavel 	if ((l->l_flag & LW_PENDSIG) != 0 &&
    819   1.52        ad 	    firstsig(&p->p_sigpend.sp_set) != 0) {
    820   1.52        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    821   1.52        ad 			lwp_lock(l2);
    822   1.56     pavel 			l2->l_flag |= LW_PENDSIG;
    823   1.52        ad 			lwp_unlock(l2);
    824   1.52        ad 		}
    825   1.31      yamt 	}
    826   1.31      yamt 
    827   1.52        ad 	lwp_lock(l);
    828   1.52        ad 	l->l_stat = LSZOMB;
    829   1.90        ad 	if (l->l_name != NULL)
    830   1.90        ad 		strcpy(l->l_name, "(zombie)");
    831  1.128     rmind 	if (l->l_flag & LW_AFFINITY) {
    832  1.122     rmind 		l->l_flag &= ~LW_AFFINITY;
    833  1.128     rmind 	} else {
    834  1.128     rmind 		KASSERT(l->l_affinity == NULL);
    835  1.128     rmind 	}
    836   1.52        ad 	lwp_unlock(l);
    837    1.2   thorpej 	p->p_nrlwps--;
    838   1.52        ad 	cv_broadcast(&p->p_lwpcv);
    839   1.78        ad 	if (l->l_lwpctl != NULL)
    840   1.78        ad 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
    841  1.103        ad 	mutex_exit(p->p_lock);
    842   1.52        ad 
    843  1.122     rmind 	/* Safe without lock since LWP is in zombie state */
    844  1.122     rmind 	if (l->l_affinity) {
    845  1.122     rmind 		kcpuset_unuse(l->l_affinity, NULL);
    846  1.122     rmind 		l->l_affinity = NULL;
    847  1.122     rmind 	}
    848  1.122     rmind 
    849   1.52        ad 	/*
    850   1.52        ad 	 * We can no longer block.  At this point, lwp_free() may already
    851   1.52        ad 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
    852   1.52        ad 	 *
    853   1.52        ad 	 * Free MD LWP resources.
    854   1.52        ad 	 */
    855   1.52        ad 	cpu_lwp_free(l, 0);
    856    1.2   thorpej 
    857   1.65        ad 	if (current) {
    858   1.65        ad 		pmap_deactivate(l);
    859   1.65        ad 
    860   1.65        ad 		/*
    861   1.65        ad 		 * Release the kernel lock, and switch away into
    862   1.65        ad 		 * oblivion.
    863   1.65        ad 		 */
    864   1.52        ad #ifdef notyet
    865   1.65        ad 		/* XXXSMP hold in lwp_userret() */
    866   1.65        ad 		KERNEL_UNLOCK_LAST(l);
    867   1.52        ad #else
    868   1.65        ad 		KERNEL_UNLOCK_ALL(l, NULL);
    869   1.52        ad #endif
    870   1.65        ad 		lwp_exit_switchaway(l);
    871   1.65        ad 	}
    872    1.2   thorpej }
    873    1.2   thorpej 
    874   1.52        ad /*
    875   1.52        ad  * Free a dead LWP's remaining resources.
    876   1.52        ad  *
    877   1.52        ad  * XXXLWP limits.
    878   1.52        ad  */
    879   1.52        ad void
    880   1.63        ad lwp_free(struct lwp *l, bool recycle, bool last)
    881   1.52        ad {
    882   1.52        ad 	struct proc *p = l->l_proc;
    883  1.100        ad 	struct rusage *ru;
    884   1.52        ad 	ksiginfoq_t kq;
    885   1.52        ad 
    886   1.92      yamt 	KASSERT(l != curlwp);
    887   1.92      yamt 
    888   1.52        ad 	/*
    889   1.52        ad 	 * If this was not the last LWP in the process, then adjust
    890   1.52        ad 	 * counters and unlock.
    891   1.52        ad 	 */
    892   1.52        ad 	if (!last) {
    893   1.52        ad 		/*
    894   1.52        ad 		 * Add the LWP's run time to the process' base value.
    895   1.52        ad 		 * This needs to coincide with coming off p_lwps.
    896   1.52        ad 		 */
    897   1.86      yamt 		bintime_add(&p->p_rtime, &l->l_rtime);
    898   1.64      yamt 		p->p_pctcpu += l->l_pctcpu;
    899  1.100        ad 		ru = &p->p_stats->p_ru;
    900  1.100        ad 		ruadd(ru, &l->l_ru);
    901  1.100        ad 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
    902  1.100        ad 		ru->ru_nivcsw += l->l_nivcsw;
    903   1.52        ad 		LIST_REMOVE(l, l_sibling);
    904   1.52        ad 		p->p_nlwps--;
    905   1.52        ad 		p->p_nzlwps--;
    906   1.52        ad 		if ((l->l_prflag & LPR_DETACHED) != 0)
    907   1.52        ad 			p->p_ndlwps--;
    908   1.63        ad 
    909   1.63        ad 		/*
    910   1.63        ad 		 * Have any LWPs sleeping in lwp_wait() recheck for
    911   1.63        ad 		 * deadlock.
    912   1.63        ad 		 */
    913   1.63        ad 		cv_broadcast(&p->p_lwpcv);
    914  1.103        ad 		mutex_exit(p->p_lock);
    915   1.63        ad 	}
    916   1.52        ad 
    917   1.52        ad #ifdef MULTIPROCESSOR
    918   1.63        ad 	/*
    919   1.63        ad 	 * In the unlikely event that the LWP is still on the CPU,
    920   1.63        ad 	 * then spin until it has switched away.  We need to release
    921   1.63        ad 	 * all locks to avoid deadlock against interrupt handlers on
    922   1.63        ad 	 * the target CPU.
    923   1.63        ad 	 */
    924  1.115        ad 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
    925   1.63        ad 		int count;
    926   1.64      yamt 		(void)count; /* XXXgcc */
    927   1.63        ad 		KERNEL_UNLOCK_ALL(curlwp, &count);
    928  1.115        ad 		while ((l->l_pflag & LP_RUNNING) != 0 ||
    929   1.64      yamt 		    l->l_cpu->ci_curlwp == l)
    930   1.63        ad 			SPINLOCK_BACKOFF_HOOK;
    931   1.63        ad 		KERNEL_LOCK(count, curlwp);
    932   1.63        ad 	}
    933   1.52        ad #endif
    934   1.52        ad 
    935   1.52        ad 	/*
    936   1.52        ad 	 * Destroy the LWP's remaining signal information.
    937   1.52        ad 	 */
    938   1.52        ad 	ksiginfo_queue_init(&kq);
    939   1.52        ad 	sigclear(&l->l_sigpend, NULL, &kq);
    940   1.52        ad 	ksiginfo_queue_drain(&kq);
    941   1.52        ad 	cv_destroy(&l->l_sigcv);
    942    1.2   thorpej 
    943   1.19  jdolecek 	/*
    944   1.52        ad 	 * Free the LWP's turnstile and the LWP structure itself unless the
    945   1.93      yamt 	 * caller wants to recycle them.  Also, free the scheduler specific
    946   1.93      yamt 	 * data.
    947   1.52        ad 	 *
    948   1.52        ad 	 * We can't return turnstile0 to the pool (it didn't come from it),
    949   1.52        ad 	 * so if it comes up just drop it quietly and move on.
    950   1.52        ad 	 *
    951   1.52        ad 	 * We don't recycle the VM resources at this time.
    952   1.19  jdolecek 	 */
    953   1.78        ad 	if (l->l_lwpctl != NULL)
    954   1.78        ad 		lwp_ctl_free(l);
    955   1.64      yamt 
    956   1.52        ad 	if (!recycle && l->l_ts != &turnstile0)
    957   1.76        ad 		pool_cache_put(turnstile_cache, l->l_ts);
    958   1.90        ad 	if (l->l_name != NULL)
    959   1.90        ad 		kmem_free(l->l_name, MAXCOMLEN);
    960  1.135     rmind 
    961   1.52        ad 	cpu_lwp_free2(l);
    962   1.19  jdolecek 	uvm_lwp_exit(l);
    963  1.134     rmind 
    964   1.60      yamt 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
    965   1.75        ad 	KASSERT(l->l_inheritedprio == -1);
    966  1.138    darran #ifdef KDTRACE_HOOKS
    967  1.138    darran 	kdtrace_thread_dtor(NULL, l);
    968  1.138    darran #endif
    969   1.52        ad 	if (!recycle)
    970   1.87        ad 		pool_cache_put(lwp_cache, l);
    971    1.2   thorpej }
    972    1.2   thorpej 
    973    1.2   thorpej /*
    974   1.91     rmind  * Migrate the LWP to another CPU.  Unlocks the LWP.
    975   1.91     rmind  */
    976   1.91     rmind void
    977  1.114     rmind lwp_migrate(lwp_t *l, struct cpu_info *tci)
    978   1.91     rmind {
    979  1.114     rmind 	struct schedstate_percpu *tspc;
    980  1.121     rmind 	int lstat = l->l_stat;
    981  1.121     rmind 
    982   1.91     rmind 	KASSERT(lwp_locked(l, NULL));
    983  1.114     rmind 	KASSERT(tci != NULL);
    984  1.114     rmind 
    985  1.121     rmind 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
    986  1.121     rmind 	if ((l->l_pflag & LP_RUNNING) != 0) {
    987  1.121     rmind 		lstat = LSONPROC;
    988  1.121     rmind 	}
    989  1.121     rmind 
    990  1.114     rmind 	/*
    991  1.114     rmind 	 * The destination CPU could have changed while a previous
    992  1.114     rmind 	 * migration was still in flight.
    993  1.114     rmind 	 */
    994  1.121     rmind 	if (l->l_target_cpu != NULL) {
    995  1.114     rmind 		l->l_target_cpu = tci;
    996  1.114     rmind 		lwp_unlock(l);
    997  1.114     rmind 		return;
    998  1.114     rmind 	}
    999   1.91     rmind 
   1000  1.114     rmind 	/* Nothing to do if trying to migrate to the same CPU */
   1001  1.114     rmind 	if (l->l_cpu == tci) {
   1002   1.91     rmind 		lwp_unlock(l);
   1003   1.91     rmind 		return;
   1004   1.91     rmind 	}
   1005   1.91     rmind 
   1006  1.114     rmind 	KASSERT(l->l_target_cpu == NULL);
   1007  1.114     rmind 	tspc = &tci->ci_schedstate;
   1008  1.121     rmind 	switch (lstat) {
   1009   1.91     rmind 	case LSRUN:
   1010  1.134     rmind 		l->l_target_cpu = tci;
   1011  1.134     rmind 		break;
   1012   1.91     rmind 	case LSIDL:
   1013  1.114     rmind 		l->l_cpu = tci;
   1014  1.114     rmind 		lwp_unlock_to(l, tspc->spc_mutex);
   1015   1.91     rmind 		return;
   1016   1.91     rmind 	case LSSLEEP:
   1017  1.114     rmind 		l->l_cpu = tci;
   1018   1.91     rmind 		break;
   1019   1.91     rmind 	case LSSTOP:
   1020   1.91     rmind 	case LSSUSPENDED:
   1021  1.114     rmind 		l->l_cpu = tci;
   1022  1.114     rmind 		if (l->l_wchan == NULL) {
   1023  1.114     rmind 			lwp_unlock_to(l, tspc->spc_lwplock);
   1024  1.114     rmind 			return;
   1025   1.91     rmind 		}
   1026  1.114     rmind 		break;
   1027   1.91     rmind 	case LSONPROC:
   1028  1.114     rmind 		l->l_target_cpu = tci;
   1029  1.114     rmind 		spc_lock(l->l_cpu);
   1030  1.114     rmind 		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
   1031  1.114     rmind 		spc_unlock(l->l_cpu);
   1032   1.91     rmind 		break;
   1033   1.91     rmind 	}
   1034   1.91     rmind 	lwp_unlock(l);
   1035   1.91     rmind }
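/*
 * Usage sketch (hypothetical caller): the LWP must be locked on entry,
 * and lwp_migrate() unlocks it on every return path.
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, target_ci);
 */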
   1036   1.91     rmind 
   1037   1.91     rmind /*
   1038   1.94     rmind  * Find an LWP in a process.  Either argument may be zero: in that
   1039   1.94     rmind  * case, the calling process or the first LWP in the list is used.
   1040  1.103        ad  * On success, returns with the proc locked.
   1041   1.91     rmind  */
   1042   1.91     rmind struct lwp *
   1043   1.91     rmind lwp_find2(pid_t pid, lwpid_t lid)
   1044   1.91     rmind {
   1045   1.91     rmind 	proc_t *p;
   1046   1.91     rmind 	lwp_t *l;
   1047   1.91     rmind 
   1048   1.91     rmind 	/* Find the process */
   1049   1.94     rmind 	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
   1050   1.91     rmind 	if (p == NULL)
   1051   1.91     rmind 		return NULL;
   1052  1.103        ad 	mutex_enter(p->p_lock);
   1053   1.94     rmind 	if (pid != 0) {
   1054   1.94     rmind 		/* Case of p_find */
   1055  1.102        ad 		mutex_exit(proc_lock);
   1056   1.94     rmind 	}
   1057   1.91     rmind 
   1058   1.91     rmind 	/* Find the thread */
   1059   1.94     rmind 	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
   1060  1.103        ad 	if (l == NULL) {
   1061  1.103        ad 		mutex_exit(p->p_lock);
   1062  1.103        ad 	}
   1063   1.91     rmind 
   1064   1.91     rmind 	return l;
   1065   1.91     rmind }
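/*
 * Usage sketch (hypothetical caller): on success the process is
 * returned locked, so the caller must drop p_lock when done with
 * the LWP.
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		... examine or lock l ...
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */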
   1066   1.91     rmind 
   1067   1.91     rmind /*
   1068   1.52        ad  * Look up a live LWP within the specified process, and return it locked.
   1069   1.52        ad  *
   1070  1.103        ad  * Must be called with p->p_lock held.
   1071   1.52        ad  */
   1072   1.52        ad struct lwp *
   1073   1.52        ad lwp_find(struct proc *p, int id)
   1074   1.52        ad {
   1075   1.52        ad 	struct lwp *l;
   1076   1.52        ad 
   1077  1.103        ad 	KASSERT(mutex_owned(p->p_lock));
   1078   1.52        ad 
   1079   1.52        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1080   1.52        ad 		if (l->l_lid == id)
   1081   1.52        ad 			break;
   1082   1.52        ad 	}
   1083   1.52        ad 
   1084   1.52        ad 	/*
   1085   1.52        ad 	 * No need to lock - all of these conditions will
   1086   1.52        ad 	 * be visible with the process level mutex held.
   1087   1.52        ad 	 */
   1088   1.52        ad 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1089   1.52        ad 		l = NULL;
   1090   1.52        ad 
   1091   1.52        ad 	return l;
   1092   1.52        ad }
   1093   1.52        ad 
   1094   1.52        ad /*
   1095   1.37        ad  * Update an LWP's cached credentials to mirror the process' master copy.
   1096   1.37        ad  *
   1097   1.37        ad  * This happens early in the syscall path, on user trap, and on LWP
   1098   1.37        ad  * creation.  A long-running LWP can also voluntarily choose to update
   1099   1.37        ad  * its credentials by calling this routine.  This may be called from
   1100   1.37        ad  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1101   1.37        ad  */
   1102   1.37        ad void
   1103   1.37        ad lwp_update_creds(struct lwp *l)
   1104   1.37        ad {
   1105   1.37        ad 	kauth_cred_t oc;
   1106   1.37        ad 	struct proc *p;
   1107   1.37        ad 
   1108   1.37        ad 	p = l->l_proc;
   1109   1.37        ad 	oc = l->l_cred;
   1110   1.37        ad 
   1111  1.103        ad 	mutex_enter(p->p_lock);
   1112   1.37        ad 	kauth_cred_hold(p->p_cred);
   1113   1.37        ad 	l->l_cred = p->p_cred;
   1114   1.98        ad 	l->l_prflag &= ~LPR_CRMOD;
   1115  1.103        ad 	mutex_exit(p->p_lock);
   1116   1.88        ad 	if (oc != NULL)
   1117   1.37        ad 		kauth_cred_free(oc);
   1118   1.52        ad }
   1119   1.52        ad 
   1120   1.52        ad /*
   1121   1.52        ad  * Verify that an LWP is locked, and optionally verify that the lock matches
   1122   1.52        ad  * one we specify.
   1123   1.52        ad  */
   1124   1.52        ad int
   1125   1.52        ad lwp_locked(struct lwp *l, kmutex_t *mtx)
   1126   1.52        ad {
   1127   1.52        ad 	kmutex_t *cur = l->l_mutex;
   1128   1.52        ad 
   1129   1.52        ad 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1130   1.52        ad }
   1131   1.52        ad 
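/*
 * Example, for illustration only: lwp_locked() is intended for use
 * in assertions, either to check that the LWP is locked at all, or
 * that it is locked by one particular mutex:
 *
 *	KASSERT(lwp_locked(l, NULL));
 *	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
 */
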
   1132   1.52        ad /*
   1133   1.52        ad  * Lock an LWP.
   1134   1.52        ad  */
   1135  1.119        ad kmutex_t *
   1136   1.52        ad lwp_lock_retry(struct lwp *l, kmutex_t *old)
   1137   1.52        ad {
   1138   1.52        ad 
   1139   1.52        ad 	/*
   1140   1.52        ad 	 * XXXgcc ignoring kmutex_t * volatile on i386
   1141   1.52        ad 	 *
   1142   1.52        ad 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
   1143   1.52        ad 	 */
   1144   1.52        ad #if 1
   1145   1.52        ad 	while (l->l_mutex != old) {
   1146   1.52        ad #else
   1147   1.52        ad 	for (;;) {
   1148   1.52        ad #endif
   1149   1.52        ad 		mutex_spin_exit(old);
   1150   1.52        ad 		old = l->l_mutex;
   1151   1.52        ad 		mutex_spin_enter(old);
   1152   1.52        ad 
   1153   1.52        ad 		/*
   1154   1.52        ad 		 * mutex_enter() will have posted a read barrier.  Re-test
   1155   1.52        ad 		 * l->l_mutex.  If it has changed, we need to try again.
   1156   1.52        ad 		 */
   1157   1.52        ad #if 1
   1158   1.52        ad 	}
   1159   1.52        ad #else
   1160   1.52        ad 	} while (__predict_false(l->l_mutex != old));
   1161   1.52        ad #endif
   1162  1.119        ad 
   1163  1.119        ad 	return old;
   1164   1.52        ad }
   1165   1.52        ad 
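/*
 * A sketch (assumed here, not the authoritative definition) of how
 * the lwp_lock() wrapper uses lwp_lock_retry(): acquire the current
 * mutex optimistically, and fall back to the retry loop only if the
 * lock was changed while we were spinning to acquire it:
 *
 *	kmutex_t *old = l->l_mutex;
 *
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))
 *		lwp_lock_retry(l, old);
 */
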
   1166   1.52        ad /*
   1167   1.52        ad  * Lend a new mutex to an LWP.  The old mutex must be held.
   1168   1.52        ad  */
   1169   1.52        ad void
   1170   1.52        ad lwp_setlock(struct lwp *l, kmutex_t *new)
   1171   1.52        ad {
   1172   1.52        ad 
   1173   1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1174   1.52        ad 
   1175  1.107        ad 	membar_exit();
   1176   1.52        ad 	l->l_mutex = new;
   1177   1.52        ad }
   1178   1.52        ad 
   1179   1.52        ad /*
   1180   1.52        ad  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1181   1.52        ad  * must be held.
   1182   1.52        ad  */
   1183   1.52        ad void
   1184   1.52        ad lwp_unlock_to(struct lwp *l, kmutex_t *new)
   1185   1.52        ad {
   1186   1.52        ad 	kmutex_t *old;
   1187   1.52        ad 
   1188   1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1189   1.52        ad 
   1190   1.52        ad 	old = l->l_mutex;
   1191  1.107        ad 	membar_exit();
   1192   1.52        ad 	l->l_mutex = new;
   1193   1.52        ad 	mutex_spin_exit(old);
   1194   1.52        ad }
   1195   1.52        ad 
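/*
 * Illustrative sketch with assumed names (sq_mutex is hypothetical):
 * a sleep queue implementation can lend its own lock to an LWP as the
 * LWP is queued, so that the LWP's state is covered by the queue's
 * lock from that point on:
 *
 *	lwp_lock(l);			(acquires l->l_mutex)
 *	... place l on the sleep queue ...
 *	lwp_unlock_to(l, sq_mutex);	(l is now locked by sq_mutex)
 */
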
   1196   1.52        ad /*
   1197   1.52        ad  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
   1198   1.52        ad  * locked.
   1199   1.52        ad  */
   1200   1.52        ad void
   1201   1.52        ad lwp_relock(struct lwp *l, kmutex_t *new)
   1202   1.52        ad {
   1203   1.52        ad 	kmutex_t *old;
   1204   1.52        ad 
   1205   1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1206   1.52        ad 
   1207   1.52        ad 	old = l->l_mutex;
   1208   1.52        ad 	if (old != new) {
   1209   1.52        ad 		mutex_spin_enter(new);
   1210   1.52        ad 		l->l_mutex = new;
   1211   1.52        ad 		mutex_spin_exit(old);
   1212   1.52        ad 	}
   1213   1.52        ad }
   1214   1.52        ad 
   1215   1.60      yamt int
   1216   1.60      yamt lwp_trylock(struct lwp *l)
   1217   1.60      yamt {
   1218   1.60      yamt 	kmutex_t *old;
   1219   1.60      yamt 
   1220   1.60      yamt 	for (;;) {
   1221   1.60      yamt 		if (!mutex_tryenter(old = l->l_mutex))
   1222   1.60      yamt 			return 0;
   1223   1.60      yamt 		if (__predict_true(l->l_mutex == old))
   1224   1.60      yamt 			return 1;
   1225   1.60      yamt 		mutex_spin_exit(old);
   1226   1.60      yamt 	}
   1227   1.60      yamt }
   1228   1.60      yamt 
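/*
 * Example (illustrative only): lwp_trylock() suits callers that must
 * avoid a lock order reversal, backing off instead of waiting:
 *
 *	if (!lwp_trylock(l)) {
 *		... drop conflicting locks and retry later ...
 *	}
 */
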
   1229  1.134     rmind void
   1230   1.96        ad lwp_unsleep(lwp_t *l, bool cleanup)
   1231   1.96        ad {
   1232   1.96        ad 
   1233   1.96        ad 	KASSERT(mutex_owned(l->l_mutex));
   1234  1.134     rmind 	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
   1235   1.96        ad }
   1236   1.96        ad 
   1238   1.52        ad /*
    1239   1.56     pavel  * Handle exceptions for mi_userret().  Called if any flag in the
    1240   1.52        ad  * LW_USERRET mask is set.
   1241   1.52        ad  */
   1242   1.52        ad void
   1243   1.52        ad lwp_userret(struct lwp *l)
   1244   1.52        ad {
   1245   1.52        ad 	struct proc *p;
   1246   1.54        ad 	void (*hook)(void);
   1247   1.52        ad 	int sig;
   1248   1.52        ad 
   1249  1.114     rmind 	KASSERT(l == curlwp);
   1250  1.114     rmind 	KASSERT(l->l_stat == LSONPROC);
   1251   1.52        ad 	p = l->l_proc;
   1252   1.52        ad 
   1253   1.75        ad #ifndef __HAVE_FAST_SOFTINTS
   1254   1.75        ad 	/* Run pending soft interrupts. */
   1255   1.75        ad 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1256   1.75        ad 		softint_overlay();
   1257   1.75        ad #endif
   1258   1.75        ad 
   1259  1.125        ad #ifdef KERN_SA
   1260  1.125        ad 	/* Generate UNBLOCKED upcall if needed */
   1261  1.125        ad 	if (l->l_flag & LW_SA_BLOCKING) {
   1262  1.125        ad 		sa_unblock_userret(l);
   1263  1.125        ad 		/* NOTREACHED */
   1264  1.125        ad 	}
   1265  1.125        ad #endif
   1266  1.125        ad 
   1267   1.52        ad 	/*
   1268   1.52        ad 	 * It should be safe to do this read unlocked on a multiprocessor
    1269   1.52        ad 	 * system.
   1270  1.126  wrstuden 	 *
   1271  1.126  wrstuden 	 * LW_SA_UPCALL will be handled after the while() loop, so don't
   1272  1.126  wrstuden 	 * consider it now.
   1273   1.52        ad 	 */
   1274  1.126  wrstuden 	while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
   1275   1.52        ad 		/*
   1276   1.52        ad 		 * Process pending signals first, unless the process
   1277   1.61        ad 		 * is dumping core or exiting, where we will instead
   1278  1.101     rmind 		 * enter the LW_WSUSPEND case below.
   1279   1.52        ad 		 */
   1280   1.61        ad 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1281   1.61        ad 		    LW_PENDSIG) {
   1282  1.103        ad 			mutex_enter(p->p_lock);
   1283   1.52        ad 			while ((sig = issignal(l)) != 0)
   1284   1.52        ad 				postsig(sig);
   1285  1.103        ad 			mutex_exit(p->p_lock);
   1286   1.52        ad 		}
   1287   1.52        ad 
   1288   1.52        ad 		/*
   1289   1.52        ad 		 * Core-dump or suspend pending.
   1290   1.52        ad 		 *
   1291   1.52        ad 		 * In case of core dump, suspend ourselves, so that the
   1292   1.52        ad 		 * kernel stack and therefore the userland registers saved
   1293   1.52        ad 		 * in the trapframe are around for coredump() to write them
   1294   1.52        ad 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
   1295   1.52        ad 		 * will write the core file out once all other LWPs are
   1296   1.52        ad 		 * suspended.
   1297   1.52        ad 		 */
   1298   1.56     pavel 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1299  1.103        ad 			mutex_enter(p->p_lock);
   1300   1.52        ad 			p->p_nrlwps--;
   1301   1.52        ad 			cv_broadcast(&p->p_lwpcv);
   1302   1.52        ad 			lwp_lock(l);
   1303   1.52        ad 			l->l_stat = LSSUSPENDED;
   1304  1.104        ad 			lwp_unlock(l);
   1305  1.103        ad 			mutex_exit(p->p_lock);
   1306  1.104        ad 			lwp_lock(l);
   1307   1.64      yamt 			mi_switch(l);
   1308   1.52        ad 		}
   1309   1.52        ad 
   1310   1.52        ad 		/* Process is exiting. */
   1311   1.56     pavel 		if ((l->l_flag & LW_WEXIT) != 0) {
   1312   1.52        ad 			lwp_exit(l);
   1313   1.52        ad 			KASSERT(0);
   1314   1.52        ad 			/* NOTREACHED */
   1315   1.52        ad 		}
   1316   1.54        ad 
   1317   1.54        ad 		/* Call userret hook; used by Linux emulation. */
   1318   1.56     pavel 		if ((l->l_flag & LW_WUSERRET) != 0) {
   1319   1.54        ad 			lwp_lock(l);
   1320   1.56     pavel 			l->l_flag &= ~LW_WUSERRET;
   1321   1.54        ad 			lwp_unlock(l);
   1322   1.54        ad 			hook = p->p_userret;
   1323   1.54        ad 			p->p_userret = NULL;
   1324   1.54        ad 			(*hook)();
   1325   1.54        ad 		}
   1326   1.52        ad 	}
   1327  1.124  wrstuden 
   1328  1.124  wrstuden #ifdef KERN_SA
   1329  1.124  wrstuden 	/*
   1330  1.124  wrstuden 	 * Timer events are handled specially.  We only try once to deliver
    1331  1.124  wrstuden 	 * pending timer upcalls; if it fails, we can try again on the next
   1332  1.124  wrstuden 	 * loop around.  If we need to re-enter lwp_userret(), MD code will
   1333  1.124  wrstuden 	 * bounce us back here through the trap path after we return.
   1334  1.124  wrstuden 	 */
   1335  1.124  wrstuden 	if (p->p_timerpend)
   1336  1.124  wrstuden 		timerupcall(l);
   1337  1.125        ad 	if (l->l_flag & LW_SA_UPCALL)
   1338  1.125        ad 		sa_upcall_userret(l);
   1339  1.124  wrstuden #endif /* KERN_SA */
   1340   1.52        ad }
   1341   1.52        ad 
   1342   1.52        ad /*
   1343   1.52        ad  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1344   1.52        ad  */
   1345   1.52        ad void
   1346   1.52        ad lwp_need_userret(struct lwp *l)
   1347   1.52        ad {
   1348   1.63        ad 	KASSERT(lwp_locked(l, NULL));
   1349   1.52        ad 
   1350   1.52        ad 	/*
   1351   1.52        ad 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1352   1.52        ad 	 * that the condition will be seen before forcing the LWP to enter
   1353   1.52        ad 	 * kernel mode.
   1354   1.52        ad 	 */
   1355   1.81        ad 	membar_producer();
   1356   1.52        ad 	cpu_signotify(l);
   1357   1.52        ad }
   1358   1.52        ad 
   1359   1.52        ad /*
   1360   1.52        ad  * Add one reference to an LWP.  This will prevent the LWP from
    1361   1.52        ad  * exiting, thus keeping the lwp structure and PCB around for inspection.
   1362   1.52        ad  */
   1363   1.52        ad void
   1364   1.52        ad lwp_addref(struct lwp *l)
   1365   1.52        ad {
   1366   1.52        ad 
   1367  1.103        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1368   1.52        ad 	KASSERT(l->l_stat != LSZOMB);
   1369   1.52        ad 	KASSERT(l->l_refcnt != 0);
   1370   1.52        ad 
   1371   1.52        ad 	l->l_refcnt++;
   1372   1.52        ad }
   1373   1.52        ad 
   1374   1.52        ad /*
   1375   1.52        ad  * Remove one reference to an LWP.  If this is the last reference,
   1376   1.52        ad  * then we must finalize the LWP's death.
   1377   1.52        ad  */
   1378   1.52        ad void
   1379   1.52        ad lwp_delref(struct lwp *l)
   1380   1.52        ad {
   1381   1.52        ad 	struct proc *p = l->l_proc;
   1382   1.52        ad 
   1383  1.103        ad 	mutex_enter(p->p_lock);
   1384   1.72        ad 	KASSERT(l->l_stat != LSZOMB);
   1385   1.72        ad 	KASSERT(l->l_refcnt > 0);
   1386   1.52        ad 	if (--l->l_refcnt == 0)
   1387   1.76        ad 		cv_broadcast(&p->p_lwpcv);
   1388  1.103        ad 	mutex_exit(p->p_lock);
   1389   1.52        ad }
   1390   1.52        ad 
   1391   1.52        ad /*
   1392   1.52        ad  * Drain all references to the current LWP.
   1393   1.52        ad  */
   1394   1.52        ad void
   1395   1.52        ad lwp_drainrefs(struct lwp *l)
   1396   1.52        ad {
   1397   1.52        ad 	struct proc *p = l->l_proc;
   1398   1.52        ad 
   1399  1.103        ad 	KASSERT(mutex_owned(p->p_lock));
   1400   1.52        ad 	KASSERT(l->l_refcnt != 0);
   1401   1.52        ad 
   1402   1.52        ad 	l->l_refcnt--;
   1403   1.52        ad 	while (l->l_refcnt != 0)
   1404  1.103        ad 		cv_wait(&p->p_lwpcv, p->p_lock);
   1405   1.37        ad }
   1406   1.41   thorpej 
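/*
 * Illustrative sketch, not from the original file: the reference
 * protocol lets a caller go on inspecting an LWP after dropping
 * p->p_lock; lwp_drainrefs() in the exit path then waits until the
 * last such reference has been dropped:
 *
 *	mutex_enter(p->p_lock);
 *	lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	... examine l (e.g. its PCB) without p->p_lock held ...
 *	lwp_delref(l);
 */
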
   1407   1.41   thorpej /*
   1408  1.127        ad  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
   1409  1.127        ad  * be held.
   1410  1.127        ad  */
   1411  1.127        ad bool
   1412  1.127        ad lwp_alive(lwp_t *l)
   1413  1.127        ad {
   1414  1.127        ad 
   1415  1.127        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1416  1.127        ad 
   1417  1.127        ad 	switch (l->l_stat) {
   1418  1.127        ad 	case LSSLEEP:
   1419  1.127        ad 	case LSRUN:
   1420  1.127        ad 	case LSONPROC:
   1421  1.127        ad 	case LSSTOP:
   1422  1.127        ad 	case LSSUSPENDED:
   1423  1.127        ad 		return true;
   1424  1.127        ad 	default:
   1425  1.127        ad 		return false;
   1426  1.127        ad 	}
   1427  1.127        ad }
   1428  1.127        ad 
   1429  1.127        ad /*
   1430  1.127        ad  * Return first live LWP in the process.
   1431  1.127        ad  */
   1432  1.127        ad lwp_t *
   1433  1.127        ad lwp_find_first(proc_t *p)
   1434  1.127        ad {
   1435  1.127        ad 	lwp_t *l;
   1436  1.127        ad 
   1437  1.127        ad 	KASSERT(mutex_owned(p->p_lock));
   1438  1.127        ad 
   1439  1.127        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1440  1.127        ad 		if (lwp_alive(l)) {
   1441  1.127        ad 			return l;
   1442  1.127        ad 		}
   1443  1.127        ad 	}
   1444  1.127        ad 
   1445  1.127        ad 	return NULL;
   1446  1.127        ad }
   1447  1.127        ad 
   1448  1.127        ad /*
   1449   1.41   thorpej  * lwp_specific_key_create --
   1450   1.41   thorpej  *	Create a key for subsystem lwp-specific data.
   1451   1.41   thorpej  */
   1452   1.41   thorpej int
   1453   1.41   thorpej lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1454   1.41   thorpej {
   1455   1.41   thorpej 
   1456   1.45   thorpej 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
   1457   1.41   thorpej }
   1458   1.41   thorpej 
   1459   1.41   thorpej /*
   1460   1.41   thorpej  * lwp_specific_key_delete --
   1461   1.41   thorpej  *	Delete a key for subsystem lwp-specific data.
   1462   1.41   thorpej  */
   1463   1.41   thorpej void
   1464   1.41   thorpej lwp_specific_key_delete(specificdata_key_t key)
   1465   1.41   thorpej {
   1466   1.41   thorpej 
   1467   1.41   thorpej 	specificdata_key_delete(lwp_specificdata_domain, key);
   1468   1.41   thorpej }
   1469   1.41   thorpej 
   1470   1.45   thorpej /*
   1471   1.45   thorpej  * lwp_initspecific --
   1472   1.45   thorpej  *	Initialize an LWP's specificdata container.
   1473   1.45   thorpej  */
   1474   1.42  christos void
   1475   1.42  christos lwp_initspecific(struct lwp *l)
   1476   1.42  christos {
   1477   1.42  christos 	int error;
   1478   1.45   thorpej 
   1479   1.42  christos 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
   1480   1.42  christos 	KASSERT(error == 0);
   1481   1.42  christos }
   1482   1.42  christos 
   1483   1.41   thorpej /*
   1484   1.45   thorpej  * lwp_finispecific --
   1485   1.45   thorpej  *	Finalize an LWP's specificdata container.
   1486   1.45   thorpej  */
   1487   1.45   thorpej void
   1488   1.45   thorpej lwp_finispecific(struct lwp *l)
   1489   1.45   thorpej {
   1490   1.45   thorpej 
   1491   1.45   thorpej 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
   1492   1.45   thorpej }
   1493   1.45   thorpej 
   1494   1.45   thorpej /*
   1495   1.41   thorpej  * lwp_getspecific --
   1496   1.41   thorpej  *	Return lwp-specific data corresponding to the specified key.
   1497   1.41   thorpej  *
   1498   1.41   thorpej  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
   1499   1.41   thorpej  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
    1500   1.41   thorpej  *	LWP's specific data, care must be taken to ensure that doing so
   1501   1.41   thorpej  *	would not cause internal data structure inconsistency (i.e. caller
   1502   1.41   thorpej  *	can guarantee that the target LWP is not inside an lwp_getspecific()
   1503   1.41   thorpej  *	or lwp_setspecific() call).
   1504   1.41   thorpej  */
   1505   1.41   thorpej void *
   1506   1.44   thorpej lwp_getspecific(specificdata_key_t key)
   1507   1.41   thorpej {
   1508   1.41   thorpej 
   1509   1.41   thorpej 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1510   1.44   thorpej 						  &curlwp->l_specdataref, key));
   1511   1.41   thorpej }
   1512   1.41   thorpej 
   1513   1.47   hannken void *
   1514   1.47   hannken _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
   1515   1.47   hannken {
   1516   1.47   hannken 
   1517   1.47   hannken 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1518   1.47   hannken 						  &l->l_specdataref, key));
   1519   1.47   hannken }
   1520   1.47   hannken 
   1521   1.41   thorpej /*
   1522   1.41   thorpej  * lwp_setspecific --
   1523   1.41   thorpej  *	Set lwp-specific data corresponding to the specified key.
   1524   1.41   thorpej  */
   1525   1.41   thorpej void
   1526   1.45   thorpej lwp_setspecific(specificdata_key_t key, void *data)
   1527   1.41   thorpej {
   1528   1.41   thorpej 
   1529   1.41   thorpej 	specificdata_setspecific(lwp_specificdata_domain,
   1530   1.44   thorpej 				 &curlwp->l_specdataref, key, data);
   1531   1.41   thorpej }
   1532   1.78        ad 
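/*
 * A minimal usage sketch with assumed names (foo_lwp_key and foo_dtor
 * are hypothetical): a subsystem creates a key once at initialization
 * time, after which each LWP attaches and retrieves its own datum:
 *
 *	static specificdata_key_t foo_lwp_key;
 *
 *	error = lwp_specific_key_create(&foo_lwp_key, foo_dtor);
 *	...
 *	lwp_setspecific(foo_lwp_key, data);	(current LWP only)
 *	data = lwp_getspecific(foo_lwp_key);
 */
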
   1533   1.78        ad /*
   1534   1.78        ad  * Allocate a new lwpctl structure for a user LWP.
   1535   1.78        ad  */
   1536   1.78        ad int
   1537   1.78        ad lwp_ctl_alloc(vaddr_t *uaddr)
   1538   1.78        ad {
   1539   1.78        ad 	lcproc_t *lp;
   1540   1.78        ad 	u_int bit, i, offset;
   1541   1.78        ad 	struct uvm_object *uao;
   1542   1.78        ad 	int error;
   1543   1.78        ad 	lcpage_t *lcp;
   1544   1.78        ad 	proc_t *p;
   1545   1.78        ad 	lwp_t *l;
   1546   1.78        ad 
   1547   1.78        ad 	l = curlwp;
   1548   1.78        ad 	p = l->l_proc;
   1549   1.78        ad 
   1550   1.81        ad 	if (l->l_lcpage != NULL) {
   1551   1.81        ad 		lcp = l->l_lcpage;
   1552   1.81        ad 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1553   1.78        ad 		return (EINVAL);
   1554   1.81        ad 	}
   1555   1.78        ad 
   1556   1.78        ad 	/* First time around, allocate header structure for the process. */
   1557   1.78        ad 	if ((lp = p->p_lwpctl) == NULL) {
   1558   1.78        ad 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1559   1.78        ad 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1560   1.78        ad 		lp->lp_uao = NULL;
   1561   1.78        ad 		TAILQ_INIT(&lp->lp_pages);
   1562  1.103        ad 		mutex_enter(p->p_lock);
   1563   1.78        ad 		if (p->p_lwpctl == NULL) {
   1564   1.78        ad 			p->p_lwpctl = lp;
   1565  1.103        ad 			mutex_exit(p->p_lock);
   1566   1.78        ad 		} else {
   1567  1.103        ad 			mutex_exit(p->p_lock);
   1568   1.78        ad 			mutex_destroy(&lp->lp_lock);
   1569   1.78        ad 			kmem_free(lp, sizeof(*lp));
   1570   1.78        ad 			lp = p->p_lwpctl;
   1571   1.78        ad 		}
   1572   1.78        ad 	}
   1573   1.78        ad 
   1574   1.78        ad  	/*
   1575   1.78        ad  	 * Set up an anonymous memory region to hold the shared pages.
   1576   1.78        ad  	 * Map them into the process' address space.  The user vmspace
   1577   1.78        ad  	 * gets the first reference on the UAO.
   1578   1.78        ad  	 */
   1579   1.78        ad 	mutex_enter(&lp->lp_lock);
   1580   1.78        ad 	if (lp->lp_uao == NULL) {
   1581   1.78        ad 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1582   1.78        ad 		lp->lp_cur = 0;
   1583   1.78        ad 		lp->lp_max = LWPCTL_UAREA_SZ;
   1584   1.78        ad 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1585   1.78        ad 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
   1586   1.78        ad 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1587   1.78        ad 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1588   1.78        ad 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1589   1.78        ad 		if (error != 0) {
   1590   1.78        ad 			uao_detach(lp->lp_uao);
   1591   1.78        ad 			lp->lp_uao = NULL;
   1592   1.78        ad 			mutex_exit(&lp->lp_lock);
   1593   1.78        ad 			return error;
   1594   1.78        ad 		}
   1595   1.78        ad 	}
   1596   1.78        ad 
   1597   1.78        ad 	/* Get a free block and allocate for this LWP. */
   1598   1.78        ad 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1599   1.78        ad 		if (lcp->lcp_nfree != 0)
   1600   1.78        ad 			break;
   1601   1.78        ad 	}
   1602   1.78        ad 	if (lcp == NULL) {
   1603   1.78        ad 		/* Nothing available - try to set up a free page. */
   1604   1.78        ad 		if (lp->lp_cur == lp->lp_max) {
   1605   1.78        ad 			mutex_exit(&lp->lp_lock);
   1606   1.78        ad 			return ENOMEM;
   1607   1.78        ad 		}
   1608   1.78        ad 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1609   1.79      yamt 		if (lcp == NULL) {
   1610   1.79      yamt 			mutex_exit(&lp->lp_lock);
   1611   1.78        ad 			return ENOMEM;
   1612   1.79      yamt 		}
   1613   1.78        ad 		/*
   1614   1.78        ad 		 * Wire the next page down in kernel space.  Since this
   1615   1.78        ad 		 * is a new mapping, we must add a reference.
   1616   1.78        ad 		 */
   1617   1.78        ad 		uao = lp->lp_uao;
   1618   1.78        ad 		(*uao->pgops->pgo_reference)(uao);
   1619   1.99        ad 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1620   1.78        ad 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1621   1.78        ad 		    uao, lp->lp_cur, PAGE_SIZE,
   1622   1.78        ad 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1623   1.78        ad 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1624   1.78        ad 		if (error != 0) {
   1625   1.78        ad 			mutex_exit(&lp->lp_lock);
   1626   1.78        ad 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1627   1.78        ad 			(*uao->pgops->pgo_detach)(uao);
   1628   1.78        ad 			return error;
   1629   1.78        ad 		}
   1630   1.89      yamt 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1631   1.89      yamt 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1632   1.89      yamt 		if (error != 0) {
   1633   1.89      yamt 			mutex_exit(&lp->lp_lock);
   1634   1.89      yamt 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1635   1.89      yamt 			    lcp->lcp_kaddr + PAGE_SIZE);
   1636   1.89      yamt 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1637   1.89      yamt 			return error;
   1638   1.89      yamt 		}
   1639   1.78        ad 		/* Prepare the page descriptor and link into the list. */
   1640   1.78        ad 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1641   1.78        ad 		lp->lp_cur += PAGE_SIZE;
   1642   1.78        ad 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1643   1.78        ad 		lcp->lcp_rotor = 0;
   1644   1.78        ad 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1645   1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1646   1.78        ad 	}
   1647   1.78        ad 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1648   1.78        ad 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1649   1.78        ad 			i = 0;
   1650   1.78        ad 	}
   1651   1.78        ad 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1652   1.78        ad 	lcp->lcp_bitmap[i] ^= (1 << bit);
   1653   1.78        ad 	lcp->lcp_rotor = i;
   1654   1.78        ad 	lcp->lcp_nfree--;
   1655   1.78        ad 	l->l_lcpage = lcp;
   1656   1.78        ad 	offset = (i << 5) + bit;
   1657   1.78        ad 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1658   1.78        ad 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1659   1.78        ad 	mutex_exit(&lp->lp_lock);
   1660   1.78        ad 
   1661  1.107        ad 	KPREEMPT_DISABLE(l);
   1662  1.111        ad 	l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
   1663  1.107        ad 	KPREEMPT_ENABLE(l);
   1664   1.78        ad 
   1665   1.78        ad 	return 0;
   1666   1.78        ad }
   1667   1.78        ad 
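/*
 * For illustration: userland reaches lwp_ctl_alloc() through the
 * _lwp_ctl(2) system call, after which a thread can poll its shared
 * lwpctl area without further kernel entries, along these lines:
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0)
 *		cpu = lc->lc_curcpu;	(no system call required)
 */
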
   1668   1.78        ad /*
   1669   1.78        ad  * Free an lwpctl structure back to the per-process list.
   1670   1.78        ad  */
   1671   1.78        ad void
   1672   1.78        ad lwp_ctl_free(lwp_t *l)
   1673   1.78        ad {
   1674   1.78        ad 	lcproc_t *lp;
   1675   1.78        ad 	lcpage_t *lcp;
   1676   1.78        ad 	u_int map, offset;
   1677   1.78        ad 
   1678   1.78        ad 	lp = l->l_proc->p_lwpctl;
   1679   1.78        ad 	KASSERT(lp != NULL);
   1680   1.78        ad 
   1681   1.78        ad 	lcp = l->l_lcpage;
   1682   1.78        ad 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1683   1.78        ad 	KASSERT(offset < LWPCTL_PER_PAGE);
   1684   1.78        ad 
   1685   1.78        ad 	mutex_enter(&lp->lp_lock);
   1686   1.78        ad 	lcp->lcp_nfree++;
   1687   1.78        ad 	map = offset >> 5;
   1688   1.78        ad 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
   1689   1.78        ad 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1690   1.78        ad 		lcp->lcp_rotor = map;
   1691   1.78        ad 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1692   1.78        ad 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1693   1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1694   1.78        ad 	}
   1695   1.78        ad 	mutex_exit(&lp->lp_lock);
   1696   1.78        ad }
   1697   1.78        ad 
   1698   1.78        ad /*
   1699   1.78        ad  * Process is exiting; tear down lwpctl state.  This can only be safely
   1700   1.78        ad  * called by the last LWP in the process.
   1701   1.78        ad  */
   1702   1.78        ad void
   1703   1.78        ad lwp_ctl_exit(void)
   1704   1.78        ad {
   1705   1.78        ad 	lcpage_t *lcp, *next;
   1706   1.78        ad 	lcproc_t *lp;
   1707   1.78        ad 	proc_t *p;
   1708   1.78        ad 	lwp_t *l;
   1709   1.78        ad 
   1710   1.78        ad 	l = curlwp;
   1711   1.78        ad 	l->l_lwpctl = NULL;
   1712   1.95        ad 	l->l_lcpage = NULL;
   1713   1.78        ad 	p = l->l_proc;
   1714   1.78        ad 	lp = p->p_lwpctl;
   1715   1.78        ad 
   1716   1.78        ad 	KASSERT(lp != NULL);
   1717   1.78        ad 	KASSERT(p->p_nlwps == 1);
   1718   1.78        ad 
   1719   1.78        ad 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1720   1.78        ad 		next = TAILQ_NEXT(lcp, lcp_chain);
   1721   1.78        ad 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1722   1.78        ad 		    lcp->lcp_kaddr + PAGE_SIZE);
   1723   1.78        ad 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1724   1.78        ad 	}
   1725   1.78        ad 
   1726   1.78        ad 	if (lp->lp_uao != NULL) {
   1727   1.78        ad 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1728   1.78        ad 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1729   1.78        ad 	}
   1730   1.78        ad 
   1731   1.78        ad 	mutex_destroy(&lp->lp_lock);
   1732   1.78        ad 	kmem_free(lp, sizeof(*lp));
   1733   1.78        ad 	p->p_lwpctl = NULL;
   1734   1.78        ad }
   1735   1.84      yamt 
   1736  1.130        ad /*
   1737  1.130        ad  * Return the current LWP's "preemption counter".  Used to detect
   1738  1.130        ad  * preemption across operations that can tolerate preemption without
   1739  1.130        ad  * crashing, but which may generate incorrect results if preempted.
   1740  1.130        ad  */
   1741  1.130        ad uint64_t
   1742  1.130        ad lwp_pctr(void)
   1743  1.130        ad {
   1744  1.130        ad 
   1745  1.130        ad 	return curlwp->l_ncsw;
   1746  1.130        ad }
   1747  1.130        ad 
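/*
 * Example (illustrative only): a caller computing a CPU-local value
 * that can tolerate migration may detect preemption after the fact
 * and retry:
 *
 *	do {
 *		pctr = lwp_pctr();
 *		... compute using curcpu() ...
 *	} while (pctr != lwp_pctr());
 */
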
   1748   1.84      yamt #if defined(DDB)
   1749   1.84      yamt void
   1750   1.84      yamt lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1751   1.84      yamt {
   1752   1.84      yamt 	lwp_t *l;
   1753   1.84      yamt 
   1754   1.84      yamt 	LIST_FOREACH(l, &alllwp, l_list) {
   1755   1.84      yamt 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   1756   1.84      yamt 
   1757   1.84      yamt 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   1758   1.84      yamt 			continue;
   1759   1.84      yamt 		}
   1760   1.84      yamt 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   1761   1.84      yamt 		    (void *)addr, (void *)stack,
   1762   1.84      yamt 		    (size_t)(addr - stack), l);
   1763   1.84      yamt 	}
   1764   1.84      yamt }
   1765   1.84      yamt #endif /* defined(DDB) */
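
/*
 * For illustration: this hook backs ddb(4)'s "whatis" command, which
 * reports when an address falls within some LWP's kernel stack:
 *
 *	db> whatis <address>
 */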
   1766