/*	$NetBSD: kern_lwp.c,v 1.164.2.3 2013/01/23 00:06:21 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit or thread of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp", also known as lwp_t.
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed,
 *		or it has ceased executing a unit of work and is waiting
 *		to be started again.
 *
 *	LSSUSPENDED
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or may be
 *	sleeping or idle.  It is expected to take the necessary action to
 *	stop executing or become "running" again within a short timeframe.
 *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
 *	Importantly, it indicates that its state is tied to a CPU.
 *
 *	LSZOMB:
 *
 *		Dead or dying: the LWP has released most of its resources
 *		and is about to switch away into oblivion, or has already
 *		switched away.  When it switches away, its few remaining
 *		resources can be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		has switched away or will switch away shortly to allow other
 *		LWPs to run on the CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *
 *		Single LWPs within a process cannot be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP state may only be set when returning to
 *	user space in userret(), or when sleeping interruptibly.  The
 *	LSSUSPENDED state may only be set in userret().  Before setting
 *	those states, we try to ensure that the LWPs will release all
 *	locks that they hold, and at a minimum try to ensure that the
 *	LWP can be set runnable again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *						    > SLEEP
 *						    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *						    > IDL (special cases)
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *	            > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN			    > SUSPENDED
 *		    > STOPPED			    > STOPPED
 *						    > ONPROC (special cases)
 *
 *	Some state transitions are only possible with kernel threads (e.g.
 *	ONPROC -> IDL) and happen under tightly controlled circumstances
 *	free of unwanted side effects.
 *
 * Migration
 *
 *	Migration of threads from one CPU to another may be performed
 *	internally by the scheduler via the sched_takecpu() or
 *	sched_catchlwp() functions.  The universal lwp_migrate() function
 *	should be used for any other cases.  Subsystems in the kernel
 *	must be aware that the CPU of an LWP may change while the LWP
 *	is not locked.
 *
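 *	As an illustrative sketch (not part of the original comment) of
 *	the lwp_migrate() pattern, assuming 'l' is a valid LWP, 'tci' is
 *	the target CPU's struct cpu_info, and that lwp_migrate() releases
 *	the LWP's lock on return:
 *
 *		lwp_lock(l);
 *		lwp_migrate(l, tci);
 *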
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin lock pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held,
 *	and may cause the LWP's lock pointer to change.  Manipulation of
 *	the general lock is not performed directly, but through calls to
 *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
 *	adaptive locks are not allowed to be released while the LWP's lock
 *	is being held (unlike for other spin-locks).
 *
 *	States and their associated locks:
 *
 *	LSONPROC, LSZOMB:
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock and matches lwp::l_cpu.
 *
 *	LSIDL, LSRUN:
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This is a per-CPU lock and matches lwp::l_cpu.
 *
 *	LSSLEEP:
 *
 *		Covered by a lock associated with the sleep queue that the
 *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue lock.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the lock is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleeptab::st_mutex ->
 *			tschain_t::tc_mutex ->
 *			    spc::spc_mutex
 *
 *	Each process has a scheduler state lock (proc::p_lock), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_lock must be held and the process-wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	(But not always for kernel threads.  There are some special cases
 *	as mentioned above.  See kern_softint.c.)
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_lock does not need to be held when transitioning among these
 *	three states, hence p_lock is rarely taken for state transitions.
 */
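
/*
 * A minimal illustrative sketch (not part of the original file) of the
 * locking protocol described above, mirroring the LSSLEEP handling in
 * lwp_suspend() below; 'l' is assumed to be an LWP of process 'p' on
 * which the caller holds a reference, and setrunnable() releases the
 * LWP's lock:
 *
 *	mutex_enter(p->p_lock);
 *	lwp_lock(l);
 *	if (l->l_stat == LSSLEEP && (l->l_flag & LW_SINTR) != 0)
 *		setrunnable(l);
 *	else
 *		lwp_unlock(l);
 *	mutex_exit(p->p_lock);
 */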

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.164.2.3 2013/01/23 00:06:21 yamt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_dtrace.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/pserialize.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/pset.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/dtrace_bsd.h>
#include <sys/sdt.h>
#include <sys/xcall.h>
#include <sys/uidinfo.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

static pool_cache_t	lwp_cache	__read_mostly;
struct lwplist		alllwp		__cacheline_aligned;

static void		lwp_dtor(void *, void *);

/* DTrace proc provider probes */
SDT_PROBE_DEFINE(proc,,,lwp_create,
	"struct lwp *", NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,lwp_start,
	"struct lwp *", NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,lwp_exit,
	"struct lwp *", NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL);

struct turnstile turnstile0;
struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
#ifdef LWP0_CPU_INFO
	.l_cpu = LWP0_CPU_INFO,
#endif
#ifdef LWP0_MD_INITIALIZER
	.l_md = LWP0_MD_INITIALIZER,
#endif
	.l_proc = &proc0,
	.l_lid = 1,
	.l_flag = LW_SYSTEM,
	.l_stat = LSONPROC,
	.l_ts = &turnstile0,
	.l_syncobj = &sched_syncobj,
	.l_refcnt = 1,
	.l_priority = PRI_USER + NPRI_USER - 1,
	.l_inheritedprio = -1,
	.l_class = SCHED_OTHER,
	.l_psid = PS_NONE,
	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
	.l_name = __UNCONST("swapper"),
	.l_fd = &filedesc0,
};

static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);

/*
 * sysctl helper routine for kern.maxlwp.  Ensures that the new
 * value is not too low or too high.
 */
static int
sysctl_kern_maxlwp(SYSCTLFN_ARGS)
{
	int error, nmaxlwp;
	struct sysctlnode node;

	nmaxlwp = maxlwp;
	node = *rnode;
	node.sysctl_data = &nmaxlwp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (nmaxlwp < 0 || nmaxlwp >= 65536)
		return EINVAL;
	if (nmaxlwp > cpu_maxlwp())
		return EINVAL;
	maxlwp = nmaxlwp;

	return 0;
}

static void
sysctl_kern_lwp_setup(void)
{
	struct sysctllog *clog = NULL;

	sysctl_createv(&clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlwp",
		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
		       sysctl_kern_maxlwp, 0, NULL, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}
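
/*
 * Illustrative usage (not part of the original file): with the node
 * created above, the limit can be inspected and tuned from userland
 * with sysctl(8), e.g.:
 *
 *	$ sysctl kern.maxlwp
 *	# sysctl -w kern.maxlwp=4096
 *
 * Writes outside [0, 65535] or above cpu_maxlwp() fail with EINVAL,
 * as enforced by sysctl_kern_maxlwp() above.
 */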

void
lwpinit(void)
{

	LIST_INIT(&alllwp);
	lwpinit_specificdata();
	lwp_sys_init();
	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
	    "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);

	maxlwp = cpu_maxlwp();
	sysctl_kern_lwp_setup();
}

void
lwp0_init(void)
{
	struct lwp *l = &lwp0;

	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
	KASSERT(l->l_lid == proc0.p_nlwpid);

	LIST_INSERT_HEAD(&alllwp, l, l_list);

	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
	cv_init(&l->l_sigcv, "sigwait");
	cv_init(&l->l_waitcv, "vfork");

	kauth_cred_hold(proc0.p_cred);
	l->l_cred = proc0.p_cred;

	kdtrace_thread_ctor(NULL, l);
	lwp_initspecific(l);

	SYSCALL_TIME_LWP_INIT(l);
}

static void
lwp_dtor(void *arg, void *obj)
{
	lwp_t *l = obj;
	uint64_t where;
	(void)l;

	/*
	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
	 * calls will exit before the memory of the LWP is returned to the
	 * pool, where the KVA of the LWP structure might be freed and
	 * re-used for other purposes.  Kernel preemption is disabled
	 * around mutex_oncpu() and rw_oncpu() callers, therefore a
	 * cross-call to all CPUs will do the job.  Also, the value of
	 * l->l_cpu must still be valid at this point.
	 */
	KASSERT(l->l_cpu != NULL);
	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(where);
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(t->l_proc->p_lock));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	return (error);
}
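
/*
 * Illustrative caller pattern for lwp_suspend() (not part of the
 * original file), following the locking contract documented above;
 * the target is assumed to be located with lwp_find() under p_lock:
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);	(releases t's lock)
 *	mutex_exit(p->p_lock);
 */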

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	KASSERT(mutex_owned(l->l_proc->p_lock));
	KASSERT(lwp_locked(l, NULL));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Restart a stopped LWP.
 *
 * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
 * LWP before return.
 */
void
lwp_unstop(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(proc_lock));
	KASSERT(mutex_owned(p->p_lock));

	lwp_lock(l);

	/* If not stopped, then just bail out. */
	if (l->l_stat != LSSTOP) {
		lwp_unlock(l);
		return;
	}

	p->p_stat = SACTIVE;
	p->p_sflag &= ~PS_STOPPING;

	if (!p->p_waited)
		p->p_pptr->p_nstopchild--;

	if (l->l_wchan == NULL) {
		/* setrunnable() will release the lock. */
		setrunnable(l);
	} else if (p->p_xstat && (l->l_flag & LW_SINTR) != 0) {
		/* setrunnable() so we can receive the signal */
		setrunnable(l);
	} else {
		l->l_stat = LSSLEEP;
		p->p_nrlwps++;
		lwp_unlock(l);
	}
}
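
/*
 * Illustrative caller pattern for lwp_unstop() (not part of the
 * original file), following the documented contract that both
 * proc_lock and p_lock are held and the LWP is not locked (the
 * function locks and releases 'l' internally):
 *
 *	mutex_enter(proc_lock);
 *	mutex_enter(p->p_lock);
 *	lwp_unstop(l);
 *	mutex_exit(p->p_lock);
 *	mutex_exit(proc_lock);
 */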

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_lock held.
 */
int
lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
{
	const lwpid_t curlid = l->l_lid;
	proc_t *p = l->l_proc;
	lwp_t *l2;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nlwpwait++;
	l->l_waitingfor = lid;

	for (;;) {
		int nfound;

		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(p->p_lock);
			lwp_userret(l);
			KASSERT(false);
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(p->p_lock);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		error = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			/*
			 * If a specific wait and the target is waiting on
			 * us, then avoid deadlock.  This also traps LWPs
			 * that try to wait on themselves.
			 *
			 * Note that this does not handle more complicated
			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
			 * can still be killed so it is not a major problem.
			 */
			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
				error = EDEADLK;
				break;
			}
			if (l2 == l)
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += exiting;
				continue;
			}
			if (lid != 0) {
				if (l2->l_lid != lid)
					continue;
				/*
				 * Mark this LWP as the first waiter, if there
				 * is no other.
				 */
				if (l2->l_waiter == 0)
					l2->l_waiter = curlid;
			} else if (l2->l_waiter != 0) {
				/*
				 * It already has a waiter - so don't
				 * collect it.  If the waiter doesn't
				 * grab it we'll get another chance
				 * later.
				 */
				nfound++;
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			/*
			 * We're no longer waiting.  Reset the "first waiter"
			 * pointer on the target, in case it was us.
			 */
			l->l_waitingfor = 0;
			l2->l_waiter = 0;
			p->p_nlwpwait--;
			if (departed)
				*departed = l2->l_lid;
			sched_lwp_collect(l2);

			/* lwp_free() releases the proc lock. */
			lwp_free(l2, false, false);
			mutex_enter(p->p_lock);
			return 0;
		}

		if (error != 0)
			break;
		if (nfound == 0) {
			error = ESRCH;
			break;
		}

		/*
		 * Note: since the lock will be dropped, need to restart on
		 * wakeup to run all LWPs again, e.g. there may be new LWPs.
		 */
		if (exiting) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, p->p_lock);
			error = EAGAIN;
			break;
		}

		/*
		 * If all other LWPs are waiting for exits or suspends
		 * and the supply of zombies and potential zombies is
		 * exhausted, then we are about to deadlock.
		 *
		 * If the process is exiting (and this LWP is not the one
		 * that is coordinating the exit) then bail out now.
		 */
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
			error = EDEADLK;
			break;
		}

		/*
		 * Sit around and wait for something to happen.  We'll be
		 * awoken if any of the conditions examined change: if an
		 * LWP exits, is collected, or is detached.
		 */
		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
			break;
	}

	/*
	 * We didn't find any LWPs to collect, we may have received a
	 * signal, or some other condition has caused us to bail out.
	 *
	 * If waiting on a specific LWP, clear the waiters marker: some
	 * other LWP may want it.  Then, kick all the remaining waiters
	 * so that they can re-check for zombies and for deadlock.
	 */
	if (lid != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2->l_lid == lid) {
				if (l2->l_waiter == curlid)
					l2->l_waiter = 0;
				break;
			}
		}
	}
	p->p_nlwpwait--;
	l->l_waitingfor = 0;
	cv_broadcast(&p->p_lwpcv);

	return error;
}
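
/*
 * Illustrative caller pattern for lwp_wait() (not part of the original
 * file), modelled on a _lwp_wait()-style system call; a 'lid' of 0
 * waits for any LWP in the process, and the function both expects and
 * returns with p_lock held:
 *
 *	lwpid_t departed;
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait(l, lid, &departed, false);
 *	mutex_exit(p->p_lock);
 */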

static lwpid_t
lwp_find_free_lid(lwpid_t try_lid, lwp_t *new_lwp, proc_t *p)
{
	#define LID_SCAN (1u << 31)
	lwp_t *scan, *free_before;
	lwpid_t nxt_lid;

	/*
	 * We want the first unused lid greater than or equal to
	 * try_lid (modulo 2^31).
	 * (If nothing else, ld.elf_so doesn't want lwpids with the top
	 * bit set.)
	 * We must not return 0, and avoiding 'LID_SCAN - 1' makes
	 * the outer test easier.
	 * This would be much easier if the list were sorted in
	 * increasing order; it is kept sorted in decreasing order.
	 * This code is only used after a process has generated 2^31 LWPs.
	 *
	 * The code assumes it can always find an id.
	 */

	try_lid &= LID_SCAN - 1;
	if (try_lid <= 1)
		try_lid = 2;

	free_before = NULL;
	nxt_lid = LID_SCAN - 1;
	LIST_FOREACH(scan, &p->p_lwps, l_sibling) {
		if (scan->l_lid != nxt_lid) {
			/* There are available lids before this entry */
			free_before = scan;
			if (try_lid > scan->l_lid)
				break;
		}
		if (try_lid == scan->l_lid) {
			/* The ideal lid is busy, take a higher one */
			if (free_before != NULL) {
				try_lid = free_before->l_lid + 1;
				break;
			}
			/* No higher ones, reuse low numbers */
			try_lid = 2;
		}

		nxt_lid = scan->l_lid - 1;
		if (LIST_NEXT(scan, l_sibling) == NULL) {
			/* The value we have is lower than any existing lid */
			LIST_INSERT_AFTER(scan, new_lwp, l_sibling);
			return try_lid;
		}
	}

	LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling);
	return try_lid;
}
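
/*
 * Illustrative worked example (not part of the original file), under
 * the assumption that p_lwps holds lids 10, 9, 5 and 2 (in decreasing
 * order) and try_lid is 9: the scan first sees a gap above lid 10, so
 * free_before points at that LWP; it then finds lid 9 busy and takes
 * free_before->l_lid + 1 = 11, inserting the new LWP before the LWP
 * with lid 10 so the list stays sorted in decreasing order.
 */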

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
	   lwp_t **rnewlwpp, int sclass)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;
	lwpid_t lid;

	KASSERT(l1 == curlwp || l1->l_proc == &proc0);

	/*
	 * Enforce limits, excluding the first LWP and kthreads.
	 */
	if (p2->p_nlwps != 0 && p2 != &proc0) {
		uid_t uid = kauth_cred_getuid(l1->l_cred);
		int count = chglwpcnt(uid, 1);
		if (__predict_false(count >
		    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
			if (kauth_authorize_process(l1->l_cred,
			    KAUTH_PROCESS_RLIMIT, p2,
			    KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
			    &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
			    != 0) {
				(void)chglwpcnt(uid, -1);
				return EAGAIN;
			}
		}
	}

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(p2->p_lock);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, true, false);	/* releases proc mutex */
		} else
			mutex_exit(p2->p_lock);
	}
	if (isfree == NULL) {
		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == -1);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_class = sclass;

	/*
	 * If vfork(), we want the LWP to run fast and on the same CPU
	 * as its parent, so that it can reuse the VM context and cache
	 * footprint on the local CPU.
	 */
	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
	l2->l_kpribase = PRI_KERNEL;
	l2->l_priority = l1->l_priority;
	l2->l_inheritedprio = -1;
	l2->l_flag = 0;
	l2->l_pflag = LP_MPSAFE;
	TAILQ_INIT(&l2->l_ld_locks);

	/*
	 * For vfork, borrow parent's lwpctl context if it exists.
	 * This also causes us to return via lwp_userret.
	 */
	if (flags & LWP_VFORK && l1->l_lwpctl) {
		l2->l_lwpctl = l1->l_lwpctl;
		l2->l_flag |= LW_LWPCTL;
	}

	/*
	 * If not the first LWP in the process, grab a reference to the
	 * descriptor table.
	 */
	l2->l_fd = p2->p_fd;
	if (p2->p_nlwps != 0) {
		KASSERT(l1->l_proc == p2);
		fd_hold(l2);
	} else {
		KASSERT(l1->l_proc != p2);
	}

	if (p2->p_flag & PK_SYSTEM) {
		/* Mark it as a system LWP. */
		l2->l_flag |= LW_SYSTEM;
	}

	kpreempt_disable();
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	kpreempt_enable();

	kdtrace_thread_ctor(NULL, l2);
	lwp_initspecific(l2);
	sched_lwp_fork(l1, l2);
	lwp_update_creds(l2);
	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
	cv_init(&l2->l_sigcv, "sigwait");
	cv_init(&l2->l_waitcv, "vfork");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	/*
	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
	 */
	pcu_save_all(l1);

	uvm_lwp_setuarea(l2, uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	if ((flags & LWP_PIDLID) != 0) {
		lid = proc_alloc_pid(p2);
		l2->l_pflag |= LP_PIDLID;
	} else {
		lid = 0;
	}

	mutex_enter(p2->p_lock);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigstk = l1->l_sigstk;
	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	if (__predict_true(lid == 0)) {
		/*
		 * XXX: l_lid values are expected to be unique (for a
		 * process); if LWP_PIDLID is sometimes set this won't
		 * be true.  Once 2^31 threads have been allocated we
		 * have to scan to ensure we allocate a unique value.
		 */
		lid = ++p2->p_nlwpid;
		if (__predict_false(lid & LID_SCAN)) {
			lid = lwp_find_free_lid(lid, l2, p2);
			p2->p_nlwpid = lid | LID_SCAN;
			/* l2 has been inserted into p_lwps in order */
			goto skip_insert;
		}
		p2->p_nlwpid = lid;
	}
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    skip_insert:
	l2->l_lid = lid;
	p2->p_nlwps++;
	p2->p_nrlwps++;

	KASSERT(l2->l_affinity == NULL);

	if ((p2->p_flag & PK_SYSTEM) == 0) {
		/* Inherit the affinity mask. */
		if (l1->l_affinity) {
			/*
			 * Note that we hold the state lock while inheriting
			 * the affinity to avoid race with sched_setaffinity().
			 */
			lwp_lock(l1);
			if (l1->l_affinity) {
				kcpuset_use(l1->l_affinity);
				l2->l_affinity = l1->l_affinity;
			}
			lwp_unlock(l1);
		}
		lwp_lock(l2);
		/* Inherit a processor-set */
		l2->l_psid = l1->l_psid;
		/* Look for a CPU to start */
		l2->l_cpu = sched_takecpu(l2);
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
	}
	mutex_exit(p2->p_lock);

	SDT_PROBE(proc,,,lwp_create, l2, 0,0,0,0);

	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(proc_lock);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
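
/*
 * Illustrative caller pattern for lwp_create() (not part of the
 * original file): a sketch, modelled loosely on a _lwp_create()-style
 * caller, of making the new LSIDL thread runnable afterwards.  It
 * assumes 'uaddr' was obtained with uvm_uarea_alloc(), 'func'/'arg'
 * are the entry point and its argument, and that the new LWP is
 * returned unlocked with its lock pointing at the parent CPU's
 * spc_mutex:
 *
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, curlwp->l_class);
 *	if (error == 0) {
 *		mutex_enter(p->p_lock);
 *		lwp_lock(l2);
 *		l2->l_stat = LSRUN;
 *		sched_enqueue(l2, false);
 *		lwp_unlock(l2);
 *		mutex_exit(p->p_lock);
 *	}
 */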

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{
	KASSERTMSG(new == curlwp, "l %p curlwp %p prevlwp %p", new, curlwp, prev);

	SDT_PROBE(proc,,,lwp_start, new, 0,0,0,0);

	KASSERT(kpreempt_disabled());
	if (prev != NULL) {
		/*
		 * Normalize the count of the spin-mutexes, it was
		 * increased in mi_switch().  Unmark the state of
		 * context switch - it is finished for previous LWP.
		 */
		curcpu()->ci_mtx_count++;
		membar_exit();
		prev->l_ctxswtch = 0;
	}
	KPREEMPT_DISABLE(new);
	spl0();
	if (__predict_true(new->l_proc->p_vmspace))
		pmap_activate(new);

	/* Note trip through cpu_switchto(). */
	pserialize_switchpoint();

	LOCKDEBUG_BARRIER(NULL, 0);
	KPREEMPT_ENABLE(new);
	if ((new->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new);
	}
}

   1016       1.64      yamt /*
   1017       1.65        ad  * Exit an LWP.
   1018        1.2   thorpej  */
   1019        1.2   thorpej void
   1020        1.2   thorpej lwp_exit(struct lwp *l)
   1021        1.2   thorpej {
   1022        1.2   thorpej 	struct proc *p = l->l_proc;
   1023       1.52        ad 	struct lwp *l2;
   1024       1.65        ad 	bool current;
   1025       1.65        ad 
   1026       1.65        ad 	current = (l == curlwp);
   1027        1.2   thorpej 
   1028      1.114     rmind 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
   1029      1.131        ad 	KASSERT(p == curproc);
   1030        1.2   thorpej 
   1031      1.141    darran 	SDT_PROBE(proc,,,lwp_exit, l, 0,0,0,0);
   1032      1.141    darran 
   1033       1.52        ad 	/*
   1034       1.52        ad 	 * Verify that we hold no locks other than the kernel lock.
   1035       1.52        ad 	 */
   1036       1.52        ad 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
   1037       1.16      manu 
   1038        1.2   thorpej 	/*
   1039       1.52        ad 	 * If we are the last live LWP in a process, we need to exit the
   1040       1.52        ad 	 * entire process.  We do so with an exit status of zero, because
   1041       1.52        ad 	 * it's a "controlled" exit, and because that's what Solaris does.
   1042       1.52        ad 	 *
   1043       1.52        ad 	 * We are not quite a zombie yet, but for accounting purposes we
   1044       1.52        ad 	 * must increment the count of zombies here.
   1045       1.45   thorpej 	 *
   1046       1.45   thorpej 	 * Note: the last LWP's specificdata will be deleted here.
   1047        1.2   thorpej 	 */
   1048      1.103        ad 	mutex_enter(p->p_lock);
   1049       1.52        ad 	if (p->p_nlwps - p->p_nzlwps == 1) {
   1050       1.65        ad 		KASSERT(current == true);
   1051  1.164.2.2      yamt 		KASSERT(p != &proc0);
   1052       1.88        ad 		/* XXXSMP kernel_lock not held */
   1053        1.2   thorpej 		exit1(l, 0);
   1054       1.19  jdolecek 		/* NOTREACHED */
   1055        1.2   thorpej 	}
   1056       1.52        ad 	p->p_nzlwps++;
   1057      1.103        ad 	mutex_exit(p->p_lock);
   1058       1.52        ad 
   1059       1.52        ad 	if (p->p_emul->e_lwp_exit)
   1060       1.52        ad 		(*p->p_emul->e_lwp_exit)(l);
   1061        1.2   thorpej 
   1062      1.131        ad 	/* Drop filedesc reference. */
   1063      1.131        ad 	fd_free();
   1064      1.131        ad 
   1065       1.45   thorpej 	/* Delete the specificdata while it's still safe to sleep. */
   1066      1.145     pooka 	lwp_finispecific(l);
   1067       1.45   thorpej 
   1068       1.52        ad 	/*
   1069       1.52        ad 	 * Release our cached credentials.
   1070       1.52        ad 	 */
   1071       1.37        ad 	kauth_cred_free(l->l_cred);
   1072       1.70        ad 	callout_destroy(&l->l_timeout_ch);
   1073       1.65        ad 
   1074       1.65        ad 	/*
   1075       1.52        ad 	 * Remove the LWP from the global list.
   1076      1.151       chs 	 * Free its LID from the PID namespace if needed.
   1077       1.52        ad 	 */
   1078      1.102        ad 	mutex_enter(proc_lock);
   1079       1.52        ad 	LIST_REMOVE(l, l_list);
   1080      1.151       chs 	if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
   1081      1.151       chs 		proc_free_pid(l->l_lid);
   1082      1.151       chs 	}
   1083      1.102        ad 	mutex_exit(proc_lock);
   1084       1.19  jdolecek 
   1085       1.52        ad 	/*
   1086       1.52        ad 	 * Get rid of all references to the LWP that others (e.g. procfs)
   1087       1.52        ad 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
   1088       1.52        ad 	 * mark it waiting for collection in the proc structure.  Note that
   1089       1.52        ad 	 * before we can do that, we need to free any other dead, detached
   1090       1.52        ad 	 * LWP waiting to meet its maker.
   1091       1.52        ad 	 */
   1092      1.103        ad 	mutex_enter(p->p_lock);
   1093       1.52        ad 	lwp_drainrefs(l);
   1094       1.31      yamt 
   1095       1.52        ad 	if ((l->l_prflag & LPR_DETACHED) != 0) {
   1096       1.52        ad 		while ((l2 = p->p_zomblwp) != NULL) {
   1097       1.52        ad 			p->p_zomblwp = NULL;
   1098       1.63        ad 			lwp_free(l2, false, false);	/* releases proc mutex */
   1099      1.103        ad 			mutex_enter(p->p_lock);
   1100       1.72        ad 			l->l_refcnt++;
   1101       1.72        ad 			lwp_drainrefs(l);
   1102       1.52        ad 		}
   1103       1.52        ad 		p->p_zomblwp = l;
   1104       1.52        ad 	}
   1105       1.31      yamt 
   1106       1.52        ad 	/*
   1107       1.52        ad 	 * If we find a pending signal for the process and we have been
   1108      1.151       chs 	 * asked to check for signals, then we lose: arrange to have
   1109       1.52        ad 	 * all other LWPs in the process check for signals.
   1110       1.52        ad 	 */
   1111       1.56     pavel 	if ((l->l_flag & LW_PENDSIG) != 0 &&
   1112       1.52        ad 	    firstsig(&p->p_sigpend.sp_set) != 0) {
   1113       1.52        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
   1114       1.52        ad 			lwp_lock(l2);
   1115       1.56     pavel 			l2->l_flag |= LW_PENDSIG;
   1116       1.52        ad 			lwp_unlock(l2);
   1117       1.52        ad 		}
   1118       1.31      yamt 	}
   1119       1.31      yamt 
   1120      1.158      matt 	/*
   1121      1.158      matt 	 * Release any PCU resources before becoming a zombie.
   1122      1.158      matt 	 */
   1123      1.158      matt 	pcu_discard_all(l);
   1124      1.158      matt 
   1125       1.52        ad 	lwp_lock(l);
   1126       1.52        ad 	l->l_stat = LSZOMB;
   1127      1.162     rmind 	if (l->l_name != NULL) {
   1128       1.90        ad 		strcpy(l->l_name, "(zombie)");
   1129      1.128     rmind 	}
   1130       1.52        ad 	lwp_unlock(l);
   1131        1.2   thorpej 	p->p_nrlwps--;
   1132       1.52        ad 	cv_broadcast(&p->p_lwpcv);
   1133       1.78        ad 	if (l->l_lwpctl != NULL)
   1134       1.78        ad 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
   1135      1.103        ad 	mutex_exit(p->p_lock);
   1136       1.52        ad 
   1137       1.52        ad 	/*
   1138       1.52        ad 	 * We can no longer block.  At this point, lwp_free() may already
   1139       1.52        ad 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
   1140       1.52        ad 	 *
   1141       1.52        ad 	 * Free MD LWP resources.
   1142       1.52        ad 	 */
   1143       1.52        ad 	cpu_lwp_free(l, 0);
   1144        1.2   thorpej 
   1145       1.65        ad 	if (current) {
   1146       1.65        ad 		pmap_deactivate(l);
   1147       1.65        ad 
   1148       1.65        ad 		/*
   1149       1.65        ad 		 * Release the kernel lock, and switch away into
   1150       1.65        ad 		 * oblivion.
   1151       1.65        ad 		 */
   1152       1.52        ad #ifdef notyet
   1153       1.65        ad 		/* XXXSMP hold in lwp_userret() */
   1154       1.65        ad 		KERNEL_UNLOCK_LAST(l);
   1155       1.52        ad #else
   1156       1.65        ad 		KERNEL_UNLOCK_ALL(l, NULL);
   1157       1.52        ad #endif
   1158       1.65        ad 		lwp_exit_switchaway(l);
   1159       1.65        ad 	}
   1160        1.2   thorpej }
   1161        1.2   thorpej 
   1162       1.52        ad /*
   1163       1.52        ad  * Free a dead LWP's remaining resources.
   1164       1.52        ad  *
   1165       1.52        ad  * XXXLWP limits.
   1166       1.52        ad  */
   1167       1.52        ad void
   1168       1.63        ad lwp_free(struct lwp *l, bool recycle, bool last)
   1169       1.52        ad {
   1170       1.52        ad 	struct proc *p = l->l_proc;
   1171      1.100        ad 	struct rusage *ru;
   1172       1.52        ad 	ksiginfoq_t kq;
   1173       1.52        ad 
   1174       1.92      yamt 	KASSERT(l != curlwp);
   1175      1.160      yamt 	KASSERT(last || mutex_owned(p->p_lock));
   1176       1.92      yamt 
   1177  1.164.2.2      yamt 	if (p != &proc0 && p->p_nlwps != 1)
   1178  1.164.2.2      yamt 		(void)chglwpcnt(kauth_cred_getuid(l->l_cred), -1);
   1179       1.52        ad 	/*
   1180       1.52        ad 	 * If this was not the last LWP in the process, then adjust
   1181       1.52        ad 	 * counters and unlock.
   1182       1.52        ad 	 */
   1183       1.52        ad 	if (!last) {
   1184       1.52        ad 		/*
   1185       1.52        ad 		 * Add the LWP's run time to the process' base value.
   1186       1.86      yamt 		 * This needs to coincide with coming off p_lwps.
   1187       1.52        ad 		 */
   1188       1.86      yamt 		bintime_add(&p->p_rtime, &l->l_rtime);
   1189       1.64      yamt 		p->p_pctcpu += l->l_pctcpu;
   1190      1.100        ad 		ru = &p->p_stats->p_ru;
   1191      1.100        ad 		ruadd(ru, &l->l_ru);
   1192      1.100        ad 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
   1193      1.100        ad 		ru->ru_nivcsw += l->l_nivcsw;
   1194       1.52        ad 		LIST_REMOVE(l, l_sibling);
   1195       1.52        ad 		p->p_nlwps--;
   1196       1.52        ad 		p->p_nzlwps--;
   1197       1.52        ad 		if ((l->l_prflag & LPR_DETACHED) != 0)
   1198       1.52        ad 			p->p_ndlwps--;
   1199       1.63        ad 
   1200       1.63        ad 		/*
   1201       1.63        ad 		 * Have any LWPs sleeping in lwp_wait() recheck for
   1202       1.63        ad 		 * deadlock.
   1203       1.63        ad 		 */
   1204       1.63        ad 		cv_broadcast(&p->p_lwpcv);
   1205      1.103        ad 		mutex_exit(p->p_lock);
   1206       1.63        ad 	}
   1207       1.52        ad 
   1208       1.52        ad #ifdef MULTIPROCESSOR
   1209       1.63        ad 	/*
   1210       1.63        ad 	 * In the unlikely event that the LWP is still on the CPU,
   1211       1.63        ad 	 * then spin until it has switched away.  We need to release
   1212       1.63        ad 	 * all locks to avoid deadlock against interrupt handlers on
   1213       1.63        ad 	 * the target CPU.
   1214       1.63        ad 	 */
   1215      1.115        ad 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
   1216       1.63        ad 		int count;
   1217       1.64      yamt 		(void)count; /* XXXgcc */
   1218       1.63        ad 		KERNEL_UNLOCK_ALL(curlwp, &count);
   1219      1.115        ad 		while ((l->l_pflag & LP_RUNNING) != 0 ||
   1220       1.64      yamt 		    l->l_cpu->ci_curlwp == l)
   1221       1.63        ad 			SPINLOCK_BACKOFF_HOOK;
   1222       1.63        ad 		KERNEL_LOCK(count, curlwp);
   1223       1.63        ad 	}
   1224       1.52        ad #endif
   1225       1.52        ad 
   1226       1.52        ad 	/*
   1227       1.52        ad 	 * Destroy the LWP's remaining signal information.
   1228       1.52        ad 	 */
   1229       1.52        ad 	ksiginfo_queue_init(&kq);
   1230       1.52        ad 	sigclear(&l->l_sigpend, NULL, &kq);
   1231       1.52        ad 	ksiginfo_queue_drain(&kq);
   1232       1.52        ad 	cv_destroy(&l->l_sigcv);
   1233  1.164.2.2      yamt 	cv_destroy(&l->l_waitcv);
   1234        1.2   thorpej 
   1235       1.19  jdolecek 	/*
   1236      1.162     rmind 	 * Free lwpctl structure and affinity.
   1237      1.162     rmind 	 */
   1238      1.162     rmind 	if (l->l_lwpctl) {
   1239      1.162     rmind 		lwp_ctl_free(l);
   1240      1.162     rmind 	}
   1241      1.162     rmind 	if (l->l_affinity) {
   1242      1.162     rmind 		kcpuset_unuse(l->l_affinity, NULL);
   1243      1.162     rmind 		l->l_affinity = NULL;
   1244      1.162     rmind 	}
   1245      1.162     rmind 
   1246      1.162     rmind 	/*
   1247       1.93      yamt 	 * caller wants to recycle them.  Also, free the scheduler-specific
   1248       1.93      yamt 	 * data.
   1249       1.93      yamt 	 * data.
   1250       1.52        ad 	 *
   1251       1.52        ad 	 * We can't return turnstile0 to the pool (it didn't come from it),
   1252       1.52        ad 	 * so if it comes up just drop it quietly and move on.
   1253       1.52        ad 	 *
   1254       1.52        ad 	 * We don't recycle the VM resources at this time.
   1255       1.19  jdolecek 	 */
   1256       1.64      yamt 
   1257       1.52        ad 	if (!recycle && l->l_ts != &turnstile0)
   1258       1.76        ad 		pool_cache_put(turnstile_cache, l->l_ts);
   1259       1.90        ad 	if (l->l_name != NULL)
   1260       1.90        ad 		kmem_free(l->l_name, MAXCOMLEN);
   1261      1.135     rmind 
   1262       1.52        ad 	cpu_lwp_free2(l);
   1263       1.19  jdolecek 	uvm_lwp_exit(l);
   1264      1.134     rmind 
   1265       1.60      yamt 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
   1266       1.75        ad 	KASSERT(l->l_inheritedprio == -1);
   1267      1.155      matt 	KASSERT(l->l_blcnt == 0);
   1268      1.138    darran 	kdtrace_thread_dtor(NULL, l);
   1269       1.52        ad 	if (!recycle)
   1270       1.87        ad 		pool_cache_put(lwp_cache, l);
   1271        1.2   thorpej }
   1272        1.2   thorpej 
   1273        1.2   thorpej /*
   1274       1.91     rmind  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1275       1.91     rmind  */
   1276       1.91     rmind void
   1277      1.114     rmind lwp_migrate(lwp_t *l, struct cpu_info *tci)
   1278       1.91     rmind {
   1279      1.114     rmind 	struct schedstate_percpu *tspc;
   1280      1.121     rmind 	int lstat = l->l_stat;
   1281      1.121     rmind 
   1282       1.91     rmind 	KASSERT(lwp_locked(l, NULL));
   1283      1.114     rmind 	KASSERT(tci != NULL);
   1284      1.114     rmind 
   1285      1.121     rmind 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
   1286      1.121     rmind 	if ((l->l_pflag & LP_RUNNING) != 0) {
   1287      1.121     rmind 		lstat = LSONPROC;
   1288      1.121     rmind 	}
   1289      1.121     rmind 
   1290      1.114     rmind 	/*
   1291      1.114     rmind 	 * The destination CPU could have been changed while the previous
   1292      1.114     rmind 	 * migration was still in progress.
   1293      1.114     rmind 	 */
   1294      1.121     rmind 	if (l->l_target_cpu != NULL) {
   1295      1.114     rmind 		l->l_target_cpu = tci;
   1296      1.114     rmind 		lwp_unlock(l);
   1297      1.114     rmind 		return;
   1298      1.114     rmind 	}
   1299       1.91     rmind 
   1300      1.114     rmind 	/* Nothing to do if trying to migrate to the same CPU */
   1301      1.114     rmind 	if (l->l_cpu == tci) {
   1302       1.91     rmind 		lwp_unlock(l);
   1303       1.91     rmind 		return;
   1304       1.91     rmind 	}
   1305       1.91     rmind 
   1306      1.114     rmind 	KASSERT(l->l_target_cpu == NULL);
   1307      1.114     rmind 	tspc = &tci->ci_schedstate;
   1308      1.121     rmind 	switch (lstat) {
   1309       1.91     rmind 	case LSRUN:
   1310      1.134     rmind 		l->l_target_cpu = tci;
   1311      1.134     rmind 		break;
   1312       1.91     rmind 	case LSIDL:
   1313      1.114     rmind 		l->l_cpu = tci;
   1314      1.114     rmind 		lwp_unlock_to(l, tspc->spc_mutex);
   1315       1.91     rmind 		return;
   1316       1.91     rmind 	case LSSLEEP:
   1317      1.114     rmind 		l->l_cpu = tci;
   1318       1.91     rmind 		break;
   1319       1.91     rmind 	case LSSTOP:
   1320       1.91     rmind 	case LSSUSPENDED:
   1321      1.114     rmind 		l->l_cpu = tci;
   1322      1.114     rmind 		if (l->l_wchan == NULL) {
   1323      1.114     rmind 			lwp_unlock_to(l, tspc->spc_lwplock);
   1324      1.114     rmind 			return;
   1325       1.91     rmind 		}
   1326      1.114     rmind 		break;
   1327       1.91     rmind 	case LSONPROC:
   1328      1.114     rmind 		l->l_target_cpu = tci;
   1329      1.114     rmind 		spc_lock(l->l_cpu);
   1330      1.114     rmind 		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
   1331      1.114     rmind 		spc_unlock(l->l_cpu);
   1332       1.91     rmind 		break;
   1333       1.91     rmind 	}
   1334       1.91     rmind 	lwp_unlock(l);
   1335       1.91     rmind }
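/*
 * Caller's contract, as a sketch: the LWP must be locked on entry, and
 * lwp_migrate() consumes the lock on every path:
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, tci);
 *	(l is now unlocked)
 */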
   1336       1.94     rmind  * Find the LWP in the process.  Arguments may be zero, in which case
   1337       1.94     rmind  * the calling process and the first LWP in its list will be used.
   1338      1.103        ad  * On success, returns the proc locked.
   1339       1.94     rmind  * the calling process and first LWP in the list will be used.
   1340      1.103        ad  * On success - returns proc locked.
   1341       1.91     rmind  */
   1342       1.91     rmind struct lwp *
   1343       1.91     rmind lwp_find2(pid_t pid, lwpid_t lid)
   1344       1.91     rmind {
   1345       1.91     rmind 	proc_t *p;
   1346       1.91     rmind 	lwp_t *l;
   1347       1.91     rmind 
   1348      1.150     rmind 	/* Find the process. */
   1349       1.94     rmind 	if (pid != 0) {
   1350      1.150     rmind 		mutex_enter(proc_lock);
   1351      1.150     rmind 		p = proc_find(pid);
   1352      1.150     rmind 		if (p == NULL) {
   1353      1.150     rmind 			mutex_exit(proc_lock);
   1354      1.150     rmind 			return NULL;
   1355      1.150     rmind 		}
   1356      1.150     rmind 		mutex_enter(p->p_lock);
   1357      1.102        ad 		mutex_exit(proc_lock);
   1358      1.150     rmind 	} else {
   1359      1.150     rmind 		p = curlwp->l_proc;
   1360      1.150     rmind 		mutex_enter(p->p_lock);
   1361      1.150     rmind 	}
   1362      1.150     rmind 	/* Find the thread. */
   1363      1.150     rmind 	if (lid != 0) {
   1364      1.150     rmind 		l = lwp_find(p, lid);
   1365      1.150     rmind 	} else {
   1366      1.150     rmind 		l = LIST_FIRST(&p->p_lwps);
   1367       1.94     rmind 	}
   1368      1.103        ad 	if (l == NULL) {
   1369      1.103        ad 		mutex_exit(p->p_lock);
   1370      1.103        ad 	}
   1371       1.91     rmind 	return l;
   1372       1.91     rmind }
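/*
 * Illustrative use (hypothetical values): on success the proc comes
 * back locked, so the caller is responsible for unlocking it:
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		...inspect l...
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */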
   1373       1.91     rmind 
   1374       1.91     rmind /*
   1375  1.164.2.1      yamt  * Look up a live LWP within the specified process.
   1376       1.52        ad  *
   1377      1.103        ad  * Must be called with p->p_lock held.
   1378       1.52        ad  */
   1379       1.52        ad struct lwp *
   1380      1.151       chs lwp_find(struct proc *p, lwpid_t id)
   1381       1.52        ad {
   1382       1.52        ad 	struct lwp *l;
   1383       1.52        ad 
   1384      1.103        ad 	KASSERT(mutex_owned(p->p_lock));
   1385       1.52        ad 
   1386       1.52        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1387       1.52        ad 		if (l->l_lid == id)
   1388       1.52        ad 			break;
   1389       1.52        ad 	}
   1390       1.52        ad 
   1391       1.52        ad 	/*
   1392       1.52        ad 	 * No need to lock - all of these conditions will
   1393       1.52        ad 	 * be visible with the process level mutex held.
   1394       1.52        ad 	 */
   1395       1.52        ad 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1396       1.52        ad 		l = NULL;
   1397       1.52        ad 
   1398       1.52        ad 	return l;
   1399       1.52        ad }
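/*
 * A sketch of the common pattern for using the result beyond p_lock:
 * pin the LWP with lwp_addref() before dropping the lock, and release
 * it with lwp_delref() when done:
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	...
 *	if (l != NULL)
 *		lwp_delref(l);
 */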
   1400       1.52        ad 
   1401       1.52        ad /*
   1402       1.37        ad  * Update an LWP's cached credentials to mirror the process' master copy.
   1403       1.37        ad  *
   1404       1.37        ad  * This happens early in the syscall path, on user trap, and on LWP
   1405       1.37        ad  * creation.  A long-running LWP can also voluntarily choose to update
   1406       1.37        ad  * its credentials by calling this routine.  This may be called from
   1407       1.37        ad  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1408       1.37        ad  */
   1409       1.37        ad void
   1410       1.37        ad lwp_update_creds(struct lwp *l)
   1411       1.37        ad {
   1412       1.37        ad 	kauth_cred_t oc;
   1413       1.37        ad 	struct proc *p;
   1414       1.37        ad 
   1415       1.37        ad 	p = l->l_proc;
   1416       1.37        ad 	oc = l->l_cred;
   1417       1.37        ad 
   1418      1.103        ad 	mutex_enter(p->p_lock);
   1419       1.37        ad 	kauth_cred_hold(p->p_cred);
   1420       1.37        ad 	l->l_cred = p->p_cred;
   1421       1.98        ad 	l->l_prflag &= ~LPR_CRMOD;
   1422      1.103        ad 	mutex_exit(p->p_lock);
   1423       1.88        ad 	if (oc != NULL)
   1424       1.37        ad 		kauth_cred_free(oc);
   1425       1.52        ad }
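/*
 * The check mentioned above boils down to this sketch, which is the
 * fast path taken by LWP_CACHE_CREDS() on the way into a syscall:
 *
 *	if (l->l_cred != p->p_cred)
 *		lwp_update_creds(l);
 */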
   1426       1.52        ad 
   1427       1.52        ad /*
   1428       1.52        ad  * Verify that an LWP is locked, and optionally verify that the lock matches
   1429       1.52        ad  * one we specify.
   1430       1.52        ad  */
   1431       1.52        ad int
   1432       1.52        ad lwp_locked(struct lwp *l, kmutex_t *mtx)
   1433       1.52        ad {
   1434       1.52        ad 	kmutex_t *cur = l->l_mutex;
   1435       1.52        ad 
   1436       1.52        ad 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1437       1.52        ad }
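/*
 * Typically used in assertions, e.g.:
 *
 *	KASSERT(lwp_locked(l, NULL));
 */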
   1438       1.52        ad 
   1439       1.52        ad /*
   1440       1.52        ad  * Lend a new mutex to an LWP.  The old mutex must be held.
   1441       1.52        ad  */
   1442       1.52        ad void
   1443       1.52        ad lwp_setlock(struct lwp *l, kmutex_t *new)
   1444       1.52        ad {
   1445       1.52        ad 
   1446       1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1447       1.52        ad 
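	/*
	 * Publish the new mutex pointer only after all prior stores
	 * are visible, so that a thread acquiring the lock through
	 * lwp_trylock() also observes the state it protects.
	 */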
   1448      1.107        ad 	membar_exit();
   1449       1.52        ad 	l->l_mutex = new;
   1450       1.52        ad }
   1451       1.52        ad 
   1452       1.52        ad /*
   1453       1.52        ad  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1454       1.52        ad  * must be held.
   1455       1.52        ad  */
   1456       1.52        ad void
   1457       1.52        ad lwp_unlock_to(struct lwp *l, kmutex_t *new)
   1458       1.52        ad {
   1459       1.52        ad 	kmutex_t *old;
   1460       1.52        ad 
   1461      1.152     rmind 	KASSERT(lwp_locked(l, NULL));
   1462       1.52        ad 
   1463       1.52        ad 	old = l->l_mutex;
   1464      1.107        ad 	membar_exit();
   1465       1.52        ad 	l->l_mutex = new;
   1466       1.52        ad 	mutex_spin_exit(old);
   1467       1.52        ad }
   1468       1.52        ad 
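/*
 * Try to acquire an LWP's lock without blocking.  The lock may be
 * lent away (see lwp_setlock() and lwp_unlock_to()) while we acquire
 * it, so re-check that l_mutex still names the mutex we took and
 * retry if it does not.  Returns non-zero on success.
 */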
   1469       1.60      yamt int
   1470       1.60      yamt lwp_trylock(struct lwp *l)
   1471       1.60      yamt {
   1472       1.60      yamt 	kmutex_t *old;
   1473       1.60      yamt 
   1474       1.60      yamt 	for (;;) {
   1475       1.60      yamt 		if (!mutex_tryenter(old = l->l_mutex))
   1476       1.60      yamt 			return 0;
   1477       1.60      yamt 		if (__predict_true(l->l_mutex == old))
   1478       1.60      yamt 			return 1;
   1479       1.60      yamt 		mutex_spin_exit(old);
   1480       1.60      yamt 	}
   1481       1.60      yamt }
   1482       1.60      yamt 
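/*
 * Remove an LWP from its sleep queue, via the sync object's unsleep
 * method.  Must be called with the LWP locked.
 */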
   1483      1.134     rmind void
   1484       1.96        ad lwp_unsleep(lwp_t *l, bool cleanup)
   1485       1.96        ad {
   1486       1.96        ad 
   1487       1.96        ad 	KASSERT(mutex_owned(l->l_mutex));
   1488      1.134     rmind 	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
   1489       1.96        ad }
   1490       1.96        ad 
   1491       1.52        ad /*
   1492       1.56     pavel  * Handle exceptions for mi_userret().  Called if any of the flags in
   1493       1.52        ad  * LW_USERRET is set.
   1494       1.52        ad  */
   1495       1.52        ad void
   1496       1.52        ad lwp_userret(struct lwp *l)
   1497       1.52        ad {
   1498       1.52        ad 	struct proc *p;
   1499       1.52        ad 	int sig;
   1500       1.52        ad 
   1501      1.114     rmind 	KASSERT(l == curlwp);
   1502      1.114     rmind 	KASSERT(l->l_stat == LSONPROC);
   1503       1.52        ad 	p = l->l_proc;
   1504       1.52        ad 
   1505       1.75        ad #ifndef __HAVE_FAST_SOFTINTS
   1506       1.75        ad 	/* Run pending soft interrupts. */
   1507       1.75        ad 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1508       1.75        ad 		softint_overlay();
   1509       1.75        ad #endif
   1510       1.75        ad 
   1511       1.52        ad 	/*
   1512  1.164.2.1      yamt 	 * It is safe to do this read unlocked on an MP system.
   1513       1.52        ad 	 */
   1514  1.164.2.1      yamt 	while ((l->l_flag & LW_USERRET) != 0) {
   1515       1.52        ad 		/*
   1516       1.52        ad 		 * Process pending signals first, unless the process
   1517       1.61        ad 		 * is dumping core or exiting, where we will instead
   1518      1.101     rmind 		 * enter the LW_WSUSPEND case below.
   1519       1.52        ad 		 */
   1520       1.61        ad 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1521       1.61        ad 		    LW_PENDSIG) {
   1522      1.103        ad 			mutex_enter(p->p_lock);
   1523       1.52        ad 			while ((sig = issignal(l)) != 0)
   1524       1.52        ad 				postsig(sig);
   1525      1.103        ad 			mutex_exit(p->p_lock);
   1526       1.52        ad 		}
   1527       1.52        ad 
   1528       1.52        ad 		/*
   1529       1.52        ad 		 * Core-dump or suspend pending.
   1530       1.52        ad 		 *
   1531      1.159      matt 		 * In case of core dump, suspend ourselves, so that the kernel
   1532      1.159      matt 		 * stack and therefore the userland registers saved in the
   1533      1.159      matt 		 * trapframe are around for coredump() to write them out.
   1534      1.159      matt 		 * We also need to save any PCU resources that we have so that
   1535      1.159      matt 	 * they are accessible to coredump().  We issue a wakeup on
   1536      1.159      matt 		 * p->p_lwpcv so that sigexit() will write the core file out
   1537      1.159      matt 		 * once all other LWPs are suspended.
   1538       1.52        ad 		 */
   1539       1.56     pavel 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1540      1.159      matt 			pcu_save_all(l);
   1541      1.103        ad 			mutex_enter(p->p_lock);
   1542       1.52        ad 			p->p_nrlwps--;
   1543       1.52        ad 			cv_broadcast(&p->p_lwpcv);
   1544       1.52        ad 			lwp_lock(l);
   1545       1.52        ad 			l->l_stat = LSSUSPENDED;
   1546      1.104        ad 			lwp_unlock(l);
   1547      1.103        ad 			mutex_exit(p->p_lock);
   1548      1.104        ad 			lwp_lock(l);
   1549       1.64      yamt 			mi_switch(l);
   1550       1.52        ad 		}
   1551       1.52        ad 
   1552       1.52        ad 		/* Process is exiting. */
   1553       1.56     pavel 		if ((l->l_flag & LW_WEXIT) != 0) {
   1554       1.52        ad 			lwp_exit(l);
   1555       1.52        ad 			KASSERT(0);
   1556       1.52        ad 			/* NOTREACHED */
   1557       1.52        ad 		}
   1558      1.156     pooka 
   1559      1.156     pooka 		/* update lwpctl processor (for vfork child_return) */
   1560      1.156     pooka 		if (l->l_flag & LW_LWPCTL) {
   1561      1.156     pooka 			lwp_lock(l);
   1562      1.156     pooka 			KASSERT(kpreempt_disabled());
   1563      1.156     pooka 			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
   1564      1.156     pooka 			l->l_lwpctl->lc_pctr++;
   1565      1.156     pooka 			l->l_flag &= ~LW_LWPCTL;
   1566      1.156     pooka 			lwp_unlock(l);
   1567      1.156     pooka 		}
   1568       1.52        ad 	}
   1569       1.52        ad }
   1570       1.52        ad 
   1571       1.52        ad /*
   1572       1.52        ad  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1573       1.52        ad  */
   1574       1.52        ad void
   1575       1.52        ad lwp_need_userret(struct lwp *l)
   1576       1.52        ad {
   1577       1.63        ad 	KASSERT(lwp_locked(l, NULL));
   1578       1.52        ad 
   1579       1.52        ad 	/*
   1580       1.52        ad 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1581       1.52        ad 	 * that the condition will be seen before forcing the LWP to enter
   1582       1.52        ad 	 * kernel mode.
   1583       1.52        ad 	 */
   1584       1.81        ad 	membar_producer();
   1585       1.52        ad 	cpu_signotify(l);
   1586       1.52        ad }
   1587       1.52        ad 
   1588       1.52        ad /*
   1589       1.52        ad  * Add one reference to an LWP.  This will prevent the LWP from
   1590       1.52        ad  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1591       1.52        ad  */
   1592       1.52        ad void
   1593       1.52        ad lwp_addref(struct lwp *l)
   1594       1.52        ad {
   1595       1.52        ad 
   1596      1.103        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1597       1.52        ad 	KASSERT(l->l_stat != LSZOMB);
   1598       1.52        ad 	KASSERT(l->l_refcnt != 0);
   1599       1.52        ad 
   1600       1.52        ad 	l->l_refcnt++;
   1601       1.52        ad }
   1602       1.52        ad 
   1603       1.52        ad /*
   1604       1.52        ad  * Remove one reference to an LWP.  If this is the last reference,
   1605       1.52        ad  * then we must finalize the LWP's death.
   1606       1.52        ad  */
   1607       1.52        ad void
   1608       1.52        ad lwp_delref(struct lwp *l)
   1609       1.52        ad {
   1610       1.52        ad 	struct proc *p = l->l_proc;
   1611       1.52        ad 
   1612      1.103        ad 	mutex_enter(p->p_lock);
   1613      1.142  christos 	lwp_delref2(l);
   1614      1.142  christos 	mutex_exit(p->p_lock);
   1615      1.142  christos }
   1616      1.142  christos 
   1617      1.142  christos /*
   1618      1.142  christos  * Remove one reference to an LWP.  If this is the last reference,
   1619      1.142  christos  * then we must finalize the LWP's death.  The proc mutex is held
   1620      1.142  christos  * on entry.
   1621      1.142  christos  */
   1622      1.142  christos void
   1623      1.142  christos lwp_delref2(struct lwp *l)
   1624      1.142  christos {
   1625      1.142  christos 	struct proc *p = l->l_proc;
   1626      1.142  christos 
   1627      1.142  christos 	KASSERT(mutex_owned(p->p_lock));
   1628       1.72        ad 	KASSERT(l->l_stat != LSZOMB);
   1629       1.72        ad 	KASSERT(l->l_refcnt > 0);
   1630       1.52        ad 	if (--l->l_refcnt == 0)
   1631       1.76        ad 		cv_broadcast(&p->p_lwpcv);
   1632       1.52        ad }
   1633       1.52        ad 
   1634       1.52        ad /*
   1635       1.52        ad  * Drain all references to the current LWP.
   1636       1.52        ad  */
   1637       1.52        ad void
   1638       1.52        ad lwp_drainrefs(struct lwp *l)
   1639       1.52        ad {
   1640       1.52        ad 	struct proc *p = l->l_proc;
   1641       1.52        ad 
   1642      1.103        ad 	KASSERT(mutex_owned(p->p_lock));
   1643       1.52        ad 	KASSERT(l->l_refcnt != 0);
   1644       1.52        ad 
   1645       1.52        ad 	l->l_refcnt--;
   1646       1.52        ad 	while (l->l_refcnt != 0)
   1647      1.103        ad 		cv_wait(&p->p_lwpcv, p->p_lock);
   1648       1.37        ad }
   1649       1.41   thorpej 
   1650       1.41   thorpej /*
   1651      1.127        ad  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
   1652      1.127        ad  * be held.
   1653      1.127        ad  */
   1654      1.127        ad bool
   1655      1.127        ad lwp_alive(lwp_t *l)
   1656      1.127        ad {
   1657      1.127        ad 
   1658      1.127        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1659      1.127        ad 
   1660      1.127        ad 	switch (l->l_stat) {
   1661      1.127        ad 	case LSSLEEP:
   1662      1.127        ad 	case LSRUN:
   1663      1.127        ad 	case LSONPROC:
   1664      1.127        ad 	case LSSTOP:
   1665      1.127        ad 	case LSSUSPENDED:
   1666      1.127        ad 		return true;
   1667      1.127        ad 	default:
   1668      1.127        ad 		return false;
   1669      1.127        ad 	}
   1670      1.127        ad }
   1671      1.127        ad 
   1672      1.127        ad /*
   1673      1.127        ad  * Return first live LWP in the process.
   1674      1.127        ad  */
   1675      1.127        ad lwp_t *
   1676      1.127        ad lwp_find_first(proc_t *p)
   1677      1.127        ad {
   1678      1.127        ad 	lwp_t *l;
   1679      1.127        ad 
   1680      1.127        ad 	KASSERT(mutex_owned(p->p_lock));
   1681      1.127        ad 
   1682      1.127        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1683      1.127        ad 		if (lwp_alive(l)) {
   1684      1.127        ad 			return l;
   1685      1.127        ad 		}
   1686      1.127        ad 	}
   1687      1.127        ad 
   1688      1.127        ad 	return NULL;
   1689      1.127        ad }
   1690      1.127        ad 
   1691      1.127        ad /*
   1692       1.78        ad  * Allocate a new lwpctl structure for a user LWP.
   1693       1.78        ad  */
   1694       1.78        ad int
   1695       1.78        ad lwp_ctl_alloc(vaddr_t *uaddr)
   1696       1.78        ad {
   1697       1.78        ad 	lcproc_t *lp;
   1698       1.78        ad 	u_int bit, i, offset;
   1699       1.78        ad 	struct uvm_object *uao;
   1700       1.78        ad 	int error;
   1701       1.78        ad 	lcpage_t *lcp;
   1702       1.78        ad 	proc_t *p;
   1703       1.78        ad 	lwp_t *l;
   1704       1.78        ad 
   1705       1.78        ad 	l = curlwp;
   1706       1.78        ad 	p = l->l_proc;
   1707       1.78        ad 
   1708      1.156     pooka 	/* don't allow a vforked process to create lwp ctls */
   1709      1.156     pooka 	if (p->p_lflag & PL_PPWAIT)
   1710      1.156     pooka 		return EBUSY;
   1711      1.156     pooka 
   1712       1.81        ad 	if (l->l_lcpage != NULL) {
   1713       1.81        ad 		lcp = l->l_lcpage;
   1714       1.81        ad 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1715      1.143     njoly 		return 0;
   1716       1.81        ad 	}
   1717       1.78        ad 
   1718       1.78        ad 	/* First time around, allocate header structure for the process. */
   1719       1.78        ad 	if ((lp = p->p_lwpctl) == NULL) {
   1720       1.78        ad 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1721       1.78        ad 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1722       1.78        ad 		lp->lp_uao = NULL;
   1723       1.78        ad 		TAILQ_INIT(&lp->lp_pages);
   1724      1.103        ad 		mutex_enter(p->p_lock);
   1725       1.78        ad 		if (p->p_lwpctl == NULL) {
   1726       1.78        ad 			p->p_lwpctl = lp;
   1727      1.103        ad 			mutex_exit(p->p_lock);
   1728       1.78        ad 		} else {
   1729      1.103        ad 			mutex_exit(p->p_lock);
   1730       1.78        ad 			mutex_destroy(&lp->lp_lock);
   1731       1.78        ad 			kmem_free(lp, sizeof(*lp));
   1732       1.78        ad 			lp = p->p_lwpctl;
   1733       1.78        ad 		}
   1734       1.78        ad 	}
   1735       1.78        ad 
   1736       1.78        ad  	/*
   1737       1.78        ad  	 * Set up an anonymous memory region to hold the shared pages.
   1738       1.78        ad  	 * Map them into the process' address space.  The user vmspace
   1739       1.78        ad  	 * gets the first reference on the UAO.
   1740       1.78        ad  	 */
   1741       1.78        ad 	mutex_enter(&lp->lp_lock);
   1742       1.78        ad 	if (lp->lp_uao == NULL) {
   1743       1.78        ad 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1744       1.78        ad 		lp->lp_cur = 0;
   1745       1.78        ad 		lp->lp_max = LWPCTL_UAREA_SZ;
   1746       1.78        ad 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1747       1.78        ad 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
   1748       1.78        ad 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1749       1.78        ad 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1750       1.78        ad 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1751       1.78        ad 		if (error != 0) {
   1752       1.78        ad 			uao_detach(lp->lp_uao);
   1753       1.78        ad 			lp->lp_uao = NULL;
   1754       1.78        ad 			mutex_exit(&lp->lp_lock);
   1755       1.78        ad 			return error;
   1756       1.78        ad 		}
   1757       1.78        ad 	}
   1758       1.78        ad 
   1759       1.78        ad 	/* Get a free block and allocate for this LWP. */
   1760       1.78        ad 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1761       1.78        ad 		if (lcp->lcp_nfree != 0)
   1762       1.78        ad 			break;
   1763       1.78        ad 	}
   1764       1.78        ad 	if (lcp == NULL) {
   1765       1.78        ad 		/* Nothing available - try to set up a free page. */
   1766       1.78        ad 		if (lp->lp_cur == lp->lp_max) {
   1767       1.78        ad 			mutex_exit(&lp->lp_lock);
   1768       1.78        ad 			return ENOMEM;
   1769       1.78        ad 		}
   1770       1.78        ad 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1771       1.79      yamt 		if (lcp == NULL) {
   1772       1.79      yamt 			mutex_exit(&lp->lp_lock);
   1773       1.78        ad 			return ENOMEM;
   1774       1.79      yamt 		}
   1775       1.78        ad 		/*
   1776       1.78        ad 		 * Wire the next page down in kernel space.  Since this
   1777       1.78        ad 		 * is a new mapping, we must add a reference.
   1778       1.78        ad 		 */
   1779       1.78        ad 		uao = lp->lp_uao;
   1780       1.78        ad 		(*uao->pgops->pgo_reference)(uao);
   1781       1.99        ad 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1782       1.78        ad 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1783       1.78        ad 		    uao, lp->lp_cur, PAGE_SIZE,
   1784       1.78        ad 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1785       1.78        ad 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1786       1.78        ad 		if (error != 0) {
   1787       1.78        ad 			mutex_exit(&lp->lp_lock);
   1788       1.78        ad 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1789       1.78        ad 			(*uao->pgops->pgo_detach)(uao);
   1790       1.78        ad 			return error;
   1791       1.78        ad 		}
   1792       1.89      yamt 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1793       1.89      yamt 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1794       1.89      yamt 		if (error != 0) {
   1795       1.89      yamt 			mutex_exit(&lp->lp_lock);
   1796       1.89      yamt 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1797       1.89      yamt 			    lcp->lcp_kaddr + PAGE_SIZE);
   1798       1.89      yamt 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1799       1.89      yamt 			return error;
   1800       1.89      yamt 		}
   1801       1.78        ad 		/* Prepare the page descriptor and link into the list. */
   1802       1.78        ad 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1803       1.78        ad 		lp->lp_cur += PAGE_SIZE;
   1804       1.78        ad 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1805       1.78        ad 		lcp->lcp_rotor = 0;
   1806       1.78        ad 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1807       1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1808       1.78        ad 	}
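	/*
	 * Scan the bitmap from the rotor for a word with a free slot.
	 * Each 32-bit word tracks 32 lwpctl structures, so the slot
	 * number within the page is (word << 5) + bit.
	 */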
   1809       1.78        ad 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1810       1.78        ad 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1811       1.78        ad 			i = 0;
   1812       1.78        ad 	}
   1813       1.78        ad 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1814       1.78        ad 	lcp->lcp_bitmap[i] ^= (1 << bit);
   1815       1.78        ad 	lcp->lcp_rotor = i;
   1816       1.78        ad 	lcp->lcp_nfree--;
   1817       1.78        ad 	l->l_lcpage = lcp;
   1818       1.78        ad 	offset = (i << 5) + bit;
   1819       1.78        ad 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1820       1.78        ad 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1821       1.78        ad 	mutex_exit(&lp->lp_lock);
   1822       1.78        ad 
   1823      1.107        ad 	KPREEMPT_DISABLE(l);
   1824      1.111        ad 	l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
   1825      1.107        ad 	KPREEMPT_ENABLE(l);
   1826       1.78        ad 
   1827       1.78        ad 	return 0;
   1828       1.78        ad }
   1829       1.78        ad 
   1830       1.78        ad /*
   1831       1.78        ad  * Free an lwpctl structure back to the per-process list.
   1832       1.78        ad  */
   1833       1.78        ad void
   1834       1.78        ad lwp_ctl_free(lwp_t *l)
   1835       1.78        ad {
   1836      1.156     pooka 	struct proc *p = l->l_proc;
   1837       1.78        ad 	lcproc_t *lp;
   1838       1.78        ad 	lcpage_t *lcp;
   1839       1.78        ad 	u_int map, offset;
   1840       1.78        ad 
   1841      1.156     pooka 	/* don't free a lwp context we borrowed for vfork */
   1842      1.156     pooka 	if (p->p_lflag & PL_PPWAIT) {
   1843      1.156     pooka 		l->l_lwpctl = NULL;
   1844      1.156     pooka 		return;
   1845      1.156     pooka 	}
   1846      1.156     pooka 
   1847      1.156     pooka 	lp = p->p_lwpctl;
   1848       1.78        ad 	KASSERT(lp != NULL);
   1849       1.78        ad 
   1850       1.78        ad 	lcp = l->l_lcpage;
   1851       1.78        ad 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1852       1.78        ad 	KASSERT(offset < LWPCTL_PER_PAGE);
   1853       1.78        ad 
   1854       1.78        ad 	mutex_enter(&lp->lp_lock);
   1855       1.78        ad 	lcp->lcp_nfree++;
   1856       1.78        ad 	map = offset >> 5;
   1857       1.78        ad 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
   1858       1.78        ad 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1859       1.78        ad 		lcp->lcp_rotor = map;
   1860       1.78        ad 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1861       1.78        ad 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1862       1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1863       1.78        ad 	}
   1864       1.78        ad 	mutex_exit(&lp->lp_lock);
   1865       1.78        ad }
   1866       1.78        ad 
   1867       1.78        ad /*
   1868       1.78        ad  * Process is exiting; tear down lwpctl state.  This can only be safely
   1869       1.78        ad  * called by the last LWP in the process.
   1870       1.78        ad  */
   1871       1.78        ad void
   1872       1.78        ad lwp_ctl_exit(void)
   1873       1.78        ad {
   1874       1.78        ad 	lcpage_t *lcp, *next;
   1875       1.78        ad 	lcproc_t *lp;
   1876       1.78        ad 	proc_t *p;
   1877       1.78        ad 	lwp_t *l;
   1878       1.78        ad 
   1879       1.78        ad 	l = curlwp;
   1880       1.78        ad 	l->l_lwpctl = NULL;
   1881       1.95        ad 	l->l_lcpage = NULL;
   1882       1.78        ad 	p = l->l_proc;
   1883       1.78        ad 	lp = p->p_lwpctl;
   1884       1.78        ad 
   1885       1.78        ad 	KASSERT(lp != NULL);
   1886       1.78        ad 	KASSERT(p->p_nlwps == 1);
   1887       1.78        ad 
   1888       1.78        ad 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1889       1.78        ad 		next = TAILQ_NEXT(lcp, lcp_chain);
   1890       1.78        ad 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1891       1.78        ad 		    lcp->lcp_kaddr + PAGE_SIZE);
   1892       1.78        ad 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1893       1.78        ad 	}
   1894       1.78        ad 
   1895       1.78        ad 	if (lp->lp_uao != NULL) {
   1896       1.78        ad 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1897       1.78        ad 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1898       1.78        ad 	}
   1899       1.78        ad 
   1900       1.78        ad 	mutex_destroy(&lp->lp_lock);
   1901       1.78        ad 	kmem_free(lp, sizeof(*lp));
   1902       1.78        ad 	p->p_lwpctl = NULL;
   1903       1.78        ad }
   1904       1.84      yamt 
   1905      1.130        ad /*
   1906      1.130        ad  * Return the current LWP's "preemption counter".  Used to detect
   1907      1.130        ad  * preemption across operations that can tolerate preemption without
   1908      1.130        ad  * crashing, but which may generate incorrect results if preempted.
   1909      1.130        ad  */
   1910      1.130        ad uint64_t
   1911      1.130        ad lwp_pctr(void)
   1912      1.130        ad {
   1913      1.130        ad 
   1914      1.130        ad 	return curlwp->l_ncsw;
   1915      1.130        ad }
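/*
 * Typical use, as a sketch: sample the counter around the operation
 * and retry if it moved, which indicates a preemption:
 *
 *	uint64_t pctr;
 *	do {
 *		pctr = lwp_pctr();
 *		...operation that must detect preemption...
 *	} while (pctr != lwp_pctr());
 */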
   1916      1.130        ad 
   1917      1.151       chs /*
   1918      1.151       chs  * Set an LWP's private data pointer.
   1919      1.151       chs  */
   1920      1.151       chs int
   1921      1.151       chs lwp_setprivate(struct lwp *l, void *ptr)
   1922      1.151       chs {
   1923      1.151       chs 	int error = 0;
   1924      1.151       chs 
   1925      1.151       chs 	l->l_private = ptr;
   1926      1.151       chs #ifdef __HAVE_CPU_LWP_SETPRIVATE
   1927      1.151       chs 	error = cpu_lwp_setprivate(l, ptr);
   1928      1.151       chs #endif
   1929      1.151       chs 	return error;
   1930      1.151       chs }
   1931      1.151       chs 
   1932       1.84      yamt #if defined(DDB)
   1933      1.153     rmind #include <machine/pcb.h>
   1934      1.153     rmind 
   1935       1.84      yamt void
   1936       1.84      yamt lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1937       1.84      yamt {
   1938       1.84      yamt 	lwp_t *l;
   1939       1.84      yamt 
   1940       1.84      yamt 	LIST_FOREACH(l, &alllwp, l_list) {
   1941       1.84      yamt 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   1942       1.84      yamt 
   1943       1.84      yamt 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   1944       1.84      yamt 			continue;
   1945       1.84      yamt 		}
   1946       1.84      yamt 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   1947       1.84      yamt 		    (void *)addr, (void *)stack,
   1948       1.84      yamt 		    (size_t)(addr - stack), l);
   1949       1.84      yamt 	}
   1950       1.84      yamt }
   1951       1.84      yamt #endif /* defined(DDB) */
   1952