kern_lwp.c revision 1.249
      1  1.249       mrg /*	$NetBSD: kern_lwp.c,v 1.249 2022/05/07 19:44:40 mrg Exp $	*/
      2    1.2   thorpej 
      3    1.2   thorpej /*-
      4  1.220        ad  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
      5  1.220        ad  *     The NetBSD Foundation, Inc.
      6    1.2   thorpej  * All rights reserved.
      7    1.2   thorpej  *
      8    1.2   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      9   1.52        ad  * by Nathan J. Williams, and Andrew Doran.
     10    1.2   thorpej  *
     11    1.2   thorpej  * Redistribution and use in source and binary forms, with or without
     12    1.2   thorpej  * modification, are permitted provided that the following conditions
     13    1.2   thorpej  * are met:
     14    1.2   thorpej  * 1. Redistributions of source code must retain the above copyright
     15    1.2   thorpej  *    notice, this list of conditions and the following disclaimer.
     16    1.2   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     17    1.2   thorpej  *    notice, this list of conditions and the following disclaimer in the
     18    1.2   thorpej  *    documentation and/or other materials provided with the distribution.
     19    1.2   thorpej  *
     20    1.2   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21    1.2   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22    1.2   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23    1.2   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24    1.2   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25    1.2   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26    1.2   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27    1.2   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28    1.2   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29    1.2   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30    1.2   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     31    1.2   thorpej  */
     32    1.9     lukem 
     33   1.52        ad /*
     34   1.52        ad  * Overview
     35   1.52        ad  *
     36   1.66        ad  *	Lightweight processes (LWPs) are the basic unit or thread of
     37   1.52        ad  *	execution within the kernel.  The core state of an LWP is described
     38   1.66        ad  *	by "struct lwp", also known as lwp_t.
     39   1.52        ad  *
      40   1.52        ad  *	Each LWP is contained within a process (described by "struct proc").
     41   1.52        ad  *	Every process contains at least one LWP, but may contain more.  The
     42   1.52        ad  *	process describes attributes shared among all of its LWPs such as a
     43   1.52        ad  *	private address space, global execution state (stopped, active,
     44   1.52        ad  *	zombie, ...), signal disposition and so on.  On a multiprocessor
      45   1.66        ad  *	machine, multiple LWPs may be executing concurrently in the kernel.
     46   1.52        ad  *
     47   1.52        ad  * Execution states
     48   1.52        ad  *
     49   1.52        ad  *	At any given time, an LWP has overall state that is described by
     50   1.52        ad  *	lwp::l_stat.  The states are broken into two sets below.  The first
     51   1.52        ad  *	set is guaranteed to represent the absolute, current state of the
     52   1.52        ad  *	LWP:
     53  1.101     rmind  *
     54  1.101     rmind  *	LSONPROC
     55  1.101     rmind  *
     56  1.101     rmind  *		On processor: the LWP is executing on a CPU, either in the
     57  1.101     rmind  *		kernel or in user space.
     58  1.101     rmind  *
     59  1.101     rmind  *	LSRUN
     60  1.101     rmind  *
     61  1.101     rmind  *		Runnable: the LWP is parked on a run queue, and may soon be
     62  1.101     rmind  *		chosen to run by an idle processor, or by a processor that
      63  1.101     rmind  *		has been asked to preempt a currently running but lower
     64  1.134     rmind  *		priority LWP.
     65  1.101     rmind  *
     66  1.101     rmind  *	LSIDL
     67  1.101     rmind  *
     68  1.238        ad  *		Idle: the LWP has been created but has not yet executed, or
     69  1.238        ad  *		it has ceased executing a unit of work and is waiting to be
     70  1.238        ad  *		started again.  This state exists so that the LWP can occupy
     71  1.238        ad  *		a slot in the process & PID table, but without having to
     72  1.238        ad  *		worry about being touched; lookups of the LWP by ID will
     73  1.238        ad  *		fail while in this state.  The LWP will become visible for
     74  1.238        ad  *		lookup once its state transitions further.  Some special
     75  1.238        ad  *		kernel threads also (ab)use this state to indicate that they
     76  1.238        ad  *		are idle (soft interrupts and idle LWPs).
     77  1.101     rmind  *
     78  1.101     rmind  *	LSSUSPENDED:
     79  1.101     rmind  *
     80  1.101     rmind  *		Suspended: the LWP has had its execution suspended by
     81   1.52        ad  *		another LWP in the same process using the _lwp_suspend()
     82   1.52        ad  *		system call.  User-level LWPs also enter the suspended
     83   1.52        ad  *		state when the system is shutting down.
     84   1.52        ad  *
      85   1.52        ad  *	The second set represents a "statement of intent" on behalf of the
      86   1.52        ad  *	LWP.  The LWP may in fact be executing on a processor, or may be
      87   1.66        ad  *	sleeping or idle.  It is expected to take the necessary action to
     88  1.101     rmind  *	stop executing or become "running" again within a short timeframe.
     89  1.227        ad  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
     90  1.101     rmind  *	Importantly, it indicates that its state is tied to a CPU.
     91  1.101     rmind  *
     92  1.101     rmind  *	LSZOMB:
     93  1.101     rmind  *
     94  1.101     rmind  *		Dead or dying: the LWP has released most of its resources
     95  1.129        ad  *		and is about to switch away into oblivion, or has already
     96   1.66        ad  *		switched away.  When it switches away, its few remaining
     97   1.66        ad  *		resources can be collected.
     98  1.101     rmind  *
     99  1.101     rmind  *	LSSLEEP:
    100  1.101     rmind  *
    101  1.101     rmind  *		Sleeping: the LWP has entered itself onto a sleep queue, and
    102  1.101     rmind  *		has switched away or will switch away shortly to allow other
    103   1.66        ad  *		LWPs to run on the CPU.
    104  1.101     rmind  *
    105  1.101     rmind  *	LSSTOP:
    106  1.101     rmind  *
    107  1.101     rmind  *		Stopped: the LWP has been stopped as a result of a job
    108  1.101     rmind  *		control signal, or as a result of the ptrace() interface.
    109  1.101     rmind  *
    110  1.101     rmind  *		Stopped LWPs may run briefly within the kernel to handle
    111  1.101     rmind  *		signals that they receive, but will not return to user space
    112  1.101     rmind  *		until their process' state is changed away from stopped.
    113  1.101     rmind  *
     114  1.101     rmind  *		Individual LWPs within a process cannot be stopped
    115  1.101     rmind  *		selectively: all actions that can stop or continue LWPs
    116  1.101     rmind  *		occur at the process level.
    117  1.101     rmind  *
    118   1.52        ad  * State transitions
    119   1.52        ad  *
    120   1.66        ad  *	Note that the LSSTOP state may only be set when returning to
     121   1.66        ad  *	user space in userret(), or when sleeping interruptibly.  The
    122   1.66        ad  *	LSSUSPENDED state may only be set in userret().  Before setting
    123   1.66        ad  *	those states, we try to ensure that the LWPs will release all
    124   1.66        ad  *	locks that they hold, and at a minimum try to ensure that the
    125   1.66        ad  *	LWP can be set runnable again by a signal.
    126   1.52        ad  *
    127   1.52        ad  *	LWPs may transition states in the following ways:
    128   1.52        ad  *
    129   1.52        ad  *	 RUN -------> ONPROC		ONPROC -----> RUN
     130  1.129        ad  *						    > SLEEP
     131  1.129        ad  *						    > STOPPED
    132   1.52        ad  *						    > SUSPENDED
    133   1.52        ad  *						    > ZOMB
    134  1.129        ad  *						    > IDL (special cases)
    135   1.52        ad  *
    136   1.52        ad  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    137  1.129        ad  *	            > SLEEP
    138   1.52        ad  *
    139   1.52        ad  *	 SLEEP -----> ONPROC		IDL --------> RUN
    140  1.101     rmind  *		    > RUN			    > SUSPENDED
    141  1.101     rmind  *		    > STOPPED			    > STOPPED
    142  1.129        ad  *						    > ONPROC (special cases)
    143   1.52        ad  *
     144  1.129        ad  *	Some state transitions are only possible with kernel threads (e.g.
    145  1.129        ad  *	ONPROC -> IDL) and happen under tightly controlled circumstances
    146  1.129        ad  *	free of unwanted side effects.
    147   1.66        ad  *
    148  1.114     rmind  * Migration
    149  1.114     rmind  *
     150  1.114     rmind  *	Migration of threads from one CPU to another may be performed
     151  1.114     rmind  *	internally by the scheduler via the sched_takecpu() or
     152  1.114     rmind  *	sched_catchlwp() functions.  The universal lwp_migrate() function
     153  1.114     rmind  *	should be used for any other case.  Subsystems in the kernel must
     154  1.114     rmind  *	be aware that the CPU of an LWP may change while it is not locked.
    155  1.114     rmind  *
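                           *	As a hedged illustration of that pattern (hypothetical fragment,
                           *	not taken from this file; "tci" is assumed to be a valid
                           *	struct cpu_info pointer):
                           *
                           *		lwp_lock(l);
                           *		lwp_migrate(l, tci);	(releases the LWP's lock)
                           *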
    156   1.52        ad  * Locking
    157   1.52        ad  *
    158   1.52        ad  *	The majority of fields in 'struct lwp' are covered by a single,
    159   1.66        ad  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
    160   1.52        ad  *	each field are documented in sys/lwp.h.
    161   1.52        ad  *
    162   1.66        ad  *	State transitions must be made with the LWP's general lock held,
    163  1.152     rmind  *	and may cause the LWP's lock pointer to change.  Manipulation of
    164   1.66        ad  *	the general lock is not performed directly, but through calls to
    165  1.152     rmind  *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
    166  1.152     rmind  *	adaptive locks are not allowed to be released while the LWP's lock
    167  1.152     rmind  *	is being held (unlike for other spin-locks).
    168   1.52        ad  *
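                           *	A minimal sketch of the usual pattern (hypothetical fragment,
                           *	not taken from this file):
                           *
                           *		lwp_lock(l);	(acquire whatever l_mutex points at)
                           *		... inspect or change l->l_stat ...
                           *		lwp_unlock(l);	(the lock pointer may have changed)
                           *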
    169   1.52        ad  *	States and their associated locks:
    170   1.52        ad  *
     171  1.212        ad  *	LSIDL, LSONPROC, LSZOMB, LSSUSPENDED:
    172   1.52        ad  *
    173  1.212        ad  *		Always covered by spc_lwplock, which protects LWPs not
    174  1.212        ad  *		associated with any other sync object.  This is a per-CPU
    175  1.212        ad  *		lock and matches lwp::l_cpu.
    176   1.52        ad  *
    177  1.212        ad  *	LSRUN:
    178   1.52        ad  *
    179   1.64      yamt  *		Always covered by spc_mutex, which protects the run queues.
    180  1.129        ad  *		This is a per-CPU lock and matches lwp::l_cpu.
    181   1.52        ad  *
    182   1.52        ad  *	LSSLEEP:
    183   1.52        ad  *
    184  1.212        ad  *		Covered by a lock associated with the sleep queue (sometimes
    185  1.221        ad  *		a turnstile sleep queue) that the LWP resides on.  This can
    186  1.221        ad  *		be spc_lwplock for SOBJ_SLEEPQ_NULL (an "untracked" sleep).
    187   1.52        ad  *
    188  1.212        ad  *	LSSTOP:
    189  1.101     rmind  *
    190   1.52        ad  *		If the LWP was previously sleeping (l_wchan != NULL), then
    191   1.66        ad  *		l_mutex references the sleep queue lock.  If the LWP was
    192   1.52        ad  *		runnable or on the CPU when halted, or has been removed from
    193   1.66        ad  *		the sleep queue since halted, then the lock is spc_lwplock.
    194   1.52        ad  *
    195   1.52        ad  *	The lock order is as follows:
    196   1.52        ad  *
    197  1.212        ad  *		sleepq -> turnstile -> spc_lwplock -> spc_mutex
    198   1.52        ad  *
    199  1.243     skrll  *	Each process has a scheduler state lock (proc::p_lock), and a
    200   1.52        ad  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    201   1.52        ad  *	so on.  When an LWP is to be entered into or removed from one of the
    202  1.103        ad  *	following states, p_lock must be held and the process wide counters
    203   1.52        ad  *	adjusted:
    204   1.52        ad  *
    205   1.52        ad  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    206   1.52        ad  *
    207  1.129        ad  *	(But not always for kernel threads.  There are some special cases
    208  1.212        ad  *	as mentioned above: soft interrupts, and the idle loops.)
    209  1.129        ad  *
    210   1.52        ad  *	Note that an LWP is considered running or likely to run soon if in
    211   1.52        ad  *	one of the following states.  This affects the value of p_nrlwps:
    212   1.52        ad  *
    213   1.52        ad  *		LSRUN, LSONPROC, LSSLEEP
    214   1.52        ad  *
    215  1.103        ad  *	p_lock does not need to be held when transitioning among these
    216  1.129        ad  *	three states, hence p_lock is rarely taken for state transitions.
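                           *
                           *	As a sketch of a transition that must adjust the counters
                           *	(hypothetical fragment; cf. lwp_start() below):
                           *
                           *		mutex_enter(p->p_lock);
                           *		lwp_lock(l);
                           *		l->l_stat = LSSTOP;	(was LSRUN/LSONPROC/LSSLEEP)
                           *		p->p_nrlwps--;
                           *		lwp_unlock(l);
                           *		mutex_exit(p->p_lock);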
    217   1.52        ad  */
    218   1.52        ad 
    219    1.9     lukem #include <sys/cdefs.h>
    220  1.249       mrg __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.249 2022/05/07 19:44:40 mrg Exp $");
    221    1.8    martin 
    222   1.84      yamt #include "opt_ddb.h"
    223   1.52        ad #include "opt_lockdebug.h"
    224  1.139    darran #include "opt_dtrace.h"
    225    1.2   thorpej 
    226   1.47   hannken #define _LWP_API_PRIVATE
    227   1.47   hannken 
    228    1.2   thorpej #include <sys/param.h>
    229    1.2   thorpej #include <sys/systm.h>
    230   1.64      yamt #include <sys/cpu.h>
    231    1.2   thorpej #include <sys/pool.h>
    232    1.2   thorpej #include <sys/proc.h>
    233    1.2   thorpej #include <sys/syscallargs.h>
    234   1.57       dsl #include <sys/syscall_stats.h>
    235   1.37        ad #include <sys/kauth.h>
    236   1.52        ad #include <sys/sleepq.h>
    237   1.52        ad #include <sys/lockdebug.h>
    238   1.52        ad #include <sys/kmem.h>
    239   1.91     rmind #include <sys/pset.h>
    240   1.75        ad #include <sys/intr.h>
    241   1.78        ad #include <sys/lwpctl.h>
    242   1.81        ad #include <sys/atomic.h>
    243  1.131        ad #include <sys/filedesc.h>
    244  1.196   hannken #include <sys/fstrans.h>
    245  1.138    darran #include <sys/dtrace_bsd.h>
    246  1.141    darran #include <sys/sdt.h>
    247  1.203     kamil #include <sys/ptrace.h>
    248  1.157     rmind #include <sys/xcall.h>
    249  1.169  christos #include <sys/uidinfo.h>
    250  1.169  christos #include <sys/sysctl.h>
    251  1.201     ozaki #include <sys/psref.h>
    252  1.208      maxv #include <sys/msan.h>
    253  1.232      maxv #include <sys/kcov.h>
    254  1.233   thorpej #include <sys/cprng.h>
    255  1.236   thorpej #include <sys/futex.h>
    256  1.138    darran 
    257    1.2   thorpej #include <uvm/uvm_extern.h>
    258   1.80     skrll #include <uvm/uvm_object.h>
    259    1.2   thorpej 
    260  1.152     rmind static pool_cache_t	lwp_cache	__read_mostly;
    261  1.152     rmind struct lwplist		alllwp		__cacheline_aligned;
    262   1.41   thorpej 
    263  1.238        ad static int		lwp_ctor(void *, void *, int);
    264  1.157     rmind static void		lwp_dtor(void *, void *);
    265  1.157     rmind 
    266  1.141    darran /* DTrace proc provider probes */
    267  1.180  christos SDT_PROVIDER_DEFINE(proc);
    268  1.180  christos 
    269  1.180  christos SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
    270  1.180  christos SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
    271  1.180  christos SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
    272  1.141    darran 
    273  1.213        ad struct turnstile turnstile0 __cacheline_aligned;
    274  1.147     pooka struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
    275  1.147     pooka #ifdef LWP0_CPU_INFO
    276  1.147     pooka 	.l_cpu = LWP0_CPU_INFO,
    277  1.147     pooka #endif
    278  1.154      matt #ifdef LWP0_MD_INITIALIZER
    279  1.154      matt 	.l_md = LWP0_MD_INITIALIZER,
    280  1.154      matt #endif
    281  1.147     pooka 	.l_proc = &proc0,
    282  1.235   thorpej 	.l_lid = 0,		/* we own proc0's slot in the pid table */
    283  1.147     pooka 	.l_flag = LW_SYSTEM,
    284  1.147     pooka 	.l_stat = LSONPROC,
    285  1.147     pooka 	.l_ts = &turnstile0,
    286  1.147     pooka 	.l_syncobj = &sched_syncobj,
    287  1.231        ad 	.l_refcnt = 0,
    288  1.147     pooka 	.l_priority = PRI_USER + NPRI_USER - 1,
    289  1.147     pooka 	.l_inheritedprio = -1,
    290  1.147     pooka 	.l_class = SCHED_OTHER,
    291  1.147     pooka 	.l_psid = PS_NONE,
    292  1.147     pooka 	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
    293  1.147     pooka 	.l_name = __UNCONST("swapper"),
    294  1.147     pooka 	.l_fd = &filedesc0,
    295  1.147     pooka };
    296  1.147     pooka 
    297  1.249       mrg static int
    298  1.249       mrg lwp_maxlwp(void)
    299  1.249       mrg {
    300  1.249       mrg 	/* Assume 1 LWP per 1MiB. */
    301  1.249       mrg 	uint64_t lwps_per = ctob(physmem) / (1024 * 1024);
    302  1.249       mrg 
    303  1.249       mrg 	return MAX(MIN(MAXMAXLWP, lwps_per), MAXLWP);
    304  1.249       mrg }
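
                          /*
                           * For example (illustrative numbers only): with 4 GiB of RAM the
                           * heuristic above yields lwps_per = 4096, which is then clamped to
                           * lie between the MAXLWP floor and the MAXMAXLWP ceiling.
                           */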
    305  1.249       mrg 
    306  1.169  christos static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
    307  1.169  christos 
    308  1.169  christos /*
    309  1.169  christos  * sysctl helper routine for kern.maxlwp. Ensures that the new
    310  1.169  christos  * values are not too low or too high.
    311  1.169  christos  */
    312  1.169  christos static int
    313  1.169  christos sysctl_kern_maxlwp(SYSCTLFN_ARGS)
    314  1.169  christos {
    315  1.169  christos 	int error, nmaxlwp;
    316  1.169  christos 	struct sysctlnode node;
    317  1.169  christos 
    318  1.169  christos 	nmaxlwp = maxlwp;
    319  1.169  christos 	node = *rnode;
    320  1.169  christos 	node.sysctl_data = &nmaxlwp;
    321  1.169  christos 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    322  1.169  christos 	if (error || newp == NULL)
    323  1.169  christos 		return error;
    324  1.169  christos 
    325  1.249       mrg 	if (nmaxlwp < 0 || nmaxlwp >= MAXMAXLWP)
    326  1.169  christos 		return EINVAL;
    327  1.249       mrg 	if (nmaxlwp > lwp_maxlwp())
    328  1.169  christos 		return EINVAL;
    329  1.169  christos 	maxlwp = nmaxlwp;
    330  1.169  christos 
    331  1.169  christos 	return 0;
    332  1.169  christos }
    333  1.169  christos 
    334  1.169  christos static void
    335  1.169  christos sysctl_kern_lwp_setup(void)
    336  1.169  christos {
    337  1.242      maxv 	sysctl_createv(NULL, 0, NULL, NULL,
    338  1.169  christos 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    339  1.169  christos 		       CTLTYPE_INT, "maxlwp",
    340  1.169  christos 		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
    341  1.169  christos 		       sysctl_kern_maxlwp, 0, NULL, 0,
    342  1.169  christos 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    343  1.169  christos }
    344  1.169  christos 
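                          /*
                           * Once created, the node is an ordinary read/write integer; e.g.
                           * from userland (a sketch):
                           *
                           *	$ sysctl kern.maxlwp
                           *	$ sysctl -w kern.maxlwp=4096
                           *
                           * Writes outside the bounds enforced by sysctl_kern_maxlwp() above
                           * fail with EINVAL.
                           */
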
    345   1.41   thorpej void
    346   1.41   thorpej lwpinit(void)
    347   1.41   thorpej {
    348   1.41   thorpej 
    349  1.152     rmind 	LIST_INIT(&alllwp);
    350  1.144     pooka 	lwpinit_specificdata();
    351  1.246   thorpej 	/*
    352  1.246   thorpej 	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
    353  1.246   thorpej 	 * calls will exit before memory of LWPs is returned to the pool, where
    354  1.246   thorpej 	 * KVA of LWP structure might be freed and re-used for other purposes.
    355  1.246   thorpej 	 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
    356  1.246   thorpej 	 * callers, therefore a regular passive serialization barrier will
    357  1.246   thorpej 	 * do the job.
    358  1.246   thorpej 	 */
    359  1.246   thorpej 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0,
    360  1.246   thorpej 	    PR_PSERIALIZE, "lwppl", NULL, IPL_NONE, lwp_ctor, lwp_dtor, NULL);
    361  1.169  christos 
    362  1.249       mrg 	maxlwp = lwp_maxlwp();
    363  1.169  christos 	sysctl_kern_lwp_setup();
    364   1.41   thorpej }
    365   1.41   thorpej 
    366  1.147     pooka void
    367  1.147     pooka lwp0_init(void)
    368  1.147     pooka {
    369  1.147     pooka 	struct lwp *l = &lwp0;
    370  1.147     pooka 
    371  1.147     pooka 	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
    372  1.147     pooka 
    373  1.147     pooka 	LIST_INSERT_HEAD(&alllwp, l, l_list);
    374  1.147     pooka 
    375  1.147     pooka 	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
    376  1.147     pooka 	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
    377  1.147     pooka 	cv_init(&l->l_sigcv, "sigwait");
    378  1.171     rmind 	cv_init(&l->l_waitcv, "vfork");
    379  1.147     pooka 
    380  1.147     pooka 	kauth_cred_hold(proc0.p_cred);
    381  1.147     pooka 	l->l_cred = proc0.p_cred;
    382  1.147     pooka 
    383  1.164      yamt 	kdtrace_thread_ctor(NULL, l);
    384  1.147     pooka 	lwp_initspecific(l);
    385  1.147     pooka 
    386  1.147     pooka 	SYSCALL_TIME_LWP_INIT(l);
    387  1.147     pooka }
    388  1.147     pooka 
    389  1.238        ad /*
    390  1.238        ad  * Initialize the non-zeroed portion of an lwp_t.
    391  1.238        ad  */
    392  1.238        ad static int
    393  1.238        ad lwp_ctor(void *arg, void *obj, int flags)
    394  1.238        ad {
    395  1.238        ad 	lwp_t *l = obj;
    396  1.238        ad 
    397  1.238        ad 	l->l_stat = LSIDL;
    398  1.238        ad 	l->l_cpu = curcpu();
    399  1.238        ad 	l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
    400  1.238        ad 	l->l_ts = pool_get(&turnstile_pool, flags);
    401  1.238        ad 
    402  1.238        ad 	if (l->l_ts == NULL) {
    403  1.238        ad 		return ENOMEM;
    404  1.238        ad 	} else {
    405  1.238        ad 		turnstile_ctor(l->l_ts);
    406  1.238        ad 		return 0;
    407  1.238        ad 	}
    408  1.238        ad }
    409  1.238        ad 
    410  1.157     rmind static void
    411  1.245   thorpej lwp_dtor(void *arg, void *obj)
    412  1.245   thorpej {
    413  1.245   thorpej 	lwp_t *l = obj;
    414  1.245   thorpej 
    415  1.245   thorpej 	/*
    416  1.245   thorpej 	 * The value of l->l_cpu must still be valid at this point.
    417  1.245   thorpej 	 */
    418  1.157     rmind 	KASSERT(l->l_cpu != NULL);
    419  1.238        ad 
    420  1.238        ad 	/*
    421  1.238        ad 	 * We can't return turnstile0 to the pool (it didn't come from it),
    422  1.238        ad 	 * so if it comes up just drop it quietly and move on.
    423  1.238        ad 	 */
    424  1.238        ad 	if (l->l_ts != &turnstile0)
    425  1.238        ad 		pool_put(&turnstile_pool, l->l_ts);
    426  1.157     rmind }
    427  1.157     rmind 
    428   1.52        ad /*
    429  1.238        ad  * Set an LWP suspended.
    430   1.52        ad  *
    431  1.103        ad  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    432   1.52        ad  * LWP before return.
    433   1.52        ad  */
    434    1.2   thorpej int
    435   1.52        ad lwp_suspend(struct lwp *curl, struct lwp *t)
    436    1.2   thorpej {
    437   1.52        ad 	int error;
    438    1.2   thorpej 
    439  1.103        ad 	KASSERT(mutex_owned(t->l_proc->p_lock));
    440   1.63        ad 	KASSERT(lwp_locked(t, NULL));
    441   1.33       chs 
    442   1.52        ad 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    443    1.2   thorpej 
    444   1.52        ad 	/*
    445   1.52        ad 	 * If the current LWP has been told to exit, we must not suspend anyone
    446   1.52        ad 	 * else or deadlock could occur.  We won't return to userspace.
    447    1.2   thorpej 	 */
    448  1.109     rmind 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
    449   1.52        ad 		lwp_unlock(t);
    450   1.52        ad 		return (EDEADLK);
    451    1.2   thorpej 	}
    452    1.2   thorpej 
    453  1.204     kamil 	if ((t->l_flag & LW_DBGSUSPEND) != 0) {
    454  1.204     kamil 		lwp_unlock(t);
    455  1.204     kamil 		return 0;
    456  1.204     kamil 	}
    457  1.204     kamil 
    458   1.52        ad 	error = 0;
    459    1.2   thorpej 
    460   1.52        ad 	switch (t->l_stat) {
    461   1.52        ad 	case LSRUN:
    462   1.52        ad 	case LSONPROC:
    463   1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    464   1.52        ad 		lwp_need_userret(t);
    465   1.52        ad 		lwp_unlock(t);
    466   1.52        ad 		break;
    467    1.2   thorpej 
    468   1.52        ad 	case LSSLEEP:
    469   1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    470    1.2   thorpej 
    471    1.2   thorpej 		/*
    472   1.52        ad 		 * Kick the LWP and try to get it to the kernel boundary
    473   1.52        ad 		 * so that it will release any locks that it holds.
    474   1.52        ad 		 * setrunnable() will release the lock.
    475    1.2   thorpej 		 */
    476   1.56     pavel 		if ((t->l_flag & LW_SINTR) != 0)
    477   1.52        ad 			setrunnable(t);
    478   1.52        ad 		else
    479   1.52        ad 			lwp_unlock(t);
    480   1.52        ad 		break;
    481    1.2   thorpej 
    482   1.52        ad 	case LSSUSPENDED:
    483   1.52        ad 		lwp_unlock(t);
    484   1.52        ad 		break;
    485   1.17      manu 
    486   1.52        ad 	case LSSTOP:
    487   1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    488   1.52        ad 		setrunnable(t);
    489   1.52        ad 		break;
    490    1.2   thorpej 
    491   1.52        ad 	case LSIDL:
    492   1.52        ad 	case LSZOMB:
    493   1.52        ad 		error = EINTR; /* It's what Solaris does..... */
    494   1.52        ad 		lwp_unlock(t);
    495   1.52        ad 		break;
    496    1.2   thorpej 	}
    497    1.2   thorpej 
    498   1.69     rmind 	return (error);
    499    1.2   thorpej }
    500    1.2   thorpej 
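                          /*
                           * Example caller pattern (a sketch modelled on sys__lwp_suspend();
                           * not verbatim):
                           *
                           *	mutex_enter(t->l_proc->p_lock);
                           *	lwp_lock(t);
                           *	error = lwp_suspend(curlwp, t);		(unlocks t)
                           *	mutex_exit(t->l_proc->p_lock);
                           */
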
    501   1.52        ad /*
    502   1.52        ad  * Restart a suspended LWP.
    503   1.52        ad  *
    504  1.103        ad  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    505   1.52        ad  * LWP before return.
    506   1.52        ad  */
    507    1.2   thorpej void
    508    1.2   thorpej lwp_continue(struct lwp *l)
    509    1.2   thorpej {
    510    1.2   thorpej 
    511  1.103        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
    512   1.63        ad 	KASSERT(lwp_locked(l, NULL));
    513   1.52        ad 
    514   1.52        ad 	/* If rebooting or not suspended, then just bail out. */
    515   1.56     pavel 	if ((l->l_flag & LW_WREBOOT) != 0) {
    516   1.52        ad 		lwp_unlock(l);
    517    1.2   thorpej 		return;
    518   1.10      fvdl 	}
    519    1.2   thorpej 
    520   1.56     pavel 	l->l_flag &= ~LW_WSUSPEND;
    521    1.2   thorpej 
    522  1.204     kamil 	if (l->l_stat != LSSUSPENDED || (l->l_flag & LW_DBGSUSPEND) != 0) {
    523   1.52        ad 		lwp_unlock(l);
    524   1.52        ad 		return;
    525    1.2   thorpej 	}
    526    1.2   thorpej 
    527   1.52        ad 	/* setrunnable() will release the lock. */
    528   1.52        ad 	setrunnable(l);
    529    1.2   thorpej }
    530    1.2   thorpej 
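                          /*
                           * Example caller pattern (a sketch modelled on sys__lwp_continue();
                           * not verbatim):
                           *
                           *	mutex_enter(p->p_lock);
                           *	lwp_lock(t);
                           *	lwp_continue(t);			(unlocks t)
                           *	mutex_exit(p->p_lock);
                           */
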
    531   1.52        ad /*
    532  1.142  christos  * Restart a stopped LWP.
    533  1.142  christos  *
    534  1.142  christos  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
    535  1.142  christos  * LWP before return.
    536  1.142  christos  */
    537  1.142  christos void
    538  1.142  christos lwp_unstop(struct lwp *l)
    539  1.142  christos {
    540  1.142  christos 	struct proc *p = l->l_proc;
    541  1.167     rmind 
    542  1.239        ad 	KASSERT(mutex_owned(&proc_lock));
    543  1.142  christos 	KASSERT(mutex_owned(p->p_lock));
    544  1.142  christos 
    545  1.142  christos 	lwp_lock(l);
    546  1.142  christos 
    547  1.204     kamil 	KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
    548  1.204     kamil 
    549  1.142  christos 	/* If not stopped, then just bail out. */
    550  1.142  christos 	if (l->l_stat != LSSTOP) {
    551  1.142  christos 		lwp_unlock(l);
    552  1.142  christos 		return;
    553  1.142  christos 	}
    554  1.142  christos 
    555  1.142  christos 	p->p_stat = SACTIVE;
    556  1.142  christos 	p->p_sflag &= ~PS_STOPPING;
    557  1.142  christos 
    558  1.142  christos 	if (!p->p_waited)
    559  1.142  christos 		p->p_pptr->p_nstopchild--;
    560  1.142  christos 
    561  1.142  christos 	if (l->l_wchan == NULL) {
    562  1.142  christos 		/* setrunnable() will release the lock. */
    563  1.142  christos 		setrunnable(l);
    564  1.183  christos 	} else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) {
    565  1.163  christos 		/* setrunnable() so we can receive the signal */
    566  1.163  christos 		setrunnable(l);
    567  1.142  christos 	} else {
    568  1.142  christos 		l->l_stat = LSSLEEP;
    569  1.142  christos 		p->p_nrlwps++;
    570  1.142  christos 		lwp_unlock(l);
    571  1.142  christos 	}
    572  1.142  christos }
    573  1.142  christos 
    574  1.142  christos /*
    575   1.52        ad  * Wait for an LWP within the current process to exit.  If 'lid' is
    576   1.52        ad  * non-zero, we are waiting for a specific LWP.
    577   1.52        ad  *
    578  1.103        ad  * Must be called with p->p_lock held.
    579   1.52        ad  */
    580    1.2   thorpej int
    581  1.173     rmind lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
    582    1.2   thorpej {
    583  1.173     rmind 	const lwpid_t curlid = l->l_lid;
    584  1.173     rmind 	proc_t *p = l->l_proc;
    585  1.223        ad 	lwp_t *l2, *next;
    586  1.173     rmind 	int error;
    587    1.2   thorpej 
    588  1.103        ad 	KASSERT(mutex_owned(p->p_lock));
    589   1.52        ad 
    590   1.52        ad 	p->p_nlwpwait++;
    591   1.63        ad 	l->l_waitingfor = lid;
    592   1.52        ad 
    593   1.52        ad 	for (;;) {
    594  1.173     rmind 		int nfound;
    595  1.173     rmind 
    596   1.52        ad 		/*
    597   1.52        ad 		 * Avoid a race between exit1() and sigexit(): if the
    598   1.52        ad 		 * process is dumping core, then we need to bail out: call
    599   1.52        ad 		 * into lwp_userret() where we will be suspended until the
    600   1.52        ad 		 * deed is done.
    601   1.52        ad 		 */
    602   1.52        ad 		if ((p->p_sflag & PS_WCORE) != 0) {
    603  1.103        ad 			mutex_exit(p->p_lock);
    604   1.52        ad 			lwp_userret(l);
    605  1.173     rmind 			KASSERT(false);
    606   1.52        ad 		}
    607   1.52        ad 
    608   1.52        ad 		/*
    609   1.52        ad 		 * First off, drain any detached LWP that is waiting to be
    610   1.52        ad 		 * reaped.
    611   1.52        ad 		 */
    612   1.52        ad 		while ((l2 = p->p_zomblwp) != NULL) {
    613   1.52        ad 			p->p_zomblwp = NULL;
    614   1.63        ad 			lwp_free(l2, false, false);/* releases proc mutex */
    615  1.103        ad 			mutex_enter(p->p_lock);
    616   1.52        ad 		}
    617   1.52        ad 
    618   1.52        ad 		/*
    619   1.52        ad 		 * Now look for an LWP to collect.  If the whole process is
    620   1.52        ad 		 * exiting, count detached LWPs as eligible to be collected,
    621   1.52        ad 		 * but don't drain them here.
    622   1.52        ad 		 */
    623   1.52        ad 		nfound = 0;
    624   1.63        ad 		error = 0;
    625  1.223        ad 
    626  1.223        ad 		/*
    627  1.238        ad 		 * If given a specific LID, go via pid_table and make sure
    628  1.223        ad 		 * it's not detached.
    629  1.223        ad 		 */
    630  1.223        ad 		if (lid != 0) {
    631  1.235   thorpej 			l2 = proc_find_lwp(p, lid);
    632  1.223        ad 			if (l2 == NULL) {
    633  1.223        ad 				error = ESRCH;
    634  1.223        ad 				break;
    635  1.223        ad 			}
    636  1.223        ad 			KASSERT(l2->l_lid == lid);
    637  1.223        ad 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    638  1.223        ad 				error = EINVAL;
    639  1.223        ad 				break;
    640  1.223        ad 			}
    641  1.223        ad 		} else {
    642  1.223        ad 			l2 = LIST_FIRST(&p->p_lwps);
    643  1.223        ad 		}
    644  1.223        ad 		for (; l2 != NULL; l2 = next) {
    645  1.223        ad 			next = (lid != 0 ? NULL : LIST_NEXT(l2, l_sibling));
    646  1.223        ad 
    647   1.63        ad 			/*
    648   1.63        ad 			 * If a specific wait and the target is waiting on
    649   1.63        ad 			 * us, then avoid deadlock.  This also traps LWPs
    650   1.63        ad 			 * that try to wait on themselves.
    651   1.63        ad 			 *
    652   1.63        ad 			 * Note that this does not handle more complicated
    653   1.63        ad 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
    654   1.63        ad 			 * can still be killed so it is not a major problem.
    655   1.63        ad 			 */
    656   1.63        ad 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
    657   1.63        ad 				error = EDEADLK;
    658   1.63        ad 				break;
    659   1.63        ad 			}
    660   1.63        ad 			if (l2 == l)
    661   1.52        ad 				continue;
    662   1.52        ad 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    663   1.63        ad 				nfound += exiting;
    664   1.63        ad 				continue;
    665   1.63        ad 			}
    666   1.63        ad 			if (lid != 0) {
    667   1.63        ad 				/*
    668   1.63        ad 				 * Mark this LWP as the first waiter, if there
    669   1.63        ad 				 * is no other.
    670   1.63        ad 				 */
    671   1.63        ad 				if (l2->l_waiter == 0)
    672   1.63        ad 					l2->l_waiter = curlid;
    673   1.63        ad 			} else if (l2->l_waiter != 0) {
    674   1.63        ad 				/*
    675   1.63        ad 				 * It already has a waiter - so don't
    676   1.63        ad 				 * collect it.  If the waiter doesn't
    677   1.63        ad 				 * grab it we'll get another chance
    678   1.63        ad 				 * later.
    679   1.63        ad 				 */
    680   1.63        ad 				nfound++;
    681   1.52        ad 				continue;
    682   1.52        ad 			}
    683   1.52        ad 			nfound++;
    684    1.2   thorpej 
    685   1.52        ad 			/* No need to lock the LWP in order to see LSZOMB. */
    686   1.52        ad 			if (l2->l_stat != LSZOMB)
    687   1.52        ad 				continue;
    688    1.2   thorpej 
    689   1.63        ad 			/*
    690   1.63        ad 			 * We're no longer waiting.  Reset the "first waiter"
    691   1.63        ad 			 * pointer on the target, in case it was us.
    692   1.63        ad 			 */
    693   1.63        ad 			l->l_waitingfor = 0;
    694   1.63        ad 			l2->l_waiter = 0;
    695   1.63        ad 			p->p_nlwpwait--;
    696    1.2   thorpej 			if (departed)
    697    1.2   thorpej 				*departed = l2->l_lid;
    698   1.75        ad 			sched_lwp_collect(l2);
    699   1.63        ad 
    700   1.63        ad 			/* lwp_free() releases the proc lock. */
    701   1.63        ad 			lwp_free(l2, false, false);
    702  1.103        ad 			mutex_enter(p->p_lock);
    703   1.52        ad 			return 0;
    704   1.52        ad 		}
    705    1.2   thorpej 
    706   1.63        ad 		if (error != 0)
    707   1.63        ad 			break;
    708   1.52        ad 		if (nfound == 0) {
    709   1.52        ad 			error = ESRCH;
    710   1.52        ad 			break;
    711   1.52        ad 		}
    712   1.63        ad 
    713   1.63        ad 		/*
     714  1.173     rmind 		 * Note: since the lock will be dropped, we must restart on
     715  1.173     rmind 		 * wakeup and re-examine all LWPs, as new LWPs may appear.
    716   1.63        ad 		 */
    717   1.63        ad 		if (exiting) {
    718   1.52        ad 			KASSERT(p->p_nlwps > 1);
    719  1.222        ad 			error = cv_timedwait(&p->p_lwpcv, p->p_lock, 1);
    720  1.173     rmind 			break;
    721   1.52        ad 		}
    722   1.63        ad 
    723   1.63        ad 		/*
    724  1.234        ad 		 * Break out if all LWPs are in _lwp_wait().  There are
    725  1.234        ad 		 * other ways to hang the process with _lwp_wait(), but the
     726  1.234        ad 		 * sleep is interruptible so there is little point checking for them.
    727   1.63        ad 		 */
    728  1.234        ad 		if (p->p_nlwpwait == p->p_nlwps) {
    729   1.52        ad 			error = EDEADLK;
    730   1.52        ad 			break;
    731    1.2   thorpej 		}
    732   1.63        ad 
    733   1.63        ad 		/*
    734   1.63        ad 		 * Sit around and wait for something to happen.  We'll be
    735   1.63        ad 		 * awoken if any of the conditions examined change: if an
    736   1.63        ad 		 * LWP exits, is collected, or is detached.
    737   1.63        ad 		 */
    738  1.103        ad 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
    739   1.52        ad 			break;
    740    1.2   thorpej 	}
    741    1.2   thorpej 
    742   1.63        ad 	/*
    743   1.63        ad 	 * We didn't find any LWPs to collect, we may have received a
    744   1.63        ad 	 * signal, or some other condition has caused us to bail out.
    745   1.63        ad 	 *
    746   1.63        ad 	 * If waiting on a specific LWP, clear the waiters marker: some
    747   1.63        ad 	 * other LWP may want it.  Then, kick all the remaining waiters
    748   1.63        ad 	 * so that they can re-check for zombies and for deadlock.
    749   1.63        ad 	 */
    750   1.63        ad 	if (lid != 0) {
    751  1.235   thorpej 		l2 = proc_find_lwp(p, lid);
    752  1.223        ad 		KASSERT(l2 == NULL || l2->l_lid == lid);
    753  1.223        ad 
    754  1.223        ad 		if (l2 != NULL && l2->l_waiter == curlid)
    755  1.223        ad 			l2->l_waiter = 0;
    756   1.63        ad 	}
    757   1.52        ad 	p->p_nlwpwait--;
    758   1.63        ad 	l->l_waitingfor = 0;
    759   1.63        ad 	cv_broadcast(&p->p_lwpcv);
    760   1.63        ad 
    761   1.52        ad 	return error;
    762    1.2   thorpej }
    763    1.2   thorpej 
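                          /*
                           * Example caller pattern (a sketch modelled on sys__lwp_wait();
                           * not verbatim):
                           *
                           *	mutex_enter(p->p_lock);
                           *	error = lwp_wait(l, lid, &departed, false);
                           *	mutex_exit(p->p_lock);
                           */
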
    764  1.223        ad /*
    765   1.52        ad  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    766   1.52        ad  * The new LWP is created in state LSIDL and must be set running,
    767   1.52        ad  * suspended, or stopped by the caller.
    768   1.52        ad  */
    769    1.2   thorpej int
    770  1.134     rmind lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
    771  1.188  christos     void *stack, size_t stacksize, void (*func)(void *), void *arg,
    772  1.188  christos     lwp_t **rnewlwpp, int sclass, const sigset_t *sigmask,
    773  1.188  christos     const stack_t *sigstk)
    774    1.2   thorpej {
    775  1.215        ad 	struct lwp *l2;
    776    1.2   thorpej 
    777  1.107        ad 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
    778  1.107        ad 
    779   1.52        ad 	/*
    780  1.215        ad 	 * Enforce limits, excluding the first lwp and kthreads.  We must
    781  1.215        ad 	 * use the process credentials here when adjusting the limit, as
    782  1.215        ad 	 * they are what's tied to the accounting entity.  However for
    783  1.215        ad 	 * authorizing the action, we'll use the LWP's credentials.
    784  1.169  christos 	 */
    785  1.215        ad 	mutex_enter(p2->p_lock);
    786  1.169  christos 	if (p2->p_nlwps != 0 && p2 != &proc0) {
    787  1.215        ad 		uid_t uid = kauth_cred_getuid(p2->p_cred);
    788  1.169  christos 		int count = chglwpcnt(uid, 1);
    789  1.169  christos 		if (__predict_false(count >
    790  1.169  christos 		    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
    791  1.169  christos 			if (kauth_authorize_process(l1->l_cred,
    792  1.169  christos 			    KAUTH_PROCESS_RLIMIT, p2,
    793  1.169  christos 			    KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
    794  1.169  christos 			    &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
    795  1.169  christos 			    != 0) {
    796  1.170  christos 				(void)chglwpcnt(uid, -1);
    797  1.215        ad 				mutex_exit(p2->p_lock);
    798  1.170  christos 				return EAGAIN;
    799  1.169  christos 			}
    800  1.169  christos 		}
    801  1.169  christos 	}
    802  1.169  christos 
    803  1.169  christos 	/*
    804   1.52        ad 	 * First off, reap any detached LWP waiting to be collected.
    805   1.52        ad 	 * We can re-use its LWP structure and turnstile.
    806   1.52        ad 	 */
    807  1.215        ad 	if ((l2 = p2->p_zomblwp) != NULL) {
    808  1.215        ad 		p2->p_zomblwp = NULL;
    809  1.215        ad 		lwp_free(l2, true, false);
    810  1.215        ad 		/* p2 now unlocked by lwp_free() */
    811  1.238        ad 		KASSERT(l2->l_ts != NULL);
    812   1.75        ad 		KASSERT(l2->l_inheritedprio == -1);
    813   1.60      yamt 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
    814  1.238        ad 		memset(&l2->l_startzero, 0, sizeof(*l2) -
    815  1.238        ad 		    offsetof(lwp_t, l_startzero));
    816  1.215        ad 	} else {
    817  1.215        ad 		mutex_exit(p2->p_lock);
    818  1.215        ad 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
    819  1.238        ad 		memset(&l2->l_startzero, 0, sizeof(*l2) -
    820  1.238        ad 		    offsetof(lwp_t, l_startzero));
    821  1.215        ad 		SLIST_INIT(&l2->l_pi_lenders);
    822   1.52        ad 	}
    823    1.2   thorpej 
    824  1.238        ad 	/*
    825  1.238        ad 	 * Because of lockless lookup via pid_table, the LWP can be locked
    826  1.238        ad 	 * and inspected briefly even after it's freed, so a few fields are
    827  1.238        ad 	 * kept stable.
    828  1.238        ad 	 */
    829  1.238        ad 	KASSERT(l2->l_stat == LSIDL);
    830  1.238        ad 	KASSERT(l2->l_cpu != NULL);
    831  1.238        ad 	KASSERT(l2->l_ts != NULL);
    832  1.238        ad 	KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
    833  1.238        ad 
    834    1.2   thorpej 	l2->l_proc = p2;
    835  1.231        ad 	l2->l_refcnt = 0;
    836   1.75        ad 	l2->l_class = sclass;
    837  1.116        ad 
    838  1.116        ad 	/*
    839  1.235   thorpej 	 * Allocate a process ID for this LWP.  We need to do this now
     840  1.235   thorpej 	 * while we can still unwind if it fails.  Because we're marked
    841  1.238        ad 	 * as LSIDL, no lookups by the ID will succeed.
    842  1.235   thorpej 	 *
    843  1.235   thorpej 	 * N.B. this will always succeed for the first LWP in a process,
    844  1.235   thorpej 	 * because proc_alloc_lwpid() will usurp the slot.  Also note
    845  1.235   thorpej 	 * that l2->l_proc MUST be valid so that lookups of the proc
    846  1.235   thorpej 	 * will succeed, even if the LWP itself is not visible.
    847  1.235   thorpej 	 */
    848  1.235   thorpej 	if (__predict_false(proc_alloc_lwpid(p2, l2) == -1)) {
    849  1.235   thorpej 		pool_cache_put(lwp_cache, l2);
    850  1.235   thorpej 		return EAGAIN;
    851  1.235   thorpej 	}
    852  1.235   thorpej 
    853  1.235   thorpej 	/*
    854  1.116        ad 	 * If vfork(), we want the LWP to run fast and on the same CPU
    855  1.116        ad 	 * as its parent, so that it can reuse the VM context and cache
    856  1.116        ad 	 * footprint on the local CPU.
    857  1.116        ad 	 */
    858  1.116        ad 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
    859   1.82        ad 	l2->l_kpribase = PRI_KERNEL;
    860   1.52        ad 	l2->l_priority = l1->l_priority;
    861   1.75        ad 	l2->l_inheritedprio = -1;
    862  1.185  christos 	l2->l_protectprio = -1;
    863  1.185  christos 	l2->l_auxprio = -1;
    864  1.222        ad 	l2->l_flag = 0;
    865   1.88        ad 	l2->l_pflag = LP_MPSAFE;
    866  1.131        ad 	TAILQ_INIT(&l2->l_ld_locks);
    867  1.197     ozaki 	l2->l_psrefs = 0;
    868  1.208      maxv 	kmsan_lwp_alloc(l2);
    869  1.131        ad 
    870  1.131        ad 	/*
    871  1.156     pooka 	 * For vfork, borrow parent's lwpctl context if it exists.
    872  1.156     pooka 	 * This also causes us to return via lwp_userret.
    873  1.156     pooka 	 */
    874  1.156     pooka 	if (flags & LWP_VFORK && l1->l_lwpctl) {
    875  1.156     pooka 		l2->l_lwpctl = l1->l_lwpctl;
    876  1.156     pooka 		l2->l_flag |= LW_LWPCTL;
    877  1.156     pooka 	}
    878  1.156     pooka 
    879  1.156     pooka 	/*
    880  1.131        ad 	 * If not the first LWP in the process, grab a reference to the
    881  1.131        ad 	 * descriptor table.
    882  1.131        ad 	 */
    883   1.97        ad 	l2->l_fd = p2->p_fd;
    884  1.131        ad 	if (p2->p_nlwps != 0) {
    885  1.131        ad 		KASSERT(l1->l_proc == p2);
    886  1.136     rmind 		fd_hold(l2);
    887  1.131        ad 	} else {
    888  1.131        ad 		KASSERT(l1->l_proc != p2);
    889  1.131        ad 	}
    890   1.41   thorpej 
    891   1.56     pavel 	if (p2->p_flag & PK_SYSTEM) {
    892  1.134     rmind 		/* Mark it as a system LWP. */
    893   1.56     pavel 		l2->l_flag |= LW_SYSTEM;
    894   1.52        ad 	}
    895    1.2   thorpej 
    896  1.138    darran 	kdtrace_thread_ctor(NULL, l2);
    897   1.73     rmind 	lwp_initspecific(l2);
    898   1.75        ad 	sched_lwp_fork(l1, l2);
    899   1.37        ad 	lwp_update_creds(l2);
    900   1.70        ad 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
    901   1.70        ad 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
    902   1.52        ad 	cv_init(&l2->l_sigcv, "sigwait");
    903  1.171     rmind 	cv_init(&l2->l_waitcv, "vfork");
    904   1.52        ad 	l2->l_syncobj = &sched_syncobj;
    905  1.201     ozaki 	PSREF_DEBUG_INIT_LWP(l2);
    906    1.2   thorpej 
    907    1.2   thorpej 	if (rnewlwpp != NULL)
    908    1.2   thorpej 		*rnewlwpp = l2;
    909    1.2   thorpej 
    910  1.158      matt 	/*
    911  1.158      matt 	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
    912  1.158      matt 	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
    913  1.158      matt 	 */
    914  1.158      matt 	pcu_save_all(l1);
    915  1.225    dogcow #if PCU_UNIT_COUNT > 0
    916  1.224  riastrad 	l2->l_pcu_valid = l1->l_pcu_valid;
    917  1.225    dogcow #endif
    918  1.158      matt 
    919  1.137     rmind 	uvm_lwp_setuarea(l2, uaddr);
    920  1.190     skrll 	uvm_lwp_fork(l1, l2, stack, stacksize, func, (arg != NULL) ? arg : l2);
    921    1.2   thorpej 
    922  1.235   thorpej 	mutex_enter(p2->p_lock);
    923   1.52        ad 	if ((flags & LWP_DETACHED) != 0) {
    924   1.52        ad 		l2->l_prflag = LPR_DETACHED;
    925   1.52        ad 		p2->p_ndlwps++;
    926   1.52        ad 	} else
    927   1.52        ad 		l2->l_prflag = 0;
    928   1.52        ad 
    929  1.223        ad 	if (l1->l_proc == p2) {
    930  1.223        ad 		/*
    931  1.223        ad 		 * These flags are set while p_lock is held.  Copy with
    932  1.223        ad 		 * p_lock held too, so the LWP doesn't sneak into the
    933  1.223        ad 		 * process without them being set.
    934  1.223        ad 		 */
    935  1.222        ad 		l2->l_flag |= (l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE));
    936  1.223        ad 	} else {
    937  1.223        ad 		/* fork(): pending core/exit doesn't apply to child. */
    938  1.222        ad 		l2->l_flag |= (l1->l_flag & LW_WREBOOT);
    939  1.223        ad 	}
    940  1.222        ad 
    941  1.188  christos 	l2->l_sigstk = *sigstk;
    942  1.188  christos 	l2->l_sigmask = *sigmask;
    943  1.176  christos 	TAILQ_INIT(&l2->l_sigpend.sp_info);
    944   1.52        ad 	sigemptyset(&l2->l_sigpend.sp_set);
    945  1.174       dsl 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    946    1.2   thorpej 	p2->p_nlwps++;
    947  1.149      yamt 	p2->p_nrlwps++;
    948    1.2   thorpej 
    949  1.162     rmind 	KASSERT(l2->l_affinity == NULL);
    950  1.162     rmind 
    951  1.210        ad 	/* Inherit the affinity mask. */
    952  1.210        ad 	if (l1->l_affinity) {
    953  1.210        ad 		/*
    954  1.210        ad 		 * Note that we hold the state lock while inheriting
    955  1.210        ad 		 * the affinity to avoid race with sched_setaffinity().
    956  1.210        ad 		 */
    957  1.210        ad 		lwp_lock(l1);
    958  1.162     rmind 		if (l1->l_affinity) {
    959  1.210        ad 			kcpuset_use(l1->l_affinity);
    960  1.210        ad 			l2->l_affinity = l1->l_affinity;
    961  1.117  christos 		}
    962  1.210        ad 		lwp_unlock(l1);
    963   1.91     rmind 	}
    964  1.223        ad 
    965  1.223        ad 	/* This marks the end of the "must be atomic" section. */
    966  1.128     rmind 	mutex_exit(p2->p_lock);
    967  1.128     rmind 
    968  1.180  christos 	SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
    969  1.141    darran 
    970  1.239        ad 	mutex_enter(&proc_lock);
    971  1.128     rmind 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    972  1.210        ad 	/* Inherit a processor-set */
    973  1.210        ad 	l2->l_psid = l1->l_psid;
    974  1.239        ad 	mutex_exit(&proc_lock);
    975   1.91     rmind 
    976   1.57       dsl 	SYSCALL_TIME_LWP_INIT(l2);
    977   1.57       dsl 
    978   1.16      manu 	if (p2->p_emul->e_lwp_fork)
    979   1.16      manu 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    980   1.16      manu 
    981    1.2   thorpej 	return (0);
    982    1.2   thorpej }
    983    1.2   thorpej 
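                          /*
                           * A hedged sketch of creating and starting a detached kernel LWP,
                           * modelled loosely on kthread_create() (scheduling class, signal
                           * state and error handling vary by caller):
                           *
                           *	vaddr_t uaddr = uvm_uarea_system_alloc(NULL);
                           *	if (uaddr == 0)
                           *		return ENOMEM;
                           *	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED,
                           *	    NULL, 0, func, arg, &l, SCHED_OTHER,
                           *	    &lwp0.l_sigmask, &lwp0.l_sigstk);
                           *	if (error == 0)
                           *		lwp_start(l, 0);
                           */
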
    984    1.2   thorpej /*
    985  1.212        ad  * Set a new LWP running.  If the process is stopping, then the LWP is
    986  1.212        ad  * created stopped.
    987  1.212        ad  */
    988  1.212        ad void
    989  1.212        ad lwp_start(lwp_t *l, int flags)
    990  1.212        ad {
    991  1.212        ad 	proc_t *p = l->l_proc;
    992  1.212        ad 
    993  1.212        ad 	mutex_enter(p->p_lock);
    994  1.212        ad 	lwp_lock(l);
    995  1.212        ad 	KASSERT(l->l_stat == LSIDL);
    996  1.212        ad 	if ((flags & LWP_SUSPENDED) != 0) {
    997  1.212        ad 		/* It'll suspend itself in lwp_userret(). */
    998  1.212        ad 		l->l_flag |= LW_WSUSPEND;
    999  1.212        ad 	}
   1000  1.212        ad 	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
   1001  1.212        ad 		KASSERT(l->l_wchan == NULL);
    1002  1.212        ad 		l->l_stat = LSSTOP;
   1003  1.212        ad 		p->p_nrlwps--;
   1004  1.212        ad 		lwp_unlock(l);
   1005  1.212        ad 	} else {
   1006  1.212        ad 		setrunnable(l);
   1007  1.212        ad 		/* LWP now unlocked */
   1008  1.212        ad 	}
   1009  1.212        ad 	mutex_exit(p->p_lock);
   1010  1.212        ad }
   1011  1.212        ad 
   1012  1.212        ad /*
   1013   1.64      yamt  * Called by MD code when a new LWP begins execution.  Must be called
   1014   1.64      yamt  * with the previous LWP locked (so at splsched), or if there is no
   1015   1.64      yamt  * previous LWP, at splsched.
   1016   1.64      yamt  */
   1017   1.64      yamt void
   1018  1.178      matt lwp_startup(struct lwp *prev, struct lwp *new_lwp)
   1019   1.64      yamt {
   1020  1.227        ad 	kmutex_t *lock;
   1021  1.218        ad 
   1022  1.178      matt 	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
   1023  1.218        ad 	KASSERT(kpreempt_disabled());
   1024  1.218        ad 	KASSERT(prev != NULL);
   1025  1.227        ad 	KASSERT((prev->l_pflag & LP_RUNNING) != 0);
   1026  1.218        ad 	KASSERT(curcpu()->ci_mtx_count == -2);
   1027  1.218        ad 
   1028  1.227        ad 	/*
   1029  1.247  riastrad 	 * Immediately mark the previous LWP as no longer running and
   1030  1.247  riastrad 	 * unlock (to keep lock wait times short as possible).  If a
   1031  1.247  riastrad 	 * zombie, don't touch after clearing LP_RUNNING as it could be
   1032  1.247  riastrad 	 * reaped by another CPU.  Use atomic_store_release to ensure
   1033  1.247  riastrad 	 * this -- matches atomic_load_acquire in lwp_free.
   1034  1.227        ad 	 */
   1035  1.227        ad 	lock = prev->l_mutex;
   1036  1.227        ad 	if (__predict_false(prev->l_stat == LSZOMB)) {
   1037  1.247  riastrad 		atomic_store_release(&prev->l_pflag,
   1038  1.247  riastrad 		    prev->l_pflag & ~LP_RUNNING);
   1039  1.247  riastrad 	} else {
   1040  1.247  riastrad 		prev->l_pflag &= ~LP_RUNNING;
   1041  1.227        ad 	}
   1042  1.227        ad 	mutex_spin_exit(lock);
   1043   1.64      yamt 
   1044  1.218        ad 	/* Correct spin mutex count after mi_switch(). */
   1045  1.218        ad 	curcpu()->ci_mtx_count = 0;
   1046  1.141    darran 
   1047  1.218        ad 	/* Install new VM context. */
   1048  1.218        ad 	if (__predict_true(new_lwp->l_proc->p_vmspace)) {
   1049  1.218        ad 		pmap_activate(new_lwp);
   1050   1.64      yamt 	}
   1051  1.218        ad 
   1052  1.218        ad 	/* We remain at IPL_SCHED from mi_switch() - reset it. */
   1053  1.181     skrll 	spl0();
   1054  1.161  christos 
   1055   1.64      yamt 	LOCKDEBUG_BARRIER(NULL, 0);
   1056  1.218        ad 	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
   1057  1.218        ad 
   1058  1.218        ad 	/* For kthreads, acquire kernel lock if not MPSAFE. */
   1059  1.218        ad 	if (__predict_false((new_lwp->l_pflag & LP_MPSAFE) == 0)) {
   1060  1.178      matt 		KERNEL_LOCK(1, new_lwp);
   1061   1.65        ad 	}
   1062   1.64      yamt }
   1063   1.64      yamt 
   1064   1.64      yamt /*
   1065   1.65        ad  * Exit an LWP.
   1066  1.241        ad  *
   1067  1.241        ad  * *** WARNING *** This can be called with (l != curlwp) in error paths.
   1068    1.2   thorpej  */
   1069    1.2   thorpej void
   1070    1.2   thorpej lwp_exit(struct lwp *l)
   1071    1.2   thorpej {
   1072    1.2   thorpej 	struct proc *p = l->l_proc;
   1073   1.52        ad 	struct lwp *l2;
   1074   1.65        ad 	bool current;
   1075   1.65        ad 
   1076   1.65        ad 	current = (l == curlwp);
   1077    1.2   thorpej 
   1078  1.114     rmind 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
   1079  1.131        ad 	KASSERT(p == curproc);
   1080    1.2   thorpej 
   1081  1.180  christos 	SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
   1082  1.141    darran 
   1083  1.220        ad 	/* Verify that we hold no locks; for DIAGNOSTIC check kernel_lock. */
   1084  1.218        ad 	LOCKDEBUG_BARRIER(NULL, 0);
   1085  1.220        ad 	KASSERTMSG(curcpu()->ci_biglock_count == 0, "kernel_lock leaked");
   1086   1.16      manu 
   1087    1.2   thorpej 	/*
   1088   1.52        ad 	 * If we are the last live LWP in a process, we need to exit the
   1089   1.52        ad 	 * entire process.  We do so with an exit status of zero, because
   1090   1.52        ad 	 * it's a "controlled" exit, and because that's what Solaris does.
   1091   1.52        ad 	 *
   1092   1.52        ad 	 * We are not quite a zombie yet, but for accounting purposes we
   1093   1.52        ad 	 * must increment the count of zombies here.
   1094   1.45   thorpej 	 *
   1095   1.45   thorpej 	 * Note: the last LWP's specificdata will be deleted here.
   1096    1.2   thorpej 	 */
   1097  1.103        ad 	mutex_enter(p->p_lock);
   1098   1.52        ad 	if (p->p_nlwps - p->p_nzlwps == 1) {
   1099   1.65        ad 		KASSERT(current == true);
   1100  1.172      matt 		KASSERT(p != &proc0);
   1101  1.184  christos 		exit1(l, 0, 0);
   1102   1.19  jdolecek 		/* NOTREACHED */
   1103    1.2   thorpej 	}
   1104   1.52        ad 	p->p_nzlwps++;
   1105  1.233   thorpej 
   1106  1.233   thorpej 	/*
   1107  1.233   thorpej 	 * Perform any required thread cleanup.  Do this early so
   1108  1.235   thorpej 	 * anyone wanting to look us up with lwp_getref_lwpid() will
   1109  1.235   thorpej 	 * fail to find us before we become a zombie.
   1110  1.233   thorpej 	 *
   1111  1.233   thorpej 	 * N.B. this will unlock p->p_lock on our behalf.
   1112  1.233   thorpej 	 */
   1113  1.233   thorpej 	lwp_thread_cleanup(l);
   1114   1.52        ad 
   1115   1.52        ad 	if (p->p_emul->e_lwp_exit)
   1116   1.52        ad 		(*p->p_emul->e_lwp_exit)(l);
   1117    1.2   thorpej 
   1118  1.131        ad 	/* Drop filedesc reference. */
   1119  1.131        ad 	fd_free();
   1120  1.131        ad 
   1121  1.196   hannken 	/* Release fstrans private data. */
   1122  1.196   hannken 	fstrans_lwp_dtor(l);
   1123  1.196   hannken 
   1124   1.45   thorpej 	/* Delete the specificdata while it's still safe to sleep. */
   1125  1.145     pooka 	lwp_finispecific(l);
   1126   1.45   thorpej 
   1127   1.52        ad 	/*
   1128   1.52        ad 	 * Release our cached credentials.
   1129   1.52        ad 	 */
   1130   1.37        ad 	kauth_cred_free(l->l_cred);
   1131   1.70        ad 	callout_destroy(&l->l_timeout_ch);
   1132   1.65        ad 
   1133   1.65        ad 	/*
   1134  1.198     kamil 	 * If traced, report LWP exit event to the debugger.
   1135  1.198     kamil 	 *
   1136   1.52        ad 	 * Remove the LWP from the global list.
   1137  1.151       chs 	 * Free its LID from the PID namespace if needed.
   1138   1.52        ad 	 */
   1139  1.239        ad 	mutex_enter(&proc_lock);
   1140  1.198     kamil 
   1141  1.199     kamil 	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_EXIT)) ==
   1142  1.198     kamil 	    (PSL_TRACED|PSL_TRACELWP_EXIT)) {
   1143  1.198     kamil 		mutex_enter(p->p_lock);
   1144  1.202     kamil 		if (ISSET(p->p_sflag, PS_WEXIT)) {
   1145  1.202     kamil 			mutex_exit(p->p_lock);
   1146  1.202     kamil 			/*
    1147  1.202     kamil 			 * We are exiting; bail out without informing the parent
    1148  1.202     kamil 			 * about the terminating LWP, as doing so would deadlock.
   1149  1.202     kamil 			 */
   1150  1.202     kamil 		} else {
   1151  1.203     kamil 			eventswitch(TRAP_LWP, PTRACE_LWP_EXIT, l->l_lid);
   1152  1.239        ad 			mutex_enter(&proc_lock);
   1153  1.202     kamil 		}
   1154  1.198     kamil 	}
   1155  1.198     kamil 
   1156   1.52        ad 	LIST_REMOVE(l, l_list);
   1157  1.239        ad 	mutex_exit(&proc_lock);
   1158   1.19  jdolecek 
   1159   1.52        ad 	/*
   1160   1.52        ad 	 * Get rid of all references to the LWP that others (e.g. procfs)
   1161   1.52        ad 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
   1162   1.52        ad 	 * mark it waiting for collection in the proc structure.  Note that
    1163   1.52        ad 	 * before we can do that, we need to free any other dead, detached
   1164   1.52        ad 	 * LWP waiting to meet its maker.
   1165  1.231        ad 	 *
    1166  1.231        ad 	 * All conditions need to be observed under the same hold of
   1167  1.231        ad 	 * p_lock, because if the lock is dropped any of them can change.
   1168   1.52        ad 	 */
   1169  1.103        ad 	mutex_enter(p->p_lock);
   1170  1.231        ad 	for (;;) {
   1171  1.233   thorpej 		if (lwp_drainrefs(l))
   1172  1.231        ad 			continue;
   1173  1.231        ad 		if ((l->l_prflag & LPR_DETACHED) != 0) {
   1174  1.231        ad 			if ((l2 = p->p_zomblwp) != NULL) {
   1175  1.231        ad 				p->p_zomblwp = NULL;
   1176  1.231        ad 				lwp_free(l2, false, false);
   1177  1.231        ad 				/* proc now unlocked */
   1178  1.231        ad 				mutex_enter(p->p_lock);
   1179  1.231        ad 				continue;
   1180  1.231        ad 			}
   1181  1.231        ad 			p->p_zomblwp = l;
   1182   1.52        ad 		}
   1183  1.231        ad 		break;
   1184   1.52        ad 	}
   1185   1.31      yamt 
   1186   1.52        ad 	/*
   1187   1.52        ad 	 * If we find a pending signal for the process and we have been
   1188  1.151       chs 	 * asked to check for signals, then we lose: arrange to have
   1189   1.52        ad 	 * all other LWPs in the process check for signals.
   1190   1.52        ad 	 */
   1191   1.56     pavel 	if ((l->l_flag & LW_PENDSIG) != 0 &&
   1192   1.52        ad 	    firstsig(&p->p_sigpend.sp_set) != 0) {
   1193   1.52        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
   1194   1.52        ad 			lwp_lock(l2);
   1195  1.209        ad 			signotify(l2);
   1196   1.52        ad 			lwp_unlock(l2);
   1197   1.52        ad 		}
   1198   1.31      yamt 	}
   1199   1.31      yamt 
   1200  1.158      matt 	/*
   1201  1.158      matt 	 * Release any PCU resources before becoming a zombie.
   1202  1.158      matt 	 */
   1203  1.158      matt 	pcu_discard_all(l);
   1204  1.158      matt 
   1205   1.52        ad 	lwp_lock(l);
   1206   1.52        ad 	l->l_stat = LSZOMB;
   1207  1.162     rmind 	if (l->l_name != NULL) {
   1208   1.90        ad 		strcpy(l->l_name, "(zombie)");
   1209  1.128     rmind 	}
   1210   1.52        ad 	lwp_unlock(l);
   1211    1.2   thorpej 	p->p_nrlwps--;
   1212   1.52        ad 	cv_broadcast(&p->p_lwpcv);
   1213   1.78        ad 	if (l->l_lwpctl != NULL)
   1214   1.78        ad 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
   1215  1.103        ad 	mutex_exit(p->p_lock);
   1216   1.52        ad 
   1217   1.52        ad 	/*
   1218   1.52        ad 	 * We can no longer block.  At this point, lwp_free() may already
   1219   1.52        ad 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
   1220   1.52        ad 	 *
   1221   1.52        ad 	 * Free MD LWP resources.
   1222   1.52        ad 	 */
   1223   1.52        ad 	cpu_lwp_free(l, 0);
   1224    1.2   thorpej 
   1225   1.65        ad 	if (current) {
   1226  1.218        ad 		/* Switch away into oblivion. */
   1227  1.218        ad 		lwp_lock(l);
   1228  1.218        ad 		spc_lock(l->l_cpu);
   1229  1.218        ad 		mi_switch(l);
   1230  1.218        ad 		panic("lwp_exit");
   1231   1.65        ad 	}
   1232    1.2   thorpej }
   1233    1.2   thorpej 
   1234   1.52        ad /*
   1235   1.52        ad  * Free a dead LWP's remaining resources.
   1236   1.52        ad  *
   1237   1.52        ad  * XXXLWP limits.
   1238   1.52        ad  */
   1239   1.52        ad void
   1240   1.63        ad lwp_free(struct lwp *l, bool recycle, bool last)
   1241   1.52        ad {
   1242   1.52        ad 	struct proc *p = l->l_proc;
   1243  1.100        ad 	struct rusage *ru;
   1244   1.52        ad 	ksiginfoq_t kq;
   1245   1.52        ad 
   1246   1.92      yamt 	KASSERT(l != curlwp);
   1247  1.160      yamt 	KASSERT(last || mutex_owned(p->p_lock));
   1248   1.92      yamt 
   1249  1.177  christos 	/*
   1250  1.177  christos 	 * We use the process credentials instead of the lwp credentials here
    1251  1.177  christos 	 * because the lwp credentials may be cached (just after a setuid call)
    1252  1.177  christos 	 * and we don't want to pay for syncing, since the lwp is going away
    1253  1.177  christos 	 * anyway.
   1254  1.177  christos 	 */
   1255  1.169  christos 	if (p != &proc0 && p->p_nlwps != 1)
   1256  1.177  christos 		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
   1257  1.218        ad 
   1258   1.52        ad 	/*
   1259  1.238        ad 	 * In the unlikely event that the LWP is still on the CPU,
   1260  1.238        ad 	 * then spin until it has switched away.
   1261  1.247  riastrad 	 *
   1262  1.247  riastrad 	 * atomic_load_acquire matches atomic_store_release in
   1263  1.247  riastrad 	 * lwp_startup and mi_switch.
   1264  1.238        ad 	 */
   1265  1.247  riastrad 	while (__predict_false((atomic_load_acquire(&l->l_pflag) & LP_RUNNING)
   1266  1.247  riastrad 		!= 0)) {
   1267  1.238        ad 		SPINLOCK_BACKOFF_HOOK;
   1268  1.238        ad 	}
   1269  1.238        ad 
   1270  1.238        ad 	/*
   1271  1.238        ad 	 * Now that the LWP's known off the CPU, reset its state back to
   1272  1.238        ad 	 * LSIDL, which defeats anything that might have gotten a hold on
   1273  1.238        ad 	 * the LWP via pid_table before the ID was freed.  It's important
   1274  1.238        ad 	 * to do this with both the LWP locked and p_lock held.
   1275  1.238        ad 	 *
   1276  1.238        ad 	 * Also reset the CPU and lock pointer back to curcpu(), since the
    1277  1.238        ad 	 * LWP will in all likelihood be cached with the current CPU in
   1278  1.238        ad 	 * lwp_cache when we free it and later allocated from there again
   1279  1.238        ad 	 * (avoid incidental lock contention).
   1280  1.238        ad 	 */
   1281  1.238        ad 	lwp_lock(l);
   1282  1.238        ad 	l->l_stat = LSIDL;
   1283  1.238        ad 	l->l_cpu = curcpu();
   1284  1.238        ad 	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_lwplock);
   1285  1.238        ad 
   1286  1.238        ad 	/*
   1287  1.223        ad 	 * If this was not the last LWP in the process, then adjust counters
   1288  1.223        ad 	 * and unlock.  This is done differently for the last LWP in exit1().
   1289   1.52        ad 	 */
   1290   1.52        ad 	if (!last) {
   1291   1.52        ad 		/*
   1292   1.52        ad 		 * Add the LWP's run time to the process' base value.
   1293   1.52        ad 		 * This needs to co-incide with coming off p_lwps.
    1294   1.52        ad 		 * This needs to coincide with coming off p_lwps.
   1295   1.86      yamt 		bintime_add(&p->p_rtime, &l->l_rtime);
   1296   1.64      yamt 		p->p_pctcpu += l->l_pctcpu;
   1297  1.100        ad 		ru = &p->p_stats->p_ru;
   1298  1.100        ad 		ruadd(ru, &l->l_ru);
   1299  1.100        ad 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
   1300  1.100        ad 		ru->ru_nivcsw += l->l_nivcsw;
   1301   1.52        ad 		LIST_REMOVE(l, l_sibling);
   1302   1.52        ad 		p->p_nlwps--;
   1303   1.52        ad 		p->p_nzlwps--;
   1304   1.52        ad 		if ((l->l_prflag & LPR_DETACHED) != 0)
   1305   1.52        ad 			p->p_ndlwps--;
   1306   1.63        ad 
   1307   1.63        ad 		/*
   1308   1.63        ad 		 * Have any LWPs sleeping in lwp_wait() recheck for
   1309   1.63        ad 		 * deadlock.
   1310   1.63        ad 		 */
   1311   1.63        ad 		cv_broadcast(&p->p_lwpcv);
   1312  1.103        ad 		mutex_exit(p->p_lock);
   1313   1.52        ad 
   1314  1.238        ad 		/* Free the LWP ID. */
   1315  1.239        ad 		mutex_enter(&proc_lock);
   1316  1.238        ad 		proc_free_lwpid(p, l->l_lid);
   1317  1.239        ad 		mutex_exit(&proc_lock);
   1318   1.63        ad 	}
   1319   1.52        ad 
   1320   1.52        ad 	/*
   1321   1.52        ad 	 * Destroy the LWP's remaining signal information.
   1322   1.52        ad 	 */
   1323   1.52        ad 	ksiginfo_queue_init(&kq);
   1324   1.52        ad 	sigclear(&l->l_sigpend, NULL, &kq);
   1325   1.52        ad 	ksiginfo_queue_drain(&kq);
   1326   1.52        ad 	cv_destroy(&l->l_sigcv);
   1327  1.171     rmind 	cv_destroy(&l->l_waitcv);
   1328    1.2   thorpej 
   1329   1.19  jdolecek 	/*
   1330  1.162     rmind 	 * Free lwpctl structure and affinity.
   1331  1.162     rmind 	 */
   1332  1.162     rmind 	if (l->l_lwpctl) {
   1333  1.162     rmind 		lwp_ctl_free(l);
   1334  1.162     rmind 	}
   1335  1.162     rmind 	if (l->l_affinity) {
   1336  1.162     rmind 		kcpuset_unuse(l->l_affinity, NULL);
   1337  1.162     rmind 		l->l_affinity = NULL;
   1338  1.162     rmind 	}
   1339  1.162     rmind 
   1340  1.162     rmind 	/*
   1341  1.238        ad 	 * Free remaining data structures and the LWP itself unless the
   1342  1.238        ad 	 * caller wants to recycle.
   1343   1.19  jdolecek 	 */
   1344   1.90        ad 	if (l->l_name != NULL)
   1345   1.90        ad 		kmem_free(l->l_name, MAXCOMLEN);
   1346  1.135     rmind 
   1347  1.208      maxv 	kmsan_lwp_free(l);
   1348  1.232      maxv 	kcov_lwp_free(l);
   1349   1.52        ad 	cpu_lwp_free2(l);
   1350   1.19  jdolecek 	uvm_lwp_exit(l);
   1351  1.134     rmind 
   1352   1.60      yamt 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
   1353   1.75        ad 	KASSERT(l->l_inheritedprio == -1);
   1354  1.155      matt 	KASSERT(l->l_blcnt == 0);
   1355  1.138    darran 	kdtrace_thread_dtor(NULL, l);
   1356   1.52        ad 	if (!recycle)
   1357   1.87        ad 		pool_cache_put(lwp_cache, l);
   1358    1.2   thorpej }
   1359    1.2   thorpej 
   1360    1.2   thorpej /*
    1361   1.91     rmind  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1362   1.91     rmind  */
   1363   1.91     rmind void
   1364  1.114     rmind lwp_migrate(lwp_t *l, struct cpu_info *tci)
   1365   1.91     rmind {
   1366  1.114     rmind 	struct schedstate_percpu *tspc;
   1367  1.121     rmind 	int lstat = l->l_stat;
   1368  1.121     rmind 
   1369   1.91     rmind 	KASSERT(lwp_locked(l, NULL));
   1370  1.114     rmind 	KASSERT(tci != NULL);
   1371  1.114     rmind 
   1372  1.121     rmind 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
   1373  1.227        ad 	if ((l->l_pflag & LP_RUNNING) != 0) {
   1374  1.121     rmind 		lstat = LSONPROC;
   1375  1.121     rmind 	}
   1376  1.121     rmind 
   1377  1.114     rmind 	/*
    1378  1.114     rmind 	 * The destination CPU could have been changed while a previous
    1379  1.114     rmind 	 * migration was still pending.
   1380  1.114     rmind 	 */
   1381  1.121     rmind 	if (l->l_target_cpu != NULL) {
   1382  1.114     rmind 		l->l_target_cpu = tci;
   1383  1.114     rmind 		lwp_unlock(l);
   1384  1.114     rmind 		return;
   1385  1.114     rmind 	}
   1386   1.91     rmind 
   1387  1.114     rmind 	/* Nothing to do if trying to migrate to the same CPU */
   1388  1.114     rmind 	if (l->l_cpu == tci) {
   1389   1.91     rmind 		lwp_unlock(l);
   1390   1.91     rmind 		return;
   1391   1.91     rmind 	}
   1392   1.91     rmind 
   1393  1.114     rmind 	KASSERT(l->l_target_cpu == NULL);
   1394  1.114     rmind 	tspc = &tci->ci_schedstate;
   1395  1.121     rmind 	switch (lstat) {
   1396   1.91     rmind 	case LSRUN:
   1397  1.134     rmind 		l->l_target_cpu = tci;
   1398  1.134     rmind 		break;
   1399   1.91     rmind 	case LSSLEEP:
   1400  1.114     rmind 		l->l_cpu = tci;
   1401   1.91     rmind 		break;
   1402  1.212        ad 	case LSIDL:
   1403   1.91     rmind 	case LSSTOP:
   1404   1.91     rmind 	case LSSUSPENDED:
   1405  1.114     rmind 		l->l_cpu = tci;
   1406  1.114     rmind 		if (l->l_wchan == NULL) {
   1407  1.114     rmind 			lwp_unlock_to(l, tspc->spc_lwplock);
   1408  1.114     rmind 			return;
   1409   1.91     rmind 		}
   1410  1.114     rmind 		break;
   1411   1.91     rmind 	case LSONPROC:
   1412  1.114     rmind 		l->l_target_cpu = tci;
   1413  1.114     rmind 		spc_lock(l->l_cpu);
   1414  1.212        ad 		sched_resched_cpu(l->l_cpu, PRI_USER_RT, true);
   1415  1.212        ad 		/* spc now unlocked */
   1416   1.91     rmind 		break;
   1417   1.91     rmind 	}
   1418   1.91     rmind 	lwp_unlock(l);
   1419   1.91     rmind }
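                          /*
                           * Usage sketch (illustrative only; "tci" is the target CPU, as
                           * above).  Callers lock the LWP first; lwp_migrate() always
                           * unlocks it, and the migration may complete asynchronously at
                           * the LWP's next pass through mi_switch():
                           *
                           *	lwp_lock(l);
                           *	lwp_migrate(l, tci);
                           */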
   1420   1.91     rmind 
   1421  1.237   thorpej #define	lwp_find_exclude(l)					\
   1422  1.237   thorpej 	((l)->l_stat == LSIDL || (l)->l_stat == LSZOMB)
   1423  1.237   thorpej 
   1424   1.91     rmind /*
    1425   1.94     rmind  * Find the LWP in the process.  Arguments may be zero, in which case
    1426   1.94     rmind  * the calling process and the first LWP in the list will be used.
    1427  1.103        ad  * On success, returns with the proc locked.
   1428  1.237   thorpej  *
   1429  1.237   thorpej  * => pid == 0 -> look in curproc.
   1430  1.237   thorpej  * => pid == -1 -> match any proc.
   1431  1.237   thorpej  * => otherwise look up the proc.
   1432  1.237   thorpej  *
   1433  1.237   thorpej  * => lid == 0 -> first LWP in the proc
   1434  1.237   thorpej  * => otherwise specific LWP
   1435   1.91     rmind  */
   1436   1.91     rmind struct lwp *
   1437   1.91     rmind lwp_find2(pid_t pid, lwpid_t lid)
   1438   1.91     rmind {
   1439   1.91     rmind 	proc_t *p;
   1440   1.91     rmind 	lwp_t *l;
   1441   1.91     rmind 
   1442  1.237   thorpej 	/* First LWP of specified proc. */
   1443  1.237   thorpej 	if (lid == 0) {
   1444  1.237   thorpej 		switch (pid) {
   1445  1.237   thorpej 		case -1:
   1446  1.237   thorpej 			/* No lookup keys. */
   1447  1.237   thorpej 			return NULL;
   1448  1.237   thorpej 		case 0:
   1449  1.237   thorpej 			p = curproc;
   1450  1.237   thorpej 			mutex_enter(p->p_lock);
   1451  1.237   thorpej 			break;
   1452  1.237   thorpej 		default:
   1453  1.239        ad 			mutex_enter(&proc_lock);
   1454  1.237   thorpej 			p = proc_find(pid);
   1455  1.237   thorpej 			if (__predict_false(p == NULL)) {
   1456  1.239        ad 				mutex_exit(&proc_lock);
   1457  1.237   thorpej 				return NULL;
   1458  1.237   thorpej 			}
   1459  1.237   thorpej 			mutex_enter(p->p_lock);
   1460  1.239        ad 			mutex_exit(&proc_lock);
   1461  1.237   thorpej 			break;
   1462  1.237   thorpej 		}
   1463  1.237   thorpej 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1464  1.237   thorpej 			if (__predict_true(!lwp_find_exclude(l)))
   1465  1.237   thorpej 				break;
   1466  1.150     rmind 		}
   1467  1.237   thorpej 		goto out;
   1468  1.237   thorpej 	}
   1469  1.237   thorpej 
   1470  1.237   thorpej 	l = proc_find_lwp_acquire_proc(lid, &p);
   1471  1.237   thorpej 	if (l == NULL)
   1472  1.237   thorpej 		return NULL;
   1473  1.237   thorpej 	KASSERT(p != NULL);
   1474  1.237   thorpej 	KASSERT(mutex_owned(p->p_lock));
   1475  1.237   thorpej 
   1476  1.237   thorpej 	if (__predict_false(lwp_find_exclude(l))) {
   1477  1.237   thorpej 		l = NULL;
   1478  1.237   thorpej 		goto out;
   1479  1.150     rmind 	}
   1480  1.237   thorpej 
   1481  1.237   thorpej 	/* Apply proc filter, if applicable. */
   1482  1.237   thorpej 	switch (pid) {
   1483  1.237   thorpej 	case -1:
   1484  1.237   thorpej 		/* Match anything. */
   1485  1.237   thorpej 		break;
   1486  1.237   thorpej 	case 0:
   1487  1.237   thorpej 		if (p != curproc)
   1488  1.237   thorpej 			l = NULL;
   1489  1.237   thorpej 		break;
   1490  1.237   thorpej 	default:
   1491  1.237   thorpej 		if (p->p_pid != pid)
   1492  1.237   thorpej 			l = NULL;
   1493  1.237   thorpej 		break;
   1494   1.94     rmind 	}
   1495  1.237   thorpej 
   1496  1.237   thorpej  out:
   1497  1.237   thorpej 	if (__predict_false(l == NULL)) {
   1498  1.103        ad 		mutex_exit(p->p_lock);
   1499  1.103        ad 	}
   1500   1.91     rmind 	return l;
   1501   1.91     rmind }
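                          /*
                           * Usage sketch (illustrative): on success the containing proc is
                           * returned locked, so the caller must drop p_lock when done:
                           *
                           *	if ((l = lwp_find2(pid, lid)) != NULL) {
                           *		... inspect l ...
                           *		mutex_exit(l->l_proc->p_lock);
                           *	}
                           */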
   1502   1.91     rmind 
   1503   1.91     rmind /*
   1504  1.168      yamt  * Look up a live LWP within the specified process.
   1505   1.52        ad  *
   1506  1.223        ad  * Must be called with p->p_lock held (as it looks at the radix tree,
   1507  1.223        ad  * and also wants to exclude idle and zombie LWPs).
   1508   1.52        ad  */
   1509   1.52        ad struct lwp *
   1510  1.151       chs lwp_find(struct proc *p, lwpid_t id)
   1511   1.52        ad {
   1512   1.52        ad 	struct lwp *l;
   1513   1.52        ad 
   1514  1.103        ad 	KASSERT(mutex_owned(p->p_lock));
   1515   1.52        ad 
   1516  1.235   thorpej 	l = proc_find_lwp(p, id);
   1517  1.223        ad 	KASSERT(l == NULL || l->l_lid == id);
   1518   1.52        ad 
   1519   1.52        ad 	/*
   1520   1.52        ad 	 * No need to lock - all of these conditions will
   1521   1.52        ad 	 * be visible with the process level mutex held.
   1522   1.52        ad 	 */
   1523  1.237   thorpej 	if (__predict_false(l != NULL && lwp_find_exclude(l)))
   1524   1.52        ad 		l = NULL;
   1525   1.52        ad 
   1526   1.52        ad 	return l;
   1527   1.52        ad }
   1528   1.52        ad 
   1529   1.52        ad /*
   1530   1.37        ad  * Update an LWP's cached credentials to mirror the process' master copy.
   1531   1.37        ad  *
   1532   1.37        ad  * This happens early in the syscall path, on user trap, and on LWP
   1533   1.37        ad  * creation.  A long-running LWP can also voluntarily choose to update
   1534  1.179       snj  * its credentials by calling this routine.  This may be called from
   1535   1.37        ad  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1536   1.37        ad  */
   1537   1.37        ad void
   1538   1.37        ad lwp_update_creds(struct lwp *l)
   1539   1.37        ad {
   1540   1.37        ad 	kauth_cred_t oc;
   1541   1.37        ad 	struct proc *p;
   1542   1.37        ad 
   1543   1.37        ad 	p = l->l_proc;
   1544   1.37        ad 	oc = l->l_cred;
   1545   1.37        ad 
   1546  1.103        ad 	mutex_enter(p->p_lock);
   1547   1.37        ad 	kauth_cred_hold(p->p_cred);
   1548   1.37        ad 	l->l_cred = p->p_cred;
   1549   1.98        ad 	l->l_prflag &= ~LPR_CRMOD;
   1550  1.103        ad 	mutex_exit(p->p_lock);
   1551   1.88        ad 	if (oc != NULL)
   1552   1.37        ad 		kauth_cred_free(oc);
   1553   1.52        ad }
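                          /*
                           * Callers normally reach this via the LWP_CACHE_CREDS() wrapper
                           * mentioned above, e.g. on syscall entry (sketch):
                           *
                           *	LWP_CACHE_CREDS(l, l->l_proc);
                           */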
   1554   1.52        ad 
   1555   1.52        ad /*
   1556   1.52        ad  * Verify that an LWP is locked, and optionally verify that the lock matches
   1557   1.52        ad  * one we specify.
   1558   1.52        ad  */
   1559   1.52        ad int
   1560   1.52        ad lwp_locked(struct lwp *l, kmutex_t *mtx)
   1561   1.52        ad {
   1562   1.52        ad 	kmutex_t *cur = l->l_mutex;
   1563   1.52        ad 
   1564   1.52        ad 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1565   1.52        ad }
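                          /*
                           * Mainly used in assertions, as elsewhere in this file, e.g.:
                           *
                           *	KASSERT(lwp_locked(l, NULL));
                           */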
   1566   1.52        ad 
   1567   1.52        ad /*
   1568   1.52        ad  * Lend a new mutex to an LWP.  The old mutex must be held.
   1569   1.52        ad  */
   1570  1.211        ad kmutex_t *
   1571  1.178      matt lwp_setlock(struct lwp *l, kmutex_t *mtx)
   1572   1.52        ad {
   1573  1.211        ad 	kmutex_t *oldmtx = l->l_mutex;
   1574   1.52        ad 
   1575  1.211        ad 	KASSERT(mutex_owned(oldmtx));
   1576   1.52        ad 
   1577  1.248  riastrad 	atomic_store_release(&l->l_mutex, mtx);
   1578  1.211        ad 	return oldmtx;
   1579   1.52        ad }
   1580   1.52        ad 
   1581   1.52        ad /*
   1582   1.52        ad  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1583   1.52        ad  * must be held.
   1584   1.52        ad  */
   1585   1.52        ad void
   1586  1.178      matt lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
   1587   1.52        ad {
   1588   1.52        ad 	kmutex_t *old;
   1589   1.52        ad 
   1590  1.152     rmind 	KASSERT(lwp_locked(l, NULL));
   1591   1.52        ad 
   1592   1.52        ad 	old = l->l_mutex;
   1593  1.248  riastrad 	atomic_store_release(&l->l_mutex, mtx);
   1594   1.52        ad 	mutex_spin_exit(old);
   1595   1.52        ad }
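                          /*
                           * Example of the lock lending protocol (as used by lwp_free()
                           * above): hand the LWP over to the current CPU's lwplock and
                           * release the old lock in one step:
                           *
                           *	lwp_lock(l);
                           *	... adjust l->l_stat, l->l_cpu ...
                           *	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_lwplock);
                           */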
   1596   1.52        ad 
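                          /*
                           * Try to acquire an LWP's lock without blocking.  The lock may be
                           * lent away (lwp_setlock()) while we are acquiring it, so re-check
                           * that l_mutex is unchanged and retry otherwise.  Returns non-zero
                           * on success.
                           */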
   1597   1.60      yamt int
   1598   1.60      yamt lwp_trylock(struct lwp *l)
   1599   1.60      yamt {
   1600   1.60      yamt 	kmutex_t *old;
   1601   1.60      yamt 
   1602   1.60      yamt 	for (;;) {
   1603  1.248  riastrad 		if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))
   1604   1.60      yamt 			return 0;
   1605  1.248  riastrad 		if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
   1606   1.60      yamt 			return 1;
   1607   1.60      yamt 		mutex_spin_exit(old);
   1608   1.60      yamt 	}
   1609   1.60      yamt }
   1610   1.60      yamt 
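                          /*
                           * Unsleep an LWP by way of its synchronisation object's unsleep
                           * method (typically removing it from a sleep queue).  Called with
                           * the LWP locked; the method releases the lock if "unlock" is true.
                           */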
   1611  1.134     rmind void
   1612  1.211        ad lwp_unsleep(lwp_t *l, bool unlock)
   1613   1.96        ad {
   1614   1.96        ad 
   1615   1.96        ad 	KASSERT(mutex_owned(l->l_mutex));
   1616  1.211        ad 	(*l->l_syncobj->sobj_unsleep)(l, unlock);
   1617   1.96        ad }
   1618   1.96        ad 
   1619   1.52        ad /*
    1620   1.56     pavel  * Handle exceptions for mi_userret().  Called if any of the flags in
    1621   1.52        ad  * the LW_USERRET mask is set.
   1622   1.52        ad  */
   1623   1.52        ad void
   1624   1.52        ad lwp_userret(struct lwp *l)
   1625   1.52        ad {
   1626   1.52        ad 	struct proc *p;
   1627   1.52        ad 	int sig;
   1628   1.52        ad 
   1629  1.114     rmind 	KASSERT(l == curlwp);
   1630  1.114     rmind 	KASSERT(l->l_stat == LSONPROC);
   1631   1.52        ad 	p = l->l_proc;
   1632   1.52        ad 
   1633   1.52        ad 	/*
    1634  1.167     rmind 	 * It is safe to do this read unlocked on an MP system.
   1635   1.52        ad 	 */
   1636  1.167     rmind 	while ((l->l_flag & LW_USERRET) != 0) {
   1637   1.52        ad 		/*
   1638   1.52        ad 		 * Process pending signals first, unless the process
   1639   1.61        ad 		 * is dumping core or exiting, where we will instead
   1640  1.101     rmind 		 * enter the LW_WSUSPEND case below.
   1641   1.52        ad 		 */
   1642   1.61        ad 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1643   1.61        ad 		    LW_PENDSIG) {
   1644  1.103        ad 			mutex_enter(p->p_lock);
   1645   1.52        ad 			while ((sig = issignal(l)) != 0)
   1646   1.52        ad 				postsig(sig);
   1647  1.103        ad 			mutex_exit(p->p_lock);
   1648   1.52        ad 		}
   1649   1.52        ad 
   1650   1.52        ad 		/*
   1651   1.52        ad 		 * Core-dump or suspend pending.
   1652   1.52        ad 		 *
   1653  1.159      matt 		 * In case of core dump, suspend ourselves, so that the kernel
   1654  1.159      matt 		 * stack and therefore the userland registers saved in the
   1655  1.159      matt 		 * trapframe are around for coredump() to write them out.
   1656  1.159      matt 		 * We also need to save any PCU resources that we have so that
    1657  1.159      matt 		 * they are accessible to coredump().  We issue a wakeup on
   1658  1.159      matt 		 * p->p_lwpcv so that sigexit() will write the core file out
   1659  1.159      matt 		 * once all other LWPs are suspended.
   1660   1.52        ad 		 */
   1661   1.56     pavel 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1662  1.159      matt 			pcu_save_all(l);
   1663  1.103        ad 			mutex_enter(p->p_lock);
   1664   1.52        ad 			p->p_nrlwps--;
   1665   1.52        ad 			cv_broadcast(&p->p_lwpcv);
   1666   1.52        ad 			lwp_lock(l);
   1667   1.52        ad 			l->l_stat = LSSUSPENDED;
   1668  1.104        ad 			lwp_unlock(l);
   1669  1.103        ad 			mutex_exit(p->p_lock);
   1670  1.104        ad 			lwp_lock(l);
   1671  1.217        ad 			spc_lock(l->l_cpu);
   1672   1.64      yamt 			mi_switch(l);
   1673   1.52        ad 		}
   1674   1.52        ad 
   1675   1.52        ad 		/* Process is exiting. */
   1676   1.56     pavel 		if ((l->l_flag & LW_WEXIT) != 0) {
   1677   1.52        ad 			lwp_exit(l);
   1678   1.52        ad 			KASSERT(0);
   1679   1.52        ad 			/* NOTREACHED */
   1680   1.52        ad 		}
   1681  1.156     pooka 
   1682  1.156     pooka 		/* update lwpctl processor (for vfork child_return) */
   1683  1.156     pooka 		if (l->l_flag & LW_LWPCTL) {
   1684  1.156     pooka 			lwp_lock(l);
   1685  1.156     pooka 			KASSERT(kpreempt_disabled());
   1686  1.156     pooka 			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
   1687  1.156     pooka 			l->l_lwpctl->lc_pctr++;
   1688  1.156     pooka 			l->l_flag &= ~LW_LWPCTL;
   1689  1.156     pooka 			lwp_unlock(l);
   1690  1.156     pooka 		}
   1691   1.52        ad 	}
   1692   1.52        ad }
   1693   1.52        ad 
   1694   1.52        ad /*
   1695   1.52        ad  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1696   1.52        ad  */
   1697   1.52        ad void
   1698   1.52        ad lwp_need_userret(struct lwp *l)
   1699   1.52        ad {
   1700  1.209        ad 
   1701  1.209        ad 	KASSERT(!cpu_intr_p());
   1702   1.63        ad 	KASSERT(lwp_locked(l, NULL));
   1703   1.52        ad 
   1704   1.52        ad 	/*
   1705  1.209        ad 	 * If the LWP is in any state other than LSONPROC, we know that it
   1706  1.209        ad 	 * is executing in-kernel and will hit userret() on the way out.
   1707  1.209        ad 	 *
   1708  1.209        ad 	 * If the LWP is curlwp, then we know we'll be back out to userspace
   1709  1.209        ad 	 * soon (can't be called from a hardware interrupt here).
   1710  1.209        ad 	 *
   1711  1.209        ad 	 * Otherwise, we can't be sure what the LWP is doing, so first make
   1712  1.209        ad 	 * sure the update to l_flag will be globally visible, and then
   1713  1.209        ad 	 * force the LWP to take a trip through trap() where it will do
   1714  1.209        ad 	 * userret().
   1715  1.209        ad 	 */
   1716  1.209        ad 	if (l->l_stat == LSONPROC && l != curlwp) {
   1717  1.209        ad 		membar_producer();
   1718  1.209        ad 		cpu_signotify(l);
   1719  1.209        ad 	}
   1720   1.52        ad }
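                          /*
                           * Typical pattern (sketch; LW_WEXIT is one of several flags in
                           * the LW_USERRET mask):
                           *
                           *	lwp_lock(l);
                           *	l->l_flag |= LW_WEXIT;
                           *	lwp_need_userret(l);
                           *	lwp_unlock(l);
                           */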
   1721   1.52        ad 
   1722   1.52        ad /*
   1723   1.52        ad  * Add one reference to an LWP.  This will prevent the LWP from
    1724   1.52        ad  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1725   1.52        ad  */
   1726   1.52        ad void
   1727   1.52        ad lwp_addref(struct lwp *l)
   1728   1.52        ad {
   1729  1.103        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1730  1.237   thorpej 	KASSERT(l->l_stat != LSZOMB);
   1731  1.237   thorpej 	l->l_refcnt++;
   1732   1.52        ad }
   1733   1.52        ad 
   1734   1.52        ad /*
   1735   1.52        ad  * Remove one reference to an LWP.  If this is the last reference,
   1736   1.52        ad  * then we must finalize the LWP's death.
   1737   1.52        ad  */
   1738   1.52        ad void
   1739   1.52        ad lwp_delref(struct lwp *l)
   1740   1.52        ad {
   1741   1.52        ad 	struct proc *p = l->l_proc;
   1742   1.52        ad 
   1743  1.103        ad 	mutex_enter(p->p_lock);
   1744  1.142  christos 	lwp_delref2(l);
   1745  1.142  christos 	mutex_exit(p->p_lock);
   1746  1.142  christos }
   1747  1.142  christos 
   1748  1.142  christos /*
   1749  1.142  christos  * Remove one reference to an LWP.  If this is the last reference,
   1750  1.142  christos  * then we must finalize the LWP's death.  The proc mutex is held
   1751  1.142  christos  * on entry.
   1752  1.142  christos  */
   1753  1.142  christos void
   1754  1.142  christos lwp_delref2(struct lwp *l)
   1755  1.142  christos {
   1756  1.142  christos 	struct proc *p = l->l_proc;
   1757  1.142  christos 
   1758  1.142  christos 	KASSERT(mutex_owned(p->p_lock));
   1759   1.72        ad 	KASSERT(l->l_stat != LSZOMB);
   1760  1.237   thorpej 	KASSERT(l->l_refcnt > 0);
   1761  1.231        ad 
   1762  1.237   thorpej 	if (--l->l_refcnt == 0)
   1763   1.76        ad 		cv_broadcast(&p->p_lwpcv);
   1764   1.52        ad }
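                          /*
                           * Typical lookup-and-hold pattern (sketch), combining lwp_find()
                           * with the reference counting above:
                           *
                           *	mutex_enter(p->p_lock);
                           *	if ((l = lwp_find(p, lid)) != NULL)
                           *		lwp_addref(l);
                           *	mutex_exit(p->p_lock);
                           *	if (l != NULL) {
                           *		... inspect l and its PCB ...
                           *		lwp_delref(l);
                           *	}
                           */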
   1765   1.52        ad 
   1766   1.52        ad /*
    1767  1.233   thorpej  * Drain all references to the given LWP, blocking as needed.
    1768  1.233   thorpej  * Returns true if we blocked.
   1769   1.52        ad  */
   1770  1.233   thorpej bool
   1771   1.52        ad lwp_drainrefs(struct lwp *l)
   1772   1.52        ad {
   1773   1.52        ad 	struct proc *p = l->l_proc;
   1774  1.233   thorpej 	bool rv = false;
   1775   1.52        ad 
   1776  1.103        ad 	KASSERT(mutex_owned(p->p_lock));
   1777   1.52        ad 
   1778  1.233   thorpej 	l->l_prflag |= LPR_DRAINING;
   1779  1.233   thorpej 
   1780  1.237   thorpej 	while (l->l_refcnt > 0) {
   1781  1.233   thorpej 		rv = true;
   1782  1.103        ad 		cv_wait(&p->p_lwpcv, p->p_lock);
   1783  1.233   thorpej 	}
   1784  1.233   thorpej 	return rv;
   1785   1.37        ad }
   1786   1.41   thorpej 
   1787   1.41   thorpej /*
   1788  1.127        ad  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
   1789  1.127        ad  * be held.
   1790  1.127        ad  */
   1791  1.127        ad bool
   1792  1.127        ad lwp_alive(lwp_t *l)
   1793  1.127        ad {
   1794  1.127        ad 
   1795  1.127        ad 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1796  1.127        ad 
   1797  1.127        ad 	switch (l->l_stat) {
   1798  1.127        ad 	case LSSLEEP:
   1799  1.127        ad 	case LSRUN:
   1800  1.127        ad 	case LSONPROC:
   1801  1.127        ad 	case LSSTOP:
   1802  1.127        ad 	case LSSUSPENDED:
   1803  1.127        ad 		return true;
   1804  1.127        ad 	default:
   1805  1.127        ad 		return false;
   1806  1.127        ad 	}
   1807  1.127        ad }
   1808  1.127        ad 
   1809  1.127        ad /*
   1810  1.127        ad  * Return first live LWP in the process.
   1811  1.127        ad  */
   1812  1.127        ad lwp_t *
   1813  1.127        ad lwp_find_first(proc_t *p)
   1814  1.127        ad {
   1815  1.127        ad 	lwp_t *l;
   1816  1.127        ad 
   1817  1.127        ad 	KASSERT(mutex_owned(p->p_lock));
   1818  1.127        ad 
   1819  1.127        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1820  1.127        ad 		if (lwp_alive(l)) {
   1821  1.127        ad 			return l;
   1822  1.127        ad 		}
   1823  1.127        ad 	}
   1824  1.127        ad 
   1825  1.127        ad 	return NULL;
   1826  1.127        ad }
   1827  1.127        ad 
   1828  1.127        ad /*
   1829   1.78        ad  * Allocate a new lwpctl structure for a user LWP.
   1830   1.78        ad  */
   1831   1.78        ad int
   1832   1.78        ad lwp_ctl_alloc(vaddr_t *uaddr)
   1833   1.78        ad {
   1834   1.78        ad 	lcproc_t *lp;
   1835   1.78        ad 	u_int bit, i, offset;
   1836   1.78        ad 	struct uvm_object *uao;
   1837   1.78        ad 	int error;
   1838   1.78        ad 	lcpage_t *lcp;
   1839   1.78        ad 	proc_t *p;
   1840   1.78        ad 	lwp_t *l;
   1841   1.78        ad 
   1842   1.78        ad 	l = curlwp;
   1843   1.78        ad 	p = l->l_proc;
   1844   1.78        ad 
   1845  1.156     pooka 	/* don't allow a vforked process to create lwp ctls */
   1846  1.156     pooka 	if (p->p_lflag & PL_PPWAIT)
   1847  1.156     pooka 		return EBUSY;
   1848  1.156     pooka 
   1849   1.81        ad 	if (l->l_lcpage != NULL) {
   1850   1.81        ad 		lcp = l->l_lcpage;
   1851   1.81        ad 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1852  1.143     njoly 		return 0;
   1853   1.81        ad 	}
   1854   1.78        ad 
   1855   1.78        ad 	/* First time around, allocate header structure for the process. */
   1856   1.78        ad 	if ((lp = p->p_lwpctl) == NULL) {
   1857   1.78        ad 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1858   1.78        ad 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1859   1.78        ad 		lp->lp_uao = NULL;
   1860   1.78        ad 		TAILQ_INIT(&lp->lp_pages);
   1861  1.103        ad 		mutex_enter(p->p_lock);
   1862   1.78        ad 		if (p->p_lwpctl == NULL) {
   1863   1.78        ad 			p->p_lwpctl = lp;
   1864  1.103        ad 			mutex_exit(p->p_lock);
   1865   1.78        ad 		} else {
   1866  1.103        ad 			mutex_exit(p->p_lock);
   1867   1.78        ad 			mutex_destroy(&lp->lp_lock);
   1868   1.78        ad 			kmem_free(lp, sizeof(*lp));
   1869   1.78        ad 			lp = p->p_lwpctl;
   1870   1.78        ad 		}
   1871   1.78        ad 	}
   1872   1.78        ad 
   1873   1.78        ad  	/*
   1874   1.78        ad  	 * Set up an anonymous memory region to hold the shared pages.
   1875   1.78        ad  	 * Map them into the process' address space.  The user vmspace
   1876   1.78        ad  	 * gets the first reference on the UAO.
   1877   1.78        ad  	 */
   1878   1.78        ad 	mutex_enter(&lp->lp_lock);
   1879   1.78        ad 	if (lp->lp_uao == NULL) {
   1880   1.78        ad 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1881   1.78        ad 		lp->lp_cur = 0;
   1882   1.78        ad 		lp->lp_max = LWPCTL_UAREA_SZ;
   1883   1.78        ad 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1884  1.182    martin 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ,
   1885  1.182    martin 		     p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
   1886   1.78        ad 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1887   1.78        ad 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1888   1.78        ad 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1889   1.78        ad 		if (error != 0) {
   1890   1.78        ad 			uao_detach(lp->lp_uao);
   1891   1.78        ad 			lp->lp_uao = NULL;
   1892   1.78        ad 			mutex_exit(&lp->lp_lock);
   1893   1.78        ad 			return error;
   1894   1.78        ad 		}
   1895   1.78        ad 	}
   1896   1.78        ad 
   1897   1.78        ad 	/* Get a free block and allocate for this LWP. */
   1898   1.78        ad 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1899   1.78        ad 		if (lcp->lcp_nfree != 0)
   1900   1.78        ad 			break;
   1901   1.78        ad 	}
   1902   1.78        ad 	if (lcp == NULL) {
   1903   1.78        ad 		/* Nothing available - try to set up a free page. */
   1904   1.78        ad 		if (lp->lp_cur == lp->lp_max) {
   1905   1.78        ad 			mutex_exit(&lp->lp_lock);
   1906   1.78        ad 			return ENOMEM;
   1907   1.78        ad 		}
   1908   1.78        ad 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1909  1.189       chs 
   1910   1.78        ad 		/*
   1911   1.78        ad 		 * Wire the next page down in kernel space.  Since this
   1912   1.78        ad 		 * is a new mapping, we must add a reference.
   1913   1.78        ad 		 */
   1914   1.78        ad 		uao = lp->lp_uao;
   1915   1.78        ad 		(*uao->pgops->pgo_reference)(uao);
   1916   1.99        ad 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1917   1.78        ad 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1918   1.78        ad 		    uao, lp->lp_cur, PAGE_SIZE,
   1919   1.78        ad 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1920   1.78        ad 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1921   1.78        ad 		if (error != 0) {
   1922   1.78        ad 			mutex_exit(&lp->lp_lock);
   1923   1.78        ad 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1924   1.78        ad 			(*uao->pgops->pgo_detach)(uao);
   1925   1.78        ad 			return error;
   1926   1.78        ad 		}
   1927   1.89      yamt 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1928   1.89      yamt 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1929   1.89      yamt 		if (error != 0) {
   1930   1.89      yamt 			mutex_exit(&lp->lp_lock);
   1931   1.89      yamt 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1932   1.89      yamt 			    lcp->lcp_kaddr + PAGE_SIZE);
   1933   1.89      yamt 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1934   1.89      yamt 			return error;
   1935   1.89      yamt 		}
   1936   1.78        ad 		/* Prepare the page descriptor and link into the list. */
   1937   1.78        ad 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1938   1.78        ad 		lp->lp_cur += PAGE_SIZE;
   1939   1.78        ad 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1940   1.78        ad 		lcp->lcp_rotor = 0;
   1941   1.78        ad 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1942   1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1943   1.78        ad 	}
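                          	/*
                          	 * Scan for a bitmap word with a free slot, starting at the
                          	 * rotor.  Each 32-bit word covers 32 lwpctl slots, hence the
                          	 * "(i << 5) + bit" slot arithmetic below.
                          	 */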
   1944   1.78        ad 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1945   1.78        ad 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1946   1.78        ad 			i = 0;
   1947   1.78        ad 	}
   1948   1.78        ad 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1949  1.193     kamil 	lcp->lcp_bitmap[i] ^= (1U << bit);
   1950   1.78        ad 	lcp->lcp_rotor = i;
   1951   1.78        ad 	lcp->lcp_nfree--;
   1952   1.78        ad 	l->l_lcpage = lcp;
   1953   1.78        ad 	offset = (i << 5) + bit;
   1954   1.78        ad 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1955   1.78        ad 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1956   1.78        ad 	mutex_exit(&lp->lp_lock);
   1957   1.78        ad 
   1958  1.107        ad 	KPREEMPT_DISABLE(l);
   1959  1.195     skrll 	l->l_lwpctl->lc_curcpu = (int)cpu_index(curcpu());
   1960  1.107        ad 	KPREEMPT_ENABLE(l);
   1961   1.78        ad 
   1962   1.78        ad 	return 0;
   1963   1.78        ad }
   1964   1.78        ad 
   1965   1.78        ad /*
   1966   1.78        ad  * Free an lwpctl structure back to the per-process list.
   1967   1.78        ad  */
   1968   1.78        ad void
   1969   1.78        ad lwp_ctl_free(lwp_t *l)
   1970   1.78        ad {
   1971  1.156     pooka 	struct proc *p = l->l_proc;
   1972   1.78        ad 	lcproc_t *lp;
   1973   1.78        ad 	lcpage_t *lcp;
   1974   1.78        ad 	u_int map, offset;
   1975   1.78        ad 
   1976  1.156     pooka 	/* don't free a lwp context we borrowed for vfork */
   1977  1.156     pooka 	if (p->p_lflag & PL_PPWAIT) {
   1978  1.156     pooka 		l->l_lwpctl = NULL;
   1979  1.156     pooka 		return;
   1980  1.156     pooka 	}
   1981  1.156     pooka 
   1982  1.156     pooka 	lp = p->p_lwpctl;
   1983   1.78        ad 	KASSERT(lp != NULL);
   1984   1.78        ad 
   1985   1.78        ad 	lcp = l->l_lcpage;
   1986   1.78        ad 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1987   1.78        ad 	KASSERT(offset < LWPCTL_PER_PAGE);
   1988   1.78        ad 
   1989   1.78        ad 	mutex_enter(&lp->lp_lock);
   1990   1.78        ad 	lcp->lcp_nfree++;
   1991   1.78        ad 	map = offset >> 5;
   1992  1.194     kamil 	lcp->lcp_bitmap[map] |= (1U << (offset & 31));
   1993   1.78        ad 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1994   1.78        ad 		lcp->lcp_rotor = map;
   1995   1.78        ad 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1996   1.78        ad 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1997   1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1998   1.78        ad 	}
   1999   1.78        ad 	mutex_exit(&lp->lp_lock);
   2000   1.78        ad }
   2001   1.78        ad 
   2002   1.78        ad /*
   2003   1.78        ad  * Process is exiting; tear down lwpctl state.  This can only be safely
   2004   1.78        ad  * called by the last LWP in the process.
   2005   1.78        ad  */
   2006   1.78        ad void
   2007   1.78        ad lwp_ctl_exit(void)
   2008   1.78        ad {
   2009   1.78        ad 	lcpage_t *lcp, *next;
   2010   1.78        ad 	lcproc_t *lp;
   2011   1.78        ad 	proc_t *p;
   2012   1.78        ad 	lwp_t *l;
   2013   1.78        ad 
   2014   1.78        ad 	l = curlwp;
   2015   1.78        ad 	l->l_lwpctl = NULL;
   2016   1.95        ad 	l->l_lcpage = NULL;
   2017   1.78        ad 	p = l->l_proc;
   2018   1.78        ad 	lp = p->p_lwpctl;
   2019   1.78        ad 
   2020   1.78        ad 	KASSERT(lp != NULL);
   2021   1.78        ad 	KASSERT(p->p_nlwps == 1);
   2022   1.78        ad 
   2023   1.78        ad 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   2024   1.78        ad 		next = TAILQ_NEXT(lcp, lcp_chain);
   2025   1.78        ad 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   2026   1.78        ad 		    lcp->lcp_kaddr + PAGE_SIZE);
   2027   1.78        ad 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   2028   1.78        ad 	}
   2029   1.78        ad 
   2030   1.78        ad 	if (lp->lp_uao != NULL) {
   2031   1.78        ad 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   2032   1.78        ad 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   2033   1.78        ad 	}
   2034   1.78        ad 
   2035   1.78        ad 	mutex_destroy(&lp->lp_lock);
   2036   1.78        ad 	kmem_free(lp, sizeof(*lp));
   2037   1.78        ad 	p->p_lwpctl = NULL;
   2038   1.78        ad }
   2039   1.84      yamt 
   2040  1.130        ad /*
   2041  1.130        ad  * Return the current LWP's "preemption counter".  Used to detect
   2042  1.130        ad  * preemption across operations that can tolerate preemption without
   2043  1.130        ad  * crashing, but which may generate incorrect results if preempted.
   2044  1.130        ad  */
   2045  1.130        ad uint64_t
   2046  1.130        ad lwp_pctr(void)
   2047  1.130        ad {
   2048  1.130        ad 
   2049  1.130        ad 	return curlwp->l_ncsw;
   2050  1.130        ad }
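                          /*
                           * Usage sketch (illustrative): sample the counter around the
                           * preemption-tolerant operation and retry if it changed:
                           *
                           *	do {
                           *		pctr = lwp_pctr();
                           *		... operation ...
                           *	} while (pctr != lwp_pctr());
                           */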
   2051  1.130        ad 
   2052  1.151       chs /*
   2053  1.151       chs  * Set an LWP's private data pointer.
   2054  1.151       chs  */
   2055  1.151       chs int
   2056  1.151       chs lwp_setprivate(struct lwp *l, void *ptr)
   2057  1.151       chs {
   2058  1.151       chs 	int error = 0;
   2059  1.151       chs 
   2060  1.151       chs 	l->l_private = ptr;
   2061  1.151       chs #ifdef __HAVE_CPU_LWP_SETPRIVATE
   2062  1.151       chs 	error = cpu_lwp_setprivate(l, ptr);
   2063  1.151       chs #endif
   2064  1.151       chs 	return error;
   2065  1.151       chs }
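                          /*
                           * This backs the _lwp_setprivate(2) interface, where the pointer
                           * conventionally holds the thread's TLS base; the syscall stub is
                           * roughly (sketch):
                           *
                           *	error = lwp_setprivate(l, SCARG(uap, ptr));
                           */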
   2066  1.151       chs 
   2067  1.233   thorpej /*
   2068  1.233   thorpej  * Perform any thread-related cleanup on LWP exit.
   2069  1.233   thorpej  * N.B. l->l_proc->p_lock must be HELD on entry but will
   2070  1.233   thorpej  * be released before returning!
   2071  1.233   thorpej  */
   2072  1.233   thorpej void
   2073  1.233   thorpej lwp_thread_cleanup(struct lwp *l)
   2074  1.233   thorpej {
   2075  1.233   thorpej 
   2076  1.233   thorpej 	KASSERT(mutex_owned(l->l_proc->p_lock));
   2077  1.235   thorpej 	mutex_exit(l->l_proc->p_lock);
   2078  1.236   thorpej 
   2079  1.236   thorpej 	/*
   2080  1.236   thorpej 	 * If the LWP has robust futexes, release them all
   2081  1.236   thorpej 	 * now.
   2082  1.236   thorpej 	 */
   2083  1.236   thorpej 	if (__predict_false(l->l_robust_head != 0)) {
   2084  1.244   thorpej 		futex_release_all_lwp(l);
   2085  1.236   thorpej 	}
   2086  1.233   thorpej }
   2087  1.233   thorpej 
   2088   1.84      yamt #if defined(DDB)
   2089  1.153     rmind #include <machine/pcb.h>
   2090  1.153     rmind 
   2091   1.84      yamt void
   2092   1.84      yamt lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   2093   1.84      yamt {
   2094   1.84      yamt 	lwp_t *l;
   2095   1.84      yamt 
   2096   1.84      yamt 	LIST_FOREACH(l, &alllwp, l_list) {
   2097   1.84      yamt 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   2098   1.84      yamt 
   2099   1.84      yamt 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   2100   1.84      yamt 			continue;
   2101   1.84      yamt 		}
   2102   1.84      yamt 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   2103   1.84      yamt 		    (void *)addr, (void *)stack,
   2104   1.84      yamt 		    (size_t)(addr - stack), l);
   2105   1.84      yamt 	}
   2106   1.84      yamt }
   2107   1.84      yamt #endif /* defined(DDB) */
   2108