      1 /*	$NetBSD: kern_lwp.c,v 1.214 2019/11/24 13:23:57 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams, and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	Lightweight processes (LWPs) are the basic unit or thread of
     36  *	execution within the kernel.  The core state of an LWP is described
     37  *	by "struct lwp", also known as lwp_t.
     38  *
      39  *	Each LWP is contained within a process (described by "struct proc").
     40  *	Every process contains at least one LWP, but may contain more.  The
     41  *	process describes attributes shared among all of its LWPs such as a
     42  *	private address space, global execution state (stopped, active,
     43  *	zombie, ...), signal disposition and so on.  On a multiprocessor
      44  *	machine, multiple LWPs may be executing concurrently in the kernel.
     45  *
     46  * Execution states
     47  *
     48  *	At any given time, an LWP has overall state that is described by
     49  *	lwp::l_stat.  The states are broken into two sets below.  The first
     50  *	set is guaranteed to represent the absolute, current state of the
     51  *	LWP:
     52  *
     53  *	LSONPROC
     54  *
     55  *		On processor: the LWP is executing on a CPU, either in the
     56  *		kernel or in user space.
     57  *
     58  *	LSRUN
     59  *
     60  *		Runnable: the LWP is parked on a run queue, and may soon be
     61  *		chosen to run by an idle processor, or by a processor that
     62  *		has been asked to preempt a currently runnning but lower
     63  *		priority LWP.
     64  *
     65  *	LSIDL
     66  *
     67  *		Idle: the LWP has been created but has not yet executed,
     68  *		or it has ceased executing a unit of work and is waiting
     69  *		to be started again.
     70  *
     71  *	LSSUSPENDED:
     72  *
     73  *		Suspended: the LWP has had its execution suspended by
     74  *		another LWP in the same process using the _lwp_suspend()
     75  *		system call.  User-level LWPs also enter the suspended
     76  *		state when the system is shutting down.
     77  *
      78  *	The second set represents a "statement of intent" on behalf of the
      79  *	LWP.  The LWP may in fact be executing on a processor, or may be
      80  *	sleeping or idle.  It is expected to take the necessary action to
     81  *	stop executing or become "running" again within a short timeframe.
     82  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
     83  *	Importantly, it indicates that its state is tied to a CPU.
     84  *
     85  *	LSZOMB:
     86  *
     87  *		Dead or dying: the LWP has released most of its resources
     88  *		and is about to switch away into oblivion, or has already
     89  *		switched away.  When it switches away, its few remaining
     90  *		resources can be collected.
     91  *
     92  *	LSSLEEP:
     93  *
     94  *		Sleeping: the LWP has entered itself onto a sleep queue, and
     95  *		has switched away or will switch away shortly to allow other
     96  *		LWPs to run on the CPU.
     97  *
     98  *	LSSTOP:
     99  *
    100  *		Stopped: the LWP has been stopped as a result of a job
    101  *		control signal, or as a result of the ptrace() interface.
    102  *
    103  *		Stopped LWPs may run briefly within the kernel to handle
    104  *		signals that they receive, but will not return to user space
    105  *		until their process' state is changed away from stopped.
    106  *
     107  *		Individual LWPs within a process cannot be stopped
    108  *		selectively: all actions that can stop or continue LWPs
    109  *		occur at the process level.
    110  *
    111  * State transitions
    112  *
    113  *	Note that the LSSTOP state may only be set when returning to
     114  *	user space in userret(), or when sleeping interruptibly.  The
    115  *	LSSUSPENDED state may only be set in userret().  Before setting
    116  *	those states, we try to ensure that the LWPs will release all
    117  *	locks that they hold, and at a minimum try to ensure that the
    118  *	LWP can be set runnable again by a signal.
    119  *
    120  *	LWPs may transition states in the following ways:
    121  *
    122  *	 RUN -------> ONPROC		ONPROC -----> RUN
    123  *		    				    > SLEEP
    124  *		    				    > STOPPED
    125  *						    > SUSPENDED
    126  *						    > ZOMB
    127  *						    > IDL (special cases)
    128  *
    129  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    130  *	            > SLEEP
    131  *
    132  *	 SLEEP -----> ONPROC		IDL --------> RUN
    133  *		    > RUN			    > SUSPENDED
    134  *		    > STOPPED			    > STOPPED
    135  *						    > ONPROC (special cases)
    136  *
     137  *	Some state transitions are only possible with kernel threads (e.g.
    138  *	ONPROC -> IDL) and happen under tightly controlled circumstances
    139  *	free of unwanted side effects.
    140  *
    141  * Migration
    142  *
     143  *	Migration of threads from one CPU to another may be performed
     144  *	internally by the scheduler via the sched_takecpu() or
     145  *	sched_catchlwp() functions.  The general-purpose lwp_migrate()
     146  *	function should be used for all other cases.  Kernel subsystems
     147  *	must be aware that an LWP's CPU may change while it is not locked.
    148  *
    149  * Locking
    150  *
    151  *	The majority of fields in 'struct lwp' are covered by a single,
    152  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
    153  *	each field are documented in sys/lwp.h.
    154  *
    155  *	State transitions must be made with the LWP's general lock held,
    156  *	and may cause the LWP's lock pointer to change.  Manipulation of
    157  *	the general lock is not performed directly, but through calls to
    158  *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
    159  *	adaptive locks are not allowed to be released while the LWP's lock
    160  *	is being held (unlike for other spin-locks).
    161  *
    162  *	States and their associated locks:
    163  *
     164  *	LSIDL, LSONPROC, LSZOMB, LSSUSPENDED:
    165  *
    166  *		Always covered by spc_lwplock, which protects LWPs not
    167  *		associated with any other sync object.  This is a per-CPU
    168  *		lock and matches lwp::l_cpu.
    169  *
    170  *	LSRUN:
    171  *
    172  *		Always covered by spc_mutex, which protects the run queues.
    173  *		This is a per-CPU lock and matches lwp::l_cpu.
    174  *
    175  *	LSSLEEP:
    176  *
    177  *		Covered by a lock associated with the sleep queue (sometimes
    178  *		a turnstile sleep queue) that the LWP resides on.
    179  *
    180  *	LSSTOP:
    181  *
    182  *		If the LWP was previously sleeping (l_wchan != NULL), then
    183  *		l_mutex references the sleep queue lock.  If the LWP was
    184  *		runnable or on the CPU when halted, or has been removed from
    185  *		the sleep queue since halted, then the lock is spc_lwplock.
    186  *
    187  *	The lock order is as follows:
    188  *
    189  *		sleepq -> turnstile -> spc_lwplock -> spc_mutex
    190  *
     191  *	Each process has a scheduler state lock (proc::p_lock), and a
    192  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    193  *	so on.  When an LWP is to be entered into or removed from one of the
    194  *	following states, p_lock must be held and the process wide counters
    195  *	adjusted:
    196  *
    197  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    198  *
    199  *	(But not always for kernel threads.  There are some special cases
    200  *	as mentioned above: soft interrupts, and the idle loops.)
    201  *
    202  *	Note that an LWP is considered running or likely to run soon if in
    203  *	one of the following states.  This affects the value of p_nrlwps:
    204  *
    205  *		LSRUN, LSONPROC, LSSLEEP
    206  *
    207  *	p_lock does not need to be held when transitioning among these
    208  *	three states, hence p_lock is rarely taken for state transitions.
    209  */
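
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the lwp_lock()/lwp_unlock() wrappers hide the fact that
 * l_mutex can change identity across a state transition.  A typical
 * pattern for examining an LWP under its general lock is:
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSSLEEP)
 *		... examine fields covered by l_mutex ...
 *	lwp_unlock(l);
 *
 * lwp_lock() re-checks l->l_mutex after acquiring it (see lwp_trylock()
 * below for the same pattern), so the caller need not worry about the
 * lock pointer changing underneath it.
 */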
    210 
    211 #include <sys/cdefs.h>
    212 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.214 2019/11/24 13:23:57 ad Exp $");
    213 
    214 #include "opt_ddb.h"
    215 #include "opt_lockdebug.h"
    216 #include "opt_dtrace.h"
    217 
    218 #define _LWP_API_PRIVATE
    219 
    220 #include <sys/param.h>
    221 #include <sys/systm.h>
    222 #include <sys/cpu.h>
    223 #include <sys/pool.h>
    224 #include <sys/proc.h>
    225 #include <sys/syscallargs.h>
    226 #include <sys/syscall_stats.h>
    227 #include <sys/kauth.h>
    228 #include <sys/pserialize.h>
    229 #include <sys/sleepq.h>
    230 #include <sys/lockdebug.h>
    231 #include <sys/kmem.h>
    232 #include <sys/pset.h>
    233 #include <sys/intr.h>
    234 #include <sys/lwpctl.h>
    235 #include <sys/atomic.h>
    236 #include <sys/filedesc.h>
    237 #include <sys/fstrans.h>
    238 #include <sys/dtrace_bsd.h>
    239 #include <sys/sdt.h>
    240 #include <sys/ptrace.h>
    241 #include <sys/xcall.h>
    242 #include <sys/uidinfo.h>
    243 #include <sys/sysctl.h>
    244 #include <sys/psref.h>
    245 #include <sys/msan.h>
    246 
    247 #include <uvm/uvm_extern.h>
    248 #include <uvm/uvm_object.h>
    249 
    250 static pool_cache_t	lwp_cache	__read_mostly;
    251 struct lwplist		alllwp		__cacheline_aligned;
    252 
    253 static void		lwp_dtor(void *, void *);
    254 
    255 /* DTrace proc provider probes */
    256 SDT_PROVIDER_DEFINE(proc);
    257 
    258 SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
    259 SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
    260 SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
    261 
    262 struct turnstile turnstile0 __cacheline_aligned;
    263 struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
    264 #ifdef LWP0_CPU_INFO
    265 	.l_cpu = LWP0_CPU_INFO,
    266 #endif
    267 #ifdef LWP0_MD_INITIALIZER
    268 	.l_md = LWP0_MD_INITIALIZER,
    269 #endif
    270 	.l_proc = &proc0,
    271 	.l_lid = 1,
    272 	.l_flag = LW_SYSTEM,
    273 	.l_stat = LSONPROC,
    274 	.l_ts = &turnstile0,
    275 	.l_syncobj = &sched_syncobj,
    276 	.l_refcnt = 1,
    277 	.l_priority = PRI_USER + NPRI_USER - 1,
    278 	.l_inheritedprio = -1,
    279 	.l_class = SCHED_OTHER,
    280 	.l_psid = PS_NONE,
    281 	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
    282 	.l_name = __UNCONST("swapper"),
    283 	.l_fd = &filedesc0,
    284 };
    285 
    286 static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
    287 
    288 /*
    289  * sysctl helper routine for kern.maxlwp. Ensures that the new
    290  * values are not too low or too high.
    291  */
    292 static int
    293 sysctl_kern_maxlwp(SYSCTLFN_ARGS)
    294 {
    295 	int error, nmaxlwp;
    296 	struct sysctlnode node;
    297 
    298 	nmaxlwp = maxlwp;
    299 	node = *rnode;
    300 	node.sysctl_data = &nmaxlwp;
    301 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    302 	if (error || newp == NULL)
    303 		return error;
    304 
    305 	if (nmaxlwp < 0 || nmaxlwp >= 65536)
    306 		return EINVAL;
    307 	if (nmaxlwp > cpu_maxlwp())
    308 		return EINVAL;
    309 	maxlwp = nmaxlwp;
    310 
    311 	return 0;
    312 }
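
/*
 * For example (editor's note), the limit created below can be adjusted
 * from userland with the standard sysctl interface, subject to the
 * bounds enforced above:
 *
 *	sysctl -w kern.maxlwp=4096
 */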
    313 
    314 static void
    315 sysctl_kern_lwp_setup(void)
    316 {
    317 	struct sysctllog *clog = NULL;
    318 
    319 	sysctl_createv(&clog, 0, NULL, NULL,
    320 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    321 		       CTLTYPE_INT, "maxlwp",
    322 		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
    323 		       sysctl_kern_maxlwp, 0, NULL, 0,
    324 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    325 }
    326 
    327 void
    328 lwpinit(void)
    329 {
    330 
    331 	LIST_INIT(&alllwp);
    332 	lwpinit_specificdata();
    333 	lwp_sys_init();
    334 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
    335 	    "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);
    336 
    337 	maxlwp = cpu_maxlwp();
    338 	sysctl_kern_lwp_setup();
    339 }
    340 
    341 void
    342 lwp0_init(void)
    343 {
    344 	struct lwp *l = &lwp0;
    345 
    346 	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
    347 	KASSERT(l->l_lid == proc0.p_nlwpid);
    348 
    349 	LIST_INSERT_HEAD(&alllwp, l, l_list);
    350 
    351 	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
    352 	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
    353 	cv_init(&l->l_sigcv, "sigwait");
    354 	cv_init(&l->l_waitcv, "vfork");
    355 
    356 	kauth_cred_hold(proc0.p_cred);
    357 	l->l_cred = proc0.p_cred;
    358 
    359 	kdtrace_thread_ctor(NULL, l);
    360 	lwp_initspecific(l);
    361 
    362 	SYSCALL_TIME_LWP_INIT(l);
    363 }
    364 
    365 static void
    366 lwp_dtor(void *arg, void *obj)
    367 {
    368 	lwp_t *l = obj;
    369 	(void)l;
    370 
    371 	/*
    372 	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
    373 	 * calls will exit before memory of LWP is returned to the pool, where
    374 	 * KVA of LWP structure might be freed and re-used for other purposes.
    375 	 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
    376 	 * callers, therefore cross-call to all CPUs will do the job.  Also,
    377 	 * the value of l->l_cpu must be still valid at this point.
    378 	 */
    379 	KASSERT(l->l_cpu != NULL);
    380 	xc_barrier(0);
    381 }
    382 
    383 /*
     384  * Set an LWP suspended.
    385  *
    386  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    387  * LWP before return.
    388  */
    389 int
    390 lwp_suspend(struct lwp *curl, struct lwp *t)
    391 {
    392 	int error;
    393 
    394 	KASSERT(mutex_owned(t->l_proc->p_lock));
    395 	KASSERT(lwp_locked(t, NULL));
    396 
    397 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    398 
    399 	/*
    400 	 * If the current LWP has been told to exit, we must not suspend anyone
    401 	 * else or deadlock could occur.  We won't return to userspace.
    402 	 */
    403 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
    404 		lwp_unlock(t);
    405 		return (EDEADLK);
    406 	}
    407 
    408 	if ((t->l_flag & LW_DBGSUSPEND) != 0) {
    409 		lwp_unlock(t);
    410 		return 0;
    411 	}
    412 
    413 	error = 0;
    414 
    415 	switch (t->l_stat) {
    416 	case LSRUN:
    417 	case LSONPROC:
    418 		t->l_flag |= LW_WSUSPEND;
    419 		lwp_need_userret(t);
    420 		lwp_unlock(t);
    421 		break;
    422 
    423 	case LSSLEEP:
    424 		t->l_flag |= LW_WSUSPEND;
    425 
    426 		/*
    427 		 * Kick the LWP and try to get it to the kernel boundary
    428 		 * so that it will release any locks that it holds.
    429 		 * setrunnable() will release the lock.
    430 		 */
    431 		if ((t->l_flag & LW_SINTR) != 0)
    432 			setrunnable(t);
    433 		else
    434 			lwp_unlock(t);
    435 		break;
    436 
    437 	case LSSUSPENDED:
    438 		lwp_unlock(t);
    439 		break;
    440 
    441 	case LSSTOP:
    442 		t->l_flag |= LW_WSUSPEND;
    443 		setrunnable(t);
    444 		break;
    445 
    446 	case LSIDL:
    447 	case LSZOMB:
    448 		error = EINTR; /* It's what Solaris does..... */
    449 		lwp_unlock(t);
    450 		break;
    451 	}
    452 
    453 	return (error);
    454 }
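
/*
 * Illustrative sketch (editor's addition): per the contract above, a
 * caller such as the _lwp_suspend() system call handler would look
 * roughly like this - p_lock held across the call, with lwp_suspend()
 * consuming the LWP lock:
 *
 *	mutex_enter(p->p_lock);
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);		(unlocks t)
 *	mutex_exit(p->p_lock);
 */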
    455 
    456 /*
    457  * Restart a suspended LWP.
    458  *
    459  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    460  * LWP before return.
    461  */
    462 void
    463 lwp_continue(struct lwp *l)
    464 {
    465 
    466 	KASSERT(mutex_owned(l->l_proc->p_lock));
    467 	KASSERT(lwp_locked(l, NULL));
    468 
    469 	/* If rebooting or not suspended, then just bail out. */
    470 	if ((l->l_flag & LW_WREBOOT) != 0) {
    471 		lwp_unlock(l);
    472 		return;
    473 	}
    474 
    475 	l->l_flag &= ~LW_WSUSPEND;
    476 
    477 	if (l->l_stat != LSSUSPENDED || (l->l_flag & LW_DBGSUSPEND) != 0) {
    478 		lwp_unlock(l);
    479 		return;
    480 	}
    481 
    482 	/* setrunnable() will release the lock. */
    483 	setrunnable(l);
    484 }
    485 
    486 /*
    487  * Restart a stopped LWP.
    488  *
    489  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
    490  * LWP before return.
    491  */
    492 void
    493 lwp_unstop(struct lwp *l)
    494 {
    495 	struct proc *p = l->l_proc;
    496 
    497 	KASSERT(mutex_owned(proc_lock));
    498 	KASSERT(mutex_owned(p->p_lock));
    499 
    500 	lwp_lock(l);
    501 
    502 	KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
    503 
    504 	/* If not stopped, then just bail out. */
    505 	if (l->l_stat != LSSTOP) {
    506 		lwp_unlock(l);
    507 		return;
    508 	}
    509 
    510 	p->p_stat = SACTIVE;
    511 	p->p_sflag &= ~PS_STOPPING;
    512 
    513 	if (!p->p_waited)
    514 		p->p_pptr->p_nstopchild--;
    515 
    516 	if (l->l_wchan == NULL) {
    517 		/* setrunnable() will release the lock. */
    518 		setrunnable(l);
    519 	} else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) {
    520 		/* setrunnable() so we can receive the signal */
    521 		setrunnable(l);
    522 	} else {
    523 		l->l_stat = LSSLEEP;
    524 		p->p_nrlwps++;
    525 		lwp_unlock(l);
    526 	}
    527 }
    528 
    529 /*
    530  * Wait for an LWP within the current process to exit.  If 'lid' is
    531  * non-zero, we are waiting for a specific LWP.
    532  *
    533  * Must be called with p->p_lock held.
    534  */
    535 int
    536 lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
    537 {
    538 	const lwpid_t curlid = l->l_lid;
    539 	proc_t *p = l->l_proc;
    540 	lwp_t *l2;
    541 	int error;
    542 
    543 	KASSERT(mutex_owned(p->p_lock));
    544 
    545 	p->p_nlwpwait++;
    546 	l->l_waitingfor = lid;
    547 
    548 	for (;;) {
    549 		int nfound;
    550 
    551 		/*
    552 		 * Avoid a race between exit1() and sigexit(): if the
    553 		 * process is dumping core, then we need to bail out: call
    554 		 * into lwp_userret() where we will be suspended until the
    555 		 * deed is done.
    556 		 */
    557 		if ((p->p_sflag & PS_WCORE) != 0) {
    558 			mutex_exit(p->p_lock);
    559 			lwp_userret(l);
    560 			KASSERT(false);
    561 		}
    562 
    563 		/*
    564 		 * First off, drain any detached LWP that is waiting to be
    565 		 * reaped.
    566 		 */
    567 		while ((l2 = p->p_zomblwp) != NULL) {
    568 			p->p_zomblwp = NULL;
    569 			lwp_free(l2, false, false);/* releases proc mutex */
    570 			mutex_enter(p->p_lock);
    571 		}
    572 
    573 		/*
    574 		 * Now look for an LWP to collect.  If the whole process is
    575 		 * exiting, count detached LWPs as eligible to be collected,
    576 		 * but don't drain them here.
    577 		 */
    578 		nfound = 0;
    579 		error = 0;
    580 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    581 			/*
    582 			 * If a specific wait and the target is waiting on
    583 			 * us, then avoid deadlock.  This also traps LWPs
    584 			 * that try to wait on themselves.
    585 			 *
    586 			 * Note that this does not handle more complicated
    587 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
    588 			 * can still be killed so it is not a major problem.
    589 			 */
    590 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
    591 				error = EDEADLK;
    592 				break;
    593 			}
    594 			if (l2 == l)
    595 				continue;
    596 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    597 				nfound += exiting;
    598 				continue;
    599 			}
    600 			if (lid != 0) {
    601 				if (l2->l_lid != lid)
    602 					continue;
    603 				/*
    604 				 * Mark this LWP as the first waiter, if there
    605 				 * is no other.
    606 				 */
    607 				if (l2->l_waiter == 0)
    608 					l2->l_waiter = curlid;
    609 			} else if (l2->l_waiter != 0) {
    610 				/*
    611 				 * It already has a waiter - so don't
    612 				 * collect it.  If the waiter doesn't
    613 				 * grab it we'll get another chance
    614 				 * later.
    615 				 */
    616 				nfound++;
    617 				continue;
    618 			}
    619 			nfound++;
    620 
    621 			/* No need to lock the LWP in order to see LSZOMB. */
    622 			if (l2->l_stat != LSZOMB)
    623 				continue;
    624 
    625 			/*
    626 			 * We're no longer waiting.  Reset the "first waiter"
    627 			 * pointer on the target, in case it was us.
    628 			 */
    629 			l->l_waitingfor = 0;
    630 			l2->l_waiter = 0;
    631 			p->p_nlwpwait--;
    632 			if (departed)
    633 				*departed = l2->l_lid;
    634 			sched_lwp_collect(l2);
    635 
    636 			/* lwp_free() releases the proc lock. */
    637 			lwp_free(l2, false, false);
    638 			mutex_enter(p->p_lock);
    639 			return 0;
    640 		}
    641 
    642 		if (error != 0)
    643 			break;
    644 		if (nfound == 0) {
    645 			error = ESRCH;
    646 			break;
    647 		}
    648 
    649 		/*
    650 		 * Note: since the lock will be dropped, need to restart on
    651 		 * wakeup to run all LWPs again, e.g. there may be new LWPs.
    652 		 */
    653 		if (exiting) {
    654 			KASSERT(p->p_nlwps > 1);
    655 			cv_wait(&p->p_lwpcv, p->p_lock);
    656 			error = EAGAIN;
    657 			break;
    658 		}
    659 
    660 		/*
    661 		 * If all other LWPs are waiting for exits or suspends
    662 		 * and the supply of zombies and potential zombies is
    663 		 * exhausted, then we are about to deadlock.
    664 		 *
    665 		 * If the process is exiting (and this LWP is not the one
    666 		 * that is coordinating the exit) then bail out now.
    667 		 */
    668 		if ((p->p_sflag & PS_WEXIT) != 0 ||
    669 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
    670 			error = EDEADLK;
    671 			break;
    672 		}
    673 
    674 		/*
    675 		 * Sit around and wait for something to happen.  We'll be
    676 		 * awoken if any of the conditions examined change: if an
    677 		 * LWP exits, is collected, or is detached.
    678 		 */
    679 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
    680 			break;
    681 	}
    682 
    683 	/*
    684 	 * We didn't find any LWPs to collect, we may have received a
    685 	 * signal, or some other condition has caused us to bail out.
    686 	 *
    687 	 * If waiting on a specific LWP, clear the waiters marker: some
    688 	 * other LWP may want it.  Then, kick all the remaining waiters
    689 	 * so that they can re-check for zombies and for deadlock.
    690 	 */
    691 	if (lid != 0) {
    692 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    693 			if (l2->l_lid == lid) {
    694 				if (l2->l_waiter == curlid)
    695 					l2->l_waiter = 0;
    696 				break;
    697 			}
    698 		}
    699 	}
    700 	p->p_nlwpwait--;
    701 	l->l_waitingfor = 0;
    702 	cv_broadcast(&p->p_lwpcv);
    703 
    704 	return error;
    705 }
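
/*
 * Illustrative sketch (editor's addition): a caller of lwp_wait(),
 * e.g. the _lwp_wait() system call handler, is expected to follow the
 * locking contract documented above:
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait(curlwp, lid, &departed, false);
 *	mutex_exit(p->p_lock);
 */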
    706 
    707 static lwpid_t
    708 lwp_find_free_lid(lwpid_t try_lid, lwp_t * new_lwp, proc_t *p)
    709 {
    710 	#define LID_SCAN (1u << 31)
    711 	lwp_t *scan, *free_before;
    712 	lwpid_t nxt_lid;
    713 
    714 	/*
     715 	 * We want the first unused lid greater than or equal to
     716 	 * try_lid (modulo 2^31).  (If nothing else, ld.elf_so doesn't
     717 	 * want an lwpid with the top bit set.)
     718 	 * We must not return 0, and avoiding 'LID_SCAN - 1' makes
     719 	 * the outer test easier.
     720 	 * This would be much easier if the list were sorted in
     721 	 * increasing order; instead, it is kept sorted in decreasing
     722 	 * order.
     723 	 * This code is only used after a process has generated 2^31 LWPs.
     724 	 *
     725 	 * The code assumes it can always find an id.
    726 	 */
    727 
    728 	try_lid &= LID_SCAN - 1;
    729 	if (try_lid <= 1)
    730 		try_lid = 2;
    731 
    732 	free_before = NULL;
    733 	nxt_lid = LID_SCAN - 1;
    734 	LIST_FOREACH(scan, &p->p_lwps, l_sibling) {
    735 		if (scan->l_lid != nxt_lid) {
     736 			/* There are available lids before this entry */
    737 			free_before = scan;
    738 			if (try_lid > scan->l_lid)
    739 				break;
    740 		}
    741 		if (try_lid == scan->l_lid) {
    742 			/* The ideal lid is busy, take a higher one */
    743 			if (free_before != NULL) {
    744 				try_lid = free_before->l_lid + 1;
    745 				break;
    746 			}
    747 			/* No higher ones, reuse low numbers */
    748 			try_lid = 2;
    749 		}
    750 
    751 		nxt_lid = scan->l_lid - 1;
    752 		if (LIST_NEXT(scan, l_sibling) == NULL) {
     753 			/* The value we have is lower than any existing lwp */
     754 			LIST_INSERT_AFTER(scan, new_lwp, l_sibling);
     755 			return try_lid;
    756 		}
    757 	}
    758 
    759 	LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling);
    760 	return try_lid;
    761 }
    762 
    763 /*
    764  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    765  * The new LWP is created in state LSIDL and must be set running,
    766  * suspended, or stopped by the caller.
    767  */
    768 int
    769 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
    770     void *stack, size_t stacksize, void (*func)(void *), void *arg,
    771     lwp_t **rnewlwpp, int sclass, const sigset_t *sigmask,
    772     const stack_t *sigstk)
    773 {
    774 	struct lwp *l2, *isfree;
    775 	turnstile_t *ts;
    776 	lwpid_t lid;
    777 
    778 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
    779 
    780 	/*
    781 	 * Enforce limits, excluding the first lwp and kthreads.
    782 	 */
    783 	if (p2->p_nlwps != 0 && p2 != &proc0) {
    784 		uid_t uid = kauth_cred_getuid(l1->l_cred);
    785 		int count = chglwpcnt(uid, 1);
    786 		if (__predict_false(count >
    787 		    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
    788 			if (kauth_authorize_process(l1->l_cred,
    789 			    KAUTH_PROCESS_RLIMIT, p2,
    790 			    KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
    791 			    &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
    792 			    != 0) {
    793 				(void)chglwpcnt(uid, -1);
    794 				return EAGAIN;
    795 			}
    796 		}
    797 	}
    798 
    799 	/*
    800 	 * First off, reap any detached LWP waiting to be collected.
    801 	 * We can re-use its LWP structure and turnstile.
    802 	 */
    803 	isfree = NULL;
    804 	if (p2->p_zomblwp != NULL) {
    805 		mutex_enter(p2->p_lock);
    806 		if ((isfree = p2->p_zomblwp) != NULL) {
    807 			p2->p_zomblwp = NULL;
    808 			lwp_free(isfree, true, false);/* releases proc mutex */
    809 		} else
    810 			mutex_exit(p2->p_lock);
    811 	}
    812 	if (isfree == NULL) {
    813 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
    814 		memset(l2, 0, sizeof(*l2));
    815 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
    816 		SLIST_INIT(&l2->l_pi_lenders);
    817 	} else {
    818 		l2 = isfree;
    819 		ts = l2->l_ts;
    820 		KASSERT(l2->l_inheritedprio == -1);
    821 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
    822 		memset(l2, 0, sizeof(*l2));
    823 		l2->l_ts = ts;
    824 	}
    825 
    826 	l2->l_stat = LSIDL;
    827 	l2->l_proc = p2;
    828 	l2->l_refcnt = 1;
    829 	l2->l_class = sclass;
    830 
    831 	/*
    832 	 * If vfork(), we want the LWP to run fast and on the same CPU
    833 	 * as its parent, so that it can reuse the VM context and cache
    834 	 * footprint on the local CPU.
    835 	 */
    836 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
    837 	l2->l_kpribase = PRI_KERNEL;
    838 	l2->l_priority = l1->l_priority;
    839 	l2->l_inheritedprio = -1;
    840 	l2->l_protectprio = -1;
    841 	l2->l_auxprio = -1;
    842 	l2->l_flag = (l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE));
    843 	l2->l_pflag = LP_MPSAFE;
    844 	TAILQ_INIT(&l2->l_ld_locks);
    845 	l2->l_psrefs = 0;
    846 	kmsan_lwp_alloc(l2);
    847 
    848 	/*
    849 	 * For vfork, borrow parent's lwpctl context if it exists.
    850 	 * This also causes us to return via lwp_userret.
    851 	 */
    852 	if (flags & LWP_VFORK && l1->l_lwpctl) {
    853 		l2->l_lwpctl = l1->l_lwpctl;
    854 		l2->l_flag |= LW_LWPCTL;
    855 	}
    856 
    857 	/*
    858 	 * If not the first LWP in the process, grab a reference to the
    859 	 * descriptor table.
    860 	 */
    861 	l2->l_fd = p2->p_fd;
    862 	if (p2->p_nlwps != 0) {
    863 		KASSERT(l1->l_proc == p2);
    864 		fd_hold(l2);
    865 	} else {
    866 		KASSERT(l1->l_proc != p2);
    867 	}
    868 
    869 	if (p2->p_flag & PK_SYSTEM) {
    870 		/* Mark it as a system LWP. */
    871 		l2->l_flag |= LW_SYSTEM;
    872 	}
    873 
    874 	kpreempt_disable();
    875 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_lwplock;
    876 	l2->l_cpu = l1->l_cpu;
    877 	kpreempt_enable();
    878 
    879 	kdtrace_thread_ctor(NULL, l2);
    880 	lwp_initspecific(l2);
    881 	sched_lwp_fork(l1, l2);
    882 	lwp_update_creds(l2);
    883 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
    884 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
    885 	cv_init(&l2->l_sigcv, "sigwait");
    886 	cv_init(&l2->l_waitcv, "vfork");
    887 	l2->l_syncobj = &sched_syncobj;
    888 	PSREF_DEBUG_INIT_LWP(l2);
    889 
    890 	if (rnewlwpp != NULL)
    891 		*rnewlwpp = l2;
    892 
    893 	/*
    894 	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
    895 	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
    896 	 */
    897 	pcu_save_all(l1);
    898 
    899 	uvm_lwp_setuarea(l2, uaddr);
    900 	uvm_lwp_fork(l1, l2, stack, stacksize, func, (arg != NULL) ? arg : l2);
    901 
    902 	if ((flags & LWP_PIDLID) != 0) {
    903 		lid = proc_alloc_pid(p2);
    904 		l2->l_pflag |= LP_PIDLID;
    905 	} else if (p2->p_nlwps == 0) {
    906 		lid = l1->l_lid;
    907 		/*
    908 		 * Update next LWP ID, too. If this overflows to LID_SCAN,
    909 		 * the slow path of scanning will be used for the next LWP.
    910 		 */
    911 		p2->p_nlwpid = lid + 1;
    912 	} else {
    913 		lid = 0;
    914 	}
    915 
    916 	mutex_enter(p2->p_lock);
    917 
    918 	if ((flags & LWP_DETACHED) != 0) {
    919 		l2->l_prflag = LPR_DETACHED;
    920 		p2->p_ndlwps++;
    921 	} else
    922 		l2->l_prflag = 0;
    923 
    924 	l2->l_sigstk = *sigstk;
    925 	l2->l_sigmask = *sigmask;
    926 	TAILQ_INIT(&l2->l_sigpend.sp_info);
    927 	sigemptyset(&l2->l_sigpend.sp_set);
    928 
    929 	if (__predict_true(lid == 0)) {
    930 		/*
     931 		 * XXX: l_lid values are expected to be unique (for a process);
     932 		 * if LWP_PIDLID is sometimes set this won't be true.
    933 		 * Once 2^31 threads have been allocated we have to
    934 		 * scan to ensure we allocate a unique value.
    935 		 */
    936 		lid = ++p2->p_nlwpid;
    937 		if (__predict_false(lid & LID_SCAN)) {
    938 			lid = lwp_find_free_lid(lid, l2, p2);
    939 			p2->p_nlwpid = lid | LID_SCAN;
     940 			/* l2 has been inserted into p_lwps in order */
    941 			goto skip_insert;
    942 		}
    943 		p2->p_nlwpid = lid;
    944 	}
    945 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    946     skip_insert:
    947 	l2->l_lid = lid;
    948 	p2->p_nlwps++;
    949 	p2->p_nrlwps++;
    950 
    951 	KASSERT(l2->l_affinity == NULL);
    952 
    953 	/* Inherit the affinity mask. */
    954 	if (l1->l_affinity) {
    955 		/*
    956 		 * Note that we hold the state lock while inheriting
    957 		 * the affinity to avoid race with sched_setaffinity().
    958 		 */
    959 		lwp_lock(l1);
    960 		if (l1->l_affinity) {
    961 			kcpuset_use(l1->l_affinity);
    962 			l2->l_affinity = l1->l_affinity;
    963 		}
    964 		lwp_unlock(l1);
    965 	}
    966 	mutex_exit(p2->p_lock);
    967 
    968 	SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
    969 
    970 	mutex_enter(proc_lock);
    971 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    972 	/* Inherit a processor-set */
    973 	l2->l_psid = l1->l_psid;
    974 	mutex_exit(proc_lock);
    975 
    976 	SYSCALL_TIME_LWP_INIT(l2);
    977 
    978 	if (p2->p_emul->e_lwp_fork)
    979 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    980 
    981 	return (0);
    982 }
    983 
    984 /*
    985  * Set a new LWP running.  If the process is stopping, then the LWP is
    986  * created stopped.
    987  */
    988 void
    989 lwp_start(lwp_t *l, int flags)
    990 {
    991 	proc_t *p = l->l_proc;
    992 
    993 	mutex_enter(p->p_lock);
    994 	lwp_lock(l);
    995 	KASSERT(l->l_stat == LSIDL);
    996 	if ((flags & LWP_SUSPENDED) != 0) {
    997 		/* It'll suspend itself in lwp_userret(). */
    998 		l->l_flag |= LW_WSUSPEND;
    999 	}
   1000 	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
   1001 		KASSERT(l->l_wchan == NULL);
   1002 	    	l->l_stat = LSSTOP;
   1003 		p->p_nrlwps--;
   1004 		lwp_unlock(l);
   1005 	} else {
   1006 		setrunnable(l);
   1007 		/* LWP now unlocked */
   1008 	}
   1009 	mutex_exit(p->p_lock);
   1010 }
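
/*
 * Illustrative sketch (editor's addition): since the LWP is born in
 * LSIDL, the usual sequence for bringing a new thread to life is
 * roughly lwp_create() followed by lwp_start(); uaddr, stack, sigmask
 * and friends stand in for caller-provided values:
 *
 *	error = lwp_create(curlwp, p, uaddr, flags, stack, stacksize,
 *	    func, arg, &l2, SCHED_OTHER, &sigmask, &sigstk);
 *	if (error == 0)
 *		lwp_start(l2, flags);
 */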
   1011 
   1012 /*
   1013  * Called by MD code when a new LWP begins execution.  Must be called
   1014  * with the previous LWP locked (so at splsched), or if there is no
   1015  * previous LWP, at splsched.
   1016  */
   1017 void
   1018 lwp_startup(struct lwp *prev, struct lwp *new_lwp)
   1019 {
   1020 	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
   1021 
   1022 	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
   1023 
   1024 	KASSERT(kpreempt_disabled());
   1025 	if (prev != NULL) {
   1026 		/*
    1027 		 * Normalize the count of the spin-mutexes; it was
    1028 		 * increased in mi_switch().  Clear the context switch
    1029 		 * marker - the switch is finished for the previous LWP.
   1030 		 */
   1031 		curcpu()->ci_mtx_count++;
   1032 		membar_exit();
   1033 		prev->l_ctxswtch = 0;
   1034 	}
   1035 	KPREEMPT_DISABLE(new_lwp);
   1036 	if (__predict_true(new_lwp->l_proc->p_vmspace))
   1037 		pmap_activate(new_lwp);
   1038 	spl0();
   1039 
   1040 	/* Note trip through cpu_switchto(). */
   1041 	pserialize_switchpoint();
   1042 
   1043 	LOCKDEBUG_BARRIER(NULL, 0);
   1044 	KPREEMPT_ENABLE(new_lwp);
   1045 	if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
   1046 		KERNEL_LOCK(1, new_lwp);
   1047 	}
   1048 }
   1049 
   1050 /*
   1051  * Exit an LWP.
   1052  */
   1053 void
   1054 lwp_exit(struct lwp *l)
   1055 {
   1056 	struct proc *p = l->l_proc;
   1057 	struct lwp *l2;
   1058 	bool current;
   1059 
   1060 	current = (l == curlwp);
   1061 
   1062 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
   1063 	KASSERT(p == curproc);
   1064 
   1065 	SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
   1066 
   1067 	/*
   1068 	 * Verify that we hold no locks other than the kernel lock.
   1069 	 */
   1070 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
   1071 
   1072 	/*
   1073 	 * If we are the last live LWP in a process, we need to exit the
   1074 	 * entire process.  We do so with an exit status of zero, because
   1075 	 * it's a "controlled" exit, and because that's what Solaris does.
   1076 	 *
   1077 	 * We are not quite a zombie yet, but for accounting purposes we
   1078 	 * must increment the count of zombies here.
   1079 	 *
   1080 	 * Note: the last LWP's specificdata will be deleted here.
   1081 	 */
   1082 	mutex_enter(p->p_lock);
   1083 	if (p->p_nlwps - p->p_nzlwps == 1) {
   1084 		KASSERT(current == true);
   1085 		KASSERT(p != &proc0);
   1086 		/* XXXSMP kernel_lock not held */
   1087 		exit1(l, 0, 0);
   1088 		/* NOTREACHED */
   1089 	}
   1090 	p->p_nzlwps++;
   1091 	mutex_exit(p->p_lock);
   1092 
   1093 	if (p->p_emul->e_lwp_exit)
   1094 		(*p->p_emul->e_lwp_exit)(l);
   1095 
   1096 	/* Drop filedesc reference. */
   1097 	fd_free();
   1098 
   1099 	/* Release fstrans private data. */
   1100 	fstrans_lwp_dtor(l);
   1101 
   1102 	/* Delete the specificdata while it's still safe to sleep. */
   1103 	lwp_finispecific(l);
   1104 
   1105 	/*
   1106 	 * Release our cached credentials.
   1107 	 */
   1108 	kauth_cred_free(l->l_cred);
   1109 	callout_destroy(&l->l_timeout_ch);
   1110 
   1111 	/*
   1112 	 * If traced, report LWP exit event to the debugger.
   1113 	 *
   1114 	 * Remove the LWP from the global list.
   1115 	 * Free its LID from the PID namespace if needed.
   1116 	 */
   1117 	mutex_enter(proc_lock);
   1118 
   1119 	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_EXIT)) ==
   1120 	    (PSL_TRACED|PSL_TRACELWP_EXIT)) {
   1121 		mutex_enter(p->p_lock);
   1122 		if (ISSET(p->p_sflag, PS_WEXIT)) {
   1123 			mutex_exit(p->p_lock);
   1124 			/*
   1125 			 * We are exiting, bail out without informing parent
   1126 			 * about a terminating LWP as it would deadlock.
   1127 			 */
   1128 		} else {
   1129 			eventswitch(TRAP_LWP, PTRACE_LWP_EXIT, l->l_lid);
   1130 			mutex_enter(proc_lock);
   1131 		}
   1132 	}
   1133 
   1134 	LIST_REMOVE(l, l_list);
   1135 	if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
   1136 		proc_free_pid(l->l_lid);
   1137 	}
   1138 	mutex_exit(proc_lock);
   1139 
   1140 	/*
   1141 	 * Get rid of all references to the LWP that others (e.g. procfs)
   1142 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
   1143 	 * mark it waiting for collection in the proc structure.  Note that
    1144 	 * before we can do that, we need to free any other dead, detached
   1145 	 * LWP waiting to meet its maker.
   1146 	 */
   1147 	mutex_enter(p->p_lock);
   1148 	lwp_drainrefs(l);
   1149 
   1150 	if ((l->l_prflag & LPR_DETACHED) != 0) {
   1151 		while ((l2 = p->p_zomblwp) != NULL) {
   1152 			p->p_zomblwp = NULL;
   1153 			lwp_free(l2, false, false);/* releases proc mutex */
   1154 			mutex_enter(p->p_lock);
   1155 			l->l_refcnt++;
   1156 			lwp_drainrefs(l);
   1157 		}
   1158 		p->p_zomblwp = l;
   1159 	}
   1160 
   1161 	/*
   1162 	 * If we find a pending signal for the process and we have been
   1163 	 * asked to check for signals, then we lose: arrange to have
   1164 	 * all other LWPs in the process check for signals.
   1165 	 */
   1166 	if ((l->l_flag & LW_PENDSIG) != 0 &&
   1167 	    firstsig(&p->p_sigpend.sp_set) != 0) {
   1168 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
   1169 			lwp_lock(l2);
   1170 			signotify(l2);
   1171 			lwp_unlock(l2);
   1172 		}
   1173 	}
   1174 
   1175 	/*
   1176 	 * Release any PCU resources before becoming a zombie.
   1177 	 */
   1178 	pcu_discard_all(l);
   1179 
   1180 	lwp_lock(l);
   1181 	l->l_stat = LSZOMB;
   1182 	if (l->l_name != NULL) {
   1183 		strcpy(l->l_name, "(zombie)");
   1184 	}
   1185 	lwp_unlock(l);
   1186 	p->p_nrlwps--;
   1187 	cv_broadcast(&p->p_lwpcv);
   1188 	if (l->l_lwpctl != NULL)
   1189 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
   1190 	mutex_exit(p->p_lock);
   1191 
   1192 	/*
   1193 	 * We can no longer block.  At this point, lwp_free() may already
   1194 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
   1195 	 *
   1196 	 * Free MD LWP resources.
   1197 	 */
   1198 	cpu_lwp_free(l, 0);
   1199 
   1200 	if (current) {
   1201 		pmap_deactivate(l);
   1202 
   1203 		/*
   1204 		 * Release the kernel lock, and switch away into
   1205 		 * oblivion.
   1206 		 */
   1207 #ifdef notyet
   1208 		/* XXXSMP hold in lwp_userret() */
   1209 		KERNEL_UNLOCK_LAST(l);
   1210 #else
   1211 		KERNEL_UNLOCK_ALL(l, NULL);
   1212 #endif
   1213 		lwp_exit_switchaway(l);
   1214 	}
   1215 }
   1216 
   1217 /*
   1218  * Free a dead LWP's remaining resources.
   1219  *
   1220  * XXXLWP limits.
   1221  */
   1222 void
   1223 lwp_free(struct lwp *l, bool recycle, bool last)
   1224 {
   1225 	struct proc *p = l->l_proc;
   1226 	struct rusage *ru;
   1227 	ksiginfoq_t kq;
   1228 
   1229 	KASSERT(l != curlwp);
   1230 	KASSERT(last || mutex_owned(p->p_lock));
   1231 
   1232 	/*
   1233 	 * We use the process credentials instead of the lwp credentials here
    1234 	 * because the lwp credentials may be cached (just after a setuid call)
    1235 	 * and we don't want to pay for syncing, since the lwp is going away
    1236 	 * anyway.
   1237 	 */
   1238 	if (p != &proc0 && p->p_nlwps != 1)
   1239 		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
   1240 	/*
   1241 	 * If this was not the last LWP in the process, then adjust
   1242 	 * counters and unlock.
   1243 	 */
   1244 	if (!last) {
   1245 		/*
   1246 		 * Add the LWP's run time to the process' base value.
    1247 		 * This needs to coincide with coming off p_lwps.
   1248 		 */
   1249 		bintime_add(&p->p_rtime, &l->l_rtime);
   1250 		p->p_pctcpu += l->l_pctcpu;
   1251 		ru = &p->p_stats->p_ru;
   1252 		ruadd(ru, &l->l_ru);
   1253 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
   1254 		ru->ru_nivcsw += l->l_nivcsw;
   1255 		LIST_REMOVE(l, l_sibling);
   1256 		p->p_nlwps--;
   1257 		p->p_nzlwps--;
   1258 		if ((l->l_prflag & LPR_DETACHED) != 0)
   1259 			p->p_ndlwps--;
   1260 
   1261 		/*
   1262 		 * Have any LWPs sleeping in lwp_wait() recheck for
   1263 		 * deadlock.
   1264 		 */
   1265 		cv_broadcast(&p->p_lwpcv);
   1266 		mutex_exit(p->p_lock);
   1267 	}
   1268 
   1269 #ifdef MULTIPROCESSOR
   1270 	/*
   1271 	 * In the unlikely event that the LWP is still on the CPU,
   1272 	 * then spin until it has switched away.  We need to release
   1273 	 * all locks to avoid deadlock against interrupt handlers on
   1274 	 * the target CPU.
   1275 	 */
   1276 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
   1277 		int count;
   1278 		(void)count; /* XXXgcc */
   1279 		KERNEL_UNLOCK_ALL(curlwp, &count);
   1280 		while ((l->l_pflag & LP_RUNNING) != 0 ||
   1281 		    l->l_cpu->ci_curlwp == l)
   1282 			SPINLOCK_BACKOFF_HOOK;
   1283 		KERNEL_LOCK(count, curlwp);
   1284 	}
   1285 #endif
   1286 
   1287 	/*
   1288 	 * Destroy the LWP's remaining signal information.
   1289 	 */
   1290 	ksiginfo_queue_init(&kq);
   1291 	sigclear(&l->l_sigpend, NULL, &kq);
   1292 	ksiginfo_queue_drain(&kq);
   1293 	cv_destroy(&l->l_sigcv);
   1294 	cv_destroy(&l->l_waitcv);
   1295 
   1296 	/*
   1297 	 * Free lwpctl structure and affinity.
   1298 	 */
   1299 	if (l->l_lwpctl) {
   1300 		lwp_ctl_free(l);
   1301 	}
   1302 	if (l->l_affinity) {
   1303 		kcpuset_unuse(l->l_affinity, NULL);
   1304 		l->l_affinity = NULL;
   1305 	}
   1306 
   1307 	/*
   1308 	 * Free the LWP's turnstile and the LWP structure itself unless the
   1309 	 * caller wants to recycle them.  Also, free the scheduler specific
   1310 	 * data.
   1311 	 *
   1312 	 * We can't return turnstile0 to the pool (it didn't come from it),
   1313 	 * so if it comes up just drop it quietly and move on.
   1314 	 *
   1315 	 * We don't recycle the VM resources at this time.
   1316 	 */
   1317 
   1318 	if (!recycle && l->l_ts != &turnstile0)
   1319 		pool_cache_put(turnstile_cache, l->l_ts);
   1320 	if (l->l_name != NULL)
   1321 		kmem_free(l->l_name, MAXCOMLEN);
   1322 
   1323 	kmsan_lwp_free(l);
   1324 	cpu_lwp_free2(l);
   1325 	uvm_lwp_exit(l);
   1326 
   1327 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
   1328 	KASSERT(l->l_inheritedprio == -1);
   1329 	KASSERT(l->l_blcnt == 0);
   1330 	kdtrace_thread_dtor(NULL, l);
   1331 	if (!recycle)
   1332 		pool_cache_put(lwp_cache, l);
   1333 }
   1334 
   1335 /*
    1336  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1337  */
   1338 void
   1339 lwp_migrate(lwp_t *l, struct cpu_info *tci)
   1340 {
   1341 	struct schedstate_percpu *tspc;
   1342 	int lstat = l->l_stat;
   1343 
   1344 	KASSERT(lwp_locked(l, NULL));
   1345 	KASSERT(tci != NULL);
   1346 
   1347 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
   1348 	if ((l->l_pflag & LP_RUNNING) != 0) {
   1349 		lstat = LSONPROC;
   1350 	}
   1351 
   1352 	/*
    1353 	 * The destination CPU may be changed while a previous migration
    1354 	 * has not yet finished.
   1355 	 */
   1356 	if (l->l_target_cpu != NULL) {
   1357 		l->l_target_cpu = tci;
   1358 		lwp_unlock(l);
   1359 		return;
   1360 	}
   1361 
   1362 	/* Nothing to do if trying to migrate to the same CPU */
   1363 	if (l->l_cpu == tci) {
   1364 		lwp_unlock(l);
   1365 		return;
   1366 	}
   1367 
   1368 	KASSERT(l->l_target_cpu == NULL);
   1369 	tspc = &tci->ci_schedstate;
   1370 	switch (lstat) {
   1371 	case LSRUN:
   1372 		l->l_target_cpu = tci;
   1373 		break;
   1374 	case LSSLEEP:
   1375 		l->l_cpu = tci;
   1376 		break;
   1377 	case LSIDL:
   1378 	case LSSTOP:
   1379 	case LSSUSPENDED:
   1380 		l->l_cpu = tci;
   1381 		if (l->l_wchan == NULL) {
   1382 			lwp_unlock_to(l, tspc->spc_lwplock);
   1383 			return;
   1384 		}
   1385 		break;
   1386 	case LSONPROC:
   1387 		l->l_target_cpu = tci;
   1388 		spc_lock(l->l_cpu);
   1389 		sched_resched_cpu(l->l_cpu, PRI_USER_RT, true);
   1390 		/* spc now unlocked */
   1391 		break;
   1392 	}
   1393 	lwp_unlock(l);
   1394 }
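
/*
 * Illustrative sketch (editor's addition): per the contract above, the
 * caller locks the LWP and lwp_migrate() consumes that lock:
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, tci);		(unlocks l)
 */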
   1395 
   1396 /*
    1397  * Find the LWP in the process.  Arguments may be zero, in which case
    1398  * the calling process and the first LWP in the list will be used.
    1399  * On success, returns with the proc locked.
   1400  */
   1401 struct lwp *
   1402 lwp_find2(pid_t pid, lwpid_t lid)
   1403 {
   1404 	proc_t *p;
   1405 	lwp_t *l;
   1406 
   1407 	/* Find the process. */
   1408 	if (pid != 0) {
   1409 		mutex_enter(proc_lock);
   1410 		p = proc_find(pid);
   1411 		if (p == NULL) {
   1412 			mutex_exit(proc_lock);
   1413 			return NULL;
   1414 		}
   1415 		mutex_enter(p->p_lock);
   1416 		mutex_exit(proc_lock);
   1417 	} else {
   1418 		p = curlwp->l_proc;
   1419 		mutex_enter(p->p_lock);
   1420 	}
   1421 	/* Find the thread. */
   1422 	if (lid != 0) {
   1423 		l = lwp_find(p, lid);
   1424 	} else {
   1425 		l = LIST_FIRST(&p->p_lwps);
   1426 	}
   1427 	if (l == NULL) {
   1428 		mutex_exit(p->p_lock);
   1429 	}
   1430 	return l;
   1431 }
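
/*
 * Illustrative sketch (editor's addition): since lwp_find2() returns
 * with the proc locked on success, the caller owns the matching unlock:
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		... inspect l ...
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */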
   1432 
   1433 /*
   1434  * Look up a live LWP within the specified process.
   1435  *
   1436  * Must be called with p->p_lock held.
   1437  */
   1438 struct lwp *
   1439 lwp_find(struct proc *p, lwpid_t id)
   1440 {
   1441 	struct lwp *l;
   1442 
   1443 	KASSERT(mutex_owned(p->p_lock));
   1444 
   1445 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1446 		if (l->l_lid == id)
   1447 			break;
   1448 	}
   1449 
   1450 	/*
   1451 	 * No need to lock - all of these conditions will
   1452 	 * be visible with the process level mutex held.
   1453 	 */
   1454 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1455 		l = NULL;
   1456 
   1457 	return l;
   1458 }
   1459 
   1460 /*
   1461  * Update an LWP's cached credentials to mirror the process' master copy.
   1462  *
   1463  * This happens early in the syscall path, on user trap, and on LWP
   1464  * creation.  A long-running LWP can also voluntarily choose to update
   1465  * its credentials by calling this routine.  This may be called from
   1466  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1467  */
   1468 void
   1469 lwp_update_creds(struct lwp *l)
   1470 {
   1471 	kauth_cred_t oc;
   1472 	struct proc *p;
   1473 
   1474 	p = l->l_proc;
   1475 	oc = l->l_cred;
   1476 
   1477 	mutex_enter(p->p_lock);
   1478 	kauth_cred_hold(p->p_cred);
   1479 	l->l_cred = p->p_cred;
   1480 	l->l_prflag &= ~LPR_CRMOD;
   1481 	mutex_exit(p->p_lock);
   1482 	if (oc != NULL)
   1483 		kauth_cred_free(oc);
   1484 }
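
/*
 * Illustrative sketch (editor's addition, not the canonical sys/lwp.h
 * definition): based on the check described above, an
 * LWP_CACHE_CREDS()-style fast path would be along the lines of:
 *
 *	if (__predict_false(l->l_cred != l->l_proc->p_cred))
 *		lwp_update_creds(l);
 */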
   1485 
   1486 /*
   1487  * Verify that an LWP is locked, and optionally verify that the lock matches
   1488  * one we specify.
   1489  */
   1490 int
   1491 lwp_locked(struct lwp *l, kmutex_t *mtx)
   1492 {
   1493 	kmutex_t *cur = l->l_mutex;
   1494 
   1495 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1496 }
   1497 
   1498 /*
   1499  * Lend a new mutex to an LWP.  The old mutex must be held.
   1500  */
   1501 kmutex_t *
   1502 lwp_setlock(struct lwp *l, kmutex_t *mtx)
   1503 {
   1504 	kmutex_t *oldmtx = l->l_mutex;
   1505 
   1506 	KASSERT(mutex_owned(oldmtx));
   1507 
   1508 	membar_exit();
   1509 	l->l_mutex = mtx;
   1510 	return oldmtx;
   1511 }
   1512 
   1513 /*
   1514  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1515  * must be held.
   1516  */
   1517 void
   1518 lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
   1519 {
   1520 	kmutex_t *old;
   1521 
   1522 	KASSERT(lwp_locked(l, NULL));
   1523 
   1524 	old = l->l_mutex;
   1525 	membar_exit();
   1526 	l->l_mutex = mtx;
   1527 	mutex_spin_exit(old);
   1528 }
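
/*
 * Illustrative sketch (editor's addition): a sync object taking
 * ownership of an LWP typically lends its own lock and drops the old
 * one in a single step, e.g. (sq_mutex is a hypothetical sleep queue
 * lock):
 *
 *	lwp_lock(l);
 *	lwp_unlock_to(l, sq_mutex);	(l now covered by sq_mutex)
 */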
   1529 
   1530 int
   1531 lwp_trylock(struct lwp *l)
   1532 {
   1533 	kmutex_t *old;
   1534 
   1535 	for (;;) {
   1536 		if (!mutex_tryenter(old = l->l_mutex))
   1537 			return 0;
   1538 		if (__predict_true(l->l_mutex == old))
   1539 			return 1;
   1540 		mutex_spin_exit(old);
   1541 	}
   1542 }
   1543 
   1544 void
   1545 lwp_unsleep(lwp_t *l, bool unlock)
   1546 {
   1547 
   1548 	KASSERT(mutex_owned(l->l_mutex));
   1549 	(*l->l_syncobj->sobj_unsleep)(l, unlock);
   1550 }
   1551 
   1552 /*
    1553  * Handle exceptions for mi_userret().  Called if any of the flags in
    1554  * the LW_USERRET mask is set.
   1555  */
   1556 void
   1557 lwp_userret(struct lwp *l)
   1558 {
   1559 	struct proc *p;
   1560 	int sig;
   1561 
   1562 	KASSERT(l == curlwp);
   1563 	KASSERT(l->l_stat == LSONPROC);
   1564 	p = l->l_proc;
   1565 
   1566 #ifndef __HAVE_FAST_SOFTINTS
   1567 	/* Run pending soft interrupts. */
   1568 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1569 		softint_overlay();
   1570 #endif
   1571 
   1572 	/*
    1573 	 * It is safe to do this read unlocked on an MP system.
   1574 	 */
   1575 	while ((l->l_flag & LW_USERRET) != 0) {
   1576 		/*
   1577 		 * Process pending signals first, unless the process
   1578 		 * is dumping core or exiting, where we will instead
   1579 		 * enter the LW_WSUSPEND case below.
   1580 		 */
   1581 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1582 		    LW_PENDSIG) {
   1583 			mutex_enter(p->p_lock);
   1584 			while ((sig = issignal(l)) != 0)
   1585 				postsig(sig);
   1586 			mutex_exit(p->p_lock);
   1587 		}
   1588 
   1589 		/*
   1590 		 * Core-dump or suspend pending.
   1591 		 *
   1592 		 * In case of core dump, suspend ourselves, so that the kernel
   1593 		 * stack and therefore the userland registers saved in the
   1594 		 * trapframe are around for coredump() to write them out.
   1595 		 * We also need to save any PCU resources that we have so that
   1596 		 * they accessible for coredump().  We issue a wakeup on
    1597 		 * they are accessible to coredump().  We issue a wakeup on
   1598 		 * once all other LWPs are suspended.
   1599 		 */
   1600 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1601 			pcu_save_all(l);
   1602 			mutex_enter(p->p_lock);
   1603 			p->p_nrlwps--;
   1604 			cv_broadcast(&p->p_lwpcv);
   1605 			lwp_lock(l);
   1606 			l->l_stat = LSSUSPENDED;
   1607 			lwp_unlock(l);
   1608 			mutex_exit(p->p_lock);
   1609 			lwp_lock(l);
   1610 			mi_switch(l);
   1611 		}
   1612 
   1613 		/* Process is exiting. */
   1614 		if ((l->l_flag & LW_WEXIT) != 0) {
   1615 			lwp_exit(l);
   1616 			KASSERT(0);
   1617 			/* NOTREACHED */
   1618 		}
   1619 
   1620 		/* update lwpctl processor (for vfork child_return) */
   1621 		if (l->l_flag & LW_LWPCTL) {
   1622 			lwp_lock(l);
   1623 			KASSERT(kpreempt_disabled());
   1624 			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
   1625 			l->l_lwpctl->lc_pctr++;
   1626 			l->l_flag &= ~LW_LWPCTL;
   1627 			lwp_unlock(l);
   1628 		}
   1629 	}
   1630 }
   1631 
   1632 /*
   1633  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1634  */
   1635 void
   1636 lwp_need_userret(struct lwp *l)
   1637 {
   1638 
   1639 	KASSERT(!cpu_intr_p());
   1640 	KASSERT(lwp_locked(l, NULL));
   1641 
   1642 	/*
   1643 	 * If the LWP is in any state other than LSONPROC, we know that it
   1644 	 * is executing in-kernel and will hit userret() on the way out.
   1645 	 *
   1646 	 * If the LWP is curlwp, then we know we'll be back out to userspace
   1647 	 * soon (can't be called from a hardware interrupt here).
   1648 	 *
   1649 	 * Otherwise, we can't be sure what the LWP is doing, so first make
   1650 	 * sure the update to l_flag will be globally visible, and then
   1651 	 * force the LWP to take a trip through trap() where it will do
   1652 	 * userret().
   1653 	 */
   1654 	if (l->l_stat == LSONPROC && l != curlwp) {
   1655 		membar_producer();
   1656 		cpu_signotify(l);
   1657 	}
   1658 }
   1659 
   1660 /*
   1661  * Add one reference to an LWP.  This will prevent the LWP from
    1662  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1663  */
   1664 void
   1665 lwp_addref(struct lwp *l)
   1666 {
   1667 
   1668 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1669 	KASSERT(l->l_stat != LSZOMB);
   1670 	KASSERT(l->l_refcnt != 0);
   1671 
   1672 	l->l_refcnt++;
   1673 }
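
/*
 * Illustrative sketch (editor's addition): the addref/delref pair lets
 * a subsystem such as procfs pin an LWP for inspection:
 *
 *	mutex_enter(p->p_lock);
 *	lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	... inspect l, possibly sleeping ...
 *	lwp_delref(l);
 */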
   1674 
   1675 /*
   1676  * Remove one reference to an LWP.  If this is the last reference,
   1677  * then we must finalize the LWP's death.
   1678  */
   1679 void
   1680 lwp_delref(struct lwp *l)
   1681 {
   1682 	struct proc *p = l->l_proc;
   1683 
   1684 	mutex_enter(p->p_lock);
   1685 	lwp_delref2(l);
   1686 	mutex_exit(p->p_lock);
   1687 }
   1688 
   1689 /*
   1690  * Remove one reference to an LWP.  If this is the last reference,
   1691  * then we must finalize the LWP's death.  The proc mutex is held
   1692  * on entry.
   1693  */
   1694 void
   1695 lwp_delref2(struct lwp *l)
   1696 {
   1697 	struct proc *p = l->l_proc;
   1698 
   1699 	KASSERT(mutex_owned(p->p_lock));
   1700 	KASSERT(l->l_stat != LSZOMB);
   1701 	KASSERT(l->l_refcnt > 0);
   1702 	if (--l->l_refcnt == 0)
   1703 		cv_broadcast(&p->p_lwpcv);
   1704 }
   1705 
   1706 /*
   1707  * Drain all references to the current LWP.
   1708  */
   1709 void
   1710 lwp_drainrefs(struct lwp *l)
   1711 {
   1712 	struct proc *p = l->l_proc;
   1713 
   1714 	KASSERT(mutex_owned(p->p_lock));
   1715 	KASSERT(l->l_refcnt != 0);
   1716 
   1717 	l->l_refcnt--;
   1718 	while (l->l_refcnt != 0)
   1719 		cv_wait(&p->p_lwpcv, p->p_lock);
   1720 }
   1721 
   1722 /*
   1723  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
   1724  * be held.
   1725  */
   1726 bool
   1727 lwp_alive(lwp_t *l)
   1728 {
   1729 
   1730 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1731 
   1732 	switch (l->l_stat) {
   1733 	case LSSLEEP:
   1734 	case LSRUN:
   1735 	case LSONPROC:
   1736 	case LSSTOP:
   1737 	case LSSUSPENDED:
   1738 		return true;
   1739 	default:
   1740 		return false;
   1741 	}
   1742 }
   1743 
   1744 /*
   1745  * Return first live LWP in the process.
   1746  */
   1747 lwp_t *
   1748 lwp_find_first(proc_t *p)
   1749 {
   1750 	lwp_t *l;
   1751 
   1752 	KASSERT(mutex_owned(p->p_lock));
   1753 
   1754 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1755 		if (lwp_alive(l)) {
   1756 			return l;
   1757 		}
   1758 	}
   1759 
   1760 	return NULL;
   1761 }
   1762 
   1763 /*
   1764  * Allocate a new lwpctl structure for a user LWP.
   1765  */
   1766 int
   1767 lwp_ctl_alloc(vaddr_t *uaddr)
   1768 {
   1769 	lcproc_t *lp;
   1770 	u_int bit, i, offset;
   1771 	struct uvm_object *uao;
   1772 	int error;
   1773 	lcpage_t *lcp;
   1774 	proc_t *p;
   1775 	lwp_t *l;
   1776 
   1777 	l = curlwp;
   1778 	p = l->l_proc;
   1779 
   1780 	/* don't allow a vforked process to create lwp ctls */
   1781 	if (p->p_lflag & PL_PPWAIT)
   1782 		return EBUSY;
   1783 
   1784 	if (l->l_lcpage != NULL) {
   1785 		lcp = l->l_lcpage;
   1786 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1787 		return 0;
   1788 	}
   1789 
   1790 	/* First time around, allocate header structure for the process. */
   1791 	if ((lp = p->p_lwpctl) == NULL) {
   1792 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1793 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1794 		lp->lp_uao = NULL;
   1795 		TAILQ_INIT(&lp->lp_pages);
   1796 		mutex_enter(p->p_lock);
   1797 		if (p->p_lwpctl == NULL) {
   1798 			p->p_lwpctl = lp;
   1799 			mutex_exit(p->p_lock);
   1800 		} else {
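         			/* Another LWP installed its own header first; discard ours. */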
   1801 			mutex_exit(p->p_lock);
   1802 			mutex_destroy(&lp->lp_lock);
   1803 			kmem_free(lp, sizeof(*lp));
   1804 			lp = p->p_lwpctl;
   1805 		}
   1806 	}
   1807 
    1808 	/*
    1809 	 * Set up an anonymous memory region to hold the shared pages.
    1810 	 * Map them into the process' address space.  The user vmspace
    1811 	 * gets the first reference on the UAO.
    1812 	 */
   1813 	mutex_enter(&lp->lp_lock);
   1814 	if (lp->lp_uao == NULL) {
   1815 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1816 		lp->lp_cur = 0;
   1817 		lp->lp_max = LWPCTL_UAREA_SZ;
   1818 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1819 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ,
   1820 		     p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
   1821 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1822 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1823 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1824 		if (error != 0) {
   1825 			uao_detach(lp->lp_uao);
   1826 			lp->lp_uao = NULL;
   1827 			mutex_exit(&lp->lp_lock);
   1828 			return error;
   1829 		}
   1830 	}
   1831 
   1832 	/* Get a free block and allocate for this LWP. */
   1833 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1834 		if (lcp->lcp_nfree != 0)
   1835 			break;
   1836 	}
   1837 	if (lcp == NULL) {
   1838 		/* Nothing available - try to set up a free page. */
   1839 		if (lp->lp_cur == lp->lp_max) {
   1840 			mutex_exit(&lp->lp_lock);
   1841 			return ENOMEM;
   1842 		}
   1843 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1844 
   1845 		/*
   1846 		 * Wire the next page down in kernel space.  Since this
   1847 		 * is a new mapping, we must add a reference.
   1848 		 */
   1849 		uao = lp->lp_uao;
   1850 		(*uao->pgops->pgo_reference)(uao);
   1851 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1852 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1853 		    uao, lp->lp_cur, PAGE_SIZE,
   1854 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1855 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1856 		if (error != 0) {
   1857 			mutex_exit(&lp->lp_lock);
   1858 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1859 			(*uao->pgops->pgo_detach)(uao);
   1860 			return error;
   1861 		}
   1862 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1863 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1864 		if (error != 0) {
   1865 			mutex_exit(&lp->lp_lock);
   1866 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1867 			    lcp->lcp_kaddr + PAGE_SIZE);
   1868 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1869 			return error;
   1870 		}
   1871 		/* Prepare the page descriptor and link into the list. */
   1872 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1873 		lp->lp_cur += PAGE_SIZE;
   1874 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1875 		lcp->lcp_rotor = 0;
   1876 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1877 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1878 	}
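         	/*
         	 * Find a bitmap word with a free slot, starting from the rotor.
         	 * A set bit means the slot is free; claim the lowest set bit.
         	 */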
   1879 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1880 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1881 			i = 0;
   1882 	}
   1883 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1884 	lcp->lcp_bitmap[i] ^= (1U << bit);
   1885 	lcp->lcp_rotor = i;
   1886 	lcp->lcp_nfree--;
   1887 	l->l_lcpage = lcp;
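         	/* Each bitmap word covers 32 lwpctl slots, hence the shift by 5 below. */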
   1888 	offset = (i << 5) + bit;
   1889 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1890 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1891 	mutex_exit(&lp->lp_lock);
   1892 
   1893 	KPREEMPT_DISABLE(l);
   1894 	l->l_lwpctl->lc_curcpu = (int)cpu_index(curcpu());
   1895 	KPREEMPT_ENABLE(l);
   1896 
   1897 	return 0;
   1898 }
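
         /*
          * Consumer-side sketch (illustrative only; it assumes the _lwp_ctl(2)
          * interface and the LWPCTL_FEATURE_* flags from <sys/lwpctl.h>):
          * a user thread registers once and from then on reads the shared
          * page directly, with no further system calls.
          *
          *	struct lwpctl *lc;
          *
          *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR,
          *	    &lc) == 0) {
          *		... lc->lc_curcpu names the CPU the LWP last ran on;
          *		    lc->lc_pctr advances when that may have changed ...
          *	}
          */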
   1899 
   1900 /*
   1901  * Free an lwpctl structure back to the per-process list.
   1902  */
   1903 void
   1904 lwp_ctl_free(lwp_t *l)
   1905 {
   1906 	struct proc *p = l->l_proc;
   1907 	lcproc_t *lp;
   1908 	lcpage_t *lcp;
   1909 	u_int map, offset;
   1910 
    1911 	/* don't free an lwp context we borrowed for vfork */
   1912 	if (p->p_lflag & PL_PPWAIT) {
   1913 		l->l_lwpctl = NULL;
   1914 		return;
   1915 	}
   1916 
   1917 	lp = p->p_lwpctl;
   1918 	KASSERT(lp != NULL);
   1919 
   1920 	lcp = l->l_lcpage;
   1921 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1922 	KASSERT(offset < LWPCTL_PER_PAGE);
   1923 
   1924 	mutex_enter(&lp->lp_lock);
   1925 	lcp->lcp_nfree++;
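         	/* Mark the slot free again: word = offset / 32, bit = offset % 32. */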
   1926 	map = offset >> 5;
   1927 	lcp->lcp_bitmap[map] |= (1U << (offset & 31));
   1928 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1929 		lcp->lcp_rotor = map;
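         	/*
         	 * If the page at the head of the list is full, move this page
         	 * (which now has a free slot) to the head so lwp_ctl_alloc()
         	 * finds it first.
         	 */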
   1930 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1931 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1932 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1933 	}
   1934 	mutex_exit(&lp->lp_lock);
   1935 }
   1936 
   1937 /*
   1938  * Process is exiting; tear down lwpctl state.  This can only be safely
   1939  * called by the last LWP in the process.
   1940  */
   1941 void
   1942 lwp_ctl_exit(void)
   1943 {
   1944 	lcpage_t *lcp, *next;
   1945 	lcproc_t *lp;
   1946 	proc_t *p;
   1947 	lwp_t *l;
   1948 
   1949 	l = curlwp;
   1950 	l->l_lwpctl = NULL;
   1951 	l->l_lcpage = NULL;
   1952 	p = l->l_proc;
   1953 	lp = p->p_lwpctl;
   1954 
   1955 	KASSERT(lp != NULL);
   1956 	KASSERT(p->p_nlwps == 1);
   1957 
   1958 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1959 		next = TAILQ_NEXT(lcp, lcp_chain);
   1960 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1961 		    lcp->lcp_kaddr + PAGE_SIZE);
   1962 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1963 	}
   1964 
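         	/*
         	 * Remove the user-space window.  This releases the reference the
         	 * vmspace held on the UAO; the kernel windows dropped theirs above.
         	 */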
   1965 	if (lp->lp_uao != NULL) {
   1966 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1967 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1968 	}
   1969 
   1970 	mutex_destroy(&lp->lp_lock);
   1971 	kmem_free(lp, sizeof(*lp));
   1972 	p->p_lwpctl = NULL;
   1973 }
   1974 
   1975 /*
   1976  * Return the current LWP's "preemption counter".  Used to detect
   1977  * preemption across operations that can tolerate preemption without
   1978  * crashing, but which may generate incorrect results if preempted.
   1979  */
   1980 uint64_t
   1981 lwp_pctr(void)
   1982 {
   1983 
   1984 	return curlwp->l_ncsw;
   1985 }
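
         /*
          * A sketch of the usual retry pattern (illustrative, not taken from a
          * specific caller): sample the counter, do the work that must not span
          * a preemption, and retry if the counter moved.
          *
          *	uint64_t pctr;
          *	do {
          *		pctr = lwp_pctr();
          *		... work that tolerates, but must detect, preemption ...
          *	} while (pctr != lwp_pctr());
          */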
   1986 
   1987 /*
   1988  * Set an LWP's private data pointer.
   1989  */
   1990 int
   1991 lwp_setprivate(struct lwp *l, void *ptr)
   1992 {
   1993 	int error = 0;
   1994 
   1995 	l->l_private = ptr;
   1996 #ifdef __HAVE_CPU_LWP_SETPRIVATE
   1997 	error = cpu_lwp_setprivate(l, ptr);
   1998 #endif
   1999 	return error;
   2000 }
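
         /*
          * Note (not part of the original commentary): l_private normally holds
          * the thread's TLS pointer, as set from user space with
          * _lwp_setprivate(2); where __HAVE_CPU_LWP_SETPRIVATE is defined, the
          * MD cpu_lwp_setprivate() hook also mirrors it into hardware state.
          */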
   2001 
   2002 #if defined(DDB)
   2003 #include <machine/pcb.h>
   2004 
   2005 void
   2006 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   2007 {
   2008 	lwp_t *l;
   2009 
   2010 	LIST_FOREACH(l, &alllwp, l_list) {
   2011 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   2012 
   2013 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   2014 			continue;
   2015 		}
   2016 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   2017 		    (void *)addr, (void *)stack,
   2018 		    (size_t)(addr - stack), l);
   2019 	}
   2020 }
   2021 #endif /* defined(DDB) */
   2022