      1 /*	$NetBSD: kern_lwp.c,v 1.200 2019/05/03 22:34:21 kamil Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams, and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	Lightweight processes (LWPs) are the basic unit or thread of
     36  *	execution within the kernel.  The core state of an LWP is described
     37  *	by "struct lwp", also known as lwp_t.
     38  *
     39  *	Each LWP is contained within a process (described by "struct proc").
     40  *	Every process contains at least one LWP, but may contain more.  The
     41  *	process describes attributes shared among all of its LWPs such as a
     42  *	private address space, global execution state (stopped, active,
     43  *	zombie, ...), signal disposition and so on.  On a multiprocessor
     44  *	machine, multiple LWPs may be executing concurrently in the kernel.
     45  *
     46  * Execution states
     47  *
     48  *	At any given time, an LWP has overall state that is described by
     49  *	lwp::l_stat.  The states are broken into two sets below.  The first
     50  *	set is guaranteed to represent the absolute, current state of the
     51  *	LWP:
     52  *
     53  *	LSONPROC
     54  *
     55  *		On processor: the LWP is executing on a CPU, either in the
     56  *		kernel or in user space.
     57  *
     58  *	LSRUN
     59  *
     60  *		Runnable: the LWP is parked on a run queue, and may soon be
     61  *		chosen to run by an idle processor, or by a processor that
     62  *		has been asked to preempt a currently running but lower
     63  *		priority LWP.
     64  *
     65  *	LSIDL
     66  *
     67  *		Idle: the LWP has been created but has not yet executed,
     68  *		or it has ceased executing a unit of work and is waiting
     69  *		to be started again.
     70  *
     71  *	LSSUSPENDED:
     72  *
     73  *		Suspended: the LWP has had its execution suspended by
     74  *		another LWP in the same process using the _lwp_suspend()
     75  *		system call.  User-level LWPs also enter the suspended
     76  *		state when the system is shutting down.
     77  *
     78  *	The second set represents a "statement of intent" on behalf of the
     79  *	LWP.  The LWP may in fact be executing on a processor, or may be
     80  *	sleeping or idle.  It is expected to take the necessary action to
     81  *	stop executing or become "running" again within a short timeframe.
     82  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
     83  *	Importantly, it indicates that its state is tied to a CPU.
     84  *
     85  *	LSZOMB:
     86  *
     87  *		Dead or dying: the LWP has released most of its resources
     88  *		and is about to switch away into oblivion, or has already
     89  *		switched away.  When it switches away, its few remaining
     90  *		resources can be collected.
     91  *
     92  *	LSSLEEP:
     93  *
     94  *		Sleeping: the LWP has entered itself onto a sleep queue, and
     95  *		has switched away or will switch away shortly to allow other
     96  *		LWPs to run on the CPU.
     97  *
     98  *	LSSTOP:
     99  *
    100  *		Stopped: the LWP has been stopped as a result of a job
    101  *		control signal, or as a result of the ptrace() interface.
    102  *
    103  *		Stopped LWPs may run briefly within the kernel to handle
    104  *		signals that they receive, but will not return to user space
    105  *		until their process' state is changed away from stopped.
    106  *
    107  *		Single LWPs within a process cannot be set stopped
    108  *		selectively: all actions that can stop or continue LWPs
    109  *		occur at the process level.
    110  *
    111  * State transitions
    112  *
    113  *	Note that the LSSTOP state may only be set when returning to
    114  *	user space in userret(), or when sleeping interruptibly.  The
    115  *	LSSUSPENDED state may only be set in userret().  Before setting
    116  *	those states, we try to ensure that the LWPs will release all
    117  *	locks that they hold, and at a minimum try to ensure that the
    118  *	LWP can be set runnable again by a signal.
    119  *
    120  *	LWPs may transition states in the following ways:
    121  *
    122  *	 RUN -------> ONPROC		ONPROC -----> RUN
    123  *		    				    > SLEEP
    124  *		    				    > STOPPED
    125  *						    > SUSPENDED
    126  *						    > ZOMB
    127  *						    > IDL (special cases)
    128  *
    129  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    130  *	            > SLEEP
    131  *
    132  *	 SLEEP -----> ONPROC		IDL --------> RUN
    133  *		    > RUN			    > SUSPENDED
    134  *		    > STOPPED			    > STOPPED
    135  *						    > ONPROC (special cases)
    136  *
    137  *	Some state transitions are only possible with kernel threads (e.g.
    138  *	ONPROC -> IDL) and happen under tightly controlled circumstances
    139  *	free of unwanted side effects.
    140  *
    141  * Migration
    142  *
    143  *	Migration of threads from one CPU to another may be performed
    144  *	internally by the scheduler via the sched_takecpu() or
    145  *	sched_catchlwp() functions.  The universal lwp_migrate() function
    146  *	should be used for any other cases.  Subsystems in the kernel must
    147  *	be aware that the CPU of an LWP may change while it is not locked.
    148  *
    149  * Locking
    150  *
    151  *	The majority of fields in 'struct lwp' are covered by a single,
    152  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
    153  *	each field are documented in sys/lwp.h.
    154  *
    155  *	State transitions must be made with the LWP's general lock held,
    156  *	and may cause the LWP's lock pointer to change.  Manipulation of
    157  *	the general lock is not performed directly, but through calls to
    158  *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
    159  *	adaptive locks are not allowed to be released while the LWP's lock
    160  *	is being held (unlike for other spin-locks).
    161  *
    162  *	States and their associated locks:
    163  *
    164  *	LSONPROC, LSZOMB:
    165  *
    166  *		Always covered by spc_lwplock, which protects running LWPs.
    167  *		This is a per-CPU lock and matches lwp::l_cpu.
    168  *
    169  *	LSIDL, LSRUN:
    170  *
    171  *		Always covered by spc_mutex, which protects the run queues.
    172  *		This is a per-CPU lock and matches lwp::l_cpu.
    173  *
    174  *	LSSLEEP:
    175  *
    176  *		Covered by a lock associated with the sleep queue that the
    177  *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
    178  *
    179  *	LSSTOP, LSSUSPENDED:
    180  *
    181  *		If the LWP was previously sleeping (l_wchan != NULL), then
    182  *		l_mutex references the sleep queue lock.  If the LWP was
    183  *		runnable or on the CPU when halted, or has been removed from
    184  *		the sleep queue since halted, then the lock is spc_lwplock.
    185  *
    186  *	The lock order is as follows:
    187  *
    188  *		spc::spc_lwplock ->
    189  *		    sleeptab::st_mutex ->
    190  *			tschain_t::tc_mutex ->
    191  *			    spc::spc_mutex
    192  *
    193  *	Each process has a scheduler state lock (proc::p_lock), and a
    194  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    195  *	so on.  When an LWP is to be entered into or removed from one of the
    196  *	following states, p_lock must be held and the process wide counters
    197  *	adjusted:
    198  *
    199  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    200  *
    201  *	(But not always for kernel threads.  There are some special cases
    202  *	as mentioned above.  See kern_softint.c.)
    203  *
    204  *	Note that an LWP is considered running or likely to run soon if in
    205  *	one of the following states.  This affects the value of p_nrlwps:
    206  *
    207  *		LSRUN, LSONPROC, LSSLEEP
    208  *
    209  *	p_lock does not need to be held when transitioning among these
    210  *	three states, hence p_lock is rarely taken for state transitions.
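 *
 *	As a minimal sketch of the idiom described above (illustrative
 *	only; which fields are touched depends on the caller), a state
 *	transition is bracketed by the LWP's general lock:
 *
 *		lwp_lock(l);
 *		... examine or modify l->l_stat and other l_mutex-covered
 *		    fields ...
 *		lwp_unlock(l);		(or lwp_unlock_to() to lend a new lock)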
    211  */
    212 
    213 #include <sys/cdefs.h>
    214 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.200 2019/05/03 22:34:21 kamil Exp $");
    215 
    216 #include "opt_ddb.h"
    217 #include "opt_lockdebug.h"
    218 #include "opt_dtrace.h"
    219 
    220 #define _LWP_API_PRIVATE
    221 
    222 #include <sys/param.h>
    223 #include <sys/systm.h>
    224 #include <sys/cpu.h>
    225 #include <sys/pool.h>
    226 #include <sys/proc.h>
    227 #include <sys/syscallargs.h>
    228 #include <sys/syscall_stats.h>
    229 #include <sys/kauth.h>
    230 #include <sys/pserialize.h>
    231 #include <sys/sleepq.h>
    232 #include <sys/lockdebug.h>
    233 #include <sys/kmem.h>
    234 #include <sys/pset.h>
    235 #include <sys/intr.h>
    236 #include <sys/lwpctl.h>
    237 #include <sys/atomic.h>
    238 #include <sys/filedesc.h>
    239 #include <sys/fstrans.h>
    240 #include <sys/dtrace_bsd.h>
    241 #include <sys/sdt.h>
    242 #include <sys/xcall.h>
    243 #include <sys/uidinfo.h>
    244 #include <sys/sysctl.h>
    245 
    246 #include <uvm/uvm_extern.h>
    247 #include <uvm/uvm_object.h>
    248 
    249 static pool_cache_t	lwp_cache	__read_mostly;
    250 struct lwplist		alllwp		__cacheline_aligned;
    251 
    252 static void		lwp_dtor(void *, void *);
    253 
    254 /* DTrace proc provider probes */
    255 SDT_PROVIDER_DEFINE(proc);
    256 
    257 SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
    258 SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
    259 SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
    260 
    261 struct turnstile turnstile0;
    262 struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
    263 #ifdef LWP0_CPU_INFO
    264 	.l_cpu = LWP0_CPU_INFO,
    265 #endif
    266 #ifdef LWP0_MD_INITIALIZER
    267 	.l_md = LWP0_MD_INITIALIZER,
    268 #endif
    269 	.l_proc = &proc0,
    270 	.l_lid = 1,
    271 	.l_flag = LW_SYSTEM,
    272 	.l_stat = LSONPROC,
    273 	.l_ts = &turnstile0,
    274 	.l_syncobj = &sched_syncobj,
    275 	.l_refcnt = 1,
    276 	.l_priority = PRI_USER + NPRI_USER - 1,
    277 	.l_inheritedprio = -1,
    278 	.l_class = SCHED_OTHER,
    279 	.l_psid = PS_NONE,
    280 	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
    281 	.l_name = __UNCONST("swapper"),
    282 	.l_fd = &filedesc0,
    283 };
    284 
    285 static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
    286 
    287 /*
    288  * sysctl helper routine for kern.maxlwp. Ensures that the new
    289  * values are not too low or too high.
    290  */
    291 static int
    292 sysctl_kern_maxlwp(SYSCTLFN_ARGS)
    293 {
    294 	int error, nmaxlwp;
    295 	struct sysctlnode node;
    296 
    297 	nmaxlwp = maxlwp;
    298 	node = *rnode;
    299 	node.sysctl_data = &nmaxlwp;
    300 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    301 	if (error || newp == NULL)
    302 		return error;
    303 
    304 	if (nmaxlwp < 0 || nmaxlwp >= 65536)
    305 		return EINVAL;
    306 	if (nmaxlwp > cpu_maxlwp())
    307 		return EINVAL;
    308 	maxlwp = nmaxlwp;
    309 
    310 	return 0;
    311 }
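
/*
 * The "maxlwp" node is attached under CTL_KERN by sysctl_kern_lwp_setup()
 * below, so it is reachable from userland as "kern.maxlwp".  A minimal
 * userland sketch (illustrative only):
 *
 *	int maxlwp;
 *	size_t len = sizeof(maxlwp);
 *
 *	if (sysctlbyname("kern.maxlwp", &maxlwp, &len, NULL, 0) == -1)
 *		err(EXIT_FAILURE, "sysctlbyname");
 */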
    312 
    313 static void
    314 sysctl_kern_lwp_setup(void)
    315 {
    316 	struct sysctllog *clog = NULL;
    317 
    318 	sysctl_createv(&clog, 0, NULL, NULL,
    319 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    320 		       CTLTYPE_INT, "maxlwp",
    321 		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
    322 		       sysctl_kern_maxlwp, 0, NULL, 0,
    323 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    324 }
    325 
    326 void
    327 lwpinit(void)
    328 {
    329 
    330 	LIST_INIT(&alllwp);
    331 	lwpinit_specificdata();
    332 	lwp_sys_init();
    333 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
    334 	    "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);
    335 
    336 	maxlwp = cpu_maxlwp();
    337 	sysctl_kern_lwp_setup();
    338 }
    339 
    340 void
    341 lwp0_init(void)
    342 {
    343 	struct lwp *l = &lwp0;
    344 
    345 	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
    346 	KASSERT(l->l_lid == proc0.p_nlwpid);
    347 
    348 	LIST_INSERT_HEAD(&alllwp, l, l_list);
    349 
    350 	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
    351 	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
    352 	cv_init(&l->l_sigcv, "sigwait");
    353 	cv_init(&l->l_waitcv, "vfork");
    354 
    355 	kauth_cred_hold(proc0.p_cred);
    356 	l->l_cred = proc0.p_cred;
    357 
    358 	kdtrace_thread_ctor(NULL, l);
    359 	lwp_initspecific(l);
    360 
    361 	SYSCALL_TIME_LWP_INIT(l);
    362 }
    363 
    364 static void
    365 lwp_dtor(void *arg, void *obj)
    366 {
    367 	lwp_t *l = obj;
    368 	uint64_t where;
    369 	(void)l;
    370 
    371 	/*
    372 	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
    373 	 * calls will exit before memory of LWP is returned to the pool, where
    374 	 * KVA of LWP structure might be freed and re-used for other purposes.
    375 	 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
    376 	 * callers, therefore cross-call to all CPUs will do the job.  Also,
    377 	 * the value of l->l_cpu must be still valid at this point.
    378 	 */
    379 	KASSERT(l->l_cpu != NULL);
    380 	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
    381 	xc_wait(where);
    382 }
    383 
    384 /*
    385  * Set an LWP suspended.
    386  *
    387  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    388  * LWP before return.
    389  */
    390 int
    391 lwp_suspend(struct lwp *curl, struct lwp *t)
    392 {
    393 	int error;
    394 
    395 	KASSERT(mutex_owned(t->l_proc->p_lock));
    396 	KASSERT(lwp_locked(t, NULL));
    397 
    398 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    399 
    400 	/*
    401 	 * If the current LWP has been told to exit, we must not suspend anyone
    402 	 * else or deadlock could occur.  We won't return to userspace.
    403 	 */
    404 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
    405 		lwp_unlock(t);
    406 		return (EDEADLK);
    407 	}
    408 
    409 	error = 0;
    410 
    411 	switch (t->l_stat) {
    412 	case LSRUN:
    413 	case LSONPROC:
    414 		t->l_flag |= LW_WSUSPEND;
    415 		lwp_need_userret(t);
    416 		lwp_unlock(t);
    417 		break;
    418 
    419 	case LSSLEEP:
    420 		t->l_flag |= LW_WSUSPEND;
    421 
    422 		/*
    423 		 * Kick the LWP and try to get it to the kernel boundary
    424 		 * so that it will release any locks that it holds.
    425 		 * setrunnable() will release the lock.
    426 		 */
    427 		if ((t->l_flag & LW_SINTR) != 0)
    428 			setrunnable(t);
    429 		else
    430 			lwp_unlock(t);
    431 		break;
    432 
    433 	case LSSUSPENDED:
    434 		lwp_unlock(t);
    435 		break;
    436 
    437 	case LSSTOP:
    438 		t->l_flag |= LW_WSUSPEND;
    439 		setrunnable(t);
    440 		break;
    441 
    442 	case LSIDL:
    443 	case LSZOMB:
    444 		error = EINTR; /* It's what Solaris does..... */
    445 		lwp_unlock(t);
    446 		break;
    447 	}
    448 
    449 	return (error);
    450 }
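
/*
 * A sketch of the caller-side protocol for lwp_suspend() (illustrative
 * only; the lid lookup and error handling follow the rules stated above):
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) != NULL) {
 *		lwp_lock(t);
 *		error = lwp_suspend(curlwp, t);	(drops the LWP lock)
 *	} else
 *		error = ESRCH;
 *	mutex_exit(p->p_lock);
 */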
    451 
    452 /*
    453  * Restart a suspended LWP.
    454  *
    455  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    456  * LWP before return.
    457  */
    458 void
    459 lwp_continue(struct lwp *l)
    460 {
    461 
    462 	KASSERT(mutex_owned(l->l_proc->p_lock));
    463 	KASSERT(lwp_locked(l, NULL));
    464 
    465 	/* If rebooting or not suspended, then just bail out. */
    466 	if ((l->l_flag & LW_WREBOOT) != 0) {
    467 		lwp_unlock(l);
    468 		return;
    469 	}
    470 
    471 	l->l_flag &= ~LW_WSUSPEND;
    472 
    473 	if (l->l_stat != LSSUSPENDED) {
    474 		lwp_unlock(l);
    475 		return;
    476 	}
    477 
    478 	/* setrunnable() will release the lock. */
    479 	setrunnable(l);
    480 }
    481 
    482 /*
    483  * Restart a stopped LWP.
    484  *
    485  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
    486  * LWP before return.
    487  */
    488 void
    489 lwp_unstop(struct lwp *l)
    490 {
    491 	struct proc *p = l->l_proc;
    492 
    493 	KASSERT(mutex_owned(proc_lock));
    494 	KASSERT(mutex_owned(p->p_lock));
    495 
    496 	lwp_lock(l);
    497 
    498 	/* If not stopped, then just bail out. */
    499 	if (l->l_stat != LSSTOP) {
    500 		lwp_unlock(l);
    501 		return;
    502 	}
    503 
    504 	p->p_stat = SACTIVE;
    505 	p->p_sflag &= ~PS_STOPPING;
    506 
    507 	if (!p->p_waited)
    508 		p->p_pptr->p_nstopchild--;
    509 
    510 	if (l->l_wchan == NULL) {
    511 		/* setrunnable() will release the lock. */
    512 		setrunnable(l);
    513 	} else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) {
    514 		/* setrunnable() so we can receive the signal */
    515 		setrunnable(l);
    516 	} else {
    517 		l->l_stat = LSSLEEP;
    518 		p->p_nrlwps++;
    519 		lwp_unlock(l);
    520 	}
    521 }
    522 
    523 /*
    524  * Wait for an LWP within the current process to exit.  If 'lid' is
    525  * non-zero, we are waiting for a specific LWP.
    526  *
    527  * Must be called with p->p_lock held.
    528  */
    529 int
    530 lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
    531 {
    532 	const lwpid_t curlid = l->l_lid;
    533 	proc_t *p = l->l_proc;
    534 	lwp_t *l2;
    535 	int error;
    536 
    537 	KASSERT(mutex_owned(p->p_lock));
    538 
    539 	p->p_nlwpwait++;
    540 	l->l_waitingfor = lid;
    541 
    542 	for (;;) {
    543 		int nfound;
    544 
    545 		/*
    546 		 * Avoid a race between exit1() and sigexit(): if the
    547 		 * process is dumping core, then we need to bail out: call
    548 		 * into lwp_userret() where we will be suspended until the
    549 		 * deed is done.
    550 		 */
    551 		if ((p->p_sflag & PS_WCORE) != 0) {
    552 			mutex_exit(p->p_lock);
    553 			lwp_userret(l);
    554 			KASSERT(false);
    555 		}
    556 
    557 		/*
    558 		 * First off, drain any detached LWP that is waiting to be
    559 		 * reaped.
    560 		 */
    561 		while ((l2 = p->p_zomblwp) != NULL) {
    562 			p->p_zomblwp = NULL;
    563 			lwp_free(l2, false, false);/* releases proc mutex */
    564 			mutex_enter(p->p_lock);
    565 		}
    566 
    567 		/*
    568 		 * Now look for an LWP to collect.  If the whole process is
    569 		 * exiting, count detached LWPs as eligible to be collected,
    570 		 * but don't drain them here.
    571 		 */
    572 		nfound = 0;
    573 		error = 0;
    574 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    575 			/*
    576 			 * If a specific wait and the target is waiting on
    577 			 * us, then avoid deadlock.  This also traps LWPs
    578 			 * that try to wait on themselves.
    579 			 *
    580 			 * Note that this does not handle more complicated
    581 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
    582 			 * can still be killed so it is not a major problem.
    583 			 */
    584 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
    585 				error = EDEADLK;
    586 				break;
    587 			}
    588 			if (l2 == l)
    589 				continue;
    590 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    591 				nfound += exiting;
    592 				continue;
    593 			}
    594 			if (lid != 0) {
    595 				if (l2->l_lid != lid)
    596 					continue;
    597 				/*
    598 				 * Mark this LWP as the first waiter, if there
    599 				 * is no other.
    600 				 */
    601 				if (l2->l_waiter == 0)
    602 					l2->l_waiter = curlid;
    603 			} else if (l2->l_waiter != 0) {
    604 				/*
    605 				 * It already has a waiter - so don't
    606 				 * collect it.  If the waiter doesn't
    607 				 * grab it we'll get another chance
    608 				 * later.
    609 				 */
    610 				nfound++;
    611 				continue;
    612 			}
    613 			nfound++;
    614 
    615 			/* No need to lock the LWP in order to see LSZOMB. */
    616 			if (l2->l_stat != LSZOMB)
    617 				continue;
    618 
    619 			/*
    620 			 * We're no longer waiting.  Reset the "first waiter"
    621 			 * pointer on the target, in case it was us.
    622 			 */
    623 			l->l_waitingfor = 0;
    624 			l2->l_waiter = 0;
    625 			p->p_nlwpwait--;
    626 			if (departed)
    627 				*departed = l2->l_lid;
    628 			sched_lwp_collect(l2);
    629 
    630 			/* lwp_free() releases the proc lock. */
    631 			lwp_free(l2, false, false);
    632 			mutex_enter(p->p_lock);
    633 			return 0;
    634 		}
    635 
    636 		if (error != 0)
    637 			break;
    638 		if (nfound == 0) {
    639 			error = ESRCH;
    640 			break;
    641 		}
    642 
    643 		/*
    644 		 * Note: since the lock will be dropped, need to restart on
    645 		 * wakeup to run all LWPs again, e.g. there may be new LWPs.
    646 		 */
    647 		if (exiting) {
    648 			KASSERT(p->p_nlwps > 1);
    649 			cv_wait(&p->p_lwpcv, p->p_lock);
    650 			error = EAGAIN;
    651 			break;
    652 		}
    653 
    654 		/*
    655 		 * If all other LWPs are waiting for exits or suspends
    656 		 * and the supply of zombies and potential zombies is
    657 		 * exhausted, then we are about to deadlock.
    658 		 *
    659 		 * If the process is exiting (and this LWP is not the one
    660 		 * that is coordinating the exit) then bail out now.
    661 		 */
    662 		if ((p->p_sflag & PS_WEXIT) != 0 ||
    663 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
    664 			error = EDEADLK;
    665 			break;
    666 		}
    667 
    668 		/*
    669 		 * Sit around and wait for something to happen.  We'll be
    670 		 * awoken if any of the conditions examined change: if an
    671 		 * LWP exits, is collected, or is detached.
    672 		 */
    673 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
    674 			break;
    675 	}
    676 
    677 	/*
    678 	 * We didn't find any LWPs to collect, we may have received a
    679 	 * signal, or some other condition has caused us to bail out.
    680 	 *
    681 	 * If waiting on a specific LWP, clear the waiters marker: some
    682 	 * other LWP may want it.  Then, kick all the remaining waiters
    683 	 * so that they can re-check for zombies and for deadlock.
    684 	 */
    685 	if (lid != 0) {
    686 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    687 			if (l2->l_lid == lid) {
    688 				if (l2->l_waiter == curlid)
    689 					l2->l_waiter = 0;
    690 				break;
    691 			}
    692 		}
    693 	}
    694 	p->p_nlwpwait--;
    695 	l->l_waitingfor = 0;
    696 	cv_broadcast(&p->p_lwpcv);
    697 
    698 	return error;
    699 }
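
/*
 * Callers of lwp_wait() hold p->p_lock across the call, roughly (a
 * sketch; error handling elided):
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait(l, lid, &departed, false);
 *	mutex_exit(p->p_lock);
 */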
    700 
    701 static lwpid_t
    702 lwp_find_free_lid(lwpid_t try_lid, lwp_t * new_lwp, proc_t *p)
    703 {
    704 	#define LID_SCAN (1u << 31)
    705 	lwp_t *scan, *free_before;
    706 	lwpid_t nxt_lid;
    707 
    708 	/*
    709 	 * We want the first unused lid greater than or equal to
    710 	 * try_lid (modulo 2^31).
    711 	 * (If nothing else, ld.elf_so doesn't want an lwpid with the top bit set.)
    712 	 * We must not return 0, and avoiding 'LID_SCAN - 1' makes
    713 	 * the outer test easier.
    714 	 * This would be much easier if the list were sorted in
    715 	 * increasing order.
    716 	 * The list is kept sorted in decreasing order.
    717 	 * This code is only used after a process has generated 2^31 LWPs.
    718 	 *
    719 	 * Code assumes it can always find an id.
    720 	 */
    721 
    722 	try_lid &= LID_SCAN - 1;
    723 	if (try_lid <= 1)
    724 		try_lid = 2;
    725 
    726 	free_before = NULL;
    727 	nxt_lid = LID_SCAN - 1;
    728 	LIST_FOREACH(scan, &p->p_lwps, l_sibling) {
    729 		if (scan->l_lid != nxt_lid) {
    730 			/* There are available lids before this entry */
    731 			free_before = scan;
    732 			if (try_lid > scan->l_lid)
    733 				break;
    734 		}
    735 		if (try_lid == scan->l_lid) {
    736 			/* The ideal lid is busy, take a higher one */
    737 			if (free_before != NULL) {
    738 				try_lid = free_before->l_lid + 1;
    739 				break;
    740 			}
    741 			/* No higher ones, reuse low numbers */
    742 			try_lid = 2;
    743 		}
    744 
    745 		nxt_lid = scan->l_lid - 1;
    746 		if (LIST_NEXT(scan, l_sibling) == NULL) {
    747 		    /* The value we have is lower than any existing lwp */
    748 		    LIST_INSERT_AFTER(scan, new_lwp, l_sibling);
    749 		    return try_lid;
    750 		}
    751 	}
    752 
    753 	LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling);
    754 	return try_lid;
    755 }
    756 
    757 /*
    758  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    759  * The new LWP is created in state LSIDL and must be set running,
    760  * suspended, or stopped by the caller.
    761  */
    762 int
    763 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
    764     void *stack, size_t stacksize, void (*func)(void *), void *arg,
    765     lwp_t **rnewlwpp, int sclass, const sigset_t *sigmask,
    766     const stack_t *sigstk)
    767 {
    768 	struct lwp *l2, *isfree;
    769 	turnstile_t *ts;
    770 	lwpid_t lid;
    771 
    772 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
    773 
    774 	/*
    775 	 * Enforce limits, excluding the first lwp and kthreads.
    776 	 */
    777 	if (p2->p_nlwps != 0 && p2 != &proc0) {
    778 		uid_t uid = kauth_cred_getuid(l1->l_cred);
    779 		int count = chglwpcnt(uid, 1);
    780 		if (__predict_false(count >
    781 		    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
    782 			if (kauth_authorize_process(l1->l_cred,
    783 			    KAUTH_PROCESS_RLIMIT, p2,
    784 			    KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
    785 			    &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
    786 			    != 0) {
    787 				(void)chglwpcnt(uid, -1);
    788 				return EAGAIN;
    789 			}
    790 		}
    791 	}
    792 
    793 	/*
    794 	 * First off, reap any detached LWP waiting to be collected.
    795 	 * We can re-use its LWP structure and turnstile.
    796 	 */
    797 	isfree = NULL;
    798 	if (p2->p_zomblwp != NULL) {
    799 		mutex_enter(p2->p_lock);
    800 		if ((isfree = p2->p_zomblwp) != NULL) {
    801 			p2->p_zomblwp = NULL;
    802 			lwp_free(isfree, true, false);/* releases proc mutex */
    803 		} else
    804 			mutex_exit(p2->p_lock);
    805 	}
    806 	if (isfree == NULL) {
    807 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
    808 		memset(l2, 0, sizeof(*l2));
    809 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
    810 		SLIST_INIT(&l2->l_pi_lenders);
    811 	} else {
    812 		l2 = isfree;
    813 		ts = l2->l_ts;
    814 		KASSERT(l2->l_inheritedprio == -1);
    815 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
    816 		memset(l2, 0, sizeof(*l2));
    817 		l2->l_ts = ts;
    818 	}
    819 
    820 	l2->l_stat = LSIDL;
    821 	l2->l_proc = p2;
    822 	l2->l_refcnt = 1;
    823 	l2->l_class = sclass;
    824 
    825 	/*
    826 	 * If vfork(), we want the LWP to run fast and on the same CPU
    827 	 * as its parent, so that it can reuse the VM context and cache
    828 	 * footprint on the local CPU.
    829 	 */
    830 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
    831 	l2->l_kpribase = PRI_KERNEL;
    832 	l2->l_priority = l1->l_priority;
    833 	l2->l_inheritedprio = -1;
    834 	l2->l_protectprio = -1;
    835 	l2->l_auxprio = -1;
    836 	l2->l_flag = 0;
    837 	l2->l_pflag = LP_MPSAFE;
    838 	TAILQ_INIT(&l2->l_ld_locks);
    839 	l2->l_psrefs = 0;
    840 
    841 	/*
    842 	 * For vfork, borrow parent's lwpctl context if it exists.
    843 	 * This also causes us to return via lwp_userret.
    844 	 */
    845 	if (flags & LWP_VFORK && l1->l_lwpctl) {
    846 		l2->l_lwpctl = l1->l_lwpctl;
    847 		l2->l_flag |= LW_LWPCTL;
    848 	}
    849 
    850 	/*
    851 	 * If not the first LWP in the process, grab a reference to the
    852 	 * descriptor table.
    853 	 */
    854 	l2->l_fd = p2->p_fd;
    855 	if (p2->p_nlwps != 0) {
    856 		KASSERT(l1->l_proc == p2);
    857 		fd_hold(l2);
    858 	} else {
    859 		KASSERT(l1->l_proc != p2);
    860 	}
    861 
    862 	if (p2->p_flag & PK_SYSTEM) {
    863 		/* Mark it as a system LWP. */
    864 		l2->l_flag |= LW_SYSTEM;
    865 	}
    866 
    867 	kpreempt_disable();
    868 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
    869 	l2->l_cpu = l1->l_cpu;
    870 	kpreempt_enable();
    871 
    872 	kdtrace_thread_ctor(NULL, l2);
    873 	lwp_initspecific(l2);
    874 	sched_lwp_fork(l1, l2);
    875 	lwp_update_creds(l2);
    876 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
    877 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
    878 	cv_init(&l2->l_sigcv, "sigwait");
    879 	cv_init(&l2->l_waitcv, "vfork");
    880 	l2->l_syncobj = &sched_syncobj;
    881 
    882 	if (rnewlwpp != NULL)
    883 		*rnewlwpp = l2;
    884 
    885 	/*
    886 	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
    887 	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
    888 	 */
    889 	pcu_save_all(l1);
    890 
    891 	uvm_lwp_setuarea(l2, uaddr);
    892 	uvm_lwp_fork(l1, l2, stack, stacksize, func, (arg != NULL) ? arg : l2);
    893 
    894 	if ((flags & LWP_PIDLID) != 0) {
    895 		lid = proc_alloc_pid(p2);
    896 		l2->l_pflag |= LP_PIDLID;
    897 	} else {
    898 		lid = 0;
    899 	}
    900 
    901 	mutex_enter(p2->p_lock);
    902 
    903 	if ((flags & LWP_DETACHED) != 0) {
    904 		l2->l_prflag = LPR_DETACHED;
    905 		p2->p_ndlwps++;
    906 	} else
    907 		l2->l_prflag = 0;
    908 
    909 	l2->l_sigstk = *sigstk;
    910 	l2->l_sigmask = *sigmask;
    911 	TAILQ_INIT(&l2->l_sigpend.sp_info);
    912 	sigemptyset(&l2->l_sigpend.sp_set);
    913 
    914 	if (__predict_true(lid == 0)) {
    915 		/*
    916 		 * XXX: l_lid values are expected to be unique (for a process);
    917 		 * if LWP_PIDLID is sometimes set this won't be true.
    918 		 * Once 2^31 threads have been allocated we have to
    919 		 * scan to ensure we allocate a unique value.
    920 		 */
    921 		lid = ++p2->p_nlwpid;
    922 		if (__predict_false(lid & LID_SCAN)) {
    923 			lid = lwp_find_free_lid(lid, l2, p2);
    924 			p2->p_nlwpid = lid | LID_SCAN;
    925 			/* l2 has been inserted into p_lwps in order */
    926 			goto skip_insert;
    927 		}
    928 		p2->p_nlwpid = lid;
    929 	}
    930 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    931     skip_insert:
    932 	l2->l_lid = lid;
    933 	p2->p_nlwps++;
    934 	p2->p_nrlwps++;
    935 
    936 	KASSERT(l2->l_affinity == NULL);
    937 
    938 	if ((p2->p_flag & PK_SYSTEM) == 0) {
    939 		/* Inherit the affinity mask. */
    940 		if (l1->l_affinity) {
    941 			/*
    942 			 * Note that we hold the state lock while inheriting
    943 			 * the affinity to avoid race with sched_setaffinity().
    944 			 */
    945 			lwp_lock(l1);
    946 			if (l1->l_affinity) {
    947 				kcpuset_use(l1->l_affinity);
    948 				l2->l_affinity = l1->l_affinity;
    949 			}
    950 			lwp_unlock(l1);
    951 		}
    952 		lwp_lock(l2);
    953 		/* Inherit a processor-set */
    954 		l2->l_psid = l1->l_psid;
    955 		/* Look for a CPU to start */
    956 		l2->l_cpu = sched_takecpu(l2);
    957 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
    958 	}
    959 	mutex_exit(p2->p_lock);
    960 
    961 	SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
    962 
    963 	mutex_enter(proc_lock);
    964 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    965 	mutex_exit(proc_lock);
    966 
    967 	SYSCALL_TIME_LWP_INIT(l2);
    968 
    969 	if (p2->p_emul->e_lwp_fork)
    970 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    971 
    972 	return (0);
    973 }
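
/*
 * A caller-side sketch (the argument values are illustrative; see the
 * _lwp_create() and kthread_create() paths for real examples): the uarea
 * is allocated up front, lwp_create() builds the LWP in LSIDL, and the
 * caller is then responsible for making it run, suspend or stop.
 *
 *	vaddr_t uaddr = uvm_uarea_alloc();
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, SCHED_OTHER, &curlwp->l_sigmask, &curlwp->l_sigstk);
 *	if (error == 0) {
 *		... set l2 running, suspended or stopped as required ...
 *	}
 */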
    974 
    975 /*
    976  * Called by MD code when a new LWP begins execution.  Must be called
    977  * with the previous LWP locked (so at splsched), or if there is no
    978  * previous LWP, at splsched.
    979  */
    980 void
    981 lwp_startup(struct lwp *prev, struct lwp *new_lwp)
    982 {
    983 	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
    984 
    985 	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
    986 
    987 	KASSERT(kpreempt_disabled());
    988 	if (prev != NULL) {
    989 		/*
    990 		 * Normalize the count of the spin-mutexes; it was
    991 		 * increased in mi_switch().  Unmark the state of
    992 		 * context switch - it is finished for the previous LWP.
    993 		 */
    994 		curcpu()->ci_mtx_count++;
    995 		membar_exit();
    996 		prev->l_ctxswtch = 0;
    997 	}
    998 	KPREEMPT_DISABLE(new_lwp);
    999 	if (__predict_true(new_lwp->l_proc->p_vmspace))
   1000 		pmap_activate(new_lwp);
   1001 	spl0();
   1002 
   1003 	/* Note trip through cpu_switchto(). */
   1004 	pserialize_switchpoint();
   1005 
   1006 	LOCKDEBUG_BARRIER(NULL, 0);
   1007 	KPREEMPT_ENABLE(new_lwp);
   1008 	if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
   1009 		KERNEL_LOCK(1, new_lwp);
   1010 	}
   1011 }
   1012 
   1013 /*
   1014  * Exit an LWP.
   1015  */
   1016 void
   1017 lwp_exit(struct lwp *l)
   1018 {
   1019 	struct proc *p = l->l_proc;
   1020 	struct lwp *l2;
   1021 	bool current;
   1022 
   1023 	current = (l == curlwp);
   1024 
   1025 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
   1026 	KASSERT(p == curproc);
   1027 
   1028 	SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
   1029 
   1030 	/*
   1031 	 * Verify that we hold no locks other than the kernel lock.
   1032 	 */
   1033 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
   1034 
   1035 	/*
   1036 	 * If we are the last live LWP in a process, we need to exit the
   1037 	 * entire process.  We do so with an exit status of zero, because
   1038 	 * it's a "controlled" exit, and because that's what Solaris does.
   1039 	 *
   1040 	 * We are not quite a zombie yet, but for accounting purposes we
   1041 	 * must increment the count of zombies here.
   1042 	 *
   1043 	 * Note: the last LWP's specificdata will be deleted here.
   1044 	 */
   1045 	mutex_enter(p->p_lock);
   1046 	if (p->p_nlwps - p->p_nzlwps == 1) {
   1047 		KASSERT(current == true);
   1048 		KASSERT(p != &proc0);
   1049 		/* XXXSMP kernel_lock not held */
   1050 		exit1(l, 0, 0);
   1051 		/* NOTREACHED */
   1052 	}
   1053 	p->p_nzlwps++;
   1054 	mutex_exit(p->p_lock);
   1055 
   1056 	if (p->p_emul->e_lwp_exit)
   1057 		(*p->p_emul->e_lwp_exit)(l);
   1058 
   1059 	/* Drop filedesc reference. */
   1060 	fd_free();
   1061 
   1062 	/* Release fstrans private data. */
   1063 	fstrans_lwp_dtor(l);
   1064 
   1065 	/* Delete the specificdata while it's still safe to sleep. */
   1066 	lwp_finispecific(l);
   1067 
   1068 	/*
   1069 	 * Release our cached credentials.
   1070 	 */
   1071 	kauth_cred_free(l->l_cred);
   1072 	callout_destroy(&l->l_timeout_ch);
   1073 
   1074 	/*
   1075 	 * If traced, report LWP exit event to the debugger.
   1076 	 *
   1077 	 * Remove the LWP from the global list.
   1078 	 * Free its LID from the PID namespace if needed.
   1079 	 */
   1080 	mutex_enter(proc_lock);
   1081 
   1082 	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_EXIT)) ==
   1083 	    (PSL_TRACED|PSL_TRACELWP_EXIT)) {
   1084 		mutex_enter(p->p_lock);
   1085 		p->p_lwp_exited = l->l_lid;
   1086 		eventswitch(TRAP_LWP);
   1087 		mutex_enter(proc_lock);
   1088 	}
   1089 
   1090 	LIST_REMOVE(l, l_list);
   1091 	if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
   1092 		proc_free_pid(l->l_lid);
   1093 	}
   1094 	mutex_exit(proc_lock);
   1095 
   1096 	/*
   1097 	 * Get rid of all references to the LWP that others (e.g. procfs)
   1098 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
   1099 	 * mark it waiting for collection in the proc structure.  Note that
   1100 	 * before we can do that, we need to free any other dead, detached
   1101 	 * LWP waiting to meet its maker.
   1102 	 */
   1103 	mutex_enter(p->p_lock);
   1104 	lwp_drainrefs(l);
   1105 
   1106 	if ((l->l_prflag & LPR_DETACHED) != 0) {
   1107 		while ((l2 = p->p_zomblwp) != NULL) {
   1108 			p->p_zomblwp = NULL;
   1109 			lwp_free(l2, false, false);/* releases proc mutex */
   1110 			mutex_enter(p->p_lock);
   1111 			l->l_refcnt++;
   1112 			lwp_drainrefs(l);
   1113 		}
   1114 		p->p_zomblwp = l;
   1115 	}
   1116 
   1117 	/*
   1118 	 * If we find a pending signal for the process and we have been
   1119 	 * asked to check for signals, then we lose: arrange to have
   1120 	 * all other LWPs in the process check for signals.
   1121 	 */
   1122 	if ((l->l_flag & LW_PENDSIG) != 0 &&
   1123 	    firstsig(&p->p_sigpend.sp_set) != 0) {
   1124 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
   1125 			lwp_lock(l2);
   1126 			l2->l_flag |= LW_PENDSIG;
   1127 			lwp_unlock(l2);
   1128 		}
   1129 	}
   1130 
   1131 	/*
   1132 	 * Release any PCU resources before becoming a zombie.
   1133 	 */
   1134 	pcu_discard_all(l);
   1135 
   1136 	lwp_lock(l);
   1137 	l->l_stat = LSZOMB;
   1138 	if (l->l_name != NULL) {
   1139 		strcpy(l->l_name, "(zombie)");
   1140 	}
   1141 	lwp_unlock(l);
   1142 	p->p_nrlwps--;
   1143 	cv_broadcast(&p->p_lwpcv);
   1144 	if (l->l_lwpctl != NULL)
   1145 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
   1146 	mutex_exit(p->p_lock);
   1147 
   1148 	/*
   1149 	 * We can no longer block.  At this point, lwp_free() may already
   1150 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
   1151 	 *
   1152 	 * Free MD LWP resources.
   1153 	 */
   1154 	cpu_lwp_free(l, 0);
   1155 
   1156 	if (current) {
   1157 		pmap_deactivate(l);
   1158 
   1159 		/*
   1160 		 * Release the kernel lock, and switch away into
   1161 		 * oblivion.
   1162 		 */
   1163 #ifdef notyet
   1164 		/* XXXSMP hold in lwp_userret() */
   1165 		KERNEL_UNLOCK_LAST(l);
   1166 #else
   1167 		KERNEL_UNLOCK_ALL(l, NULL);
   1168 #endif
   1169 		lwp_exit_switchaway(l);
   1170 	}
   1171 }
   1172 
   1173 /*
   1174  * Free a dead LWP's remaining resources.
   1175  *
   1176  * XXXLWP limits.
   1177  */
   1178 void
   1179 lwp_free(struct lwp *l, bool recycle, bool last)
   1180 {
   1181 	struct proc *p = l->l_proc;
   1182 	struct rusage *ru;
   1183 	ksiginfoq_t kq;
   1184 
   1185 	KASSERT(l != curlwp);
   1186 	KASSERT(last || mutex_owned(p->p_lock));
   1187 
   1188 	/*
   1189 	 * We use the process credentials instead of the lwp credentials here
   1190 	 * because the lwp credentials may be cached (just after a setuid call)
   1191 	 * and we don't want to pay for syncing, since the lwp is going away
   1192 	 * anyway.
   1193 	 */
   1194 	if (p != &proc0 && p->p_nlwps != 1)
   1195 		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
   1196 	/*
   1197 	 * If this was not the last LWP in the process, then adjust
   1198 	 * counters and unlock.
   1199 	 */
   1200 	if (!last) {
   1201 		/*
   1202 		 * Add the LWP's run time to the process' base value.
   1203 		 * This needs to co-incide with coming off p_lwps.
   1204 		 * This needs to coincide with coming off p_lwps.
   1205 		bintime_add(&p->p_rtime, &l->l_rtime);
   1206 		p->p_pctcpu += l->l_pctcpu;
   1207 		ru = &p->p_stats->p_ru;
   1208 		ruadd(ru, &l->l_ru);
   1209 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
   1210 		ru->ru_nivcsw += l->l_nivcsw;
   1211 		LIST_REMOVE(l, l_sibling);
   1212 		p->p_nlwps--;
   1213 		p->p_nzlwps--;
   1214 		if ((l->l_prflag & LPR_DETACHED) != 0)
   1215 			p->p_ndlwps--;
   1216 
   1217 		/*
   1218 		 * Have any LWPs sleeping in lwp_wait() recheck for
   1219 		 * deadlock.
   1220 		 */
   1221 		cv_broadcast(&p->p_lwpcv);
   1222 		mutex_exit(p->p_lock);
   1223 	}
   1224 
   1225 #ifdef MULTIPROCESSOR
   1226 	/*
   1227 	 * In the unlikely event that the LWP is still on the CPU,
   1228 	 * then spin until it has switched away.  We need to release
   1229 	 * all locks to avoid deadlock against interrupt handlers on
   1230 	 * the target CPU.
   1231 	 */
   1232 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
   1233 		int count;
   1234 		(void)count; /* XXXgcc */
   1235 		KERNEL_UNLOCK_ALL(curlwp, &count);
   1236 		while ((l->l_pflag & LP_RUNNING) != 0 ||
   1237 		    l->l_cpu->ci_curlwp == l)
   1238 			SPINLOCK_BACKOFF_HOOK;
   1239 		KERNEL_LOCK(count, curlwp);
   1240 	}
   1241 #endif
   1242 
   1243 	/*
   1244 	 * Destroy the LWP's remaining signal information.
   1245 	 */
   1246 	ksiginfo_queue_init(&kq);
   1247 	sigclear(&l->l_sigpend, NULL, &kq);
   1248 	ksiginfo_queue_drain(&kq);
   1249 	cv_destroy(&l->l_sigcv);
   1250 	cv_destroy(&l->l_waitcv);
   1251 
   1252 	/*
   1253 	 * Free lwpctl structure and affinity.
   1254 	 */
   1255 	if (l->l_lwpctl) {
   1256 		lwp_ctl_free(l);
   1257 	}
   1258 	if (l->l_affinity) {
   1259 		kcpuset_unuse(l->l_affinity, NULL);
   1260 		l->l_affinity = NULL;
   1261 	}
   1262 
   1263 	/*
   1264 	 * Free the LWP's turnstile and the LWP structure itself unless the
   1265 	 * caller wants to recycle them.  Also, free the scheduler specific
   1266 	 * data.
   1267 	 *
   1268 	 * We can't return turnstile0 to the pool (it didn't come from it),
   1269 	 * so if it comes up just drop it quietly and move on.
   1270 	 *
   1271 	 * We don't recycle the VM resources at this time.
   1272 	 */
   1273 
   1274 	if (!recycle && l->l_ts != &turnstile0)
   1275 		pool_cache_put(turnstile_cache, l->l_ts);
   1276 	if (l->l_name != NULL)
   1277 		kmem_free(l->l_name, MAXCOMLEN);
   1278 
   1279 	cpu_lwp_free2(l);
   1280 	uvm_lwp_exit(l);
   1281 
   1282 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
   1283 	KASSERT(l->l_inheritedprio == -1);
   1284 	KASSERT(l->l_blcnt == 0);
   1285 	kdtrace_thread_dtor(NULL, l);
   1286 	if (!recycle)
   1287 		pool_cache_put(lwp_cache, l);
   1288 }
   1289 
   1290 /*
   1291  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1292  */
   1293 void
   1294 lwp_migrate(lwp_t *l, struct cpu_info *tci)
   1295 {
   1296 	struct schedstate_percpu *tspc;
   1297 	int lstat = l->l_stat;
   1298 
   1299 	KASSERT(lwp_locked(l, NULL));
   1300 	KASSERT(tci != NULL);
   1301 
   1302 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
   1303 	if ((l->l_pflag & LP_RUNNING) != 0) {
   1304 		lstat = LSONPROC;
   1305 	}
   1306 
   1307 	/*
   1308 	 * The destination CPU may be changed while a previous migration
   1309 	 * has not yet finished.
   1310 	 */
   1311 	if (l->l_target_cpu != NULL) {
   1312 		l->l_target_cpu = tci;
   1313 		lwp_unlock(l);
   1314 		return;
   1315 	}
   1316 
   1317 	/* Nothing to do if trying to migrate to the same CPU */
   1318 	if (l->l_cpu == tci) {
   1319 		lwp_unlock(l);
   1320 		return;
   1321 	}
   1322 
   1323 	KASSERT(l->l_target_cpu == NULL);
   1324 	tspc = &tci->ci_schedstate;
   1325 	switch (lstat) {
   1326 	case LSRUN:
   1327 		l->l_target_cpu = tci;
   1328 		break;
   1329 	case LSIDL:
   1330 		l->l_cpu = tci;
   1331 		lwp_unlock_to(l, tspc->spc_mutex);
   1332 		return;
   1333 	case LSSLEEP:
   1334 		l->l_cpu = tci;
   1335 		break;
   1336 	case LSSTOP:
   1337 	case LSSUSPENDED:
   1338 		l->l_cpu = tci;
   1339 		if (l->l_wchan == NULL) {
   1340 			lwp_unlock_to(l, tspc->spc_lwplock);
   1341 			return;
   1342 		}
   1343 		break;
   1344 	case LSONPROC:
   1345 		l->l_target_cpu = tci;
   1346 		spc_lock(l->l_cpu);
   1347 		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
   1348 		spc_unlock(l->l_cpu);
   1349 		break;
   1350 	}
   1351 	lwp_unlock(l);
   1352 }
   1353 
   1354 /*
   1355  * Find the LWP in the process.  Arguments may be zero, in which case
   1356  * the calling process and the first LWP in the list will be used.
   1357  * On success, returns with the proc locked.
   1358  */
   1359 struct lwp *
   1360 lwp_find2(pid_t pid, lwpid_t lid)
   1361 {
   1362 	proc_t *p;
   1363 	lwp_t *l;
   1364 
   1365 	/* Find the process. */
   1366 	if (pid != 0) {
   1367 		mutex_enter(proc_lock);
   1368 		p = proc_find(pid);
   1369 		if (p == NULL) {
   1370 			mutex_exit(proc_lock);
   1371 			return NULL;
   1372 		}
   1373 		mutex_enter(p->p_lock);
   1374 		mutex_exit(proc_lock);
   1375 	} else {
   1376 		p = curlwp->l_proc;
   1377 		mutex_enter(p->p_lock);
   1378 	}
   1379 	/* Find the thread. */
   1380 	if (lid != 0) {
   1381 		l = lwp_find(p, lid);
   1382 	} else {
   1383 		l = LIST_FIRST(&p->p_lwps);
   1384 	}
   1385 	if (l == NULL) {
   1386 		mutex_exit(p->p_lock);
   1387 	}
   1388 	return l;
   1389 }
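
/*
 * Typical usage (a sketch): on success the process is returned locked,
 * so the caller drops p_lock when it is done with the LWP.
 *
 *	if ((t = lwp_find2(pid, lid)) != NULL) {
 *		... use t while holding t->l_proc->p_lock ...
 *		mutex_exit(t->l_proc->p_lock);
 *	}
 */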
   1390 
   1391 /*
   1392  * Look up a live LWP within the specified process.
   1393  *
   1394  * Must be called with p->p_lock held.
   1395  */
   1396 struct lwp *
   1397 lwp_find(struct proc *p, lwpid_t id)
   1398 {
   1399 	struct lwp *l;
   1400 
   1401 	KASSERT(mutex_owned(p->p_lock));
   1402 
   1403 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1404 		if (l->l_lid == id)
   1405 			break;
   1406 	}
   1407 
   1408 	/*
   1409 	 * No need to lock - all of these conditions will
   1410 	 * be visible with the process level mutex held.
   1411 	 */
   1412 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1413 		l = NULL;
   1414 
   1415 	return l;
   1416 }
   1417 
   1418 /*
   1419  * Update an LWP's cached credentials to mirror the process' master copy.
   1420  *
   1421  * This happens early in the syscall path, on user trap, and on LWP
   1422  * creation.  A long-running LWP can also voluntarily choose to update
   1423  * its credentials by calling this routine.  This may be called from
   1424  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1425  */
   1426 void
   1427 lwp_update_creds(struct lwp *l)
   1428 {
   1429 	kauth_cred_t oc;
   1430 	struct proc *p;
   1431 
   1432 	p = l->l_proc;
   1433 	oc = l->l_cred;
   1434 
   1435 	mutex_enter(p->p_lock);
   1436 	kauth_cred_hold(p->p_cred);
   1437 	l->l_cred = p->p_cred;
   1438 	l->l_prflag &= ~LPR_CRMOD;
   1439 	mutex_exit(p->p_lock);
   1440 	if (oc != NULL)
   1441 		kauth_cred_free(oc);
   1442 }
   1443 
   1444 /*
   1445  * Verify that an LWP is locked, and optionally verify that the lock matches
   1446  * one we specify.
   1447  */
   1448 int
   1449 lwp_locked(struct lwp *l, kmutex_t *mtx)
   1450 {
   1451 	kmutex_t *cur = l->l_mutex;
   1452 
   1453 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1454 }
   1455 
   1456 /*
   1457  * Lend a new mutex to an LWP.  The old mutex must be held.
   1458  */
   1459 void
   1460 lwp_setlock(struct lwp *l, kmutex_t *mtx)
   1461 {
   1462 
   1463 	KASSERT(mutex_owned(l->l_mutex));
   1464 
   1465 	membar_exit();
   1466 	l->l_mutex = mtx;
   1467 }
   1468 
   1469 /*
   1470  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1471  * must be held.
   1472  */
   1473 void
   1474 lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
   1475 {
   1476 	kmutex_t *old;
   1477 
   1478 	KASSERT(lwp_locked(l, NULL));
   1479 
   1480 	old = l->l_mutex;
   1481 	membar_exit();
   1482 	l->l_mutex = mtx;
   1483 	mutex_spin_exit(old);
   1484 }
   1485 
   1486 int
   1487 lwp_trylock(struct lwp *l)
   1488 {
   1489 	kmutex_t *old;
   1490 
   1491 	for (;;) {
   1492 		if (!mutex_tryenter(old = l->l_mutex))
   1493 			return 0;
   1494 		if (__predict_true(l->l_mutex == old))
   1495 			return 1;
   1496 		mutex_spin_exit(old);
   1497 	}
   1498 }
   1499 
   1500 void
   1501 lwp_unsleep(lwp_t *l, bool cleanup)
   1502 {
   1503 
   1504 	KASSERT(mutex_owned(l->l_mutex));
   1505 	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
   1506 }
   1507 
   1508 /*
   1509  * Handle exceptions for mi_userret().  Called if one of the flags in
   1510  * LW_USERRET is set.
   1511  */
   1512 void
   1513 lwp_userret(struct lwp *l)
   1514 {
   1515 	struct proc *p;
   1516 	int sig;
   1517 
   1518 	KASSERT(l == curlwp);
   1519 	KASSERT(l->l_stat == LSONPROC);
   1520 	p = l->l_proc;
   1521 
   1522 #ifndef __HAVE_FAST_SOFTINTS
   1523 	/* Run pending soft interrupts. */
   1524 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1525 		softint_overlay();
   1526 #endif
   1527 
   1528 	/*
   1529 	 * It is safe to do this read unlocked on an MP system.
   1530 	 */
   1531 	while ((l->l_flag & LW_USERRET) != 0) {
   1532 		/*
   1533 		 * Process pending signals first, unless the process
   1534 		 * is dumping core or exiting, where we will instead
   1535 		 * enter the LW_WSUSPEND case below.
   1536 		 */
   1537 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1538 		    LW_PENDSIG) {
   1539 			mutex_enter(p->p_lock);
   1540 			while ((sig = issignal(l)) != 0)
   1541 				postsig(sig);
   1542 			mutex_exit(p->p_lock);
   1543 		}
   1544 
   1545 		/*
   1546 		 * Core-dump or suspend pending.
   1547 		 *
   1548 		 * In case of core dump, suspend ourselves, so that the kernel
   1549 		 * stack and therefore the userland registers saved in the
   1550 		 * trapframe are around for coredump() to write them out.
   1551 		 * We also need to save any PCU resources that we have so that
   1552 		 * they are accessible for coredump().  We issue a wakeup on
   1553 		 * p->p_lwpcv so that sigexit() will write the core file out
   1554 		 * once all other LWPs are suspended.
   1555 		 */
   1556 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1557 			pcu_save_all(l);
   1558 			mutex_enter(p->p_lock);
   1559 			p->p_nrlwps--;
   1560 			cv_broadcast(&p->p_lwpcv);
   1561 			lwp_lock(l);
   1562 			l->l_stat = LSSUSPENDED;
   1563 			lwp_unlock(l);
   1564 			mutex_exit(p->p_lock);
   1565 			lwp_lock(l);
   1566 			mi_switch(l);
   1567 		}
   1568 
   1569 		/* Process is exiting. */
   1570 		if ((l->l_flag & LW_WEXIT) != 0) {
   1571 			lwp_exit(l);
   1572 			KASSERT(0);
   1573 			/* NOTREACHED */
   1574 		}
   1575 
   1576 		/* update lwpctl processor (for vfork child_return) */
   1577 		if (l->l_flag & LW_LWPCTL) {
   1578 			lwp_lock(l);
   1579 			KASSERT(kpreempt_disabled());
   1580 			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
   1581 			l->l_lwpctl->lc_pctr++;
   1582 			l->l_flag &= ~LW_LWPCTL;
   1583 			lwp_unlock(l);
   1584 		}
   1585 	}
   1586 }
   1587 
   1588 /*
   1589  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1590  */
   1591 void
   1592 lwp_need_userret(struct lwp *l)
   1593 {
   1594 	KASSERT(lwp_locked(l, NULL));
   1595 
   1596 	/*
   1597 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1598 	 * that the condition will be seen before forcing the LWP to enter
   1599 	 * kernel mode.
   1600 	 */
   1601 	membar_producer();
   1602 	cpu_signotify(l);
   1603 }
   1604 
   1605 /*
   1606  * Add one reference to an LWP.  This will prevent the LWP from
   1607  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1608  */
   1609 void
   1610 lwp_addref(struct lwp *l)
   1611 {
   1612 
   1613 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1614 	KASSERT(l->l_stat != LSZOMB);
   1615 	KASSERT(l->l_refcnt != 0);
   1616 
   1617 	l->l_refcnt++;
   1618 }
   1619 
   1620 /*
   1621  * Remove one reference to an LWP.  If this is the last reference,
   1622  * then we must finalize the LWP's death.
   1623  */
   1624 void
   1625 lwp_delref(struct lwp *l)
   1626 {
   1627 	struct proc *p = l->l_proc;
   1628 
   1629 	mutex_enter(p->p_lock);
   1630 	lwp_delref2(l);
   1631 	mutex_exit(p->p_lock);
   1632 }
   1633 
   1634 /*
   1635  * Remove one reference to an LWP.  If this is the last reference,
   1636  * then we must finalize the LWP's death.  The proc mutex is held
   1637  * on entry.
   1638  */
   1639 void
   1640 lwp_delref2(struct lwp *l)
   1641 {
   1642 	struct proc *p = l->l_proc;
   1643 
   1644 	KASSERT(mutex_owned(p->p_lock));
   1645 	KASSERT(l->l_stat != LSZOMB);
   1646 	KASSERT(l->l_refcnt > 0);
   1647 	if (--l->l_refcnt == 0)
   1648 		cv_broadcast(&p->p_lwpcv);
   1649 }
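
/*
 * A sketch of the reference pattern used by LWP inspectors: take a
 * reference under p_lock so the LWP cannot exit, look at it, then drop
 * the reference (illustrative only):
 *
 *	lwp_t *t = NULL;
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) != NULL)
 *		lwp_addref(t);
 *	mutex_exit(p->p_lock);
 *	if (t != NULL) {
 *		... inspect t ...
 *		lwp_delref(t);
 *	}
 */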
   1650 
   1651 /*
   1652  * Drain all references to the current LWP.
   1653  */
   1654 void
   1655 lwp_drainrefs(struct lwp *l)
   1656 {
   1657 	struct proc *p = l->l_proc;
   1658 
   1659 	KASSERT(mutex_owned(p->p_lock));
   1660 	KASSERT(l->l_refcnt != 0);
   1661 
   1662 	l->l_refcnt--;
   1663 	while (l->l_refcnt != 0)
   1664 		cv_wait(&p->p_lwpcv, p->p_lock);
   1665 }
   1666 
   1667 /*
   1668  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
   1669  * be held.
   1670  */
   1671 bool
   1672 lwp_alive(lwp_t *l)
   1673 {
   1674 
   1675 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1676 
   1677 	switch (l->l_stat) {
   1678 	case LSSLEEP:
   1679 	case LSRUN:
   1680 	case LSONPROC:
   1681 	case LSSTOP:
   1682 	case LSSUSPENDED:
   1683 		return true;
   1684 	default:
   1685 		return false;
   1686 	}
   1687 }
   1688 
   1689 /*
   1690  * Return first live LWP in the process.
   1691  */
   1692 lwp_t *
   1693 lwp_find_first(proc_t *p)
   1694 {
   1695 	lwp_t *l;
   1696 
   1697 	KASSERT(mutex_owned(p->p_lock));
   1698 
   1699 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1700 		if (lwp_alive(l)) {
   1701 			return l;
   1702 		}
   1703 	}
   1704 
   1705 	return NULL;
   1706 }
   1707 
   1708 /*
   1709  * Allocate a new lwpctl structure for a user LWP.
   1710  */
   1711 int
   1712 lwp_ctl_alloc(vaddr_t *uaddr)
   1713 {
   1714 	lcproc_t *lp;
   1715 	u_int bit, i, offset;
   1716 	struct uvm_object *uao;
   1717 	int error;
   1718 	lcpage_t *lcp;
   1719 	proc_t *p;
   1720 	lwp_t *l;
   1721 
   1722 	l = curlwp;
   1723 	p = l->l_proc;
   1724 
   1725 	/* don't allow a vforked process to create lwp ctls */
   1726 	if (p->p_lflag & PL_PPWAIT)
   1727 		return EBUSY;
   1728 
   1729 	if (l->l_lcpage != NULL) {
   1730 		lcp = l->l_lcpage;
   1731 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1732 		return 0;
   1733 	}
   1734 
   1735 	/* First time around, allocate header structure for the process. */
   1736 	if ((lp = p->p_lwpctl) == NULL) {
   1737 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1738 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1739 		lp->lp_uao = NULL;
   1740 		TAILQ_INIT(&lp->lp_pages);
   1741 		mutex_enter(p->p_lock);
   1742 		if (p->p_lwpctl == NULL) {
   1743 			p->p_lwpctl = lp;
   1744 			mutex_exit(p->p_lock);
   1745 		} else {
   1746 			mutex_exit(p->p_lock);
   1747 			mutex_destroy(&lp->lp_lock);
   1748 			kmem_free(lp, sizeof(*lp));
   1749 			lp = p->p_lwpctl;
   1750 		}
   1751 	}
   1752 
   1753  	/*
   1754  	 * Set up an anonymous memory region to hold the shared pages.
   1755  	 * Map them into the process' address space.  The user vmspace
   1756  	 * gets the first reference on the UAO.
   1757  	 */
   1758 	mutex_enter(&lp->lp_lock);
   1759 	if (lp->lp_uao == NULL) {
   1760 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1761 		lp->lp_cur = 0;
   1762 		lp->lp_max = LWPCTL_UAREA_SZ;
   1763 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1764 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ,
   1765 		     p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
   1766 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1767 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1768 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1769 		if (error != 0) {
   1770 			uao_detach(lp->lp_uao);
   1771 			lp->lp_uao = NULL;
   1772 			mutex_exit(&lp->lp_lock);
   1773 			return error;
   1774 		}
   1775 	}
   1776 
   1777 	/* Get a free block and allocate for this LWP. */
   1778 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1779 		if (lcp->lcp_nfree != 0)
   1780 			break;
   1781 	}
   1782 	if (lcp == NULL) {
   1783 		/* Nothing available - try to set up a free page. */
   1784 		if (lp->lp_cur == lp->lp_max) {
   1785 			mutex_exit(&lp->lp_lock);
   1786 			return ENOMEM;
   1787 		}
   1788 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1789 
   1790 		/*
   1791 		 * Wire the next page down in kernel space.  Since this
   1792 		 * is a new mapping, we must add a reference.
   1793 		 */
   1794 		uao = lp->lp_uao;
   1795 		(*uao->pgops->pgo_reference)(uao);
   1796 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1797 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1798 		    uao, lp->lp_cur, PAGE_SIZE,
   1799 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1800 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1801 		if (error != 0) {
   1802 			mutex_exit(&lp->lp_lock);
   1803 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1804 			(*uao->pgops->pgo_detach)(uao);
   1805 			return error;
   1806 		}
   1807 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1808 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1809 		if (error != 0) {
   1810 			mutex_exit(&lp->lp_lock);
   1811 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1812 			    lcp->lcp_kaddr + PAGE_SIZE);
   1813 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1814 			return error;
   1815 		}
   1816 		/* Prepare the page descriptor and link into the list. */
   1817 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1818 		lp->lp_cur += PAGE_SIZE;
   1819 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1820 		lcp->lcp_rotor = 0;
   1821 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1822 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1823 	}
   1824 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1825 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1826 			i = 0;
   1827 	}
   1828 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1829 	lcp->lcp_bitmap[i] ^= (1U << bit);
   1830 	lcp->lcp_rotor = i;
   1831 	lcp->lcp_nfree--;
   1832 	l->l_lcpage = lcp;
   1833 	offset = (i << 5) + bit;
   1834 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1835 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1836 	mutex_exit(&lp->lp_lock);
   1837 
   1838 	KPREEMPT_DISABLE(l);
   1839 	l->l_lwpctl->lc_curcpu = (int)cpu_index(curcpu());
   1840 	KPREEMPT_ENABLE(l);
   1841 
   1842 	return 0;
   1843 }
   1844 
   1845 /*
   1846  * Free an lwpctl structure back to the per-process list.
   1847  */
   1848 void
   1849 lwp_ctl_free(lwp_t *l)
   1850 {
   1851 	struct proc *p = l->l_proc;
   1852 	lcproc_t *lp;
   1853 	lcpage_t *lcp;
   1854 	u_int map, offset;
   1855 
   1856 	/* don't free a lwp context we borrowed for vfork */
   1857 	if (p->p_lflag & PL_PPWAIT) {
   1858 		l->l_lwpctl = NULL;
   1859 		return;
   1860 	}
   1861 
   1862 	lp = p->p_lwpctl;
   1863 	KASSERT(lp != NULL);
   1864 
   1865 	lcp = l->l_lcpage;
   1866 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1867 	KASSERT(offset < LWPCTL_PER_PAGE);
   1868 
   1869 	mutex_enter(&lp->lp_lock);
   1870 	lcp->lcp_nfree++;
   1871 	map = offset >> 5;
   1872 	lcp->lcp_bitmap[map] |= (1U << (offset & 31));
   1873 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1874 		lcp->lcp_rotor = map;
   1875 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1876 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1877 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1878 	}
   1879 	mutex_exit(&lp->lp_lock);
   1880 }
   1881 
   1882 /*
   1883  * Process is exiting; tear down lwpctl state.  This can only be safely
   1884  * called by the last LWP in the process.
   1885  */
   1886 void
   1887 lwp_ctl_exit(void)
   1888 {
   1889 	lcpage_t *lcp, *next;
   1890 	lcproc_t *lp;
   1891 	proc_t *p;
   1892 	lwp_t *l;
   1893 
   1894 	l = curlwp;
   1895 	l->l_lwpctl = NULL;
   1896 	l->l_lcpage = NULL;
   1897 	p = l->l_proc;
   1898 	lp = p->p_lwpctl;
   1899 
   1900 	KASSERT(lp != NULL);
   1901 	KASSERT(p->p_nlwps == 1);
   1902 
   1903 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1904 		next = TAILQ_NEXT(lcp, lcp_chain);
   1905 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1906 		    lcp->lcp_kaddr + PAGE_SIZE);
   1907 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1908 	}
   1909 
   1910 	if (lp->lp_uao != NULL) {
   1911 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1912 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1913 	}
   1914 
   1915 	mutex_destroy(&lp->lp_lock);
   1916 	kmem_free(lp, sizeof(*lp));
   1917 	p->p_lwpctl = NULL;
   1918 }
   1919 
   1920 /*
   1921  * Return the current LWP's "preemption counter".  Used to detect
   1922  * preemption across operations that can tolerate preemption without
   1923  * crashing, but which may generate incorrect results if preempted.
   1924  */
   1925 uint64_t
   1926 lwp_pctr(void)
   1927 {
   1928 
   1929 	return curlwp->l_ncsw;
   1930 }
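
/*
 * A minimal sketch of how the counter can be used: snapshot it, do the
 * preemption-tolerant work, and retry if the LWP was preempted in the
 * meantime.
 *
 *	uint64_t pctr;
 *
 *	do {
 *		pctr = lwp_pctr();
 *		... computation that tolerates, but is invalidated by,
 *		    preemption ...
 *	} while (pctr != lwp_pctr());
 */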
   1931 
   1932 /*
   1933  * Set an LWP's private data pointer.
   1934  */
   1935 int
   1936 lwp_setprivate(struct lwp *l, void *ptr)
   1937 {
   1938 	int error = 0;
   1939 
   1940 	l->l_private = ptr;
   1941 #ifdef __HAVE_CPU_LWP_SETPRIVATE
   1942 	error = cpu_lwp_setprivate(l, ptr);
   1943 #endif
   1944 	return error;
   1945 }
   1946 
   1947 #if defined(DDB)
   1948 #include <machine/pcb.h>
   1949 
   1950 void
   1951 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1952 {
   1953 	lwp_t *l;
   1954 
   1955 	LIST_FOREACH(l, &alllwp, l_list) {
   1956 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   1957 
   1958 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   1959 			continue;
   1960 		}
   1961 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   1962 		    (void *)addr, (void *)stack,
   1963 		    (size_t)(addr - stack), l);
   1964 	}
   1965 }
   1966 #endif /* defined(DDB) */
   1967