      1  1.101.2.1      yamt /*	$NetBSD: kern_lwp.c,v 1.101.2.1 2008/05/18 12:35:08 yamt Exp $	*/
      2        1.2   thorpej 
      3        1.2   thorpej /*-
      4       1.95        ad  * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5        1.2   thorpej  * All rights reserved.
      6        1.2   thorpej  *
      7        1.2   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8       1.52        ad  * by Nathan J. Williams, and Andrew Doran.
      9        1.2   thorpej  *
     10        1.2   thorpej  * Redistribution and use in source and binary forms, with or without
     11        1.2   thorpej  * modification, are permitted provided that the following conditions
     12        1.2   thorpej  * are met:
     13        1.2   thorpej  * 1. Redistributions of source code must retain the above copyright
     14        1.2   thorpej  *    notice, this list of conditions and the following disclaimer.
     15        1.2   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     16        1.2   thorpej  *    notice, this list of conditions and the following disclaimer in the
     17        1.2   thorpej  *    documentation and/or other materials provided with the distribution.
     18        1.2   thorpej  *
     19        1.2   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20        1.2   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21        1.2   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22        1.2   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23        1.2   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24        1.2   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25        1.2   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26        1.2   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27        1.2   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28        1.2   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29        1.2   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     30        1.2   thorpej  */
     31        1.9     lukem 
     32       1.52        ad /*
     33       1.52        ad  * Overview
     34       1.52        ad  *
     35       1.66        ad  *	Lightweight processes (LWPs) are the basic unit or thread of
     36       1.52        ad  *	execution within the kernel.  The core state of an LWP is described
     37       1.66        ad  *	by "struct lwp", also known as lwp_t.
     38       1.52        ad  *
      39       1.52        ad  *	Each LWP is contained within a process (described by "struct proc").
     40       1.52        ad  *	Every process contains at least one LWP, but may contain more.  The
     41       1.52        ad  *	process describes attributes shared among all of its LWPs such as a
     42       1.52        ad  *	private address space, global execution state (stopped, active,
     43       1.52        ad  *	zombie, ...), signal disposition and so on.  On a multiprocessor
      44       1.66        ad  *	machine, multiple LWPs may be executing concurrently in the kernel.
     45       1.52        ad  *
     46       1.52        ad  * Execution states
     47       1.52        ad  *
     48       1.52        ad  *	At any given time, an LWP has overall state that is described by
     49       1.52        ad  *	lwp::l_stat.  The states are broken into two sets below.  The first
     50       1.52        ad  *	set is guaranteed to represent the absolute, current state of the
     51       1.52        ad  *	LWP:
     52      1.101     rmind  *
     53      1.101     rmind  *	LSONPROC
     54      1.101     rmind  *
     55      1.101     rmind  *		On processor: the LWP is executing on a CPU, either in the
     56      1.101     rmind  *		kernel or in user space.
     57      1.101     rmind  *
     58      1.101     rmind  *	LSRUN
     59      1.101     rmind  *
     60      1.101     rmind  *		Runnable: the LWP is parked on a run queue, and may soon be
     61      1.101     rmind  *		chosen to run by an idle processor, or by a processor that
      62      1.101     rmind  *		has been asked to preempt a currently running but lower
     63      1.101     rmind  *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
     64       1.52        ad  *		then the LWP is not on a run queue, but may be soon.
     65      1.101     rmind  *
     66      1.101     rmind  *	LSIDL
     67      1.101     rmind  *
     68      1.101     rmind  *		Idle: the LWP has been created but has not yet executed,
     69       1.66        ad  *		or it has ceased executing a unit of work and is waiting
     70       1.66        ad  *		to be started again.
     71      1.101     rmind  *
      72      1.101     rmind  *	LSSUSPENDED
     73      1.101     rmind  *
     74      1.101     rmind  *		Suspended: the LWP has had its execution suspended by
     75       1.52        ad  *		another LWP in the same process using the _lwp_suspend()
     76       1.52        ad  *		system call.  User-level LWPs also enter the suspended
     77       1.52        ad  *		state when the system is shutting down.
     78       1.52        ad  *
     79       1.52        ad  *	The second set represent a "statement of intent" on behalf of the
      80       1.52        ad  *	LWP.  The LWP may in fact be executing on a processor, or may be
      81       1.66        ad  *	sleeping or idle.  It is expected to take the necessary action to
     82      1.101     rmind  *	stop executing or become "running" again within a short timeframe.
     83       1.66        ad  *	The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
     84      1.101     rmind  *	Importantly, it indicates that its state is tied to a CPU.
     85      1.101     rmind  *
     86      1.101     rmind  *	LSZOMB:
     87      1.101     rmind  *
     88      1.101     rmind  *		Dead or dying: the LWP has released most of its resources
      89      1.101     rmind  *		and a) is about to switch away into oblivion, or b) has
      90       1.66        ad  *		already switched away.  When it switches away, its few remaining
     91       1.66        ad  *		resources can be collected.
     92      1.101     rmind  *
     93      1.101     rmind  *	LSSLEEP:
     94      1.101     rmind  *
     95      1.101     rmind  *		Sleeping: the LWP has entered itself onto a sleep queue, and
     96      1.101     rmind  *		has switched away or will switch away shortly to allow other
     97       1.66        ad  *		LWPs to run on the CPU.
     98      1.101     rmind  *
     99      1.101     rmind  *	LSSTOP:
    100      1.101     rmind  *
    101      1.101     rmind  *		Stopped: the LWP has been stopped as a result of a job
    102      1.101     rmind  *		control signal, or as a result of the ptrace() interface.
    103      1.101     rmind  *
    104      1.101     rmind  *		Stopped LWPs may run briefly within the kernel to handle
    105      1.101     rmind  *		signals that they receive, but will not return to user space
    106      1.101     rmind  *		until their process' state is changed away from stopped.
    107      1.101     rmind  *
     108      1.101     rmind  *		Individual LWPs within a process cannot be stopped
    109      1.101     rmind  *		selectively: all actions that can stop or continue LWPs
    110      1.101     rmind  *		occur at the process level.
    111      1.101     rmind  *
    112       1.52        ad  * State transitions
    113       1.52        ad  *
    114       1.66        ad  *	Note that the LSSTOP state may only be set when returning to
     115       1.66        ad  *	user space in userret(), or when sleeping interruptibly.  The
    116       1.66        ad  *	LSSUSPENDED state may only be set in userret().  Before setting
    117       1.66        ad  *	those states, we try to ensure that the LWPs will release all
    118       1.66        ad  *	locks that they hold, and at a minimum try to ensure that the
    119       1.66        ad  *	LWP can be set runnable again by a signal.
    120       1.52        ad  *
    121       1.52        ad  *	LWPs may transition states in the following ways:
    122       1.52        ad  *
    123       1.52        ad  *	 RUN -------> ONPROC		ONPROC -----> RUN
    124      1.101     rmind  *		    > STOPPED			    > SLEEP
    125      1.101     rmind  *		    > SUSPENDED			    > STOPPED
    126       1.52        ad  *						    > SUSPENDED
    127       1.52        ad  *						    > ZOMB
    128       1.52        ad  *
    129       1.52        ad  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    130       1.52        ad  *	            > SLEEP			    > SLEEP
    131       1.52        ad  *
    132       1.52        ad  *	 SLEEP -----> ONPROC		IDL --------> RUN
    133      1.101     rmind  *		    > RUN			    > SUSPENDED
    134      1.101     rmind  *		    > STOPPED			    > STOPPED
    135       1.52        ad  *		    > SUSPENDED
    136       1.52        ad  *
     137       1.66        ad  *	Other state transitions are possible with kernel threads (e.g.
     138       1.66        ad  *	ONPROC -> IDL), but only happen under tightly controlled
     139       1.66        ad  *	circumstances where the side effects are understood.
    140       1.66        ad  *
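                               *	As an illustrative sketch (distilled from lwp_suspend() below,
                               *	not a new rule): state is examined and changed with the LWP's
                               *	general lock held, and functions that make an LWP runnable,
                               *	such as setrunnable(), consume that lock:
                               *
                               *		lwp_lock(t);
                               *		if (t->l_stat == LSSLEEP && (t->l_flag & LW_SINTR) != 0)
                               *			setrunnable(t);		(releases the LWP lock)
                               *		else
                               *			lwp_unlock(t);
                               *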
    141       1.52        ad  * Locking
    142       1.52        ad  *
    143       1.52        ad  *	The majority of fields in 'struct lwp' are covered by a single,
    144       1.66        ad  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
    145       1.52        ad  *	each field are documented in sys/lwp.h.
    146       1.52        ad  *
    147       1.66        ad  *	State transitions must be made with the LWP's general lock held,
    148      1.101     rmind  *	and may cause the LWP's lock pointer to change. Manipulation of
    149       1.66        ad  *	the general lock is not performed directly, but through calls to
    150       1.66        ad  *	lwp_lock(), lwp_relock() and similar.
    151       1.52        ad  *
    152       1.52        ad  *	States and their associated locks:
    153       1.52        ad  *
    154       1.74     rmind  *	LSONPROC, LSZOMB:
    155       1.52        ad  *
    156       1.64      yamt  *		Always covered by spc_lwplock, which protects running LWPs.
    157       1.64      yamt  *		This is a per-CPU lock.
    158       1.52        ad  *
    159       1.74     rmind  *	LSIDL, LSRUN:
    160       1.52        ad  *
    161       1.64      yamt  *		Always covered by spc_mutex, which protects the run queues.
    162      1.101     rmind  *		This is a per-CPU lock.
    163       1.52        ad  *
    164       1.52        ad  *	LSSLEEP:
    165       1.52        ad  *
    166       1.66        ad  *		Covered by a lock associated with the sleep queue that the
    167       1.52        ad  *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
    168       1.52        ad  *
    169       1.52        ad  *	LSSTOP, LSSUSPENDED:
    170      1.101     rmind  *
    171       1.52        ad  *		If the LWP was previously sleeping (l_wchan != NULL), then
    172       1.66        ad  *		l_mutex references the sleep queue lock.  If the LWP was
    173       1.52        ad  *		runnable or on the CPU when halted, or has been removed from
    174       1.66        ad  *		the sleep queue since halted, then the lock is spc_lwplock.
    175       1.52        ad  *
    176       1.52        ad  *	The lock order is as follows:
    177       1.52        ad  *
    178       1.64      yamt  *		spc::spc_lwplock ->
    179       1.64      yamt  *		    sleepq_t::sq_mutex ->
    180       1.64      yamt  *			tschain_t::tc_mutex ->
    181       1.64      yamt  *			    spc::spc_mutex
    182       1.52        ad  *
     183  1.101.2.1      yamt  *	Each process has a scheduler state lock (proc::p_lock), and a
    184       1.52        ad  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    185       1.52        ad  *	so on.  When an LWP is to be entered into or removed from one of the
    186  1.101.2.1      yamt  *	following states, p_lock must be held and the process wide counters
    187       1.52        ad  *	adjusted:
    188       1.52        ad  *
    189       1.52        ad  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    190       1.52        ad  *
    191       1.52        ad  *	Note that an LWP is considered running or likely to run soon if in
    192       1.52        ad  *	one of the following states.  This affects the value of p_nrlwps:
    193       1.52        ad  *
    194       1.52        ad  *		LSRUN, LSONPROC, LSSLEEP
    195       1.52        ad  *
    196  1.101.2.1      yamt  *	p_lock does not need to be held when transitioning among these
    197       1.52        ad  *	three states.
    198       1.52        ad  */
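
                              /*
                               * Illustrative example of the rules above (mirroring what lwp_exit()
                               * does later in this file, not an additional interface): moving an LWP
                               * to LSZOMB takes both the process lock and the LWP lock, and the
                               * process-wide counters are adjusted with p_lock held:
                               *
                               *	mutex_enter(p->p_lock);
                               *	lwp_lock(l);
                               *	l->l_stat = LSZOMB;
                               *	lwp_unlock(l);
                               *	p->p_nrlwps--;
                               *	cv_broadcast(&p->p_lwpcv);
                               *	mutex_exit(p->p_lock);
                               */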
    199       1.52        ad 
    200        1.9     lukem #include <sys/cdefs.h>
    201  1.101.2.1      yamt __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.101.2.1 2008/05/18 12:35:08 yamt Exp $");
    202        1.8    martin 
    203       1.84      yamt #include "opt_ddb.h"
    204        1.8    martin #include "opt_multiprocessor.h"
    205       1.52        ad #include "opt_lockdebug.h"
    206        1.2   thorpej 
    207       1.47   hannken #define _LWP_API_PRIVATE
    208       1.47   hannken 
    209        1.2   thorpej #include <sys/param.h>
    210        1.2   thorpej #include <sys/systm.h>
    211       1.64      yamt #include <sys/cpu.h>
    212        1.2   thorpej #include <sys/pool.h>
    213        1.2   thorpej #include <sys/proc.h>
    214        1.2   thorpej #include <sys/syscallargs.h>
    215       1.57       dsl #include <sys/syscall_stats.h>
    216       1.37        ad #include <sys/kauth.h>
    217       1.52        ad #include <sys/sleepq.h>
    218       1.85      yamt #include <sys/user.h>
    219       1.52        ad #include <sys/lockdebug.h>
    220       1.52        ad #include <sys/kmem.h>
    221       1.91     rmind #include <sys/pset.h>
    222       1.75        ad #include <sys/intr.h>
    223       1.78        ad #include <sys/lwpctl.h>
    224       1.81        ad #include <sys/atomic.h>
    225        1.2   thorpej 
    226        1.2   thorpej #include <uvm/uvm_extern.h>
    227       1.80     skrll #include <uvm/uvm_object.h>
    228        1.2   thorpej 
    229       1.77      matt struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);
    230       1.52        ad 
    231       1.41   thorpej POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    232       1.62        ad     &pool_allocator_nointr, IPL_NONE);
    233       1.41   thorpej 
    234       1.87        ad static pool_cache_t lwp_cache;
    235       1.41   thorpej static specificdata_domain_t lwp_specificdata_domain;
    236       1.41   thorpej 
    237       1.41   thorpej void
    238       1.41   thorpej lwpinit(void)
    239       1.41   thorpej {
    240       1.41   thorpej 
    241       1.41   thorpej 	lwp_specificdata_domain = specificdata_domain_create();
    242       1.41   thorpej 	KASSERT(lwp_specificdata_domain != NULL);
    243       1.52        ad 	lwp_sys_init();
    244       1.87        ad 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
    245       1.87        ad 	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
    246       1.41   thorpej }
    247       1.41   thorpej 
    248       1.52        ad /*
     249       1.52        ad  * Set an LWP suspended.
    250       1.52        ad  *
    251  1.101.2.1      yamt  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    252       1.52        ad  * LWP before return.
    253       1.52        ad  */
    254        1.2   thorpej int
    255       1.52        ad lwp_suspend(struct lwp *curl, struct lwp *t)
    256        1.2   thorpej {
    257       1.52        ad 	int error;
    258        1.2   thorpej 
    259  1.101.2.1      yamt 	KASSERT(mutex_owned(t->l_proc->p_lock));
    260       1.63        ad 	KASSERT(lwp_locked(t, NULL));
    261       1.33       chs 
    262       1.52        ad 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    263        1.2   thorpej 
    264       1.52        ad 	/*
    265       1.52        ad 	 * If the current LWP has been told to exit, we must not suspend anyone
    266       1.52        ad 	 * else or deadlock could occur.  We won't return to userspace.
    267        1.2   thorpej 	 */
    268  1.101.2.1      yamt 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
    269       1.52        ad 		lwp_unlock(t);
    270       1.52        ad 		return (EDEADLK);
    271        1.2   thorpej 	}
    272        1.2   thorpej 
    273       1.52        ad 	error = 0;
    274        1.2   thorpej 
    275       1.52        ad 	switch (t->l_stat) {
    276       1.52        ad 	case LSRUN:
    277       1.52        ad 	case LSONPROC:
    278       1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    279       1.52        ad 		lwp_need_userret(t);
    280       1.52        ad 		lwp_unlock(t);
    281       1.52        ad 		break;
    282        1.2   thorpej 
    283       1.52        ad 	case LSSLEEP:
    284       1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    285        1.2   thorpej 
    286        1.2   thorpej 		/*
    287       1.52        ad 		 * Kick the LWP and try to get it to the kernel boundary
    288       1.52        ad 		 * so that it will release any locks that it holds.
    289       1.52        ad 		 * setrunnable() will release the lock.
    290        1.2   thorpej 		 */
    291       1.56     pavel 		if ((t->l_flag & LW_SINTR) != 0)
    292       1.52        ad 			setrunnable(t);
    293       1.52        ad 		else
    294       1.52        ad 			lwp_unlock(t);
    295       1.52        ad 		break;
    296        1.2   thorpej 
    297       1.52        ad 	case LSSUSPENDED:
    298       1.52        ad 		lwp_unlock(t);
    299       1.52        ad 		break;
    300       1.17      manu 
    301       1.52        ad 	case LSSTOP:
    302       1.56     pavel 		t->l_flag |= LW_WSUSPEND;
    303       1.52        ad 		setrunnable(t);
    304       1.52        ad 		break;
    305        1.2   thorpej 
    306       1.52        ad 	case LSIDL:
    307       1.52        ad 	case LSZOMB:
    308       1.52        ad 		error = EINTR; /* It's what Solaris does..... */
    309       1.52        ad 		lwp_unlock(t);
    310       1.52        ad 		break;
    311        1.2   thorpej 	}
    312        1.2   thorpej 
    313       1.69     rmind 	return (error);
    314        1.2   thorpej }
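
                              /*
                               * A hedged sketch of the expected calling pattern for lwp_suspend(),
                               * in the style of the _lwp_suspend() system call (the lookup step is
                               * paraphrased, not quoted):
                               *
                               *	mutex_enter(p->p_lock);
                               *	if ((t = lwp_find(p, lid)) == NULL) {
                               *		mutex_exit(p->p_lock);
                               *		return ESRCH;
                               *	}
                               *	lwp_lock(t);
                               *	error = lwp_suspend(curlwp, t);		(unlocks t)
                               *	mutex_exit(p->p_lock);
                               */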
    315        1.2   thorpej 
    316       1.52        ad /*
    317       1.52        ad  * Restart a suspended LWP.
    318       1.52        ad  *
    319  1.101.2.1      yamt  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    320       1.52        ad  * LWP before return.
    321       1.52        ad  */
    322        1.2   thorpej void
    323        1.2   thorpej lwp_continue(struct lwp *l)
    324        1.2   thorpej {
    325        1.2   thorpej 
    326  1.101.2.1      yamt 	KASSERT(mutex_owned(l->l_proc->p_lock));
    327       1.63        ad 	KASSERT(lwp_locked(l, NULL));
    328       1.52        ad 
    329       1.52        ad 	/* If rebooting or not suspended, then just bail out. */
    330       1.56     pavel 	if ((l->l_flag & LW_WREBOOT) != 0) {
    331       1.52        ad 		lwp_unlock(l);
    332        1.2   thorpej 		return;
    333       1.10      fvdl 	}
    334        1.2   thorpej 
    335       1.56     pavel 	l->l_flag &= ~LW_WSUSPEND;
    336        1.2   thorpej 
    337       1.52        ad 	if (l->l_stat != LSSUSPENDED) {
    338       1.52        ad 		lwp_unlock(l);
    339       1.52        ad 		return;
    340        1.2   thorpej 	}
    341        1.2   thorpej 
    342       1.52        ad 	/* setrunnable() will release the lock. */
    343       1.52        ad 	setrunnable(l);
    344        1.2   thorpej }
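
                              /*
                               * Corresponding sketch for lwp_continue(), again with the lookup step
                               * paraphrased from the _lwp_continue() system call:
                               *
                               *	mutex_enter(p->p_lock);
                               *	if ((t = lwp_find(p, lid)) != NULL) {
                               *		lwp_lock(t);
                               *		lwp_continue(t);		(unlocks t)
                               *	}
                               *	mutex_exit(p->p_lock);
                               */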
    345        1.2   thorpej 
    346       1.52        ad /*
    347       1.52        ad  * Wait for an LWP within the current process to exit.  If 'lid' is
    348       1.52        ad  * non-zero, we are waiting for a specific LWP.
    349       1.52        ad  *
    350  1.101.2.1      yamt  * Must be called with p->p_lock held.
    351       1.52        ad  */
    352        1.2   thorpej int
    353        1.2   thorpej lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
    354        1.2   thorpej {
    355        1.2   thorpej 	struct proc *p = l->l_proc;
    356       1.52        ad 	struct lwp *l2;
    357       1.52        ad 	int nfound, error;
    358       1.63        ad 	lwpid_t curlid;
    359       1.63        ad 	bool exiting;
    360        1.2   thorpej 
    361  1.101.2.1      yamt 	KASSERT(mutex_owned(p->p_lock));
    362       1.52        ad 
    363       1.52        ad 	p->p_nlwpwait++;
    364       1.63        ad 	l->l_waitingfor = lid;
    365       1.63        ad 	curlid = l->l_lid;
    366       1.63        ad 	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
    367       1.52        ad 
    368       1.52        ad 	for (;;) {
    369       1.52        ad 		/*
    370       1.52        ad 		 * Avoid a race between exit1() and sigexit(): if the
    371       1.52        ad 		 * process is dumping core, then we need to bail out: call
    372       1.52        ad 		 * into lwp_userret() where we will be suspended until the
    373       1.52        ad 		 * deed is done.
    374       1.52        ad 		 */
    375       1.52        ad 		if ((p->p_sflag & PS_WCORE) != 0) {
    376  1.101.2.1      yamt 			mutex_exit(p->p_lock);
    377       1.52        ad 			lwp_userret(l);
    378       1.52        ad #ifdef DIAGNOSTIC
    379       1.52        ad 			panic("lwp_wait1");
    380       1.52        ad #endif
    381       1.52        ad 			/* NOTREACHED */
    382       1.52        ad 		}
    383       1.52        ad 
    384       1.52        ad 		/*
    385       1.52        ad 		 * First off, drain any detached LWP that is waiting to be
    386       1.52        ad 		 * reaped.
    387       1.52        ad 		 */
    388       1.52        ad 		while ((l2 = p->p_zomblwp) != NULL) {
    389       1.52        ad 			p->p_zomblwp = NULL;
    390       1.63        ad 			lwp_free(l2, false, false);/* releases proc mutex */
    391  1.101.2.1      yamt 			mutex_enter(p->p_lock);
    392       1.52        ad 		}
    393       1.52        ad 
    394       1.52        ad 		/*
    395       1.52        ad 		 * Now look for an LWP to collect.  If the whole process is
    396       1.52        ad 		 * exiting, count detached LWPs as eligible to be collected,
    397       1.52        ad 		 * but don't drain them here.
    398       1.52        ad 		 */
    399       1.52        ad 		nfound = 0;
    400       1.63        ad 		error = 0;
    401       1.52        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    402       1.63        ad 			/*
    403       1.63        ad 			 * If a specific wait and the target is waiting on
    404       1.63        ad 			 * us, then avoid deadlock.  This also traps LWPs
    405       1.63        ad 			 * that try to wait on themselves.
    406       1.63        ad 			 *
    407       1.63        ad 			 * Note that this does not handle more complicated
    408       1.63        ad 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
    409       1.63        ad 			 * can still be killed so it is not a major problem.
    410       1.63        ad 			 */
    411       1.63        ad 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
    412       1.63        ad 				error = EDEADLK;
    413       1.63        ad 				break;
    414       1.63        ad 			}
    415       1.63        ad 			if (l2 == l)
    416       1.52        ad 				continue;
    417       1.52        ad 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    418       1.63        ad 				nfound += exiting;
    419       1.63        ad 				continue;
    420       1.63        ad 			}
    421       1.63        ad 			if (lid != 0) {
    422       1.63        ad 				if (l2->l_lid != lid)
    423       1.63        ad 					continue;
    424       1.63        ad 				/*
    425       1.63        ad 				 * Mark this LWP as the first waiter, if there
    426       1.63        ad 				 * is no other.
    427       1.63        ad 				 */
    428       1.63        ad 				if (l2->l_waiter == 0)
    429       1.63        ad 					l2->l_waiter = curlid;
    430       1.63        ad 			} else if (l2->l_waiter != 0) {
    431       1.63        ad 				/*
    432       1.63        ad 				 * It already has a waiter - so don't
    433       1.63        ad 				 * collect it.  If the waiter doesn't
    434       1.63        ad 				 * grab it we'll get another chance
    435       1.63        ad 				 * later.
    436       1.63        ad 				 */
    437       1.63        ad 				nfound++;
    438       1.52        ad 				continue;
    439       1.52        ad 			}
    440       1.52        ad 			nfound++;
    441        1.2   thorpej 
    442       1.52        ad 			/* No need to lock the LWP in order to see LSZOMB. */
    443       1.52        ad 			if (l2->l_stat != LSZOMB)
    444       1.52        ad 				continue;
    445        1.2   thorpej 
    446       1.63        ad 			/*
    447       1.63        ad 			 * We're no longer waiting.  Reset the "first waiter"
    448       1.63        ad 			 * pointer on the target, in case it was us.
    449       1.63        ad 			 */
    450       1.63        ad 			l->l_waitingfor = 0;
    451       1.63        ad 			l2->l_waiter = 0;
    452       1.63        ad 			p->p_nlwpwait--;
    453        1.2   thorpej 			if (departed)
    454        1.2   thorpej 				*departed = l2->l_lid;
    455       1.75        ad 			sched_lwp_collect(l2);
    456       1.63        ad 
    457       1.63        ad 			/* lwp_free() releases the proc lock. */
    458       1.63        ad 			lwp_free(l2, false, false);
    459  1.101.2.1      yamt 			mutex_enter(p->p_lock);
    460       1.52        ad 			return 0;
    461       1.52        ad 		}
    462        1.2   thorpej 
    463       1.63        ad 		if (error != 0)
    464       1.63        ad 			break;
    465       1.52        ad 		if (nfound == 0) {
    466       1.52        ad 			error = ESRCH;
    467       1.52        ad 			break;
    468       1.52        ad 		}
    469       1.63        ad 
    470       1.63        ad 		/*
     471       1.63        ad 		 * The kernel is careful to ensure that it cannot deadlock
    472       1.63        ad 		 * when exiting - just keep waiting.
    473       1.63        ad 		 */
    474       1.63        ad 		if (exiting) {
    475       1.52        ad 			KASSERT(p->p_nlwps > 1);
    476  1.101.2.1      yamt 			cv_wait(&p->p_lwpcv, p->p_lock);
    477       1.52        ad 			continue;
    478       1.52        ad 		}
    479       1.63        ad 
    480       1.63        ad 		/*
    481       1.63        ad 		 * If all other LWPs are waiting for exits or suspends
    482       1.63        ad 		 * and the supply of zombies and potential zombies is
    483       1.63        ad 		 * exhausted, then we are about to deadlock.
    484       1.63        ad 		 *
    485       1.63        ad 		 * If the process is exiting (and this LWP is not the one
    486       1.63        ad 		 * that is coordinating the exit) then bail out now.
    487       1.63        ad 		 */
    488       1.52        ad 		if ((p->p_sflag & PS_WEXIT) != 0 ||
    489       1.63        ad 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
    490       1.52        ad 			error = EDEADLK;
    491       1.52        ad 			break;
    492        1.2   thorpej 		}
    493       1.63        ad 
    494       1.63        ad 		/*
    495       1.63        ad 		 * Sit around and wait for something to happen.  We'll be
    496       1.63        ad 		 * awoken if any of the conditions examined change: if an
    497       1.63        ad 		 * LWP exits, is collected, or is detached.
    498       1.63        ad 		 */
    499  1.101.2.1      yamt 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
    500       1.52        ad 			break;
    501        1.2   thorpej 	}
    502        1.2   thorpej 
    503       1.63        ad 	/*
    504       1.63        ad 	 * We didn't find any LWPs to collect, we may have received a
    505       1.63        ad 	 * signal, or some other condition has caused us to bail out.
    506       1.63        ad 	 *
    507       1.63        ad 	 * If waiting on a specific LWP, clear the waiters marker: some
    508       1.63        ad 	 * other LWP may want it.  Then, kick all the remaining waiters
    509       1.63        ad 	 * so that they can re-check for zombies and for deadlock.
    510       1.63        ad 	 */
    511       1.63        ad 	if (lid != 0) {
    512       1.63        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    513       1.63        ad 			if (l2->l_lid == lid) {
    514       1.63        ad 				if (l2->l_waiter == curlid)
    515       1.63        ad 					l2->l_waiter = 0;
    516       1.63        ad 				break;
    517       1.63        ad 			}
    518       1.63        ad 		}
    519       1.63        ad 	}
    520       1.52        ad 	p->p_nlwpwait--;
    521       1.63        ad 	l->l_waitingfor = 0;
    522       1.63        ad 	cv_broadcast(&p->p_lwpcv);
    523       1.63        ad 
    524       1.52        ad 	return error;
    525        1.2   thorpej }
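
                              /*
                               * Sketch of how lwp_wait1() is driven (cf. sys__lwp_wait(); the exact
                               * argument and copyout handling there is assumed, not quoted):
                               *
                               *	mutex_enter(p->p_lock);
                               *	error = lwp_wait1(curlwp, lid, &departed, 0);
                               *	mutex_exit(p->p_lock);
                               */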
    526        1.2   thorpej 
    527       1.52        ad /*
    528       1.52        ad  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    529       1.52        ad  * The new LWP is created in state LSIDL and must be set running,
    530       1.52        ad  * suspended, or stopped by the caller.
    531       1.52        ad  */
    532        1.2   thorpej int
    533       1.75        ad lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
    534       1.75        ad 	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
    535       1.75        ad 	   lwp_t **rnewlwpp, int sclass)
    536        1.2   thorpej {
    537       1.52        ad 	struct lwp *l2, *isfree;
    538       1.52        ad 	turnstile_t *ts;
    539        1.2   thorpej 
    540  1.101.2.1      yamt 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
    541  1.101.2.1      yamt 
    542       1.52        ad 	/*
    543       1.52        ad 	 * First off, reap any detached LWP waiting to be collected.
    544       1.52        ad 	 * We can re-use its LWP structure and turnstile.
    545       1.52        ad 	 */
    546       1.52        ad 	isfree = NULL;
    547       1.52        ad 	if (p2->p_zomblwp != NULL) {
    548  1.101.2.1      yamt 		mutex_enter(p2->p_lock);
    549       1.52        ad 		if ((isfree = p2->p_zomblwp) != NULL) {
    550       1.52        ad 			p2->p_zomblwp = NULL;
    551       1.63        ad 			lwp_free(isfree, true, false);/* releases proc mutex */
    552       1.52        ad 		} else
    553  1.101.2.1      yamt 			mutex_exit(p2->p_lock);
    554       1.52        ad 	}
    555       1.52        ad 	if (isfree == NULL) {
    556       1.87        ad 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
    557       1.52        ad 		memset(l2, 0, sizeof(*l2));
    558       1.76        ad 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
    559       1.60      yamt 		SLIST_INIT(&l2->l_pi_lenders);
    560       1.52        ad 	} else {
    561       1.52        ad 		l2 = isfree;
    562       1.52        ad 		ts = l2->l_ts;
    563       1.75        ad 		KASSERT(l2->l_inheritedprio == -1);
    564       1.60      yamt 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
    565       1.52        ad 		memset(l2, 0, sizeof(*l2));
    566       1.52        ad 		l2->l_ts = ts;
    567       1.52        ad 	}
    568        1.2   thorpej 
    569        1.2   thorpej 	l2->l_stat = LSIDL;
    570        1.2   thorpej 	l2->l_proc = p2;
    571       1.52        ad 	l2->l_refcnt = 1;
    572       1.75        ad 	l2->l_class = sclass;
    573       1.75        ad 	l2->l_kpriority = l1->l_kpriority;
    574       1.82        ad 	l2->l_kpribase = PRI_KERNEL;
    575       1.52        ad 	l2->l_priority = l1->l_priority;
    576       1.75        ad 	l2->l_inheritedprio = -1;
    577       1.56     pavel 	l2->l_flag = inmem ? LW_INMEM : 0;
    578       1.88        ad 	l2->l_pflag = LP_MPSAFE;
    579       1.97        ad 	l2->l_fd = p2->p_fd;
    580  1.101.2.1      yamt 	TAILQ_INIT(&l2->l_ld_locks);
    581       1.41   thorpej 
    582       1.56     pavel 	if (p2->p_flag & PK_SYSTEM) {
    583       1.91     rmind 		/* Mark it as a system LWP and not a candidate for swapping */
    584       1.56     pavel 		l2->l_flag |= LW_SYSTEM;
    585       1.52        ad 	}
    586        1.2   thorpej 
    587  1.101.2.1      yamt 	kpreempt_disable();
    588  1.101.2.1      yamt 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
    589  1.101.2.1      yamt 	l2->l_cpu = l1->l_cpu;
    590  1.101.2.1      yamt 	kpreempt_enable();
    591  1.101.2.1      yamt 
    592       1.73     rmind 	lwp_initspecific(l2);
    593       1.75        ad 	sched_lwp_fork(l1, l2);
    594       1.37        ad 	lwp_update_creds(l2);
    595       1.70        ad 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
    596       1.70        ad 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
    597       1.65        ad 	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
    598       1.52        ad 	cv_init(&l2->l_sigcv, "sigwait");
    599       1.52        ad 	l2->l_syncobj = &sched_syncobj;
    600        1.2   thorpej 
    601        1.2   thorpej 	if (rnewlwpp != NULL)
    602        1.2   thorpej 		*rnewlwpp = l2;
    603        1.2   thorpej 
    604       1.36      yamt 	l2->l_addr = UAREA_TO_USER(uaddr);
    605        1.2   thorpej 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
    606        1.2   thorpej 	    (arg != NULL) ? arg : l2);
    607        1.2   thorpej 
    608  1.101.2.1      yamt 	mutex_enter(p2->p_lock);
    609       1.52        ad 
    610       1.52        ad 	if ((flags & LWP_DETACHED) != 0) {
    611       1.52        ad 		l2->l_prflag = LPR_DETACHED;
    612       1.52        ad 		p2->p_ndlwps++;
    613       1.52        ad 	} else
    614       1.52        ad 		l2->l_prflag = 0;
    615       1.52        ad 
    616       1.52        ad 	l2->l_sigmask = l1->l_sigmask;
    617       1.52        ad 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
    618       1.52        ad 	sigemptyset(&l2->l_sigpend.sp_set);
    619       1.52        ad 
    620       1.53      yamt 	p2->p_nlwpid++;
    621       1.53      yamt 	if (p2->p_nlwpid == 0)
    622       1.53      yamt 		p2->p_nlwpid++;
    623       1.53      yamt 	l2->l_lid = p2->p_nlwpid;
    624        1.2   thorpej 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    625        1.2   thorpej 	p2->p_nlwps++;
    626        1.2   thorpej 
    627  1.101.2.1      yamt 	mutex_exit(p2->p_lock);
    628       1.52        ad 
    629  1.101.2.1      yamt 	mutex_enter(proc_lock);
    630        1.2   thorpej 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    631  1.101.2.1      yamt 	mutex_exit(proc_lock);
    632        1.2   thorpej 
    633       1.91     rmind 	if ((p2->p_flag & PK_SYSTEM) == 0) {
    634       1.91     rmind 		/* Locking is needed, since LWP is in the list of all LWPs */
    635       1.91     rmind 		lwp_lock(l2);
    636       1.91     rmind 		/* Inherit a processor-set */
    637       1.91     rmind 		l2->l_psid = l1->l_psid;
    638       1.91     rmind 		/* Inherit an affinity */
    639       1.91     rmind 		memcpy(&l2->l_affinity, &l1->l_affinity, sizeof(cpuset_t));
    640       1.91     rmind 		/* Look for a CPU to start */
    641       1.91     rmind 		l2->l_cpu = sched_takecpu(l2);
    642       1.91     rmind 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
    643       1.91     rmind 	}
    644       1.91     rmind 
    645       1.57       dsl 	SYSCALL_TIME_LWP_INIT(l2);
    646       1.57       dsl 
    647       1.16      manu 	if (p2->p_emul->e_lwp_fork)
    648       1.16      manu 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    649       1.16      manu 
    650        1.2   thorpej 	return (0);
    651        1.2   thorpej }
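
                              /*
                               * Hedged example of creating and starting a kernel LWP, loosely after
                               * kthread_create(); the u-area allocation and flag choices here are
                               * illustrative assumptions:
                               *
                               *	inmem = uvm_uarea_alloc(&uaddr);
                               *	error = lwp_create(curlwp, &proc0, uaddr, inmem, LWP_DETACHED,
                               *	    NULL, 0, func, arg, &l, SCHED_OTHER);
                               *	if (error == 0) {
                               *		mutex_enter(proc0.p_lock);
                               *		lwp_lock(l);
                               *		proc0.p_nrlwps++;
                               *		l->l_stat = LSRUN;
                               *		sched_enqueue(l, false);
                               *		lwp_unlock(l);
                               *		mutex_exit(proc0.p_lock);
                               *	}
                               */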
    652        1.2   thorpej 
    653        1.2   thorpej /*
    654       1.64      yamt  * Called by MD code when a new LWP begins execution.  Must be called
    655       1.64      yamt  * with the previous LWP locked (so at splsched), or if there is no
    656       1.64      yamt  * previous LWP, at splsched.
    657       1.64      yamt  */
    658       1.64      yamt void
    659       1.64      yamt lwp_startup(struct lwp *prev, struct lwp *new)
    660       1.64      yamt {
    661       1.64      yamt 
    662  1.101.2.1      yamt 	KASSERT(kpreempt_disabled());
    663       1.64      yamt 	if (prev != NULL) {
    664       1.81        ad 		/*
     665       1.81        ad 		 * Normalize the count of spin-mutexes: it was
     666       1.81        ad 		 * increased in mi_switch().  Clear the context-switch
     667       1.81        ad 		 * indicator: the switch is finished for the previous LWP.
    668       1.81        ad 		 */
    669       1.81        ad 		curcpu()->ci_mtx_count++;
    670       1.81        ad 		membar_exit();
    671       1.81        ad 		prev->l_ctxswtch = 0;
    672       1.64      yamt 	}
    673  1.101.2.1      yamt 	KPREEMPT_DISABLE(new);
    674       1.64      yamt 	spl0();
    675       1.64      yamt 	pmap_activate(new);
    676       1.64      yamt 	LOCKDEBUG_BARRIER(NULL, 0);
    677  1.101.2.1      yamt 	KPREEMPT_ENABLE(new);
    678       1.65        ad 	if ((new->l_pflag & LP_MPSAFE) == 0) {
    679       1.65        ad 		KERNEL_LOCK(1, new);
    680       1.65        ad 	}
    681       1.64      yamt }
    682       1.64      yamt 
    683       1.64      yamt /*
    684       1.65        ad  * Exit an LWP.
    685        1.2   thorpej  */
    686        1.2   thorpej void
    687        1.2   thorpej lwp_exit(struct lwp *l)
    688        1.2   thorpej {
    689        1.2   thorpej 	struct proc *p = l->l_proc;
    690       1.52        ad 	struct lwp *l2;
    691       1.65        ad 	bool current;
    692       1.65        ad 
    693       1.65        ad 	current = (l == curlwp);
    694        1.2   thorpej 
    695       1.65        ad 	KASSERT(current || l->l_stat == LSIDL);
    696        1.2   thorpej 
    697       1.52        ad 	/*
    698       1.52        ad 	 * Verify that we hold no locks other than the kernel lock.
    699       1.52        ad 	 */
    700       1.52        ad #ifdef MULTIPROCESSOR
    701       1.52        ad 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
    702       1.52        ad #else
    703       1.52        ad 	LOCKDEBUG_BARRIER(NULL, 0);
    704       1.52        ad #endif
    705       1.16      manu 
    706        1.2   thorpej 	/*
    707       1.52        ad 	 * If we are the last live LWP in a process, we need to exit the
    708       1.52        ad 	 * entire process.  We do so with an exit status of zero, because
    709       1.52        ad 	 * it's a "controlled" exit, and because that's what Solaris does.
    710       1.52        ad 	 *
    711       1.52        ad 	 * We are not quite a zombie yet, but for accounting purposes we
    712       1.52        ad 	 * must increment the count of zombies here.
    713       1.45   thorpej 	 *
    714       1.45   thorpej 	 * Note: the last LWP's specificdata will be deleted here.
    715        1.2   thorpej 	 */
    716  1.101.2.1      yamt 	mutex_enter(p->p_lock);
    717       1.52        ad 	if (p->p_nlwps - p->p_nzlwps == 1) {
    718       1.65        ad 		KASSERT(current == true);
    719       1.88        ad 		/* XXXSMP kernel_lock not held */
    720        1.2   thorpej 		exit1(l, 0);
    721       1.19  jdolecek 		/* NOTREACHED */
    722        1.2   thorpej 	}
    723       1.52        ad 	p->p_nzlwps++;
    724  1.101.2.1      yamt 	mutex_exit(p->p_lock);
    725       1.52        ad 
    726       1.52        ad 	if (p->p_emul->e_lwp_exit)
    727       1.52        ad 		(*p->p_emul->e_lwp_exit)(l);
    728        1.2   thorpej 
    729       1.45   thorpej 	/* Delete the specificdata while it's still safe to sleep. */
    730       1.45   thorpej 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
    731       1.45   thorpej 
    732       1.52        ad 	/*
    733       1.52        ad 	 * Release our cached credentials.
    734       1.52        ad 	 */
    735       1.37        ad 	kauth_cred_free(l->l_cred);
    736       1.70        ad 	callout_destroy(&l->l_timeout_ch);
    737       1.65        ad 
    738       1.65        ad 	/*
    739       1.65        ad 	 * While we can still block, mark the LWP as unswappable to
     740       1.65        ad 	 * prevent conflicts with the swapper.
    741       1.65        ad 	 */
    742       1.65        ad 	if (current)
    743       1.65        ad 		uvm_lwp_hold(l);
    744       1.37        ad 
    745       1.52        ad 	/*
    746       1.52        ad 	 * Remove the LWP from the global list.
    747       1.52        ad 	 */
    748  1.101.2.1      yamt 	mutex_enter(proc_lock);
    749       1.52        ad 	LIST_REMOVE(l, l_list);
    750  1.101.2.1      yamt 	mutex_exit(proc_lock);
    751       1.19  jdolecek 
    752       1.52        ad 	/*
    753       1.52        ad 	 * Get rid of all references to the LWP that others (e.g. procfs)
    754       1.52        ad 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
    755       1.52        ad 	 * mark it waiting for collection in the proc structure.  Note that
     756       1.52        ad 	 * before we can do that, we need to free any other dead, detached
    757       1.52        ad 	 * LWP waiting to meet its maker.
    758       1.52        ad 	 */
    759  1.101.2.1      yamt 	mutex_enter(p->p_lock);
    760       1.52        ad 	lwp_drainrefs(l);
    761       1.31      yamt 
    762       1.52        ad 	if ((l->l_prflag & LPR_DETACHED) != 0) {
    763       1.52        ad 		while ((l2 = p->p_zomblwp) != NULL) {
    764       1.52        ad 			p->p_zomblwp = NULL;
    765       1.63        ad 			lwp_free(l2, false, false);/* releases proc mutex */
    766  1.101.2.1      yamt 			mutex_enter(p->p_lock);
    767       1.72        ad 			l->l_refcnt++;
    768       1.72        ad 			lwp_drainrefs(l);
    769       1.52        ad 		}
    770       1.52        ad 		p->p_zomblwp = l;
    771       1.52        ad 	}
    772       1.31      yamt 
    773       1.52        ad 	/*
    774       1.52        ad 	 * If we find a pending signal for the process and we have been
     775       1.52        ad 	 * asked to check for signals, then we lose: arrange to have
    776       1.52        ad 	 * all other LWPs in the process check for signals.
    777       1.52        ad 	 */
    778       1.56     pavel 	if ((l->l_flag & LW_PENDSIG) != 0 &&
    779       1.52        ad 	    firstsig(&p->p_sigpend.sp_set) != 0) {
    780       1.52        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    781       1.52        ad 			lwp_lock(l2);
    782       1.56     pavel 			l2->l_flag |= LW_PENDSIG;
    783       1.52        ad 			lwp_unlock(l2);
    784       1.52        ad 		}
    785       1.31      yamt 	}
    786       1.31      yamt 
    787       1.52        ad 	lwp_lock(l);
    788       1.52        ad 	l->l_stat = LSZOMB;
    789       1.90        ad 	if (l->l_name != NULL)
    790       1.90        ad 		strcpy(l->l_name, "(zombie)");
    791       1.52        ad 	lwp_unlock(l);
    792        1.2   thorpej 	p->p_nrlwps--;
    793       1.52        ad 	cv_broadcast(&p->p_lwpcv);
    794       1.78        ad 	if (l->l_lwpctl != NULL)
    795       1.78        ad 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
    796  1.101.2.1      yamt 	mutex_exit(p->p_lock);
    797       1.52        ad 
    798       1.52        ad 	/*
    799       1.52        ad 	 * We can no longer block.  At this point, lwp_free() may already
    800       1.52        ad 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
    801       1.52        ad 	 *
    802       1.52        ad 	 * Free MD LWP resources.
    803       1.52        ad 	 */
    804       1.52        ad #ifndef __NO_CPU_LWP_FREE
    805       1.52        ad 	cpu_lwp_free(l, 0);
    806       1.52        ad #endif
    807        1.2   thorpej 
    808       1.65        ad 	if (current) {
    809       1.65        ad 		pmap_deactivate(l);
    810       1.65        ad 
    811       1.65        ad 		/*
    812       1.65        ad 		 * Release the kernel lock, and switch away into
    813       1.65        ad 		 * oblivion.
    814       1.65        ad 		 */
    815       1.52        ad #ifdef notyet
    816       1.65        ad 		/* XXXSMP hold in lwp_userret() */
    817       1.65        ad 		KERNEL_UNLOCK_LAST(l);
    818       1.52        ad #else
    819       1.65        ad 		KERNEL_UNLOCK_ALL(l, NULL);
    820       1.52        ad #endif
    821       1.65        ad 		lwp_exit_switchaway(l);
    822       1.65        ad 	}
    823        1.2   thorpej }
    824        1.2   thorpej 
    825        1.2   thorpej void
    826       1.64      yamt lwp_exit_switchaway(struct lwp *l)
    827        1.2   thorpej {
    828       1.64      yamt 	struct cpu_info *ci;
    829       1.64      yamt 	struct lwp *idlelwp;
    830       1.64      yamt 
    831       1.64      yamt 	(void)splsched();
    832       1.64      yamt 	l->l_flag &= ~LW_RUNNING;
    833       1.64      yamt 	ci = curcpu();
    834       1.97        ad 	ci->ci_data.cpu_nswtch++;
    835       1.64      yamt 	idlelwp = ci->ci_data.cpu_idlelwp;
    836       1.64      yamt 	idlelwp->l_stat = LSONPROC;
    837       1.75        ad 
    838       1.75        ad 	/*
    839       1.75        ad 	 * cpu_onproc must be updated with the CPU locked, as
     840       1.75        ad 	 * aston() may try to set an AST pending on the LWP (and
     841       1.75        ad 	 * it does so with the CPU locked).  Otherwise, the LWP
     842       1.75        ad 	 * may be destroyed before the AST can be set, leading
     843       1.75        ad 	 * to a use-after-free.
    844       1.75        ad 	 */
    845       1.75        ad 	spc_lock(ci);
    846       1.75        ad 	ci->ci_data.cpu_onproc = idlelwp;
    847       1.75        ad 	spc_unlock(ci);
    848       1.75        ad 	cpu_switchto(NULL, idlelwp, false);
    849       1.52        ad }
    850       1.52        ad 
    851       1.52        ad /*
    852       1.52        ad  * Free a dead LWP's remaining resources.
    853       1.52        ad  *
    854       1.52        ad  * XXXLWP limits.
    855       1.52        ad  */
    856       1.52        ad void
    857       1.63        ad lwp_free(struct lwp *l, bool recycle, bool last)
    858       1.52        ad {
    859       1.52        ad 	struct proc *p = l->l_proc;
    860      1.100        ad 	struct rusage *ru;
    861       1.52        ad 	ksiginfoq_t kq;
    862       1.52        ad 
    863       1.92      yamt 	KASSERT(l != curlwp);
    864       1.92      yamt 
    865       1.52        ad 	/*
    866       1.52        ad 	 * If this was not the last LWP in the process, then adjust
    867       1.52        ad 	 * counters and unlock.
    868       1.52        ad 	 */
    869       1.52        ad 	if (!last) {
    870       1.52        ad 		/*
    871       1.52        ad 		 * Add the LWP's run time to the process' base value.
     872       1.52        ad 		 * This needs to coincide with coming off p_lwps.
    873       1.52        ad 		 */
    874       1.86      yamt 		bintime_add(&p->p_rtime, &l->l_rtime);
    875       1.64      yamt 		p->p_pctcpu += l->l_pctcpu;
    876      1.100        ad 		ru = &p->p_stats->p_ru;
    877      1.100        ad 		ruadd(ru, &l->l_ru);
    878      1.100        ad 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
    879      1.100        ad 		ru->ru_nivcsw += l->l_nivcsw;
    880       1.52        ad 		LIST_REMOVE(l, l_sibling);
    881       1.52        ad 		p->p_nlwps--;
    882       1.52        ad 		p->p_nzlwps--;
    883       1.52        ad 		if ((l->l_prflag & LPR_DETACHED) != 0)
    884       1.52        ad 			p->p_ndlwps--;
    885       1.63        ad 
    886       1.63        ad 		/*
    887       1.63        ad 		 * Have any LWPs sleeping in lwp_wait() recheck for
    888       1.63        ad 		 * deadlock.
    889       1.63        ad 		 */
    890       1.63        ad 		cv_broadcast(&p->p_lwpcv);
    891  1.101.2.1      yamt 		mutex_exit(p->p_lock);
    892       1.63        ad 	}
    893       1.52        ad 
    894       1.52        ad #ifdef MULTIPROCESSOR
    895       1.63        ad 	/*
    896       1.63        ad 	 * In the unlikely event that the LWP is still on the CPU,
    897       1.63        ad 	 * then spin until it has switched away.  We need to release
    898       1.63        ad 	 * all locks to avoid deadlock against interrupt handlers on
    899       1.63        ad 	 * the target CPU.
    900       1.63        ad 	 */
    901       1.64      yamt 	if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
    902       1.63        ad 		int count;
    903       1.64      yamt 		(void)count; /* XXXgcc */
    904       1.63        ad 		KERNEL_UNLOCK_ALL(curlwp, &count);
    905       1.64      yamt 		while ((l->l_flag & LW_RUNNING) != 0 ||
    906       1.64      yamt 		    l->l_cpu->ci_curlwp == l)
    907       1.63        ad 			SPINLOCK_BACKOFF_HOOK;
    908       1.63        ad 		KERNEL_LOCK(count, curlwp);
    909       1.63        ad 	}
    910       1.52        ad #endif
    911       1.52        ad 
    912       1.52        ad 	/*
    913       1.52        ad 	 * Destroy the LWP's remaining signal information.
    914       1.52        ad 	 */
    915       1.52        ad 	ksiginfo_queue_init(&kq);
    916       1.52        ad 	sigclear(&l->l_sigpend, NULL, &kq);
    917       1.52        ad 	ksiginfo_queue_drain(&kq);
    918       1.52        ad 	cv_destroy(&l->l_sigcv);
    919       1.65        ad 	mutex_destroy(&l->l_swaplock);
    920        1.2   thorpej 
    921       1.19  jdolecek 	/*
    922       1.52        ad 	 * Free the LWP's turnstile and the LWP structure itself unless the
    923       1.93      yamt 	 * caller wants to recycle them.  Also, free the scheduler specific
    924       1.93      yamt 	 * data.
    925       1.52        ad 	 *
    926       1.52        ad 	 * We can't return turnstile0 to the pool (it didn't come from it),
    927       1.52        ad 	 * so if it comes up just drop it quietly and move on.
    928       1.52        ad 	 *
    929       1.52        ad 	 * We don't recycle the VM resources at this time.
    930       1.19  jdolecek 	 */
    931       1.78        ad 	if (l->l_lwpctl != NULL)
    932       1.78        ad 		lwp_ctl_free(l);
    933       1.64      yamt 	sched_lwp_exit(l);
    934       1.64      yamt 
    935       1.52        ad 	if (!recycle && l->l_ts != &turnstile0)
    936       1.76        ad 		pool_cache_put(turnstile_cache, l->l_ts);
    937       1.90        ad 	if (l->l_name != NULL)
    938       1.90        ad 		kmem_free(l->l_name, MAXCOMLEN);
    939       1.52        ad #ifndef __NO_CPU_LWP_FREE
    940       1.52        ad 	cpu_lwp_free2(l);
    941       1.52        ad #endif
    942       1.92      yamt 	KASSERT((l->l_flag & LW_INMEM) != 0);
    943       1.19  jdolecek 	uvm_lwp_exit(l);
    944       1.60      yamt 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
    945       1.75        ad 	KASSERT(l->l_inheritedprio == -1);
    946       1.52        ad 	if (!recycle)
    947       1.87        ad 		pool_cache_put(lwp_cache, l);
    948        1.2   thorpej }
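
                              /*
                               * Reminder of the contract above: when 'last' is false, lwp_free()
                               * exits p_lock, so loops that drain zombies must re-enter it.  This
                               * is the pattern used by lwp_wait1() and lwp_exit():
                               *
                               *	while ((l2 = p->p_zomblwp) != NULL) {
                               *		p->p_zomblwp = NULL;
                               *		lwp_free(l2, false, false);	(releases p_lock)
                               *		mutex_enter(p->p_lock);
                               *	}
                               */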
    949        1.2   thorpej 
    950        1.2   thorpej /*
     951        1.2   thorpej  * Pick an LWP to represent the process for those operations which
     952        1.2   thorpej  * want information about a "process" that is actually associated
     953        1.2   thorpej  * with an LWP.
    954       1.52        ad  *
    955       1.52        ad  * If 'locking' is false, no locking or lock checks are performed.
    956       1.52        ad  * This is intended for use by DDB.
    957       1.52        ad  *
    958       1.52        ad  * We don't bother locking the LWP here, since code that uses this
    959       1.52        ad  * interface is broken by design and an exact match is not required.
    960        1.2   thorpej  */
    961        1.2   thorpej struct lwp *
    962       1.52        ad proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
    963        1.2   thorpej {
    964        1.2   thorpej 	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
    965       1.27      matt 	struct lwp *signalled;
    966       1.52        ad 	int cnt;
    967       1.52        ad 
    968       1.52        ad 	if (locking) {
    969  1.101.2.1      yamt 		KASSERT(mutex_owned(p->p_lock));
    970       1.52        ad 	}
    971        1.2   thorpej 
    972        1.2   thorpej 	/* Trivial case: only one LWP */
    973       1.52        ad 	if (p->p_nlwps == 1) {
    974       1.52        ad 		l = LIST_FIRST(&p->p_lwps);
    975       1.52        ad 		if (nrlwps)
    976       1.68       tnn 			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
    977       1.52        ad 		return l;
    978       1.52        ad 	}
    979        1.2   thorpej 
    980       1.52        ad 	cnt = 0;
    981        1.2   thorpej 	switch (p->p_stat) {
    982        1.2   thorpej 	case SSTOP:
    983        1.2   thorpej 	case SACTIVE:
    984        1.2   thorpej 		/* Pick the most live LWP */
    985        1.2   thorpej 		onproc = running = sleeping = stopped = suspended = NULL;
    986       1.27      matt 		signalled = NULL;
    987        1.2   thorpej 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    988       1.64      yamt 			if ((l->l_flag & LW_IDLE) != 0) {
    989       1.64      yamt 				continue;
    990       1.64      yamt 			}
    991       1.27      matt 			if (l->l_lid == p->p_sigctx.ps_lwp)
    992       1.27      matt 				signalled = l;
    993        1.2   thorpej 			switch (l->l_stat) {
    994        1.2   thorpej 			case LSONPROC:
    995        1.2   thorpej 				onproc = l;
    996       1.52        ad 				cnt++;
    997        1.2   thorpej 				break;
    998        1.2   thorpej 			case LSRUN:
    999        1.2   thorpej 				running = l;
   1000       1.52        ad 				cnt++;
   1001        1.2   thorpej 				break;
   1002        1.2   thorpej 			case LSSLEEP:
   1003        1.2   thorpej 				sleeping = l;
   1004        1.2   thorpej 				break;
   1005        1.2   thorpej 			case LSSTOP:
   1006        1.2   thorpej 				stopped = l;
   1007        1.2   thorpej 				break;
   1008        1.2   thorpej 			case LSSUSPENDED:
   1009        1.2   thorpej 				suspended = l;
   1010        1.2   thorpej 				break;
   1011        1.2   thorpej 			}
   1012        1.2   thorpej 		}
   1013       1.52        ad 		if (nrlwps)
   1014       1.52        ad 			*nrlwps = cnt;
   1015       1.27      matt 		if (signalled)
   1016       1.52        ad 			l = signalled;
   1017       1.52        ad 		else if (onproc)
   1018       1.52        ad 			l = onproc;
   1019       1.52        ad 		else if (running)
   1020       1.52        ad 			l = running;
   1021       1.52        ad 		else if (sleeping)
   1022       1.52        ad 			l = sleeping;
   1023       1.52        ad 		else if (stopped)
   1024       1.52        ad 			l = stopped;
   1025       1.52        ad 		else if (suspended)
   1026       1.52        ad 			l = suspended;
   1027       1.52        ad 		else
   1028       1.52        ad 			break;
   1029       1.52        ad 		return l;
   1030        1.2   thorpej #ifdef DIAGNOSTIC
   1031        1.2   thorpej 	case SIDL:
   1032       1.52        ad 	case SZOMB:
   1033       1.52        ad 	case SDYING:
   1034       1.52        ad 	case SDEAD:
   1035       1.52        ad 		if (locking)
   1036  1.101.2.1      yamt 			mutex_exit(p->p_lock);
    1037        1.2   thorpej 		/* We have more than one LWP in an idle or dying
    1038        1.2   thorpej 		 * process?  How did that happen?
    1039        1.2   thorpej 		 */
   1040       1.52        ad 		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
   1041       1.52        ad 		    p->p_pid, p->p_comm, p->p_stat);
   1042       1.52        ad 		break;
   1043        1.2   thorpej 	default:
   1044       1.52        ad 		if (locking)
   1045  1.101.2.1      yamt 			mutex_exit(p->p_lock);
   1046        1.2   thorpej 		panic("Process %d (%s) in unknown state %d",
   1047        1.2   thorpej 		    p->p_pid, p->p_comm, p->p_stat);
   1048        1.2   thorpej #endif
   1049        1.2   thorpej 	}
   1050        1.2   thorpej 
   1051       1.52        ad 	if (locking)
   1052  1.101.2.1      yamt 		mutex_exit(p->p_lock);
   1053        1.2   thorpej 	panic("proc_representative_lwp: couldn't find a lwp for process"
   1054        1.2   thorpej 		" %d (%s)", p->p_pid, p->p_comm);
   1055        1.2   thorpej 	/* NOTREACHED */
   1056        1.2   thorpej 	return NULL;
   1057        1.2   thorpej }
   1058       1.37        ad 
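                              /*
                               * Illustrative sketch, not compiled in: how a caller such as procfs
                               * might use proc_representative_lwp() to summarize a process.  The
                               * report_lwp() helper is hypothetical.
                               *
                               *	struct lwp *l;
                               *	int nrlwps;
                               *
                               *	mutex_enter(p->p_lock);
                               *	l = proc_representative_lwp(p, &nrlwps, 1);
                               *	report_lwp(l, nrlwps);
                               *	mutex_exit(p->p_lock);
                               */
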
   1059       1.37        ad /*
    1060       1.91     rmind  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1061       1.91     rmind  */
   1062       1.91     rmind void
   1063       1.91     rmind lwp_migrate(lwp_t *l, struct cpu_info *ci)
   1064       1.91     rmind {
   1065       1.91     rmind 	struct schedstate_percpu *spc;
   1066       1.91     rmind 	KASSERT(lwp_locked(l, NULL));
   1067       1.91     rmind 
   1068       1.91     rmind 	if (l->l_cpu == ci) {
   1069       1.91     rmind 		lwp_unlock(l);
   1070       1.91     rmind 		return;
   1071       1.91     rmind 	}
   1072       1.91     rmind 
   1073       1.91     rmind 	spc = &ci->ci_schedstate;
   1074       1.91     rmind 	switch (l->l_stat) {
   1075       1.91     rmind 	case LSRUN:
   1076       1.91     rmind 		if (l->l_flag & LW_INMEM) {
   1077       1.91     rmind 			l->l_target_cpu = ci;
   1078       1.91     rmind 			break;
   1079       1.91     rmind 		}
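                              		/* FALLTHROUGH */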
   1080       1.91     rmind 	case LSIDL:
   1081       1.91     rmind 		l->l_cpu = ci;
   1082       1.91     rmind 		lwp_unlock_to(l, spc->spc_mutex);
   1083       1.91     rmind 		KASSERT(!mutex_owned(spc->spc_mutex));
   1084       1.91     rmind 		return;
   1085       1.91     rmind 	case LSSLEEP:
   1086       1.91     rmind 		l->l_cpu = ci;
   1087       1.91     rmind 		break;
   1088       1.91     rmind 	case LSSTOP:
   1089       1.91     rmind 	case LSSUSPENDED:
   1090       1.91     rmind 		if (l->l_wchan != NULL) {
   1091       1.91     rmind 			l->l_cpu = ci;
   1092       1.91     rmind 			break;
   1093       1.91     rmind 		}
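                              		/* FALLTHROUGH */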
   1094       1.91     rmind 	case LSONPROC:
   1095       1.91     rmind 		l->l_target_cpu = ci;
   1096       1.91     rmind 		break;
   1097       1.91     rmind 	}
   1098       1.91     rmind 	lwp_unlock(l);
   1099       1.91     rmind }
   1100       1.91     rmind 
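                              /*
                               * Illustrative sketch, not compiled in: moving an LWP to a scheduler-
                               * chosen CPU.  The caller locks the LWP; lwp_migrate() always returns
                               * with it unlocked.  "tci" (the target CPU) is assumed here.
                               *
                               *	lwp_lock(l);
                               *	lwp_migrate(l, tci);
                               */
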
   1101       1.91     rmind /*
    1102       1.94     rmind  * Look up an LWP by process and LWP ID.  Either argument may be zero,
    1103       1.94     rmind  * in which case the calling process and/or its first LWP is used.
    1104  1.101.2.1      yamt  * On success, the LWP is returned with its process locked.
   1105       1.91     rmind  */
   1106       1.91     rmind struct lwp *
   1107       1.91     rmind lwp_find2(pid_t pid, lwpid_t lid)
   1108       1.91     rmind {
   1109       1.91     rmind 	proc_t *p;
   1110       1.91     rmind 	lwp_t *l;
   1111       1.91     rmind 
   1112       1.91     rmind 	/* Find the process */
   1113       1.94     rmind 	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
   1114       1.91     rmind 	if (p == NULL)
   1115       1.91     rmind 		return NULL;
   1116  1.101.2.1      yamt 	mutex_enter(p->p_lock);
   1117       1.94     rmind 	if (pid != 0) {
   1118       1.94     rmind 		/* Case of p_find */
   1119  1.101.2.1      yamt 		mutex_exit(proc_lock);
   1120       1.94     rmind 	}
   1121       1.91     rmind 
   1122       1.91     rmind 	/* Find the thread */
   1123       1.94     rmind 	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
   1124  1.101.2.1      yamt 	if (l == NULL) {
   1125  1.101.2.1      yamt 		mutex_exit(p->p_lock);
   1126  1.101.2.1      yamt 	}
   1127       1.91     rmind 
   1128       1.91     rmind 	return l;
   1129       1.91     rmind }
   1130       1.91     rmind 
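                              /*
                               * Illustrative sketch, not compiled in: resolving a (pid, lid) pair,
                               * e.g. from syscall arguments.  On success the process comes back
                               * locked, so the caller drops p_lock when done with the LWP.
                               *
                               *	if ((l = lwp_find2(pid, lid)) == NULL)
                               *		return ESRCH;
                               *	...inspect l...
                               *	mutex_exit(l->l_proc->p_lock);
                               */
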
   1131       1.91     rmind /*
    1132       1.52        ad  * Look up a live LWP within the specified process, and return it.
   1133       1.52        ad  *
   1134  1.101.2.1      yamt  * Must be called with p->p_lock held.
   1135       1.52        ad  */
   1136       1.52        ad struct lwp *
   1137       1.52        ad lwp_find(struct proc *p, int id)
   1138       1.52        ad {
   1139       1.52        ad 	struct lwp *l;
   1140       1.52        ad 
   1141  1.101.2.1      yamt 	KASSERT(mutex_owned(p->p_lock));
   1142       1.52        ad 
   1143       1.52        ad 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1144       1.52        ad 		if (l->l_lid == id)
   1145       1.52        ad 			break;
   1146       1.52        ad 	}
   1147       1.52        ad 
   1148       1.52        ad 	/*
   1149       1.52        ad 	 * No need to lock - all of these conditions will
   1150       1.52        ad 	 * be visible with the process level mutex held.
   1151       1.52        ad 	 */
   1152       1.52        ad 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1153       1.52        ad 		l = NULL;
   1154       1.52        ad 
   1155       1.52        ad 	return l;
   1156       1.52        ad }
   1157       1.52        ad 
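                              /*
                               * Illustrative sketch, not compiled in: finding and then locking a
                               * live LWP by ID, with the process lock already held.
                               *
                               *	mutex_enter(p->p_lock);
                               *	if ((l = lwp_find(p, lid)) != NULL) {
                               *		lwp_lock(l);
                               *		...operate on l...
                               *		lwp_unlock(l);
                               *	}
                               *	mutex_exit(p->p_lock);
                               */
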
   1158       1.52        ad /*
   1159       1.37        ad  * Update an LWP's cached credentials to mirror the process' master copy.
   1160       1.37        ad  *
   1161       1.37        ad  * This happens early in the syscall path, on user trap, and on LWP
   1162       1.37        ad  * creation.  A long-running LWP can also voluntarily choose to update
    1163       1.37        ad  * its credentials by calling this routine.  This may be called from
   1164       1.37        ad  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1165       1.37        ad  */
   1166       1.37        ad void
   1167       1.37        ad lwp_update_creds(struct lwp *l)
   1168       1.37        ad {
   1169       1.37        ad 	kauth_cred_t oc;
   1170       1.37        ad 	struct proc *p;
   1171       1.37        ad 
   1172       1.37        ad 	p = l->l_proc;
   1173       1.37        ad 	oc = l->l_cred;
   1174       1.37        ad 
   1175  1.101.2.1      yamt 	mutex_enter(p->p_lock);
   1176       1.37        ad 	kauth_cred_hold(p->p_cred);
   1177       1.37        ad 	l->l_cred = p->p_cred;
   1178       1.98        ad 	l->l_prflag &= ~LPR_CRMOD;
   1179  1.101.2.1      yamt 	mutex_exit(p->p_lock);
   1180       1.88        ad 	if (oc != NULL)
   1181       1.37        ad 		kauth_cred_free(oc);
   1182       1.52        ad }
   1183       1.52        ad 
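                              /*
                               * Illustrative sketch, not compiled in: the unlocked fast-path test
                               * made on the syscall and trap paths before paying for the mutex.
                               *
                               *	if (__predict_false(l->l_cred != l->l_proc->p_cred))
                               *		lwp_update_creds(l);
                               */
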
   1184       1.52        ad /*
   1185       1.52        ad  * Verify that an LWP is locked, and optionally verify that the lock matches
   1186       1.52        ad  * one we specify.
   1187       1.52        ad  */
   1188       1.52        ad int
   1189       1.52        ad lwp_locked(struct lwp *l, kmutex_t *mtx)
   1190       1.52        ad {
   1191       1.52        ad 	kmutex_t *cur = l->l_mutex;
   1192       1.52        ad 
   1193       1.52        ad 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1194       1.52        ad }
   1195       1.52        ad 
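                              /*
                               * Illustrative sketch, not compiled in: lwp_locked() exists for
                               * assertions, e.g. checking that an LWP is covered by a particular
                               * run queue lock before it is requeued.
                               *
                               *	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
                               */
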
   1196       1.52        ad /*
   1197       1.52        ad  * Lock an LWP.
   1198       1.52        ad  */
   1199       1.52        ad void
   1200       1.52        ad lwp_lock_retry(struct lwp *l, kmutex_t *old)
   1201       1.52        ad {
   1202       1.52        ad 
   1203       1.52        ad 	/*
   1204       1.52        ad 	 * XXXgcc ignoring kmutex_t * volatile on i386
   1205       1.52        ad 	 *
   1206       1.52        ad 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
   1207       1.52        ad 	 */
   1208       1.52        ad #if 1
   1209       1.52        ad 	while (l->l_mutex != old) {
   1210       1.52        ad #else
   1211       1.52        ad 	for (;;) {
   1212       1.52        ad #endif
   1213       1.52        ad 		mutex_spin_exit(old);
   1214       1.52        ad 		old = l->l_mutex;
   1215       1.52        ad 		mutex_spin_enter(old);
   1216       1.52        ad 
   1217       1.52        ad 		/*
   1218       1.52        ad 		 * mutex_enter() will have posted a read barrier.  Re-test
   1219       1.52        ad 		 * l->l_mutex.  If it has changed, we need to try again.
   1220       1.52        ad 		 */
   1221       1.52        ad #if 1
   1222       1.52        ad 	}
   1223       1.52        ad #else
   1224       1.52        ad 	} while (__predict_false(l->l_mutex != old));
   1225       1.52        ad #endif
   1226       1.52        ad }
   1227       1.52        ad 
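                              /*
                               * Minimal sketch, assuming the wrapper's shape: lwp_lock() (defined
                               * in sys/lwp.h) takes the current per-LWP mutex and falls back to
                               * the retry path above if the mutex changed while being acquired.
                               *
                               *	old = l->l_mutex;
                               *	mutex_spin_enter(old);
                               *	if (__predict_false(l->l_mutex != old))
                               *		lwp_lock_retry(l, old);
                               */
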
   1228       1.52        ad /*
   1229       1.52        ad  * Lend a new mutex to an LWP.  The old mutex must be held.
   1230       1.52        ad  */
   1231       1.52        ad void
   1232       1.52        ad lwp_setlock(struct lwp *l, kmutex_t *new)
   1233       1.52        ad {
   1234       1.52        ad 
   1235       1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1236       1.52        ad 
   1237  1.101.2.1      yamt 	membar_exit();
   1238       1.52        ad 	l->l_mutex = new;
   1239       1.52        ad }
   1240       1.52        ad 
   1241       1.52        ad /*
   1242       1.52        ad  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1243       1.52        ad  * must be held.
   1244       1.52        ad  */
   1245       1.52        ad void
   1246       1.52        ad lwp_unlock_to(struct lwp *l, kmutex_t *new)
   1247       1.52        ad {
   1248       1.52        ad 	kmutex_t *old;
   1249       1.52        ad 
   1250       1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1251       1.52        ad 
   1252       1.52        ad 	old = l->l_mutex;
   1253  1.101.2.1      yamt 	membar_exit();
   1254       1.52        ad 	l->l_mutex = new;
   1255       1.52        ad 	mutex_spin_exit(old);
   1256       1.52        ad }
   1257       1.52        ad 
   1258       1.52        ad /*
   1259       1.52        ad  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
   1260       1.52        ad  * locked.
   1261       1.52        ad  */
   1262       1.52        ad void
   1263       1.52        ad lwp_relock(struct lwp *l, kmutex_t *new)
   1264       1.52        ad {
   1265       1.52        ad 	kmutex_t *old;
   1266       1.52        ad 
   1267       1.63        ad 	KASSERT(mutex_owned(l->l_mutex));
   1268       1.52        ad 
   1269       1.52        ad 	old = l->l_mutex;
   1270       1.52        ad 	if (old != new) {
   1271       1.52        ad 		mutex_spin_enter(new);
   1272       1.52        ad 		l->l_mutex = new;
   1273       1.52        ad 		mutex_spin_exit(old);
   1274       1.52        ad 	}
   1275       1.52        ad }
   1276       1.52        ad 
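                              /*
                               * Illustrative sketch, not compiled in: lock lending in action.  A
                               * sleep queue lends its own mutex to an LWP that it enqueues, so a
                               * single lock then covers both the queue and the LWP.  "sq_mutex"
                               * is a hypothetical name for the queue's lock, held by the caller.
                               *
                               *	lwp_lock(l);
                               *	...place l on the sleep queue...
                               *	lwp_unlock_to(l, sq_mutex);
                               */

                              /*
                               * Try to lock an LWP without blocking.  Returns non-zero on success.
                               */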
   1277       1.60      yamt int
   1278       1.60      yamt lwp_trylock(struct lwp *l)
   1279       1.60      yamt {
   1280       1.60      yamt 	kmutex_t *old;
   1281       1.60      yamt 
   1282       1.60      yamt 	for (;;) {
   1283       1.60      yamt 		if (!mutex_tryenter(old = l->l_mutex))
   1284       1.60      yamt 			return 0;
   1285       1.60      yamt 		if (__predict_true(l->l_mutex == old))
   1286       1.60      yamt 			return 1;
   1287       1.60      yamt 		mutex_spin_exit(old);
   1288       1.60      yamt 	}
   1289       1.60      yamt }
   1290       1.60      yamt 
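                              /*
                               * Illustrative sketch, not compiled in: lwp_trylock() suits scans
                               * that must not block or risk a lock order reversal, skipping any
                               * LWP whose lock cannot be taken immediately.
                               *
                               *	if (lwp_trylock(l)) {
                               *		...examine l...
                               *		lwp_unlock(l);
                               *	}
                               */

                              /*
                               * Call the unsleep method of the LWP's current sync object, e.g. to
                               * remove the LWP from a sleep queue.  The LWP must be locked.
                               */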
   1291       1.96        ad u_int
   1292       1.96        ad lwp_unsleep(lwp_t *l, bool cleanup)
   1293       1.96        ad {
   1294       1.96        ad 
   1295       1.96        ad 	KASSERT(mutex_owned(l->l_mutex));
   1296       1.96        ad 
   1297       1.96        ad 	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
   1298       1.96        ad }
    1299       1.96        ad 
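                              /*
                               * Illustrative sketch, not compiled in: signal delivery can use
                               * lwp_unsleep() to pull an LWP off whatever queue it sleeps on
                               * without knowing the queue's type; the cleanup flag is passed
                               * through to the method unchanged.
                               *
                               *	lwp_lock(l);
                               *	(void)lwp_unsleep(l, true);
                               */
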
   1301       1.52        ad /*
    1302       1.56     pavel  * Handle exceptions for mi_userret().  Called if any of the flags in
    1303       1.52        ad  * the LW_USERRET mask is set.
   1304       1.52        ad  */
   1305       1.52        ad void
   1306       1.52        ad lwp_userret(struct lwp *l)
   1307       1.52        ad {
   1308       1.52        ad 	struct proc *p;
   1309       1.54        ad 	void (*hook)(void);
   1310       1.52        ad 	int sig;
   1311       1.52        ad 
   1312       1.52        ad 	p = l->l_proc;
   1313       1.52        ad 
   1314       1.75        ad #ifndef __HAVE_FAST_SOFTINTS
   1315       1.75        ad 	/* Run pending soft interrupts. */
   1316       1.75        ad 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1317       1.75        ad 		softint_overlay();
   1318       1.75        ad #endif
   1319       1.75        ad 
   1320       1.52        ad 	/*
   1321       1.52        ad 	 * It should be safe to do this read unlocked on a multiprocessor
    1322       1.52        ad 	 * system.
   1323       1.52        ad 	 */
   1324       1.56     pavel 	while ((l->l_flag & LW_USERRET) != 0) {
   1325       1.52        ad 		/*
   1326       1.52        ad 		 * Process pending signals first, unless the process
   1327       1.61        ad 		 * is dumping core or exiting, where we will instead
   1328      1.101     rmind 		 * enter the LW_WSUSPEND case below.
   1329       1.52        ad 		 */
   1330       1.61        ad 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1331       1.61        ad 		    LW_PENDSIG) {
   1332  1.101.2.1      yamt 			mutex_enter(p->p_lock);
   1333       1.52        ad 			while ((sig = issignal(l)) != 0)
   1334       1.52        ad 				postsig(sig);
   1335  1.101.2.1      yamt 			mutex_exit(p->p_lock);
   1336       1.52        ad 		}
   1337       1.52        ad 
   1338       1.52        ad 		/*
   1339       1.52        ad 		 * Core-dump or suspend pending.
   1340       1.52        ad 		 *
   1341       1.52        ad 		 * In case of core dump, suspend ourselves, so that the
   1342       1.52        ad 		 * kernel stack and therefore the userland registers saved
   1343       1.52        ad 		 * in the trapframe are around for coredump() to write them
   1344       1.52        ad 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
   1345       1.52        ad 		 * will write the core file out once all other LWPs are
   1346       1.52        ad 		 * suspended.
   1347       1.52        ad 		 */
   1348       1.56     pavel 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1349  1.101.2.1      yamt 			mutex_enter(p->p_lock);
   1350       1.52        ad 			p->p_nrlwps--;
   1351       1.52        ad 			cv_broadcast(&p->p_lwpcv);
   1352       1.52        ad 			lwp_lock(l);
   1353       1.52        ad 			l->l_stat = LSSUSPENDED;
   1354  1.101.2.1      yamt 			lwp_unlock(l);
   1355  1.101.2.1      yamt 			mutex_exit(p->p_lock);
   1356  1.101.2.1      yamt 			lwp_lock(l);
   1357       1.64      yamt 			mi_switch(l);
   1358       1.52        ad 		}
   1359       1.52        ad 
   1360       1.52        ad 		/* Process is exiting. */
   1361       1.56     pavel 		if ((l->l_flag & LW_WEXIT) != 0) {
   1362       1.52        ad 			lwp_exit(l);
   1363       1.52        ad 			KASSERT(0);
   1364       1.52        ad 			/* NOTREACHED */
   1365       1.52        ad 		}
   1366       1.54        ad 
   1367       1.54        ad 		/* Call userret hook; used by Linux emulation. */
   1368       1.56     pavel 		if ((l->l_flag & LW_WUSERRET) != 0) {
   1369       1.54        ad 			lwp_lock(l);
   1370       1.56     pavel 			l->l_flag &= ~LW_WUSERRET;
   1371       1.54        ad 			lwp_unlock(l);
   1372       1.54        ad 			hook = p->p_userret;
   1373       1.54        ad 			p->p_userret = NULL;
   1374       1.54        ad 			(*hook)();
   1375       1.54        ad 		}
   1376       1.52        ad 	}
   1377       1.52        ad }
   1378       1.52        ad 
   1379       1.52        ad /*
   1380       1.52        ad  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1381       1.52        ad  */
   1382       1.52        ad void
   1383       1.52        ad lwp_need_userret(struct lwp *l)
   1384       1.52        ad {
   1385       1.63        ad 	KASSERT(lwp_locked(l, NULL));
   1386       1.52        ad 
   1387       1.52        ad 	/*
   1388       1.52        ad 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1389       1.52        ad 	 * that the condition will be seen before forcing the LWP to enter
   1390       1.52        ad 	 * kernel mode.
   1391       1.52        ad 	 */
   1392       1.81        ad 	membar_producer();
   1393       1.52        ad 	cpu_signotify(l);
   1394       1.52        ad }
   1395       1.52        ad 
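                              /*
                               * Illustrative sketch, not compiled in: a typical way of arming the
                               * userret path, here to request that an LWP exit.
                               *
                               *	lwp_lock(l);
                               *	l->l_flag |= LW_WEXIT;
                               *	lwp_need_userret(l);
                               *	lwp_unlock(l);
                               */
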
   1396       1.52        ad /*
   1397       1.52        ad  * Add one reference to an LWP.  This will prevent the LWP from
    1398       1.52        ad  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1399       1.52        ad  */
   1400       1.52        ad void
   1401       1.52        ad lwp_addref(struct lwp *l)
   1402       1.52        ad {
   1403       1.52        ad 
   1404  1.101.2.1      yamt 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1405       1.52        ad 	KASSERT(l->l_stat != LSZOMB);
   1406       1.52        ad 	KASSERT(l->l_refcnt != 0);
   1407       1.52        ad 
   1408       1.52        ad 	l->l_refcnt++;
   1409       1.52        ad }
   1410       1.52        ad 
   1411       1.52        ad /*
   1412       1.52        ad  * Remove one reference to an LWP.  If this is the last reference,
   1413       1.52        ad  * then we must finalize the LWP's death.
   1414       1.52        ad  */
   1415       1.52        ad void
   1416       1.52        ad lwp_delref(struct lwp *l)
   1417       1.52        ad {
   1418       1.52        ad 	struct proc *p = l->l_proc;
   1419       1.52        ad 
   1420  1.101.2.1      yamt 	mutex_enter(p->p_lock);
   1421       1.72        ad 	KASSERT(l->l_stat != LSZOMB);
   1422       1.72        ad 	KASSERT(l->l_refcnt > 0);
   1423       1.52        ad 	if (--l->l_refcnt == 0)
   1424       1.76        ad 		cv_broadcast(&p->p_lwpcv);
   1425  1.101.2.1      yamt 	mutex_exit(p->p_lock);
   1426       1.52        ad }
   1427       1.52        ad 
   1428       1.52        ad /*
   1429       1.52        ad  * Drain all references to the current LWP.
   1430       1.52        ad  */
   1431       1.52        ad void
   1432       1.52        ad lwp_drainrefs(struct lwp *l)
   1433       1.52        ad {
   1434       1.52        ad 	struct proc *p = l->l_proc;
   1435       1.52        ad 
   1436  1.101.2.1      yamt 	KASSERT(mutex_owned(p->p_lock));
   1437       1.52        ad 	KASSERT(l->l_refcnt != 0);
   1438       1.52        ad 
   1439       1.52        ad 	l->l_refcnt--;
   1440       1.52        ad 	while (l->l_refcnt != 0)
   1441  1.101.2.1      yamt 		cv_wait(&p->p_lwpcv, p->p_lock);
   1442       1.37        ad }
   1443       1.41   thorpej 
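                              /*
                               * Illustrative sketch, not compiled in: the hold/release pattern for
                               * inspecting an LWP after the process lock has been dropped.
                               *
                               *	mutex_enter(p->p_lock);
                               *	lwp_addref(l);
                               *	mutex_exit(p->p_lock);
                               *	...inspect the LWP's PCB, registers, etc...
                               *	lwp_delref(l);
                               */
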
   1444       1.41   thorpej /*
   1445       1.41   thorpej  * lwp_specific_key_create --
   1446       1.41   thorpej  *	Create a key for subsystem lwp-specific data.
   1447       1.41   thorpej  */
   1448       1.41   thorpej int
   1449       1.41   thorpej lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1450       1.41   thorpej {
   1451       1.41   thorpej 
   1452       1.45   thorpej 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
   1453       1.41   thorpej }
   1454       1.41   thorpej 
   1455       1.41   thorpej /*
   1456       1.41   thorpej  * lwp_specific_key_delete --
   1457       1.41   thorpej  *	Delete a key for subsystem lwp-specific data.
   1458       1.41   thorpej  */
   1459       1.41   thorpej void
   1460       1.41   thorpej lwp_specific_key_delete(specificdata_key_t key)
   1461       1.41   thorpej {
   1462       1.41   thorpej 
   1463       1.41   thorpej 	specificdata_key_delete(lwp_specificdata_domain, key);
   1464       1.41   thorpej }
   1465       1.41   thorpej 
   1466       1.45   thorpej /*
   1467       1.45   thorpej  * lwp_initspecific --
   1468       1.45   thorpej  *	Initialize an LWP's specificdata container.
   1469       1.45   thorpej  */
   1470       1.42  christos void
   1471       1.42  christos lwp_initspecific(struct lwp *l)
   1472       1.42  christos {
   1473       1.42  christos 	int error;
   1474       1.45   thorpej 
   1475       1.42  christos 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
   1476       1.42  christos 	KASSERT(error == 0);
   1477       1.42  christos }
   1478       1.42  christos 
   1479       1.41   thorpej /*
   1480       1.45   thorpej  * lwp_finispecific --
   1481       1.45   thorpej  *	Finalize an LWP's specificdata container.
   1482       1.45   thorpej  */
   1483       1.45   thorpej void
   1484       1.45   thorpej lwp_finispecific(struct lwp *l)
   1485       1.45   thorpej {
   1486       1.45   thorpej 
   1487       1.45   thorpej 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
   1488       1.45   thorpej }
   1489       1.45   thorpej 
   1490       1.45   thorpej /*
   1491       1.41   thorpej  * lwp_getspecific --
   1492       1.41   thorpej  *	Return lwp-specific data corresponding to the specified key.
   1493       1.41   thorpej  *
   1494       1.41   thorpej  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
   1495       1.41   thorpej  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
    1496       1.41   thorpej  * LWP's specific data, care must be taken to ensure that doing so
   1497       1.41   thorpej  *	would not cause internal data structure inconsistency (i.e. caller
   1498       1.41   thorpej  *	can guarantee that the target LWP is not inside an lwp_getspecific()
   1499       1.41   thorpej  *	or lwp_setspecific() call).
   1500       1.41   thorpej  */
   1501       1.41   thorpej void *
   1502       1.44   thorpej lwp_getspecific(specificdata_key_t key)
   1503       1.41   thorpej {
   1504       1.41   thorpej 
   1505       1.41   thorpej 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1506       1.44   thorpej 						  &curlwp->l_specdataref, key));
   1507       1.41   thorpej }
   1508       1.41   thorpej 
   1509       1.47   hannken void *
   1510       1.47   hannken _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
   1511       1.47   hannken {
   1512       1.47   hannken 
   1513       1.47   hannken 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1514       1.47   hannken 						  &l->l_specdataref, key));
   1515       1.47   hannken }
   1516       1.47   hannken 
   1517       1.41   thorpej /*
   1518       1.41   thorpej  * lwp_setspecific --
   1519       1.41   thorpej  *	Set lwp-specific data corresponding to the specified key.
   1520       1.41   thorpej  */
   1521       1.41   thorpej void
   1522       1.45   thorpej lwp_setspecific(specificdata_key_t key, void *data)
   1523       1.41   thorpej {
   1524       1.41   thorpej 
   1525       1.41   thorpej 	specificdata_setspecific(lwp_specificdata_domain,
   1526       1.44   thorpej 				 &curlwp->l_specdataref, key, data);
   1527       1.41   thorpej }
   1528       1.78        ad 
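                              /*
                               * Illustrative sketch, not compiled in: a subsystem keeping per-LWP
                               * state.  "foo_key", "foo_dtor" and "data" are hypothetical names.
                               *
                               *	static specificdata_key_t foo_key;
                               *
                               *	error = lwp_specific_key_create(&foo_key, foo_dtor);
                               *	lwp_setspecific(foo_key, data);		(current LWP only)
                               *	data = lwp_getspecific(foo_key);
                               */
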
   1529       1.78        ad /*
   1530       1.78        ad  * Allocate a new lwpctl structure for a user LWP.
   1531       1.78        ad  */
   1532       1.78        ad int
   1533       1.78        ad lwp_ctl_alloc(vaddr_t *uaddr)
   1534       1.78        ad {
   1535       1.78        ad 	lcproc_t *lp;
   1536       1.78        ad 	u_int bit, i, offset;
   1537       1.78        ad 	struct uvm_object *uao;
   1538       1.78        ad 	int error;
   1539       1.78        ad 	lcpage_t *lcp;
   1540       1.78        ad 	proc_t *p;
   1541       1.78        ad 	lwp_t *l;
   1542       1.78        ad 
   1543       1.78        ad 	l = curlwp;
   1544       1.78        ad 	p = l->l_proc;
   1545       1.78        ad 
   1546       1.81        ad 	if (l->l_lcpage != NULL) {
   1547       1.81        ad 		lcp = l->l_lcpage;
   1548       1.81        ad 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1549       1.78        ad 		return (EINVAL);
   1550       1.81        ad 	}
   1551       1.78        ad 
   1552       1.78        ad 	/* First time around, allocate header structure for the process. */
   1553       1.78        ad 	if ((lp = p->p_lwpctl) == NULL) {
   1554       1.78        ad 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1555       1.78        ad 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1556       1.78        ad 		lp->lp_uao = NULL;
   1557       1.78        ad 		TAILQ_INIT(&lp->lp_pages);
   1558  1.101.2.1      yamt 		mutex_enter(p->p_lock);
   1559       1.78        ad 		if (p->p_lwpctl == NULL) {
   1560       1.78        ad 			p->p_lwpctl = lp;
   1561  1.101.2.1      yamt 			mutex_exit(p->p_lock);
   1562       1.78        ad 		} else {
   1563  1.101.2.1      yamt 			mutex_exit(p->p_lock);
   1564       1.78        ad 			mutex_destroy(&lp->lp_lock);
   1565       1.78        ad 			kmem_free(lp, sizeof(*lp));
   1566       1.78        ad 			lp = p->p_lwpctl;
   1567       1.78        ad 		}
   1568       1.78        ad 	}
   1569       1.78        ad 
    1570       1.78        ad 	/*
    1571       1.78        ad 	 * Set up an anonymous memory region to hold the shared pages.
    1572       1.78        ad 	 * Map them into the process' address space.  The user vmspace
    1573       1.78        ad 	 * gets the first reference on the UAO.
    1574       1.78        ad 	 */
   1575       1.78        ad 	mutex_enter(&lp->lp_lock);
   1576       1.78        ad 	if (lp->lp_uao == NULL) {
   1577       1.78        ad 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1578       1.78        ad 		lp->lp_cur = 0;
   1579       1.78        ad 		lp->lp_max = LWPCTL_UAREA_SZ;
   1580       1.78        ad 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1581       1.78        ad 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
   1582       1.78        ad 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1583       1.78        ad 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1584       1.78        ad 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1585       1.78        ad 		if (error != 0) {
   1586       1.78        ad 			uao_detach(lp->lp_uao);
   1587       1.78        ad 			lp->lp_uao = NULL;
   1588       1.78        ad 			mutex_exit(&lp->lp_lock);
   1589       1.78        ad 			return error;
   1590       1.78        ad 		}
   1591       1.78        ad 	}
   1592       1.78        ad 
   1593       1.78        ad 	/* Get a free block and allocate for this LWP. */
   1594       1.78        ad 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1595       1.78        ad 		if (lcp->lcp_nfree != 0)
   1596       1.78        ad 			break;
   1597       1.78        ad 	}
   1598       1.78        ad 	if (lcp == NULL) {
   1599       1.78        ad 		/* Nothing available - try to set up a free page. */
   1600       1.78        ad 		if (lp->lp_cur == lp->lp_max) {
   1601       1.78        ad 			mutex_exit(&lp->lp_lock);
   1602       1.78        ad 			return ENOMEM;
   1603       1.78        ad 		}
   1604       1.78        ad 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1605       1.79      yamt 		if (lcp == NULL) {
   1606       1.79      yamt 			mutex_exit(&lp->lp_lock);
   1607       1.78        ad 			return ENOMEM;
   1608       1.79      yamt 		}
   1609       1.78        ad 		/*
   1610       1.78        ad 		 * Wire the next page down in kernel space.  Since this
   1611       1.78        ad 		 * is a new mapping, we must add a reference.
   1612       1.78        ad 		 */
   1613       1.78        ad 		uao = lp->lp_uao;
   1614       1.78        ad 		(*uao->pgops->pgo_reference)(uao);
   1615       1.99        ad 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1616       1.78        ad 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1617       1.78        ad 		    uao, lp->lp_cur, PAGE_SIZE,
   1618       1.78        ad 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1619       1.78        ad 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1620       1.78        ad 		if (error != 0) {
   1621       1.78        ad 			mutex_exit(&lp->lp_lock);
   1622       1.78        ad 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1623       1.78        ad 			(*uao->pgops->pgo_detach)(uao);
   1624       1.78        ad 			return error;
   1625       1.78        ad 		}
   1626       1.89      yamt 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1627       1.89      yamt 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1628       1.89      yamt 		if (error != 0) {
   1629       1.89      yamt 			mutex_exit(&lp->lp_lock);
   1630       1.89      yamt 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1631       1.89      yamt 			    lcp->lcp_kaddr + PAGE_SIZE);
   1632       1.89      yamt 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1633       1.89      yamt 			return error;
   1634       1.89      yamt 		}
   1635       1.78        ad 		/* Prepare the page descriptor and link into the list. */
   1636       1.78        ad 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1637       1.78        ad 		lp->lp_cur += PAGE_SIZE;
   1638       1.78        ad 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1639       1.78        ad 		lcp->lcp_rotor = 0;
   1640       1.78        ad 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1641       1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1642       1.78        ad 	}
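                              	/*
                              	 * Scan for a bitmap word with a free slot, starting at the
                              	 * rotor; a set bit means free.  Each 32-bit word covers 32
                              	 * lwpctl slots, so a slot's index is (word << 5) + bit.
                              	 */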
   1643       1.78        ad 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1644       1.78        ad 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1645       1.78        ad 			i = 0;
   1646       1.78        ad 	}
   1647       1.78        ad 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1648       1.78        ad 	lcp->lcp_bitmap[i] ^= (1 << bit);
   1649       1.78        ad 	lcp->lcp_rotor = i;
   1650       1.78        ad 	lcp->lcp_nfree--;
   1651       1.78        ad 	l->l_lcpage = lcp;
   1652       1.78        ad 	offset = (i << 5) + bit;
   1653       1.78        ad 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1654       1.78        ad 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1655       1.78        ad 	mutex_exit(&lp->lp_lock);
   1656       1.78        ad 
   1657  1.101.2.1      yamt 	KPREEMPT_DISABLE(l);
   1658       1.78        ad 	l->l_lwpctl->lc_curcpu = (short)curcpu()->ci_data.cpu_index;
   1659  1.101.2.1      yamt 	KPREEMPT_ENABLE(l);
   1660       1.78        ad 
   1661       1.78        ad 	return 0;
   1662       1.78        ad }
   1663       1.78        ad 
   1664       1.78        ad /*
   1665       1.78        ad  * Free an lwpctl structure back to the per-process list.
   1666       1.78        ad  */
   1667       1.78        ad void
   1668       1.78        ad lwp_ctl_free(lwp_t *l)
   1669       1.78        ad {
   1670       1.78        ad 	lcproc_t *lp;
   1671       1.78        ad 	lcpage_t *lcp;
   1672       1.78        ad 	u_int map, offset;
   1673       1.78        ad 
   1674       1.78        ad 	lp = l->l_proc->p_lwpctl;
   1675       1.78        ad 	KASSERT(lp != NULL);
   1676       1.78        ad 
   1677       1.78        ad 	lcp = l->l_lcpage;
   1678       1.78        ad 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1679       1.78        ad 	KASSERT(offset < LWPCTL_PER_PAGE);
   1680       1.78        ad 
   1681       1.78        ad 	mutex_enter(&lp->lp_lock);
   1682       1.78        ad 	lcp->lcp_nfree++;
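                              	/* Mark the slot free again: word = offset >> 5, bit = offset & 31. */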
   1683       1.78        ad 	map = offset >> 5;
   1684       1.78        ad 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
   1685       1.78        ad 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1686       1.78        ad 		lcp->lcp_rotor = map;
   1687       1.78        ad 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1688       1.78        ad 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1689       1.78        ad 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1690       1.78        ad 	}
   1691       1.78        ad 	mutex_exit(&lp->lp_lock);
   1692       1.78        ad }
   1693       1.78        ad 
   1694       1.78        ad /*
   1695       1.78        ad  * Process is exiting; tear down lwpctl state.  This can only be safely
   1696       1.78        ad  * called by the last LWP in the process.
   1697       1.78        ad  */
   1698       1.78        ad void
   1699       1.78        ad lwp_ctl_exit(void)
   1700       1.78        ad {
   1701       1.78        ad 	lcpage_t *lcp, *next;
   1702       1.78        ad 	lcproc_t *lp;
   1703       1.78        ad 	proc_t *p;
   1704       1.78        ad 	lwp_t *l;
   1705       1.78        ad 
   1706       1.78        ad 	l = curlwp;
   1707       1.78        ad 	l->l_lwpctl = NULL;
   1708       1.95        ad 	l->l_lcpage = NULL;
   1709       1.78        ad 	p = l->l_proc;
   1710       1.78        ad 	lp = p->p_lwpctl;
   1711       1.78        ad 
   1712       1.78        ad 	KASSERT(lp != NULL);
   1713       1.78        ad 	KASSERT(p->p_nlwps == 1);
   1714       1.78        ad 
   1715       1.78        ad 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1716       1.78        ad 		next = TAILQ_NEXT(lcp, lcp_chain);
   1717       1.78        ad 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1718       1.78        ad 		    lcp->lcp_kaddr + PAGE_SIZE);
   1719       1.78        ad 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1720       1.78        ad 	}
   1721       1.78        ad 
   1722       1.78        ad 	if (lp->lp_uao != NULL) {
   1723       1.78        ad 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1724       1.78        ad 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1725       1.78        ad 	}
   1726       1.78        ad 
   1727       1.78        ad 	mutex_destroy(&lp->lp_lock);
   1728       1.78        ad 	kmem_free(lp, sizeof(*lp));
   1729       1.78        ad 	p->p_lwpctl = NULL;
   1730       1.78        ad }
   1731       1.84      yamt 
   1732       1.84      yamt #if defined(DDB)
   1733       1.84      yamt void
   1734       1.84      yamt lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1735       1.84      yamt {
   1736       1.84      yamt 	lwp_t *l;
   1737       1.84      yamt 
   1738       1.84      yamt 	LIST_FOREACH(l, &alllwp, l_list) {
   1739       1.84      yamt 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   1740       1.84      yamt 
   1741       1.84      yamt 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   1742       1.84      yamt 			continue;
   1743       1.84      yamt 		}
   1744       1.84      yamt 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   1745       1.84      yamt 		    (void *)addr, (void *)stack,
   1746       1.84      yamt 		    (size_t)(addr - stack), l);
   1747       1.84      yamt 	}
   1748       1.84      yamt }
   1749       1.84      yamt #endif /* defined(DDB) */
   1750