      1 /*	$NetBSD: kern_lwp.c,v 1.40.2.16 2007/01/30 13:51:40 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams, and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Overview
     41  *
     42  *	Lightweight processes (LWPs) are the basic unit (or thread) of
     43  *	execution within the kernel.  The core state of an LWP is described
     44  *	by "struct lwp".
     45  *
     46  *	Each LWP is contained within a process (described by "struct proc").
     47  *	Every process contains at least one LWP, but may contain more.  The
     48  *	process describes attributes shared among all of its LWPs such as a
     49  *	private address space, global execution state (stopped, active,
     50  *	zombie, ...), signal disposition and so on.  On a multiprocessor
     51  *	machine, multiple LWPs may be executing in the kernel simultaneously.
     52  *
     53  *	Note that LWPs differ from kernel threads (kthreads) in that kernel
     54  *	threads are distinct processes (system processes) with no user space
     55  *	component, which themselves may contain one or more LWPs.
     56  *
     57  * Execution states
     58  *
     59  *	At any given time, an LWP has overall state that is described by
     60  *	lwp::l_stat.  The states are broken into two sets below.  The first
     61  *	set is guaranteed to represent the absolute, current state of the
     62  *	LWP:
     63  *
     64  * 	LSONPROC
     65  *
     66  * 		On processor: the LWP is executing on a CPU, either in the
     67  * 		kernel or in user space.
     68  *
     69  * 	LSRUN
     70  *
     71  * 		Runnable: the LWP is parked on a run queue, and may soon be
     72  * 		chosen to run by an idle processor, or by a processor that
     73  * 		has been asked to preempt a currently running but lower
     74  * 		priority LWP.  If the LWP is not swapped in (L_INMEM == 0)
     75  *		then the LWP is not on a run queue, but may be soon.
     76  *
     77  * 	LSIDL
     78  *
     79  * 		Idle: the LWP has been created but has not yet executed.
     80  * 		Whoever created the new LWP can be expected to set it to
     81  * 		another state shortly.
     82  *
     83  * 	LSSUSPENDED:
     84  *
     85  * 		Suspended: the LWP has had its execution suspended by
     86  *		another LWP in the same process using the _lwp_suspend()
     87  *		system call.  User-level LWPs also enter the suspended
     88  *		state when the system is shutting down.
     89  *
     90  *	The second set represents a "statement of intent" on behalf of the
     91  *	LWP.  The LWP may in fact be executing on a processor, may be
     92  *	sleeping, idle, or on a run queue. It is expected to take the
     93  *	necessary action to stop executing or become "running" again within
     94  *	a short timeframe.
     95  *
     96  * 	LSZOMB:
     97  *
     98  * 		Dead: the LWP has released most of its resources and is
     99  * 		about to switch away into oblivion.  When it switches away,
    100  * 		its few remaining resources will be collected.
    101  *
    102  * 	LSSLEEP:
    103  *
    104  * 		Sleeping: the LWP has entered itself onto a sleep queue, and
    105  * 		will switch away shortly to allow other LWPs to run on the
    106  * 		CPU.
    107  *
    108  * 	LSSTOP:
    109  *
    110  * 		Stopped: the LWP has been stopped as a result of a job
    111  * 		control signal, or as a result of the ptrace() interface.
    112  * 		Stopped LWPs may run briefly within the kernel to handle
    113  * 		signals that they receive, but will not return to user space
    114  * 		until their process' state is changed away from stopped.
    115  * 		Single LWPs within a process can not be set stopped
    116  * 		selectively: all actions that can stop or continue LWPs
    117  * 		occur at the process level.
    118  *
    119  * State transitions
    120  *
    121  *	Note that the LSSTOP and LSSUSPENDED states may only be set
    122  *	when returning to user space in userret(), or when sleeping
    123  *	interruptibly.  Before setting those states, we try to ensure
    124  *	that the LWPs will release all kernel locks that they hold,
    125  *	and at a minimum try to ensure that the LWP can be set runnable
    126  *	again by a signal.
    127  *
    128  *	LWPs may transition states in the following ways:
    129  *
    130  *	 RUN -------> ONPROC		ONPROC -----> RUN
    131  *	            > STOPPED			    > SLEEP
    132  *	            > SUSPENDED			    > STOPPED
    133  *						    > SUSPENDED
    134  *						    > ZOMB
    135  *
    136  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    137  *	            > SLEEP			    > SLEEP
    138  *
    139  *	 SLEEP -----> ONPROC		IDL --------> RUN
    140  *		    > RUN		            > SUSPENDED
    141  *		    > STOPPED                       > STOPPED
    142  *		    > SUSPENDED
    143  *
    144  * Locking
    145  *
    146  *	The majority of fields in 'struct lwp' are covered by a single,
    147  *	general spin mutex pointed to by lwp::l_mutex.  The locks covering
    148  *	each field are documented in sys/lwp.h.
    149  *
    150  *	State transitions must be made with the LWP's general lock held.  In
    151  *	a multiprocessor kernel, state transitions may cause the LWP's lock
    152  *	pointer to change.  On uniprocessor kernels, most scheduler and
    153  *	synchronisation objects such as sleep queues and LWPs are protected
    154  *	by only one mutex (sched_mutex).  In this case, LWPs' lock pointers
    155  *	will never change and will always reference sched_mutex.
    156  *
    157  *	Manipulation of the general lock is not performed directly, but
    158  *	through calls to lwp_lock(), lwp_relock() and similar.
    159  *
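 *
 *	For example, a minimal state transition made under the general lock
 *	looks like the sketch below (this mirrors lwp_exit() marking itself
 *	a zombie; real transitions also hold p_smutex where the process wide
 *	counters must be adjusted, and update run queues where needed):
 *
 *		lwp_lock(l);
 *		l->l_stat = LSZOMB;
 *		lwp_unlock(l);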
    160  *	States and their associated locks:
    161  *
    162  *	LSIDL, LSZOMB
    163  *
    164  *		Always covered by sched_mutex.
    165  *
    166  *	LSONPROC, LSRUN:
    167  *
    168  *		Always covered by sched_mutex, which protects the run queues
    169  *		and other miscellaneous items.  If the scheduler is changed
    170  *		to use per-CPU run queues, this may become a per-CPU mutex.
    171  *
    172  *	LSSLEEP:
    173  *
    174  *		Covered by a mutex associated with the sleep queue that the
    175  *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
    176  *
    177  *	LSSTOP, LSSUSPENDED:
    178  *
    179  *		If the LWP was previously sleeping (l_wchan != NULL), then
    180  *		l_mutex references the sleep queue mutex.  If the LWP was
    181  *		runnable or on the CPU when halted, or has been removed from
    182  *		the sleep queue since halted, then the mutex is sched_mutex.
    183  *
    184  *	The lock order is as follows:
    185  *
    186  *		sleepq_t::sq_mutex -> sched_mutex
    187  *
    188  *	Each process has a scheduler state mutex (proc::p_smutex), and a
    189  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    190  *	so on.  When an LWP is to be entered into or removed from one of the
    191  *	following states, p_smutex must be held and the process wide counters
    192  *	adjusted:
    193  *
    194  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    195  *
    196  *	Note that an LWP is considered running or likely to run soon if in
    197  *	one of the following states.  This affects the value of p_nrlwps:
    198  *
    199  *		LSRUN, LSONPROC, LSSLEEP
    200  *
    201  *	p_smutex does not need to be held when transitioning among these
    202  *	three states.
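 *
 *	As an example of the counter protocol (mirroring the code in
 *	lwp_userret() below, with the cv_broadcast() of p_lwpcv omitted), an
 *	LWP suspending itself takes p_smutex, decrements p_nrlwps, and only
 *	then marks itself suspended before switching away:
 *
 *		mutex_enter(&p->p_smutex);
 *		p->p_nrlwps--;
 *		lwp_lock(l);
 *		l->l_stat = LSSUSPENDED;
 *		mutex_exit(&p->p_smutex);
 *		mi_switch(l, NULL);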
    203  */
    204 
    205 #include <sys/cdefs.h>
    206 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.40.2.16 2007/01/30 13:51:40 ad Exp $");
    207 
    208 #include "opt_multiprocessor.h"
    209 #include "opt_lockdebug.h"
    210 
    211 #define _LWP_API_PRIVATE
    212 
    213 #include <sys/param.h>
    214 #include <sys/systm.h>
    215 #include <sys/pool.h>
    216 #include <sys/proc.h>
    217 #include <sys/syscallargs.h>
    218 #include <sys/kauth.h>
    219 #include <sys/sleepq.h>
    220 #include <sys/lockdebug.h>
    221 #include <sys/kmem.h>
    222 
    223 #include <uvm/uvm_extern.h>
    224 
    225 struct lwplist	alllwp;
    226 
    227 POOL_INIT(lwp_pool, sizeof(struct lwp), 32, 0, 0, "lwppl",
    228     &pool_allocator_nointr);
    229 POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    230     &pool_allocator_nointr);
    231 
    232 static specificdata_domain_t lwp_specificdata_domain;
    233 
    234 #define LWP_DEBUG
    235 
    236 #ifdef LWP_DEBUG
    237 int lwp_debug = 0;
    238 #define DPRINTF(x) if (lwp_debug) printf x
    239 #else
    240 #define DPRINTF(x)
    241 #endif
    242 
    243 void
    244 lwpinit(void)
    245 {
    246 
    247 	lwp_specificdata_domain = specificdata_domain_create();
    248 	KASSERT(lwp_specificdata_domain != NULL);
    249 	lwp_sys_init();
    250 }
    251 
    252 /*
    253  * Set an LWP suspended.
    254  *
    255  * Must be called with p_smutex held, and the LWP locked.  Will unlock the
    256  * LWP before return.
    257  */
    258 int
    259 lwp_suspend(struct lwp *curl, struct lwp *t)
    260 {
    261 	int error;
    262 
    263 	LOCK_ASSERT(mutex_owned(&t->l_proc->p_smutex));
    264 	LOCK_ASSERT(lwp_locked(t, NULL));
    265 
    266 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    267 
    268 	/*
    269 	 * If the current LWP has been told to exit, we must not suspend anyone
    270 	 * else or deadlock could occur.  We won't return to userspace.
    271 	 */
    272 	if ((curl->l_flag & (L_WEXIT | L_WCORE)) != 0) {
    273 		lwp_unlock(t);
    274 		return (EDEADLK);
    275 	}
    276 
    277 	error = 0;
    278 
    279 	switch (t->l_stat) {
    280 	case LSRUN:
    281 	case LSONPROC:
    282 		t->l_flag |= L_WSUSPEND;
    283 		lwp_need_userret(t);
    284 		lwp_unlock(t);
    285 		break;
    286 
    287 	case LSSLEEP:
    288 		t->l_flag |= L_WSUSPEND;
    289 
    290 		/*
    291 		 * Kick the LWP and try to get it to the kernel boundary
    292 		 * so that it will release any locks that it holds.
    293 		 * setrunnable() will release the lock.
    294 		 */
    295 		if ((t->l_flag & L_SINTR) != 0)
    296 			setrunnable(t);
    297 		else
    298 			lwp_unlock(t);
    299 		break;
    300 
    301 	case LSSUSPENDED:
    302 		lwp_unlock(t);
    303 		break;
    304 
    305 	case LSSTOP:
    306 		t->l_flag |= L_WSUSPEND;
    307 		setrunnable(t);
    308 		break;
    309 
    310 	case LSIDL:
    311 	case LSZOMB:
    312 		error = EINTR; /* It's what Solaris does..... */
    313 		lwp_unlock(t);
    314 		break;
    315 	}
    316 
    317 	/*
    318 	 * XXXLWP Wait for:
    319 	 *
    320 	 * o process exiting
    321 	 * o target LWP suspended
    322 	 * o target LWP not suspended and L_WSUSPEND clear
    323 	 * o target LWP exited
    324 	 */
    325 
    326 	 return (error);
    327 }
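
/*
 * Usage sketch (illustrative only, not part of this file): a caller such
 * as sys__lwp_suspend() is expected to take p_smutex, look up and lock the
 * target LWP, and let lwp_suspend() drop the LWP lock.  "target" is a
 * hypothetical LWP id supplied by the caller; lwp_continue() is used the
 * same way.
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, target)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(l, t);	(unlocks t)
 *	mutex_exit(&p->p_smutex);
 */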
    328 
    329 /*
    330  * Restart a suspended LWP.
    331  *
    332  * Must be called with p_smutex held, and the LWP locked.  Will unlock the
    333  * LWP before return.
    334  */
    335 void
    336 lwp_continue(struct lwp *l)
    337 {
    338 
    339 	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
    340 	LOCK_ASSERT(lwp_locked(l, NULL));
    341 
    342 	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
    343 	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
    344 	    l->l_wchan));
    345 
    346 	/* If rebooting or not suspended, then just bail out. */
    347 	if ((l->l_flag & L_WREBOOT) != 0) {
    348 		lwp_unlock(l);
    349 		return;
    350 	}
    351 
    352 	l->l_flag &= ~L_WSUSPEND;
    353 
    354 	if (l->l_stat != LSSUSPENDED) {
    355 		lwp_unlock(l);
    356 		return;
    357 	}
    358 
    359 	/* setrunnable() will release the lock. */
    360 	setrunnable(l);
    361 }
    362 
    363 /*
    364  * Wait for an LWP within the current process to exit.  If 'lid' is
    365  * non-zero, we are waiting for a specific LWP.
    366  *
    367  * Must be called with p->p_smutex held.
    368  */
    369 int
    370 lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
    371 {
    372 	struct proc *p = l->l_proc;
    373 	struct lwp *l2;
    374 	int nfound, error;
    375 
    376 	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
    377 	    p->p_pid, l->l_lid, lid));
    378 
    379 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    380 
    381 	/*
    382 	 * We try to check for deadlock:
    383 	 *
    384 	 * 1) If all other LWPs are waiting for exits or suspended.
    385 	 * 2) If we are trying to wait on ourself.
    386 	 *
    387 	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
    388 	 * waits, not any-LWP waits) and detect that sort of deadlock, but
    389 	 * we don't have a good place to store the lwp that is being waited
    390 	 * for. wchan is already filled with &p->p_nlwps, and putting the
    391 	 * lwp address in there for deadlock tracing would require exiting
    392 	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
    393 	 * get threads sleeping on any LWP exiting.
    394 	 */
    395 	if (lid == l->l_lid)
    396 		return EDEADLK;
    397 
    398 	p->p_nlwpwait++;
    399 
    400 	for (;;) {
    401 		/*
    402 		 * Avoid a race between exit1() and sigexit(): if the
    403 		 * process is dumping core, then we need to bail out: call
    404 		 * into lwp_userret() where we will be suspended until the
    405 		 * deed is done.
    406 		 */
    407 		if ((p->p_sflag & PS_WCORE) != 0) {
    408 			mutex_exit(&p->p_smutex);
    409 			lwp_userret(l);
    410 #ifdef DIAGNOSTIC
    411 			panic("lwp_wait1");
    412 #endif
    413 			/* NOTREACHED */
    414 		}
    415 
    416 		/*
    417 		 * First off, drain any detached LWP that is waiting to be
    418 		 * reaped.
    419 		 */
    420 		while ((l2 = p->p_zomblwp) != NULL) {
    421 			p->p_zomblwp = NULL;
    422 			lwp_free(l2, 0, 0);	/* releases proc mutex */
    423 			mutex_enter(&p->p_smutex);
    424 		}
    425 
    426 		/*
    427 		 * Now look for an LWP to collect.  If the whole process is
    428 		 * exiting, count detached LWPs as eligible to be collected,
    429 		 * but don't drain them here.
    430 		 */
    431 		nfound = 0;
    432 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    433 			if (l2 == l || (lid != 0 && l2->l_lid != lid))
    434 				continue;
    435 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    436 				nfound += ((flags & LWPWAIT_EXITCONTROL) != 0);
    437 				continue;
    438 			}
    439 			nfound++;
    440 
    441 			/* No need to lock the LWP in order to see LSZOMB. */
    442 			if (l2->l_stat != LSZOMB)
    443 				continue;
    444 
    445 			if (departed)
    446 				*departed = l2->l_lid;
    447 			lwp_free(l2, 0, 0);
    448 			mutex_enter(&p->p_smutex);
    449 			p->p_nlwpwait--;
    450 			return 0;
    451 		}
    452 
    453 		if (nfound == 0) {
    454 			error = ESRCH;
    455 			break;
    456 		}
    457 		if ((flags & LWPWAIT_EXITCONTROL) != 0) {
    458 			KASSERT(p->p_nlwps > 1);
    459 			cv_wait(&p->p_lwpcv, &p->p_smutex);
    460 			continue;
    461 		}
    462 		if ((p->p_sflag & PS_WEXIT) != 0 ||
    463 		    p->p_nrlwps <= p->p_nlwpwait + p->p_ndlwps) {
    464 			error = EDEADLK;
    465 			break;
    466 		}
    467 		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
    468 			break;
    469 	}
    470 
    471 	p->p_nlwpwait--;
    472 	return error;
    473 }
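
/*
 * Usage sketch (illustrative, e.g. what sys__lwp_wait() might do): the
 * caller brackets lwp_wait1() with p_smutex, which is held on entry and
 * still held when it returns, and then acts on the departed LWP id.
 *
 *	mutex_enter(&p->p_smutex);
 *	error = lwp_wait1(l, lid, &departed, 0);
 *	mutex_exit(&p->p_smutex);
 */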
    474 
    475 /*
    476  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    477  * The new LWP is created in state LSIDL and must be set running,
    478  * suspended, or stopped by the caller.
    479  */
    480 int
    481 newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    482     int flags, void *stack, size_t stacksize,
    483     void (*func)(void *), void *arg, struct lwp **rnewlwpp)
    484 {
    485 	struct lwp *l2, *isfree;
    486 	turnstile_t *ts;
    487 
    488 	/*
    489 	 * First off, reap any detached LWP waiting to be collected.
    490 	 * We can re-use its LWP structure and turnstile.
    491 	 */
    492 	isfree = NULL;
    493 	if (p2->p_zomblwp != NULL) {
    494 		mutex_enter(&p2->p_smutex);
    495 		if ((isfree = p2->p_zomblwp) != NULL) {
    496 			p2->p_zomblwp = NULL;
    497 			lwp_free(isfree, 1, 0);	/* releases proc mutex */
    498 		} else
    499 			mutex_exit(&p2->p_smutex);
    500 	}
    501 	if (isfree == NULL) {
    502 		l2 = pool_get(&lwp_pool, PR_WAITOK);
    503 		memset(l2, 0, sizeof(*l2));
    504 		l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
    505 	} else {
    506 		l2 = isfree;
    507 		ts = l2->l_ts;
    508 		memset(l2, 0, sizeof(*l2));
    509 		l2->l_ts = ts;
    510 	}
    511 
    512 	l2->l_stat = LSIDL;
    513 	l2->l_proc = p2;
    514 	l2->l_refcnt = 1;
    515 	l2->l_priority = l1->l_priority;
    516 	l2->l_usrpri = l1->l_usrpri;
    517 	l2->l_mutex = &sched_mutex;
    518 	l2->l_cpu = l1->l_cpu;
    519 	l2->l_flag = inmem ? L_INMEM : 0;
    520 	lwp_initspecific(l2);
    521 
    522 	if (p2->p_flag & P_SYSTEM) {
    523 		/*
    524 		 * Mark it as a system process and not a candidate for
    525 		 * swapping.
    526 		 */
    527 		l2->l_flag |= L_SYSTEM;
    528 	}
    529 
    530 	lwp_update_creds(l2);
    531 	callout_init(&l2->l_tsleep_ch);
    532 	cv_init(&l2->l_sigcv, "sigwait");
    533 	l2->l_syncobj = &sched_syncobj;
    534 
    535 	if (rnewlwpp != NULL)
    536 		*rnewlwpp = l2;
    537 
    538 	l2->l_addr = UAREA_TO_USER(uaddr);
    539 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
    540 	    (arg != NULL) ? arg : l2);
    541 
    542 	mutex_enter(&p2->p_smutex);
    543 
    544 	if ((flags & LWP_DETACHED) != 0) {
    545 		l2->l_prflag = LPR_DETACHED;
    546 		p2->p_ndlwps++;
    547 	} else
    548 		l2->l_prflag = 0;
    549 
    550 	l2->l_sigmask = l1->l_sigmask;
    551 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
    552 	sigemptyset(&l2->l_sigpend.sp_set);
    553 
    554 	l2->l_lid = ++p2->p_nlwpid;
    555 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    556 	p2->p_nlwps++;
    557 
    558 	mutex_exit(&p2->p_smutex);
    559 
    560 	mutex_enter(&proclist_mutex);
    561 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    562 	mutex_exit(&proclist_mutex);
    563 
    564 	if (p2->p_emul->e_lwp_fork)
    565 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    566 
    567 	return (0);
    568 }
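
/*
 * Usage sketch (simplified and illustrative only; a real caller such as
 * sys__lwp_create() also handles suspended creation and stopped processes):
 * the creator allocates the u-area, calls newlwp(), and then makes the
 * LSIDL LWP runnable under p_smutex with the new LWP locked, bumping
 * p_nrlwps as described in the locking notes above.  uvm_uarea_alloc() is
 * assumed to be the source of "uaddr"/"inmem".
 *
 *	inmem = uvm_uarea_alloc(&uaddr);
 *	error = newlwp(l, p, uaddr, inmem, 0, NULL, 0, func, arg, &l2);
 *	if (error == 0) {
 *		mutex_enter(&p->p_smutex);
 *		lwp_lock(l2);
 *		p->p_nrlwps++;
 *		l2->l_stat = LSRUN;
 *		setrunqueue(l2);
 *		lwp_unlock(l2);
 *		mutex_exit(&p->p_smutex);
 *	}
 */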
    569 
    570 /*
    571  * Exit an LWP.  This will call cpu_exit, which will call cpu_switch,
    572  * so this can only be used meaningfully if you're willing to switch away.
    573  * Calling with l!=curlwp would be weird.
    574  */
    575 void
    576 lwp_exit(struct lwp *l)
    577 {
    578 	struct proc *p = l->l_proc;
    579 	struct lwp *l2;
    580 
    581 	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
    582 	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
    583 
    584 	/*
    585 	 * Verify that we hold no locks other than the kernel lock.
    586 	 */
    587 #ifdef MULTIPROCESSOR
    588 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
    589 #else
    590 	LOCKDEBUG_BARRIER(NULL, 0);
    591 #endif
    592 
    593 	/*
    594 	 * If we are the last live LWP in a process, we need to exit the
    595 	 * entire process.  We do so with an exit status of zero, because
    596 	 * it's a "controlled" exit, and because that's what Solaris does.
    597 	 *
    598 	 * We are not quite a zombie yet, but for accounting purposes we
    599 	 * must increment the count of zombies here.
    600 	 *
    601 	 * Note: the last LWP's specificdata will be deleted here.
    602 	 */
    603 	mutex_enter(&p->p_smutex);
    604 	if (p->p_nlwps - p->p_nzlwps == 1) {
    605 		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
    606 		    p->p_pid, l->l_lid));
    607 		exit1(l, 0);
    608 		/* NOTREACHED */
    609 	}
    610 	p->p_nzlwps++;
    611 	mutex_exit(&p->p_smutex);
    612 
    613 	if (p->p_emul->e_lwp_exit)
    614 		(*p->p_emul->e_lwp_exit)(l);
    615 
    616 	/* Delete the specificdata while it's still safe to sleep. */
    617 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
    618 
    619 	/*
    620 	 * Release our cached credentials.
    621 	 */
    622 	kauth_cred_free(l->l_cred);
    623 
    624 	/*
    625 	 * Remove the LWP from the global list.
    626 	 */
    627 	mutex_enter(&proclist_mutex);
    628 	LIST_REMOVE(l, l_list);
    629 	mutex_exit(&proclist_mutex);
    630 
    631 	/*
    632 	 * Get rid of all references to the LWP that others (e.g. procfs)
    633 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
    634 	 * mark it waiting for collection in the proc structure.  Note that
    635 	 * before we can do that, we need to free any other dead, detached
    636 	 * LWP waiting to meet its maker.
    637 	 *
    638 	 * XXXSMP disable preemption.
    639 	 */
    640 	mutex_enter(&p->p_smutex);
    641 	lwp_drainrefs(l);
    642 
    643 	if ((l->l_prflag & LPR_DETACHED) != 0) {
    644 		while ((l2 = p->p_zomblwp) != NULL) {
    645 			p->p_zomblwp = NULL;
    646 			lwp_free(l2, 0, 0);	/* releases proc mutex */
    647 			mutex_enter(&p->p_smutex);
    648 		}
    649 		p->p_zomblwp = l;
    650 	}
    651 
    652 	/*
    653 	 * Clear any private, pending signals.  If we find a pending signal
    654 	 * for the process and we have been asked to check for signals, then
    655 	 * we lose badly: arrange to have all other LWPs in the process check
    656 	 * for signals.
    657 	 */
    658 	sigclear(&l->l_sigpend, NULL);
    659 	if ((l->l_flag & L_PENDSIG) != 0 &&
    660 	    firstsig(&p->p_sigpend.sp_set) != 0)
    661 		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
    662 			l2->l_flag |= L_PENDSIG;
    663 
    664 	lwp_lock(l);
    665 	l->l_stat = LSZOMB;
    666 	lwp_unlock(l);
    667 	p->p_nrlwps--;
    668 	mutex_exit(&p->p_smutex);
    669 
    670 	/*
    671 	 * We can no longer block.  At this point, lwp_free() may already
    672 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
    673 	 *
    674 	 * Free MD LWP resources.
    675 	 */
    676 #ifndef __NO_CPU_LWP_FREE
    677 	cpu_lwp_free(l, 0);
    678 #endif
    679 	pmap_deactivate(l);
    680 
    681 	/*
    682 	 * Release the kernel lock, signal another LWP to collect us,
    683 	 * and switch away into oblivion.
    684 	 */
    685 #ifdef notyet
    686 	/* XXXSMP hold in lwp_userret() */
    687 	KERNEL_UNLOCK_LAST(l);
    688 #else
    689 	KERNEL_UNLOCK_ALL(l, NULL);
    690 #endif
    691 
    692 	cv_broadcast(&p->p_lwpcv);
    693 	cpu_exit(l);
    694 }
    695 
    696 /*
    697  * We are called from cpu_exit() once it is safe to schedule the dead LWP's
    698  * resources to be freed (i.e., once we've switched to the idle PCB for the
    699  * current CPU).
    700  */
    701 void
    702 lwp_exit2(struct lwp *l)
    703 {
    704 	/* XXXSMP re-enable preemption */
    705 }
    706 
    707 /*
    708  * Free a dead LWP's remaining resources.
    709  *
    710  * XXXLWP limits.
    711  */
    712 void
    713 lwp_free(struct lwp *l, int recycle, int last)
    714 {
    715 	struct proc *p = l->l_proc;
    716 
    717 	/*
    718 	 * If this was not the last LWP in the process, then adjust
    719 	 * counters and unlock.
    720 	 */
    721 	if (!last) {
    722 		/*
    723 		 * Add the LWP's run time to the process' base value.
    724 		 * This needs to coincide with coming off p_lwps.
    725 		 */
    726 		timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
    727 
    728 		LIST_REMOVE(l, l_sibling);
    729 		p->p_nlwps--;
    730 		p->p_nzlwps--;
    731 		if ((l->l_prflag & LPR_DETACHED) != 0)
    732 			p->p_ndlwps--;
    733 		mutex_exit(&p->p_smutex);
    734 
    735 #ifdef MULTIPROCESSOR
    736 		/*
    737 		 * In the unlikely event that the LWP is still on the CPU,
    738 		 * then spin until it has switched away.  We need to release
    739 		 * all locks to avoid deadlock against interrupt handlers on
    740 		 * the target CPU.
    741 		 */
    742 		if (l->l_cpu->ci_curlwp == l) {
    743 			int count;
    744 			KERNEL_UNLOCK_ALL(curlwp, &count);
    745 			while (l->l_cpu->ci_curlwp == l)
    746 				SPINLOCK_BACKOFF_HOOK;
    747 			KERNEL_LOCK(count, curlwp);
    748 		}
    749 #endif
    750 	}
    751 
    752 	/*
    753 	 * Free the LWP's turnstile and the LWP structure itself unless the
    754 	 * caller wants to recycle them.
    755 	 *
    756 	 * We can't return turnstile0 to the pool (it didn't come from it),
    757 	 * so if it comes up just drop it quietly and move on.
    758 	 *
    759 	 * We don't recycle the VM resources at this time.
    760 	 */
    761 	if (!recycle && l->l_ts != &turnstile0)
    762 		pool_cache_put(&turnstile_cache, l->l_ts);
    763 #ifndef __NO_CPU_LWP_FREE
    764 	cpu_lwp_free2(l);
    765 #endif
    766 	uvm_lwp_exit(l);
    767 	cv_destroy(&l->l_sigcv);
    768 	if (!recycle)
    769 		pool_put(&lwp_pool, l);
    770 }
    771 
    772 /*
    773  * Pick an LWP to represent the process for those operations which
    774  * want information about a "process" that is actually associated
    775  * with an LWP.
    776  *
    777  * If 'locking' is false, no locking or lock checks are performed.
    778  * This is intended for use by DDB.
    779  *
    780  * We don't bother locking the LWP here, since code that uses this
    781  * interface is broken by design and an exact match is not required.
    782  */
    783 struct lwp *
    784 proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
    785 {
    786 	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
    787 	struct lwp *signalled;
    788 	int cnt;
    789 
    790 	if (locking) {
    791 		LOCK_ASSERT(mutex_owned(&p->p_smutex));
    792 	}
    793 
    794 	/* Trivial case: only one LWP */
    795 	if (p->p_nlwps == 1) {
    796 		l = LIST_FIRST(&p->p_lwps);
    797 		if (nrlwps)
    798 			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
    799 		return l;
    800 	}
    801 
    802 	cnt = 0;
    803 	switch (p->p_stat) {
    804 	case SSTOP:
    805 	case SACTIVE:
    806 		/* Pick the most live LWP */
    807 		onproc = running = sleeping = stopped = suspended = NULL;
    808 		signalled = NULL;
    809 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    810 			if (l->l_lid == p->p_sigctx.ps_lwp)
    811 				signalled = l;
    812 			switch (l->l_stat) {
    813 			case LSONPROC:
    814 				onproc = l;
    815 				cnt++;
    816 				break;
    817 			case LSRUN:
    818 				running = l;
    819 				cnt++;
    820 				break;
    821 			case LSSLEEP:
    822 				sleeping = l;
    823 				break;
    824 			case LSSTOP:
    825 				stopped = l;
    826 				break;
    827 			case LSSUSPENDED:
    828 				suspended = l;
    829 				break;
    830 			}
    831 		}
    832 		if (nrlwps)
    833 			*nrlwps = cnt;
    834 		if (signalled)
    835 			l = signalled;
    836 		else if (onproc)
    837 			l = onproc;
    838 		else if (running)
    839 			l = running;
    840 		else if (sleeping)
    841 			l = sleeping;
    842 		else if (stopped)
    843 			l = stopped;
    844 		else if (suspended)
    845 			l = suspended;
    846 		else
    847 			break;
    848 		return l;
    853 #ifdef DIAGNOSTIC
    854 	case SIDL:
    855 	case SZOMB:
    856 	case SDYING:
    857 	case SDEAD:
    858 		if (locking)
    859 			mutex_exit(&p->p_smutex);
    860 		/* We have more than one LWP and we're in SIDL?
    861 		 * How'd that happen?
    862 		 */
    863 		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
    864 		    p->p_pid, p->p_comm, p->p_stat);
    865 		break;
    866 	default:
    867 		if (locking)
    868 			mutex_exit(&p->p_smutex);
    869 		panic("Process %d (%s) in unknown state %d",
    870 		    p->p_pid, p->p_comm, p->p_stat);
    871 #endif
    872 	}
    873 
    874 	if (locking)
    875 		mutex_exit(&p->p_smutex);
    876 	panic("proc_representative_lwp: couldn't find a lwp for process"
    877 		" %d (%s)", p->p_pid, p->p_comm);
    878 	/* NOTREACHED */
    879 	return NULL;
    880 }
    881 
    882 /*
    883  * Look up a live LWP within the specified process; the LWP is not locked.
    884  *
    885  * Must be called with p->p_smutex held.
    886  */
    887 struct lwp *
    888 lwp_find(struct proc *p, int id)
    889 {
    890 	struct lwp *l;
    891 
    892 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
    893 
    894 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    895 		if (l->l_lid == id)
    896 			break;
    897 	}
    898 
    899 	/*
    900 	 * No need to lock - all of these conditions will
    901 	 * be visible with the process level mutex held.
    902 	 */
    903 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
    904 		l = NULL;
    905 
    906 	return l;
    907 }
    908 
    909 /*
    910  * Update an LWP's cached credentials to mirror the process' master copy.
    911  *
    912  * This happens early in the syscall path, on user trap, and on LWP
    913  * creation.  A long-running LWP can also voluntarily choose to update
    914  * its credentials by calling this routine.  This may be called from
    915  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
    916  */
    917 void
    918 lwp_update_creds(struct lwp *l)
    919 {
    920 	kauth_cred_t oc;
    921 	struct proc *p;
    922 
    923 	p = l->l_proc;
    924 	oc = l->l_cred;
    925 
    926 	mutex_enter(&p->p_mutex);
    927 	kauth_cred_hold(p->p_cred);
    928 	l->l_cred = p->p_cred;
    929 	mutex_exit(&p->p_mutex);
    930 	if (oc != NULL) {
    931 		KERNEL_LOCK(1, l);	/* XXXSMP */
    932 		kauth_cred_free(oc);
    933 		KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
    934 	}
    935 }
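
/*
 * A sketch of the caller-side check described above: LWP_CACHE_CREDS()
 * only ends up calling here when the cached and master credentials differ,
 * roughly equivalent to:
 *
 *	if (l->l_cred != p->p_cred)
 *		lwp_update_creds(l);
 */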
    936 
    937 /*
    938  * Verify that an LWP is locked, and optionally verify that the lock matches
    939  * one we specify.
    940  */
    941 int
    942 lwp_locked(struct lwp *l, kmutex_t *mtx)
    943 {
    944 	kmutex_t *cur = l->l_mutex;
    945 
    946 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    947 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
    948 #else
    949 	return mutex_owned(cur);
    950 #endif
    951 }
    952 
    953 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    954 /*
    955  * Lock an LWP.
    956  */
    957 void
    958 lwp_lock_retry(struct lwp *l, kmutex_t *old)
    959 {
    960 
    961 	/*
    962 	 * XXXgcc ignoring kmutex_t * volatile on i386
    963 	 *
    964 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
    965 	 */
    966 #if 1
    967 	while (l->l_mutex != old) {
    968 #else
    969 	for (;;) {
    970 #endif
    971 		mutex_spin_exit(old);
    972 		old = l->l_mutex;
    973 		mutex_spin_enter(old);
    974 
    975 		/*
    976 		 * mutex_enter() will have posted a read barrier.  Re-test
    977 		 * l->l_mutex.  If it has changed, we need to try again.
    978 		 */
    979 #if 1
    980 	}
    981 #else
    982 	} while (__predict_false(l->l_mutex != old));
    983 #endif
    984 }
    985 #endif
    986 
    987 /*
    988  * Lend a new mutex to an LWP.  The old mutex must be held.
    989  */
    990 void
    991 lwp_setlock(struct lwp *l, kmutex_t *new)
    992 {
    993 
    994 	LOCK_ASSERT(mutex_owned(l->l_mutex));
    995 
    996 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    997 	mb_write();
    998 	l->l_mutex = new;
    999 #else
   1000 	(void)new;
   1001 #endif
   1002 }
   1003 
   1004 /*
   1005  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1006  * must be held.
   1007  */
   1008 void
   1009 lwp_unlock_to(struct lwp *l, kmutex_t *new)
   1010 {
   1011 	kmutex_t *old;
   1012 
   1013 	LOCK_ASSERT(mutex_owned(l->l_mutex));
   1014 
   1015 	old = l->l_mutex;
   1016 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
   1017 	mb_write();
   1018 	l->l_mutex = new;
   1019 #else
   1020 	(void)new;
   1021 #endif
   1022 	mutex_spin_exit(old);
   1023 }
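
/*
 * Sketch of the intended hand-off (see the "Locking" overview above): when
 * an LWP is placed on a sleep queue its coverage is transferred from the
 * current lock to the sleep queue's mutex in one step, so the LWP is never
 * observed unlocked.  "sq" is illustrative only.
 *
 *	lwp_lock(l);
 *	... enqueue l on sq ...
 *	lwp_unlock_to(l, sq->sq_mutex);
 */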
   1024 
   1025 /*
   1026  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
   1027  * locked.
   1028  */
   1029 void
   1030 lwp_relock(struct lwp *l, kmutex_t *new)
   1031 {
   1032 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
   1033 	kmutex_t *old;
   1034 #endif
   1035 
   1036 	LOCK_ASSERT(mutex_owned(l->l_mutex));
   1037 
   1038 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
   1039 	old = l->l_mutex;
   1040 	if (old != new) {
   1041 		mutex_spin_enter(new);
   1042 		l->l_mutex = new;
   1043 		mutex_spin_exit(old);
   1044 	}
   1045 #else
   1046 	(void)new;
   1047 #endif
   1048 }
   1049 
   1050 /*
   1051  * Handle exceptions for mi_userret().  Called if one of the flags in the
   1052  * L_USERRET mask is set.
   1053  */
   1054 void
   1055 lwp_userret(struct lwp *l)
   1056 {
   1057 	struct proc *p;
   1058 	int sig;
   1059 
   1060 	p = l->l_proc;
   1061 
   1062 	/*
   1063 	 * It should be safe to do this read unlocked on a multiprocessor
   1064 	 * system.
   1065 	 */
   1066 	while ((l->l_flag & L_USERRET) != 0) {
   1067 		/*
   1068 		 * Process pending signals first, unless the process
   1069 		 * is dumping core, where we will instead enter the
   1070 		 * L_WSUSPEND case below.
   1071 		 */
   1072 		if ((l->l_flag & (L_PENDSIG | L_WCORE)) == L_PENDSIG) {
   1073 			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() below */
   1074 			mutex_enter(&p->p_smutex);
   1075 			while ((sig = issignal(l)) != 0)
   1076 				postsig(sig);
   1077 			mutex_exit(&p->p_smutex);
   1078 			KERNEL_UNLOCK_LAST(l);	/* XXXSMP */
   1079 		}
   1080 
   1081 		/*
   1082 		 * Core-dump or suspend pending.
   1083 		 *
   1084 		 * In case of core dump, suspend ourselves, so that the
   1085 		 * kernel stack and therefore the userland registers saved
   1086 		 * in the trapframe are around for coredump() to write them
   1087 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
   1088 		 * will write the core file out once all other LWPs are
   1089 		 * suspended.
   1090 		 */
   1091 		if ((l->l_flag & L_WSUSPEND) != 0) {
   1092 			mutex_enter(&p->p_smutex);
   1093 			p->p_nrlwps--;
   1094 			cv_broadcast(&p->p_lwpcv);
   1095 			lwp_lock(l);
   1096 			l->l_stat = LSSUSPENDED;
   1097 			mutex_exit(&p->p_smutex);
   1098 			mi_switch(l, NULL);
   1099 		}
   1100 
   1101 		/* Process is exiting. */
   1102 		if ((l->l_flag & L_WEXIT) != 0) {
   1103 			KERNEL_LOCK(1, l);
   1104 			lwp_exit(l);
   1105 			KASSERT(0);
   1106 			/* NOTREACHED */
   1107 		}
   1108 	}
   1109 }
   1110 
   1111 /*
   1112  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1113  */
   1114 void
   1115 lwp_need_userret(struct lwp *l)
   1116 {
   1117 	LOCK_ASSERT(lwp_locked(l, NULL));
   1118 
   1119 	/*
   1120 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1121 	 * that the condition will be seen before forcing the LWP to enter
   1122 	 * kernel mode.
   1123 	 */
   1124 	mb_write();
   1125 
   1126 	if (l->l_priority > PUSER)
   1127 		lwp_changepri(l, PUSER);
   1128 	cpu_signotify(l);
   1129 }
   1130 
   1131 /*
   1132  * Add one reference to an LWP.  This will prevent the LWP from
   1133  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1134  */
   1135 void
   1136 lwp_addref(struct lwp *l)
   1137 {
   1138 
   1139 	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
   1140 	KASSERT(l->l_stat != LSZOMB);
   1141 	KASSERT(l->l_refcnt != 0);
   1142 
   1143 	l->l_refcnt++;
   1144 }
   1145 
   1146 /*
   1147  * Remove one reference to an LWP.  If this is the last reference,
   1148  * then we must finalize the LWP's death.
   1149  */
   1150 void
   1151 lwp_delref(struct lwp *l)
   1152 {
   1153 	struct proc *p = l->l_proc;
   1154 	u_int refcnt;
   1155 
   1156 	mutex_enter(&p->p_smutex);
   1157 	refcnt = --l->l_refcnt;
   1158 	mutex_exit(&p->p_smutex);
   1159 
   1160 	if (refcnt == 0)
   1161 		cv_broadcast(&p->p_refcv);
   1162 }
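
/*
 * Usage sketch (illustrative, in the style of a procfs-like consumer): a
 * reference is taken under p_smutex so the LWP cannot finish exiting while
 * it is inspected, and dropped again afterwards.
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(&p->p_smutex);
 *	if (l != NULL) {
 *		... inspect l ...
 *		lwp_delref(l);
 *	}
 */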
   1163 
   1164 /*
   1165  * Drain all references to the current LWP.
   1166  */
   1167 void
   1168 lwp_drainrefs(struct lwp *l)
   1169 {
   1170 	struct proc *p = l->l_proc;
   1171 
   1172 	LOCK_ASSERT(mutex_owned(&p->p_smutex));
   1173 	KASSERT(l->l_refcnt != 0);
   1174 
   1175 	l->l_refcnt--;
   1176 	while (l->l_refcnt != 0)
   1177 		cv_wait(&p->p_refcv, &p->p_smutex);
   1178 }
   1179 
   1180 /*
   1181  * lwp_specific_key_create --
   1182  *	Create a key for subsystem lwp-specific data.
   1183  */
   1184 int
   1185 lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1186 {
   1187 
   1188 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
   1189 }
   1190 
   1191 /*
   1192  * lwp_specific_key_delete --
   1193  *	Delete a key for subsystem lwp-specific data.
   1194  */
   1195 void
   1196 lwp_specific_key_delete(specificdata_key_t key)
   1197 {
   1198 
   1199 	specificdata_key_delete(lwp_specificdata_domain, key);
   1200 }
   1201 
   1202 /*
   1203  * lwp_initspecific --
   1204  *	Initialize an LWP's specificdata container.
   1205  */
   1206 void
   1207 lwp_initspecific(struct lwp *l)
   1208 {
   1209 	int error;
   1210 
   1211 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
   1212 	KASSERT(error == 0);
   1213 }
   1214 
   1215 /*
   1216  * lwp_finispecific --
   1217  *	Finalize an LWP's specificdata container.
   1218  */
   1219 void
   1220 lwp_finispecific(struct lwp *l)
   1221 {
   1222 
   1223 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
   1224 }
   1225 
   1226 /*
   1227  * lwp_getspecific --
   1228  *	Return lwp-specific data corresponding to the specified key.
   1229  *
   1230  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
   1231  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
   1232  *	LWP's specific data, care must be taken to ensure that doing so
   1233  *	would not cause internal data structure inconsistency (i.e. caller
   1234  *	can guarantee that the target LWP is not inside an lwp_getspecific()
   1235  *	or lwp_setspecific() call).
   1236  */
   1237 void *
   1238 lwp_getspecific(specificdata_key_t key)
   1239 {
   1240 
   1241 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1242 						  &curlwp->l_specdataref, key));
   1243 }
   1244 
   1245 void *
   1246 _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
   1247 {
   1248 
   1249 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1250 						  &l->l_specdataref, key));
   1251 }
   1252 
   1253 /*
   1254  * lwp_setspecific --
   1255  *	Set lwp-specific data corresponding to the specified key.
   1256  */
   1257 void
   1258 lwp_setspecific(specificdata_key_t key, void *data)
   1259 {
   1260 
   1261 	specificdata_setspecific(lwp_specificdata_domain,
   1262 				 &curlwp->l_specdataref, key, data);
   1263 }
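
/*
 * Usage sketch for the lwp-specific data interface (hypothetical subsystem;
 * "foo_lwp_key", "foo_dtor" and "foo" are illustrative names only):
 *
 *	static specificdata_key_t foo_lwp_key;
 *
 *	error = lwp_specific_key_create(&foo_lwp_key, foo_dtor);
 *	...
 *	lwp_setspecific(foo_lwp_key, foo);	(current LWP only)
 *	foo = lwp_getspecific(foo_lwp_key);
 */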
   1264