/*	$NetBSD: kern_lwp.c,v 1.40.2.20 2007/02/05 13:16:49 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit (or thread) of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp".
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing in the kernel simultaneously.
 *
 *	Note that LWPs differ from kernel threads (kthreads): kernel threads
 *	are distinct processes (system processes) with no user space
 *	component, and those processes may themselves contain one or more
 *	LWPs.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (L_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed.
 *		Whoever created the new LWP can be expected to set it to
 *		another state shortly.
 *
 *	LSSUSPENDED:
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, may be
 *	sleeping, idle, or on a run queue.  It is expected to take the
 *	necessary action to stop executing or become "running" again within
 *	a short timeframe.
 *
 *	LSZOMB:
 *
 *		Dead: the LWP has released most of its resources and is
 *		about to switch away into oblivion.  When it switches away,
 *		its few remaining resources will be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		will switch away shortly to allow other LWPs to run on the
 *		CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *		Single LWPs within a process cannot be stopped selectively:
 *		all actions that can stop or continue LWPs occur at the
 *		process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP and LSSUSPENDED states may only be set
 *	when returning to user space in userret(), or when sleeping
 *	interruptibly.  Before setting those states, we try to ensure
 *	that the LWPs will release all kernel locks that they hold,
 *	and at a minimum try to ensure that the LWP can be set runnable
 *	again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *	            > STOPPED			    > SLEEP
 *	            > SUSPENDED			    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *	            > SLEEP			    > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN		            > SUSPENDED
 *		    > STOPPED                       > STOPPED
 *		    > SUSPENDED
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin mutex pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held.  In
 *	a multiprocessor kernel, state transitions may cause the LWP's lock
 *	pointer to change.  On uniprocessor kernels, most scheduler and
 *	synchronisation objects such as sleep queues and LWPs are protected
 *	by only one mutex (sched_mutex).  In this case, LWPs' lock pointers
 *	will never change and will always reference sched_mutex.
 *
 *	Manipulation of the general lock is not performed directly, but
 *	through calls to lwp_lock(), lwp_relock() and similar.
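 *
 *	For example, a hypothetical caller that wants to wake a sleeping
 *	LWP might do the following (a sketch only; setrunnable() also
 *	requires the process' p_smutex, and releases the LWP lock):
 *
 *		mutex_enter(&p->p_smutex);
 *		lwp_lock(l);
 *		if (l->l_stat == LSSLEEP && (l->l_flag & L_SINTR) != 0)
 *			setrunnable(l);
 *		else
 *			lwp_unlock(l);
 *		mutex_exit(&p->p_smutex);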
 *
 *	States and their associated locks:
 *
 *	LSIDL, LSZOMB
 *
 *		Always covered by sched_mutex.
 *
 *	LSONPROC, LSRUN:
 *
 *		Always covered by sched_mutex, which protects the run queues
 *		and other miscellaneous items.  If the scheduler is changed
 *		to use per-CPU run queues, this may become a per-CPU mutex.
 *
 *	LSSLEEP:
 *
 *		Covered by a mutex associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue mutex.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the mutex is sched_mutex.
 *
 *	The lock order is as follows:
 *
 *		sleepq_t::sq_mutex  |---> sched_mutex
 *		tschain_t::tc_mutex |
 *
 *	Each process has a scheduler state mutex (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.40.2.20 2007/02/05 13:16:49 ad Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

struct lwplist	alllwp;

POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

static specificdata_domain_t lwp_specificdata_domain;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
}

/*
 * Suspend an LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	LOCK_ASSERT(mutex_owned(&t->l_proc->p_smutex));
	LOCK_ASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (L_WEXIT | L_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= L_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= L_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & L_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= L_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	/*
	 * XXXLWP Wait for:
	 *
	 * o process exiting
	 * o target LWP suspended
	 * o target LWP not suspended and L_WSUSPEND clear
	 * o target LWP exited
	 */

	return (error);
}
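
/*
 * Example: a minimal sketch of driving lwp_suspend() from a
 * hypothetical caller that has already found a target LWP 't' in
 * process 'p'.  Per the protocol above, the call drops the LWP lock
 * but leaves p_smutex held:
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);
 *	mutex_exit(&p->p_smutex);
 */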

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & L_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~L_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}
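
/*
 * Example: resuming every suspended LWP in a process, as a
 * hypothetical caller might do it (a sketch; lwp_continue() drops
 * the LWP lock on each iteration):
 *
 *	mutex_enter(&p->p_smutex);
 *	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 *		lwp_lock(l);
 *		lwp_continue(l);
 *	}
 *	mutex_exit(&p->p_smutex);
 */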

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/*
	 * We try to check for deadlock:
	 *
	 * 1) If all other LWPs are waiting for exits or suspended.
	 * 2) If we are trying to wait on ourself.
	 *
	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
	 * waits, not any-LWP waits) and detect that sort of deadlock, but
	 * we don't have a good place to store the lwp that is being waited
	 * for. wchan is already filled with &p->p_nlwps, and putting the
	 * lwp address in there for deadlock tracing would require exiting
	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
	 * get threads sleeping on any LWP exiting.
	 */
	if (lid == l->l_lid)
		return EDEADLK;

	p->p_nlwpwait++;

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, 0, 0);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2 == l || (lid != 0 && l2->l_lid != lid))
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += ((flags & LWPWAIT_EXITCONTROL) != 0);
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			if (departed)
				*departed = l2->l_lid;
			lwp_free(l2, 0, 0);
			mutex_enter(&p->p_smutex);
			p->p_nlwpwait--;
			return 0;
		}

		if (nfound == 0) {
			error = ESRCH;
			break;
		}
		if ((flags & LWPWAIT_EXITCONTROL) != 0) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps <= p->p_nlwpwait + p->p_ndlwps) {
			error = EDEADLK;
			break;
		}
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	p->p_nlwpwait--;
	return error;
}
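
/*
 * Example: how a system call might drive lwp_wait1() (a sketch; the
 * real sys__lwp_wait() lives elsewhere, and "target" and "departed"
 * are illustrative local variables):
 *
 *	mutex_enter(&p->p_smutex);
 *	error = lwp_wait1(l, target, &departed, 0);
 *	mutex_exit(&p->p_smutex);
 */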

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, 1, 0);	/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_get(&lwp_pool, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_priority = l1->l_priority;
	l2->l_usrpri = l1->l_usrpri;
	l2->l_mutex = &sched_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? L_INMEM : 0;
	lwp_initspecific(l2);

	if (p2->p_flag & P_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= L_SYSTEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&proclist_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&proclist_mutex);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
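
/*
 * Example: a hypothetical sketch of creating a new LWP and setting it
 * running.  The u-area allocation step and the setrunnable() protocol
 * here are assumptions based on the conventions in this file; "func"
 * and "arg" are illustrative:
 *
 *	inmem = uvm_uarea_alloc(&uaddr);
 *	error = newlwp(curlwp, p, uaddr, inmem, 0, NULL, 0, func, arg, &l2);
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(l2);
 *	setrunnable(l2);	(releases the LWP lock)
 *	mutex_exit(&p->p_smutex);
 */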

/*
 * Quit the process.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch away.
 * Calling with l!=curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(&proclist_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&proclist_mutex);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, 0, 0);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & L_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= L_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif
	pmap_deactivate(l);

	/*
	 * Release the kernel lock, signal another LWP to collect us,
	 * and switch away into oblivion.
	 */
#ifdef notyet
	/* XXXSMP hold in lwp_userret() */
	KERNEL_UNLOCK_LAST(l);
#else
	KERNEL_UNLOCK_ALL(l, NULL);
#endif

	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the dead LWP's
 * resources to be freed (i.e., once we've switched to the idle PCB for the
 * current CPU).
 */
void
lwp_exit2(struct lwp *l)
{
	/* XXXSMP re-enable preemption */
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, int recycle, int last)
{
	struct proc *p = l->l_proc;
	ksiginfoq_t kq;

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;
		mutex_exit(&p->p_smutex);

#ifdef MULTIPROCESSOR
		/*
		 * In the unlikely event that the LWP is still on the CPU,
		 * then spin until it has switched away.  We need to release
		 * all locks to avoid deadlock against interrupt handlers on
		 * the target CPU.
		 */
		if (l->l_cpu->ci_curlwp == l) {
			int count;
			KERNEL_UNLOCK_ALL(curlwp, &count);
			while (l->l_cpu->ci_curlwp == l)
				SPINLOCK_BACKOFF_HOOK;
			KERNEL_LOCK(count, curlwp);
		}
#endif
	}

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	KERNEL_LOCK(1, l);	/* XXXSMP */
	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(&turnstile_cache, l->l_ts);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	uvm_lwp_exit(l);
	if (!recycle)
		pool_put(&lwp_pool, l);
	KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
}
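
/*
 * Example: reaping a detached zombie, exactly as the callers in this
 * file do it.  Note that lwp_free() is entered with p_smutex held and
 * releases it when 'last' is false:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((l2 = p->p_zomblwp) != NULL) {
 *		p->p_zomblwp = NULL;
 *		lwp_free(l2, 0, 0);	(releases p_smutex)
 *	} else
 *		mutex_exit(&p->p_smutex);
 */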

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		LOCK_ASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
	case SZOMB:
	case SDEAD:
		/* Doesn't really matter which LWP we pick here. */
		if (nrlwps)
			*nrlwps = 0;
		l = LIST_FIRST(&p->p_lwps);
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SDYING:
		if (locking)
			mutex_exit(&p->p_smutex);
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Look up a live LWP within the specified process.  The LWP is not
 * locked on return, but the conditions checked here remain valid for
 * as long as p->p_smutex is held.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}
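
/*
 * Example: a typical lookup-and-lock sequence built on lwp_find()
 * (a sketch; the surrounding caller and its error handling are
 * hypothetical):
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	... operate on the locked LWP ...
 */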

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_mutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_mutex);
	if (oc != NULL) {
		KERNEL_LOCK(1, l);	/* XXXSMP */
		kauth_cred_free(oc);
		KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
	}
}

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
#else
	return mutex_owned(cur);
#endif
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
 * Lock an LWP.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	do {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif
}
#endif

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	LOCK_ASSERT(mutex_owned(l->l_mutex));

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	mb_write();
	l->l_mutex = new;
#else
	(void)new;
#endif
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	LOCK_ASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	mb_write();
	l->l_mutex = new;
#else
	(void)new;
#endif
	mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;
#endif

	LOCK_ASSERT(mutex_owned(l->l_mutex));

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
#else
	(void)new;
#endif
}
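
/*
 * Example: the lock "lending" dance, as a sleep queue implementation
 * might perform it (a sketch; "sq" and its sq_mutex member are taken
 * from the locking notes at the top of this file):
 *
 *	lwp_lock(l);			    locked via sched_mutex
 *	lwp_unlock_to(l, &sq->sq_mutex);    now covered by the sleep
 *					    queue mutex; old lock dropped
 */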

/*
 * Handle exceptions for mi_userret().  Called if any flag in the L_USERRET
 * set is set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	int sig;

	p = l->l_proc;

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 */
	while ((l->l_flag & L_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core, where we will instead enter the
		 * L_WSUSPEND case below.
		 */
		if ((l->l_flag & (L_PENDSIG | L_WCORE)) == L_PENDSIG) {
			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() below */
			mutex_enter(&p->p_smutex);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(&p->p_smutex);
			KERNEL_UNLOCK_LAST(l);	/* XXXSMP */
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & L_WSUSPEND) != 0) {
			mutex_enter(&p->p_smutex);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			mutex_exit(&p->p_smutex);
			mi_switch(l, NULL);
		}

		/* Process is exiting. */
		if ((l->l_flag & L_WEXIT) != 0) {
			KERNEL_LOCK(1, l);
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	mb_write();

	if (l->l_priority > PUSER)
		lwp_changepri(l, PUSER);
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around for
 * inspection.
 */
void
lwp_addref(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_refcv);
	mutex_exit(&p->p_smutex);
}

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_refcv, &p->p_smutex);
}
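
/*
 * Example: how a hypothetical observer (procfs-like code) might pin
 * an LWP while inspecting it, using the reference counting above:
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_addref(l);
 *	mutex_exit(&p->p_smutex);
 *	... inspect the LWP; it cannot finish exiting ...
 *	lwp_delref(l);
 */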

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
						  &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
						  &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
				 &curlwp->l_specdataref, key, data);
}
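
/*
 * Example: typical use of the LWP specificdata interface by a
 * subsystem (a sketch; the "foo" names are hypothetical):
 *
 *	static specificdata_key_t foo_lwp_key;
 *
 *	error = lwp_specific_key_create(&foo_lwp_key, foo_dtor);
 *	...
 *	lwp_setspecific(foo_lwp_key, data);	(current LWP only)
 *	data = lwp_getspecific(foo_lwp_key);
 */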