      1 /*	$NetBSD: kern_lwp.c,v 1.103 2008/04/24 18:39:24 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams, and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Overview
     41  *
     42  *	Lightweight processes (LWPs) are the basic unit or thread of
     43  *	execution within the kernel.  The core state of an LWP is described
     44  *	by "struct lwp", also known as lwp_t.
     45  *
      46  *	Each LWP is contained within a process (described by "struct proc").
     47  *	Every process contains at least one LWP, but may contain more.  The
     48  *	process describes attributes shared among all of its LWPs such as a
     49  *	private address space, global execution state (stopped, active,
     50  *	zombie, ...), signal disposition and so on.  On a multiprocessor
      51  *	machine, multiple LWPs may be executing concurrently in the kernel.
     52  *
     53  * Execution states
     54  *
     55  *	At any given time, an LWP has overall state that is described by
     56  *	lwp::l_stat.  The states are broken into two sets below.  The first
     57  *	set is guaranteed to represent the absolute, current state of the
     58  *	LWP:
     59  *
     60  *	LSONPROC
     61  *
     62  *		On processor: the LWP is executing on a CPU, either in the
     63  *		kernel or in user space.
     64  *
     65  *	LSRUN
     66  *
     67  *		Runnable: the LWP is parked on a run queue, and may soon be
     68  *		chosen to run by an idle processor, or by a processor that
      69  *		has been asked to preempt a currently running but lower
     70  *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
     71  *		then the LWP is not on a run queue, but may be soon.
     72  *
     73  *	LSIDL
     74  *
     75  *		Idle: the LWP has been created but has not yet executed,
     76  *		or it has ceased executing a unit of work and is waiting
     77  *		to be started again.
     78  *
     79  *	LSSUSPENDED:
     80  *
     81  *		Suspended: the LWP has had its execution suspended by
     82  *		another LWP in the same process using the _lwp_suspend()
     83  *		system call.  User-level LWPs also enter the suspended
     84  *		state when the system is shutting down.
     85  *
      86  *	The second set represents a "statement of intent" on behalf of the
      87  *	LWP.  The LWP may in fact be executing on a processor, or may be
      88  *	sleeping or idle.  It is expected to take the necessary action to
     89  *	stop executing or become "running" again within a short timeframe.
     90  *	The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
     91  *	Importantly, it indicates that its state is tied to a CPU.
     92  *
     93  *	LSZOMB:
     94  *
     95  *		Dead or dying: the LWP has released most of its resources
      96  *		and either: a) is about to switch away into oblivion, or
      97  *		b) has already switched away.  When it switches away, its
      98  *		few remaining resources can be collected.
     99  *
    100  *	LSSLEEP:
    101  *
    102  *		Sleeping: the LWP has entered itself onto a sleep queue, and
    103  *		has switched away or will switch away shortly to allow other
    104  *		LWPs to run on the CPU.
    105  *
    106  *	LSSTOP:
    107  *
    108  *		Stopped: the LWP has been stopped as a result of a job
    109  *		control signal, or as a result of the ptrace() interface.
    110  *
    111  *		Stopped LWPs may run briefly within the kernel to handle
    112  *		signals that they receive, but will not return to user space
    113  *		until their process' state is changed away from stopped.
    114  *
    115  *		Single LWPs within a process can not be set stopped
    116  *		selectively: all actions that can stop or continue LWPs
    117  *		occur at the process level.
    118  *
    119  * State transitions
    120  *
    121  *	Note that the LSSTOP state may only be set when returning to
     122  *	user space in userret(), or when sleeping interruptibly.  The
    123  *	LSSUSPENDED state may only be set in userret().  Before setting
    124  *	those states, we try to ensure that the LWPs will release all
    125  *	locks that they hold, and at a minimum try to ensure that the
    126  *	LWP can be set runnable again by a signal.
    127  *
    128  *	LWPs may transition states in the following ways:
    129  *
    130  *	 RUN -------> ONPROC		ONPROC -----> RUN
    131  *		    > STOPPED			    > SLEEP
    132  *		    > SUSPENDED			    > STOPPED
    133  *						    > SUSPENDED
    134  *						    > ZOMB
    135  *
    136  *	 STOPPED ---> RUN		SUSPENDED --> RUN
    137  *	            > SLEEP			    > SLEEP
    138  *
    139  *	 SLEEP -----> ONPROC		IDL --------> RUN
    140  *		    > RUN			    > SUSPENDED
    141  *		    > STOPPED			    > STOPPED
    142  *		    > SUSPENDED
    143  *
     144  *	Other state transitions are possible with kernel threads (e.g.
     145  *	ONPROC -> IDL), but only happen under tightly controlled
     146  *	circumstances where the side effects are understood.
    147  *
    148  * Locking
    149  *
    150  *	The majority of fields in 'struct lwp' are covered by a single,
    151  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
    152  *	each field are documented in sys/lwp.h.
    153  *
    154  *	State transitions must be made with the LWP's general lock held,
    155  *	and may cause the LWP's lock pointer to change. Manipulation of
    156  *	the general lock is not performed directly, but through calls to
    157  *	lwp_lock(), lwp_relock() and similar.
    158  *
    159  *	States and their associated locks:
    160  *
    161  *	LSONPROC, LSZOMB:
    162  *
    163  *		Always covered by spc_lwplock, which protects running LWPs.
    164  *		This is a per-CPU lock.
    165  *
    166  *	LSIDL, LSRUN:
    167  *
    168  *		Always covered by spc_mutex, which protects the run queues.
    169  *		This is a per-CPU lock.
    170  *
    171  *	LSSLEEP:
    172  *
    173  *		Covered by a lock associated with the sleep queue that the
    174  *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
    175  *
    176  *	LSSTOP, LSSUSPENDED:
    177  *
    178  *		If the LWP was previously sleeping (l_wchan != NULL), then
    179  *		l_mutex references the sleep queue lock.  If the LWP was
    180  *		runnable or on the CPU when halted, or has been removed from
    181  *		the sleep queue since halted, then the lock is spc_lwplock.
    182  *
    183  *	The lock order is as follows:
    184  *
    185  *		spc::spc_lwplock ->
    186  *		    sleepq_t::sq_mutex ->
    187  *			tschain_t::tc_mutex ->
    188  *			    spc::spc_mutex
    189  *
     190  *	Each process has a scheduler state lock (proc::p_lock), and a
    191  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
    192  *	so on.  When an LWP is to be entered into or removed from one of the
    193  *	following states, p_lock must be held and the process wide counters
    194  *	adjusted:
    195  *
    196  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
    197  *
    198  *	Note that an LWP is considered running or likely to run soon if in
    199  *	one of the following states.  This affects the value of p_nrlwps:
    200  *
    201  *		LSRUN, LSONPROC, LSSLEEP
    202  *
    203  *	p_lock does not need to be held when transitioning among these
    204  *	three states.
    205  */
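
/*
 * Illustrative sketch (not part of the original file): per the locking
 * rules above, code that must observe a stable snapshot of an LWP's
 * state takes the LWP's general lock, whichever mutex l_mutex happens
 * to point at:
 *
 *	lwp_lock(l);		// follows l->l_mutex; see lwp_lock_retry()
 *	stat = l->l_stat;	// stable: transitions require this lock
 *	lwp_unlock(l);
 */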
    206 
    207 #include <sys/cdefs.h>
    208 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.103 2008/04/24 18:39:24 ad Exp $");
    209 
    210 #include "opt_ddb.h"
    211 #include "opt_multiprocessor.h"
    212 #include "opt_lockdebug.h"
    213 
    214 #define _LWP_API_PRIVATE
    215 
    216 #include <sys/param.h>
    217 #include <sys/systm.h>
    218 #include <sys/cpu.h>
    219 #include <sys/pool.h>
    220 #include <sys/proc.h>
    221 #include <sys/syscallargs.h>
    222 #include <sys/syscall_stats.h>
    223 #include <sys/kauth.h>
    224 #include <sys/sleepq.h>
    225 #include <sys/user.h>
    226 #include <sys/lockdebug.h>
    227 #include <sys/kmem.h>
    228 #include <sys/pset.h>
    229 #include <sys/intr.h>
    230 #include <sys/lwpctl.h>
    231 #include <sys/atomic.h>
    232 
    233 #include <uvm/uvm_extern.h>
    234 #include <uvm/uvm_object.h>
    235 
    236 struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);
    237 
    238 POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    239     &pool_allocator_nointr, IPL_NONE);
    240 
    241 static pool_cache_t lwp_cache;
    242 static specificdata_domain_t lwp_specificdata_domain;
    243 
    244 void
    245 lwpinit(void)
    246 {
    247 
    248 	lwp_specificdata_domain = specificdata_domain_create();
    249 	KASSERT(lwp_specificdata_domain != NULL);
    250 	lwp_sys_init();
    251 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
    252 	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
    253 }
    254 
    255 /*
     256  * Set an LWP suspended.
    257  *
    258  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    259  * LWP before return.
    260  */
    261 int
    262 lwp_suspend(struct lwp *curl, struct lwp *t)
    263 {
    264 	int error;
    265 
    266 	KASSERT(mutex_owned(t->l_proc->p_lock));
    267 	KASSERT(lwp_locked(t, NULL));
    268 
    269 	KASSERT(curl != t || curl->l_stat == LSONPROC);
    270 
    271 	/*
    272 	 * If the current LWP has been told to exit, we must not suspend anyone
    273 	 * else or deadlock could occur.  We won't return to userspace.
    274 	 */
     275 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
    276 		lwp_unlock(t);
    277 		return (EDEADLK);
    278 	}
    279 
    280 	error = 0;
    281 
    282 	switch (t->l_stat) {
    283 	case LSRUN:
    284 	case LSONPROC:
    285 		t->l_flag |= LW_WSUSPEND;
    286 		lwp_need_userret(t);
    287 		lwp_unlock(t);
    288 		break;
    289 
    290 	case LSSLEEP:
    291 		t->l_flag |= LW_WSUSPEND;
    292 
    293 		/*
    294 		 * Kick the LWP and try to get it to the kernel boundary
    295 		 * so that it will release any locks that it holds.
    296 		 * setrunnable() will release the lock.
    297 		 */
    298 		if ((t->l_flag & LW_SINTR) != 0)
    299 			setrunnable(t);
    300 		else
    301 			lwp_unlock(t);
    302 		break;
    303 
    304 	case LSSUSPENDED:
    305 		lwp_unlock(t);
    306 		break;
    307 
    308 	case LSSTOP:
    309 		t->l_flag |= LW_WSUSPEND;
    310 		setrunnable(t);
    311 		break;
    312 
    313 	case LSIDL:
    314 	case LSZOMB:
    315 		error = EINTR; /* It's what Solaris does..... */
    316 		lwp_unlock(t);
    317 		break;
    318 	}
    319 
    320 	return (error);
    321 }
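
/*
 * Caller sketch (illustrative only; modelled on what a _lwp_suspend()
 * style system call would do, with "lid" a placeholder LWP ID):
 *
 *	mutex_enter(p->p_lock);			// required by lwp_suspend()
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);				// lwp_suspend() unlocks t
 *	error = lwp_suspend(curlwp, t);
 *	mutex_exit(p->p_lock);
 */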
    322 
    323 /*
    324  * Restart a suspended LWP.
    325  *
    326  * Must be called with p_lock held, and the LWP locked.  Will unlock the
    327  * LWP before return.
    328  */
    329 void
    330 lwp_continue(struct lwp *l)
    331 {
    332 
    333 	KASSERT(mutex_owned(l->l_proc->p_lock));
    334 	KASSERT(lwp_locked(l, NULL));
    335 
    336 	/* If rebooting or not suspended, then just bail out. */
    337 	if ((l->l_flag & LW_WREBOOT) != 0) {
    338 		lwp_unlock(l);
    339 		return;
    340 	}
    341 
    342 	l->l_flag &= ~LW_WSUSPEND;
    343 
    344 	if (l->l_stat != LSSUSPENDED) {
    345 		lwp_unlock(l);
    346 		return;
    347 	}
    348 
    349 	/* setrunnable() will release the lock. */
    350 	setrunnable(l);
    351 }
    352 
    353 /*
    354  * Wait for an LWP within the current process to exit.  If 'lid' is
    355  * non-zero, we are waiting for a specific LWP.
    356  *
    357  * Must be called with p->p_lock held.
    358  */
    359 int
    360 lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
    361 {
    362 	struct proc *p = l->l_proc;
    363 	struct lwp *l2;
    364 	int nfound, error;
    365 	lwpid_t curlid;
    366 	bool exiting;
    367 
    368 	KASSERT(mutex_owned(p->p_lock));
    369 
    370 	p->p_nlwpwait++;
    371 	l->l_waitingfor = lid;
    372 	curlid = l->l_lid;
    373 	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
    374 
    375 	for (;;) {
    376 		/*
    377 		 * Avoid a race between exit1() and sigexit(): if the
    378 		 * process is dumping core, then we need to bail out: call
    379 		 * into lwp_userret() where we will be suspended until the
    380 		 * deed is done.
    381 		 */
    382 		if ((p->p_sflag & PS_WCORE) != 0) {
    383 			mutex_exit(p->p_lock);
    384 			lwp_userret(l);
    385 #ifdef DIAGNOSTIC
    386 			panic("lwp_wait1");
    387 #endif
    388 			/* NOTREACHED */
    389 		}
    390 
    391 		/*
    392 		 * First off, drain any detached LWP that is waiting to be
    393 		 * reaped.
    394 		 */
    395 		while ((l2 = p->p_zomblwp) != NULL) {
    396 			p->p_zomblwp = NULL;
    397 			lwp_free(l2, false, false);/* releases proc mutex */
    398 			mutex_enter(p->p_lock);
    399 		}
    400 
    401 		/*
    402 		 * Now look for an LWP to collect.  If the whole process is
    403 		 * exiting, count detached LWPs as eligible to be collected,
    404 		 * but don't drain them here.
    405 		 */
    406 		nfound = 0;
    407 		error = 0;
    408 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    409 			/*
    410 			 * If a specific wait and the target is waiting on
    411 			 * us, then avoid deadlock.  This also traps LWPs
    412 			 * that try to wait on themselves.
    413 			 *
    414 			 * Note that this does not handle more complicated
    415 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
    416 			 * can still be killed so it is not a major problem.
    417 			 */
    418 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
    419 				error = EDEADLK;
    420 				break;
    421 			}
    422 			if (l2 == l)
    423 				continue;
    424 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
    425 				nfound += exiting;
    426 				continue;
    427 			}
    428 			if (lid != 0) {
    429 				if (l2->l_lid != lid)
    430 					continue;
    431 				/*
    432 				 * Mark this LWP as the first waiter, if there
    433 				 * is no other.
    434 				 */
    435 				if (l2->l_waiter == 0)
    436 					l2->l_waiter = curlid;
    437 			} else if (l2->l_waiter != 0) {
    438 				/*
    439 				 * It already has a waiter - so don't
    440 				 * collect it.  If the waiter doesn't
    441 				 * grab it we'll get another chance
    442 				 * later.
    443 				 */
    444 				nfound++;
    445 				continue;
    446 			}
    447 			nfound++;
    448 
    449 			/* No need to lock the LWP in order to see LSZOMB. */
    450 			if (l2->l_stat != LSZOMB)
    451 				continue;
    452 
    453 			/*
    454 			 * We're no longer waiting.  Reset the "first waiter"
    455 			 * pointer on the target, in case it was us.
    456 			 */
    457 			l->l_waitingfor = 0;
    458 			l2->l_waiter = 0;
    459 			p->p_nlwpwait--;
    460 			if (departed)
    461 				*departed = l2->l_lid;
    462 			sched_lwp_collect(l2);
    463 
    464 			/* lwp_free() releases the proc lock. */
    465 			lwp_free(l2, false, false);
    466 			mutex_enter(p->p_lock);
    467 			return 0;
    468 		}
    469 
    470 		if (error != 0)
    471 			break;
    472 		if (nfound == 0) {
    473 			error = ESRCH;
    474 			break;
    475 		}
    476 
    477 		/*
    478 		 * The kernel is careful to ensure that it can not deadlock
    479 		 * when exiting - just keep waiting.
    480 		 */
    481 		if (exiting) {
    482 			KASSERT(p->p_nlwps > 1);
    483 			cv_wait(&p->p_lwpcv, p->p_lock);
    484 			continue;
    485 		}
    486 
    487 		/*
    488 		 * If all other LWPs are waiting for exits or suspends
    489 		 * and the supply of zombies and potential zombies is
    490 		 * exhausted, then we are about to deadlock.
    491 		 *
    492 		 * If the process is exiting (and this LWP is not the one
    493 		 * that is coordinating the exit) then bail out now.
    494 		 */
    495 		if ((p->p_sflag & PS_WEXIT) != 0 ||
    496 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
    497 			error = EDEADLK;
    498 			break;
    499 		}
    500 
    501 		/*
    502 		 * Sit around and wait for something to happen.  We'll be
    503 		 * awoken if any of the conditions examined change: if an
    504 		 * LWP exits, is collected, or is detached.
    505 		 */
    506 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
    507 			break;
    508 	}
    509 
    510 	/*
    511 	 * We didn't find any LWPs to collect, we may have received a
    512 	 * signal, or some other condition has caused us to bail out.
    513 	 *
    514 	 * If waiting on a specific LWP, clear the waiters marker: some
    515 	 * other LWP may want it.  Then, kick all the remaining waiters
    516 	 * so that they can re-check for zombies and for deadlock.
    517 	 */
    518 	if (lid != 0) {
    519 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    520 			if (l2->l_lid == lid) {
    521 				if (l2->l_waiter == curlid)
    522 					l2->l_waiter = 0;
    523 				break;
    524 			}
    525 		}
    526 	}
    527 	p->p_nlwpwait--;
    528 	l->l_waitingfor = 0;
    529 	cv_broadcast(&p->p_lwpcv);
    530 
    531 	return error;
    532 }
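
/*
 * Caller sketch (illustrative; a _lwp_wait() style system call would
 * look roughly like this, with "lid" a placeholder):
 *
 *	mutex_enter(p->p_lock);			// lwp_wait1() requires it
 *	error = lwp_wait1(l, lid, &departed, 0);
 *	mutex_exit(p->p_lock);			// still held on return
 */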
    533 
    534 /*
    535  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
    536  * The new LWP is created in state LSIDL and must be set running,
    537  * suspended, or stopped by the caller.
    538  */
    539 int
    540 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
    541 	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
    542 	   lwp_t **rnewlwpp, int sclass)
    543 {
    544 	struct lwp *l2, *isfree;
    545 	turnstile_t *ts;
    546 
    547 	/*
    548 	 * First off, reap any detached LWP waiting to be collected.
    549 	 * We can re-use its LWP structure and turnstile.
    550 	 */
    551 	isfree = NULL;
    552 	if (p2->p_zomblwp != NULL) {
    553 		mutex_enter(p2->p_lock);
    554 		if ((isfree = p2->p_zomblwp) != NULL) {
    555 			p2->p_zomblwp = NULL;
    556 			lwp_free(isfree, true, false);/* releases proc mutex */
    557 		} else
    558 			mutex_exit(p2->p_lock);
    559 	}
    560 	if (isfree == NULL) {
    561 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
    562 		memset(l2, 0, sizeof(*l2));
    563 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
    564 		SLIST_INIT(&l2->l_pi_lenders);
    565 	} else {
    566 		l2 = isfree;
    567 		ts = l2->l_ts;
    568 		KASSERT(l2->l_inheritedprio == -1);
    569 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
    570 		memset(l2, 0, sizeof(*l2));
    571 		l2->l_ts = ts;
    572 	}
    573 
    574 	l2->l_stat = LSIDL;
    575 	l2->l_proc = p2;
    576 	l2->l_refcnt = 1;
    577 	l2->l_class = sclass;
    578 	l2->l_kpriority = l1->l_kpriority;
    579 	l2->l_kpribase = PRI_KERNEL;
    580 	l2->l_priority = l1->l_priority;
    581 	l2->l_inheritedprio = -1;
    582 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
    583 	l2->l_cpu = l1->l_cpu;
    584 	l2->l_flag = inmem ? LW_INMEM : 0;
    585 	l2->l_pflag = LP_MPSAFE;
    586 	l2->l_fd = p2->p_fd;
    587 
    588 	if (p2->p_flag & PK_SYSTEM) {
    589 		/* Mark it as a system LWP and not a candidate for swapping */
    590 		l2->l_flag |= LW_SYSTEM;
    591 	}
    592 
    593 	lwp_initspecific(l2);
    594 	sched_lwp_fork(l1, l2);
    595 	lwp_update_creds(l2);
    596 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
    597 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
    598 	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
    599 	cv_init(&l2->l_sigcv, "sigwait");
    600 	l2->l_syncobj = &sched_syncobj;
    601 
    602 	if (rnewlwpp != NULL)
    603 		*rnewlwpp = l2;
    604 
    605 	l2->l_addr = UAREA_TO_USER(uaddr);
    606 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
    607 	    (arg != NULL) ? arg : l2);
    608 
    609 	mutex_enter(p2->p_lock);
    610 
    611 	if ((flags & LWP_DETACHED) != 0) {
    612 		l2->l_prflag = LPR_DETACHED;
    613 		p2->p_ndlwps++;
    614 	} else
    615 		l2->l_prflag = 0;
    616 
    617 	l2->l_sigmask = l1->l_sigmask;
    618 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
    619 	sigemptyset(&l2->l_sigpend.sp_set);
    620 
    621 	p2->p_nlwpid++;
    622 	if (p2->p_nlwpid == 0)
    623 		p2->p_nlwpid++;
    624 	l2->l_lid = p2->p_nlwpid;
    625 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    626 	p2->p_nlwps++;
    627 
    628 	mutex_exit(p2->p_lock);
    629 
    630 	mutex_enter(proc_lock);
    631 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
    632 	mutex_exit(proc_lock);
    633 
    634 	if ((p2->p_flag & PK_SYSTEM) == 0) {
    635 		/* Locking is needed, since LWP is in the list of all LWPs */
    636 		lwp_lock(l2);
    637 		/* Inherit a processor-set */
    638 		l2->l_psid = l1->l_psid;
    639 		/* Inherit an affinity */
    640 		memcpy(&l2->l_affinity, &l1->l_affinity, sizeof(cpuset_t));
    641 		/* Look for a CPU to start */
    642 		l2->l_cpu = sched_takecpu(l2);
    643 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
    644 	}
    645 
    646 	SYSCALL_TIME_LWP_INIT(l2);
    647 
    648 	if (p2->p_emul->e_lwp_fork)
    649 		(*p2->p_emul->e_lwp_fork)(l1, l2);
    650 
    651 	return (0);
    652 }
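
/*
 * Creation sketch (illustrative only; fork1() and kthread_create() are
 * the real consumers, error handling is omitted and the argument values
 * are placeholders).  The new LWP starts out LSIDL and must be made
 * runnable by the caller:
 *
 *	error = lwp_create(curlwp, p, uaddr, inmem, 0, NULL, 0,
 *	    func, arg, &l2, curlwp->l_class);
 *	...
 *	mutex_enter(p->p_lock);
 *	lwp_lock(l2);
 *	setrunnable(l2);	// LSIDL -> LSRUN; releases the LWP lock
 *	mutex_exit(p->p_lock);
 */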
    653 
    654 /*
    655  * Called by MD code when a new LWP begins execution.  Must be called
    656  * with the previous LWP locked (so at splsched), or if there is no
    657  * previous LWP, at splsched.
    658  */
    659 void
    660 lwp_startup(struct lwp *prev, struct lwp *new)
    661 {
    662 
    663 	if (prev != NULL) {
    664 		/*
    665 		 * Normalize the count of the spin-mutexes, it was
    666 		 * increased in mi_switch().  Unmark the state of
    667 		 * context switch - it is finished for previous LWP.
    668 		 */
    669 		curcpu()->ci_mtx_count++;
    670 		membar_exit();
    671 		prev->l_ctxswtch = 0;
    672 	}
    673 	spl0();
    674 	pmap_activate(new);
    675 	LOCKDEBUG_BARRIER(NULL, 0);
    676 	if ((new->l_pflag & LP_MPSAFE) == 0) {
    677 		KERNEL_LOCK(1, new);
    678 	}
    679 }
    680 
    681 /*
    682  * Exit an LWP.
    683  */
    684 void
    685 lwp_exit(struct lwp *l)
    686 {
    687 	struct proc *p = l->l_proc;
    688 	struct lwp *l2;
    689 	bool current;
    690 
    691 	current = (l == curlwp);
    692 
    693 	KASSERT(current || l->l_stat == LSIDL);
    694 
    695 	/*
    696 	 * Verify that we hold no locks other than the kernel lock.
    697 	 */
    698 #ifdef MULTIPROCESSOR
    699 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
    700 #else
    701 	LOCKDEBUG_BARRIER(NULL, 0);
    702 #endif
    703 
    704 	/*
    705 	 * If we are the last live LWP in a process, we need to exit the
    706 	 * entire process.  We do so with an exit status of zero, because
    707 	 * it's a "controlled" exit, and because that's what Solaris does.
    708 	 *
    709 	 * We are not quite a zombie yet, but for accounting purposes we
    710 	 * must increment the count of zombies here.
    711 	 *
    712 	 * Note: the last LWP's specificdata will be deleted here.
    713 	 */
    714 	mutex_enter(p->p_lock);
    715 	if (p->p_nlwps - p->p_nzlwps == 1) {
    716 		KASSERT(current == true);
    717 		/* XXXSMP kernel_lock not held */
    718 		exit1(l, 0);
    719 		/* NOTREACHED */
    720 	}
    721 	p->p_nzlwps++;
    722 	mutex_exit(p->p_lock);
    723 
    724 	if (p->p_emul->e_lwp_exit)
    725 		(*p->p_emul->e_lwp_exit)(l);
    726 
    727 	/* Delete the specificdata while it's still safe to sleep. */
    728 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
    729 
    730 	/*
    731 	 * Release our cached credentials.
    732 	 */
    733 	kauth_cred_free(l->l_cred);
    734 	callout_destroy(&l->l_timeout_ch);
    735 
    736 	/*
    737 	 * While we can still block, mark the LWP as unswappable to
     738 	 * prevent conflicts with the swapper.
    739 	 */
    740 	if (current)
    741 		uvm_lwp_hold(l);
    742 
    743 	/*
    744 	 * Remove the LWP from the global list.
    745 	 */
    746 	mutex_enter(proc_lock);
    747 	LIST_REMOVE(l, l_list);
    748 	mutex_exit(proc_lock);
    749 
    750 	/*
    751 	 * Get rid of all references to the LWP that others (e.g. procfs)
    752 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
    753 	 * mark it waiting for collection in the proc structure.  Note that
     754 	 * before we can do that, we need to free any other dead, detached
    755 	 * LWP waiting to meet its maker.
    756 	 *
    757 	 * XXXSMP disable preemption.
    758 	 */
    759 	mutex_enter(p->p_lock);
    760 	lwp_drainrefs(l);
    761 
    762 	if ((l->l_prflag & LPR_DETACHED) != 0) {
    763 		while ((l2 = p->p_zomblwp) != NULL) {
    764 			p->p_zomblwp = NULL;
    765 			lwp_free(l2, false, false);/* releases proc mutex */
    766 			mutex_enter(p->p_lock);
    767 			l->l_refcnt++;
    768 			lwp_drainrefs(l);
    769 		}
    770 		p->p_zomblwp = l;
    771 	}
    772 
    773 	/*
    774 	 * If we find a pending signal for the process and we have been
     775 	 * asked to check for signals, then we lose: arrange to have
    776 	 * all other LWPs in the process check for signals.
    777 	 */
    778 	if ((l->l_flag & LW_PENDSIG) != 0 &&
    779 	    firstsig(&p->p_sigpend.sp_set) != 0) {
    780 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
    781 			lwp_lock(l2);
    782 			l2->l_flag |= LW_PENDSIG;
    783 			lwp_unlock(l2);
    784 		}
    785 	}
    786 
    787 	lwp_lock(l);
    788 	l->l_stat = LSZOMB;
    789 	if (l->l_name != NULL)
    790 		strcpy(l->l_name, "(zombie)");
    791 	lwp_unlock(l);
    792 	p->p_nrlwps--;
    793 	cv_broadcast(&p->p_lwpcv);
    794 	if (l->l_lwpctl != NULL)
    795 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
    796 	mutex_exit(p->p_lock);
    797 
    798 	/*
    799 	 * We can no longer block.  At this point, lwp_free() may already
    800 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
    801 	 *
    802 	 * Free MD LWP resources.
    803 	 */
    804 #ifndef __NO_CPU_LWP_FREE
    805 	cpu_lwp_free(l, 0);
    806 #endif
    807 
    808 	if (current) {
    809 		pmap_deactivate(l);
    810 
    811 		/*
    812 		 * Release the kernel lock, and switch away into
    813 		 * oblivion.
    814 		 */
    815 #ifdef notyet
    816 		/* XXXSMP hold in lwp_userret() */
    817 		KERNEL_UNLOCK_LAST(l);
    818 #else
    819 		KERNEL_UNLOCK_ALL(l, NULL);
    820 #endif
    821 		lwp_exit_switchaway(l);
    822 	}
    823 }
    824 
    825 void
    826 lwp_exit_switchaway(struct lwp *l)
    827 {
    828 	struct cpu_info *ci;
    829 	struct lwp *idlelwp;
    830 
    831 	(void)splsched();
    832 	l->l_flag &= ~LW_RUNNING;
    833 	ci = curcpu();
    834 	ci->ci_data.cpu_nswtch++;
    835 	idlelwp = ci->ci_data.cpu_idlelwp;
    836 	idlelwp->l_stat = LSONPROC;
    837 
    838 	/*
    839 	 * cpu_onproc must be updated with the CPU locked, as
     840 	 * aston() may try to set an AST pending on the LWP (and
    841 	 * it does so with the CPU locked).  Otherwise, the LWP
    842 	 * may be destroyed before the AST can be set, leading
     843 	 * to a use-after-free.
    844 	 */
    845 	spc_lock(ci);
    846 	ci->ci_data.cpu_onproc = idlelwp;
    847 	spc_unlock(ci);
    848 	cpu_switchto(NULL, idlelwp, false);
    849 }
    850 
    851 /*
    852  * Free a dead LWP's remaining resources.
    853  *
    854  * XXXLWP limits.
    855  */
    856 void
    857 lwp_free(struct lwp *l, bool recycle, bool last)
    858 {
    859 	struct proc *p = l->l_proc;
    860 	struct rusage *ru;
    861 	ksiginfoq_t kq;
    862 
    863 	KASSERT(l != curlwp);
    864 
    865 	/*
    866 	 * If this was not the last LWP in the process, then adjust
    867 	 * counters and unlock.
    868 	 */
    869 	if (!last) {
    870 		/*
    871 		 * Add the LWP's run time to the process' base value.
     872 	 * This needs to coincide with coming off p_lwps.
    873 		 */
    874 		bintime_add(&p->p_rtime, &l->l_rtime);
    875 		p->p_pctcpu += l->l_pctcpu;
    876 		ru = &p->p_stats->p_ru;
    877 		ruadd(ru, &l->l_ru);
    878 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
    879 		ru->ru_nivcsw += l->l_nivcsw;
    880 		LIST_REMOVE(l, l_sibling);
    881 		p->p_nlwps--;
    882 		p->p_nzlwps--;
    883 		if ((l->l_prflag & LPR_DETACHED) != 0)
    884 			p->p_ndlwps--;
    885 
    886 		/*
    887 		 * Have any LWPs sleeping in lwp_wait() recheck for
    888 		 * deadlock.
    889 		 */
    890 		cv_broadcast(&p->p_lwpcv);
    891 		mutex_exit(p->p_lock);
    892 	}
    893 
    894 #ifdef MULTIPROCESSOR
    895 	/*
    896 	 * In the unlikely event that the LWP is still on the CPU,
    897 	 * then spin until it has switched away.  We need to release
    898 	 * all locks to avoid deadlock against interrupt handlers on
    899 	 * the target CPU.
    900 	 */
    901 	if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
    902 		int count;
    903 		(void)count; /* XXXgcc */
    904 		KERNEL_UNLOCK_ALL(curlwp, &count);
    905 		while ((l->l_flag & LW_RUNNING) != 0 ||
    906 		    l->l_cpu->ci_curlwp == l)
    907 			SPINLOCK_BACKOFF_HOOK;
    908 		KERNEL_LOCK(count, curlwp);
    909 	}
    910 #endif
    911 
    912 	/*
    913 	 * Destroy the LWP's remaining signal information.
    914 	 */
    915 	ksiginfo_queue_init(&kq);
    916 	sigclear(&l->l_sigpend, NULL, &kq);
    917 	ksiginfo_queue_drain(&kq);
    918 	cv_destroy(&l->l_sigcv);
    919 	mutex_destroy(&l->l_swaplock);
    920 
    921 	/*
    922 	 * Free the LWP's turnstile and the LWP structure itself unless the
    923 	 * caller wants to recycle them.  Also, free the scheduler specific
    924 	 * data.
    925 	 *
    926 	 * We can't return turnstile0 to the pool (it didn't come from it),
    927 	 * so if it comes up just drop it quietly and move on.
    928 	 *
    929 	 * We don't recycle the VM resources at this time.
    930 	 */
    931 	if (l->l_lwpctl != NULL)
    932 		lwp_ctl_free(l);
    933 	sched_lwp_exit(l);
    934 
    935 	if (!recycle && l->l_ts != &turnstile0)
    936 		pool_cache_put(turnstile_cache, l->l_ts);
    937 	if (l->l_name != NULL)
    938 		kmem_free(l->l_name, MAXCOMLEN);
    939 #ifndef __NO_CPU_LWP_FREE
    940 	cpu_lwp_free2(l);
    941 #endif
    942 	KASSERT((l->l_flag & LW_INMEM) != 0);
    943 	uvm_lwp_exit(l);
    944 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
    945 	KASSERT(l->l_inheritedprio == -1);
    946 	if (!recycle)
    947 		pool_cache_put(lwp_cache, l);
    948 }
    949 
    950 /*
    951  * Pick a LWP to represent the process for those operations which
    952  * want information about a "process" that is actually associated
    953  * with a LWP.
    954  *
    955  * If 'locking' is false, no locking or lock checks are performed.
    956  * This is intended for use by DDB.
    957  *
    958  * We don't bother locking the LWP here, since code that uses this
    959  * interface is broken by design and an exact match is not required.
    960  */
    961 struct lwp *
    962 proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
    963 {
    964 	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
    965 	struct lwp *signalled;
    966 	int cnt;
    967 
    968 	if (locking) {
    969 		KASSERT(mutex_owned(p->p_lock));
    970 	}
    971 
    972 	/* Trivial case: only one LWP */
    973 	if (p->p_nlwps == 1) {
    974 		l = LIST_FIRST(&p->p_lwps);
    975 		if (nrlwps)
    976 			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
    977 		return l;
    978 	}
    979 
    980 	cnt = 0;
    981 	switch (p->p_stat) {
    982 	case SSTOP:
    983 	case SACTIVE:
    984 		/* Pick the most live LWP */
    985 		onproc = running = sleeping = stopped = suspended = NULL;
    986 		signalled = NULL;
    987 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
    988 			if ((l->l_flag & LW_IDLE) != 0) {
    989 				continue;
    990 			}
    991 			if (l->l_lid == p->p_sigctx.ps_lwp)
    992 				signalled = l;
    993 			switch (l->l_stat) {
    994 			case LSONPROC:
    995 				onproc = l;
    996 				cnt++;
    997 				break;
    998 			case LSRUN:
    999 				running = l;
   1000 				cnt++;
   1001 				break;
   1002 			case LSSLEEP:
   1003 				sleeping = l;
   1004 				break;
   1005 			case LSSTOP:
   1006 				stopped = l;
   1007 				break;
   1008 			case LSSUSPENDED:
   1009 				suspended = l;
   1010 				break;
   1011 			}
   1012 		}
   1013 		if (nrlwps)
   1014 			*nrlwps = cnt;
   1015 		if (signalled)
   1016 			l = signalled;
   1017 		else if (onproc)
   1018 			l = onproc;
   1019 		else if (running)
   1020 			l = running;
   1021 		else if (sleeping)
   1022 			l = sleeping;
   1023 		else if (stopped)
   1024 			l = stopped;
   1025 		else if (suspended)
   1026 			l = suspended;
   1027 		else
   1028 			break;
   1029 		return l;
   1030 #ifdef DIAGNOSTIC
   1031 	case SIDL:
   1032 	case SZOMB:
   1033 	case SDYING:
   1034 	case SDEAD:
   1035 		if (locking)
   1036 			mutex_exit(p->p_lock);
   1037 		/* We have more than one LWP and we're in SIDL?
   1038 		 * How'd that happen?
   1039 		 */
   1040 		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
   1041 		    p->p_pid, p->p_comm, p->p_stat);
   1042 		break;
   1043 	default:
   1044 		if (locking)
   1045 			mutex_exit(p->p_lock);
   1046 		panic("Process %d (%s) in unknown state %d",
   1047 		    p->p_pid, p->p_comm, p->p_stat);
   1048 #endif
   1049 	}
   1050 
   1051 	if (locking)
   1052 		mutex_exit(p->p_lock);
   1053 	panic("proc_representative_lwp: couldn't find a lwp for process"
   1054 		" %d (%s)", p->p_pid, p->p_comm);
   1055 	/* NOTREACHED */
   1056 	return NULL;
   1057 }
   1058 
   1059 /*
    1060  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1061  */
   1062 void
   1063 lwp_migrate(lwp_t *l, struct cpu_info *ci)
   1064 {
   1065 	struct schedstate_percpu *spc;
   1066 	KASSERT(lwp_locked(l, NULL));
   1067 
   1068 	if (l->l_cpu == ci) {
   1069 		lwp_unlock(l);
   1070 		return;
   1071 	}
   1072 
   1073 	spc = &ci->ci_schedstate;
   1074 	switch (l->l_stat) {
   1075 	case LSRUN:
   1076 		if (l->l_flag & LW_INMEM) {
   1077 			l->l_target_cpu = ci;
   1078 			break;
   1079 		}
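		/* FALLTHROUGH */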
   1080 	case LSIDL:
   1081 		l->l_cpu = ci;
   1082 		lwp_unlock_to(l, spc->spc_mutex);
   1083 		KASSERT(!mutex_owned(spc->spc_mutex));
   1084 		return;
   1085 	case LSSLEEP:
   1086 		l->l_cpu = ci;
   1087 		break;
   1088 	case LSSTOP:
   1089 	case LSSUSPENDED:
   1090 		if (l->l_wchan != NULL) {
   1091 			l->l_cpu = ci;
   1092 			break;
   1093 		}
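		/* FALLTHROUGH */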
   1094 	case LSONPROC:
   1095 		l->l_target_cpu = ci;
   1096 		break;
   1097 	}
   1098 	lwp_unlock(l);
   1099 }
   1100 
   1101 /*
    1102  * Find the LWP in the process.  Arguments may be zero, in which case
    1103  * the calling process and the first LWP in the list will be used.
    1104  * On success, returns with the proc locked.
   1105  */
   1106 struct lwp *
   1107 lwp_find2(pid_t pid, lwpid_t lid)
   1108 {
   1109 	proc_t *p;
   1110 	lwp_t *l;
   1111 
   1112 	/* Find the process */
   1113 	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
   1114 	if (p == NULL)
   1115 		return NULL;
   1116 	mutex_enter(p->p_lock);
   1117 	if (pid != 0) {
   1118 		/* Case of p_find */
   1119 		mutex_exit(proc_lock);
   1120 	}
   1121 
   1122 	/* Find the thread */
   1123 	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
   1124 	if (l == NULL) {
   1125 		mutex_exit(p->p_lock);
   1126 	}
   1127 
   1128 	return l;
   1129 }
   1130 
   1131 /*
    1132  * Look up a live LWP within the specified process, and return it locked.
   1133  *
   1134  * Must be called with p->p_lock held.
   1135  */
   1136 struct lwp *
   1137 lwp_find(struct proc *p, int id)
   1138 {
   1139 	struct lwp *l;
   1140 
   1141 	KASSERT(mutex_owned(p->p_lock));
   1142 
   1143 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1144 		if (l->l_lid == id)
   1145 			break;
   1146 	}
   1147 
   1148 	/*
   1149 	 * No need to lock - all of these conditions will
   1150 	 * be visible with the process level mutex held.
   1151 	 */
   1152 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1153 		l = NULL;
   1154 
   1155 	return l;
   1156 }
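
/*
 * Usage sketch (illustrative; "lid" is a placeholder):
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_lock(l);	// found: l cannot exit while p_lock is held
 *	mutex_exit(p->p_lock);
 */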
   1157 
   1158 /*
   1159  * Update an LWP's cached credentials to mirror the process' master copy.
   1160  *
   1161  * This happens early in the syscall path, on user trap, and on LWP
   1162  * creation.  A long-running LWP can also voluntarily choose to update
    1163  * its credentials by calling this routine.  This may be called from
   1164  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
   1165  */
   1166 void
   1167 lwp_update_creds(struct lwp *l)
   1168 {
   1169 	kauth_cred_t oc;
   1170 	struct proc *p;
   1171 
   1172 	p = l->l_proc;
   1173 	oc = l->l_cred;
   1174 
   1175 	mutex_enter(p->p_lock);
   1176 	kauth_cred_hold(p->p_cred);
   1177 	l->l_cred = p->p_cred;
   1178 	l->l_prflag &= ~LPR_CRMOD;
   1179 	mutex_exit(p->p_lock);
   1180 	if (oc != NULL)
   1181 		kauth_cred_free(oc);
   1182 }
   1183 
   1184 /*
   1185  * Verify that an LWP is locked, and optionally verify that the lock matches
   1186  * one we specify.
   1187  */
   1188 int
   1189 lwp_locked(struct lwp *l, kmutex_t *mtx)
   1190 {
   1191 	kmutex_t *cur = l->l_mutex;
   1192 
   1193 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1194 }
   1195 
   1196 /*
   1197  * Lock an LWP.
   1198  */
   1199 void
   1200 lwp_lock_retry(struct lwp *l, kmutex_t *old)
   1201 {
   1202 
   1203 	/*
   1204 	 * XXXgcc ignoring kmutex_t * volatile on i386
   1205 	 *
   1206 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
   1207 	 */
   1208 #if 1
   1209 	while (l->l_mutex != old) {
   1210 #else
   1211 	for (;;) {
   1212 #endif
   1213 		mutex_spin_exit(old);
   1214 		old = l->l_mutex;
   1215 		mutex_spin_enter(old);
   1216 
   1217 		/*
   1218 		 * mutex_enter() will have posted a read barrier.  Re-test
   1219 		 * l->l_mutex.  If it has changed, we need to try again.
   1220 		 */
   1221 #if 1
   1222 	}
   1223 #else
   1224 	} while (__predict_false(l->l_mutex != old));
   1225 #endif
   1226 }
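
/*
 * For reference (a sketch only; the inline fast path lives in
 * sys/lwp.h and is assumed, not reproduced, here), lwp_lock() pairs
 * with the retry loop above roughly as follows:
 *
 *	old = l->l_mutex;
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))	// changed underneath us?
 *		lwp_lock_retry(l, old);
 */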
   1227 
   1228 /*
   1229  * Lend a new mutex to an LWP.  The old mutex must be held.
   1230  */
   1231 void
   1232 lwp_setlock(struct lwp *l, kmutex_t *new)
   1233 {
   1234 
   1235 	KASSERT(mutex_owned(l->l_mutex));
   1236 
   1237 	membar_producer();
   1238 	l->l_mutex = new;
   1239 }
   1240 
   1241 /*
   1242  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1243  * must be held.
   1244  */
   1245 void
   1246 lwp_unlock_to(struct lwp *l, kmutex_t *new)
   1247 {
   1248 	kmutex_t *old;
   1249 
   1250 	KASSERT(mutex_owned(l->l_mutex));
   1251 
   1252 	old = l->l_mutex;
   1253 	membar_producer();
   1254 	l->l_mutex = new;
   1255 	mutex_spin_exit(old);
   1256 }
   1257 
   1258 /*
   1259  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
   1260  * locked.
   1261  */
   1262 void
   1263 lwp_relock(struct lwp *l, kmutex_t *new)
   1264 {
   1265 	kmutex_t *old;
   1266 
   1267 	KASSERT(mutex_owned(l->l_mutex));
   1268 
   1269 	old = l->l_mutex;
   1270 	if (old != new) {
   1271 		mutex_spin_enter(new);
   1272 		l->l_mutex = new;
   1273 		mutex_spin_exit(old);
   1274 	}
   1275 }
   1276 
   1277 int
   1278 lwp_trylock(struct lwp *l)
   1279 {
   1280 	kmutex_t *old;
   1281 
   1282 	for (;;) {
   1283 		if (!mutex_tryenter(old = l->l_mutex))
   1284 			return 0;
   1285 		if (__predict_true(l->l_mutex == old))
   1286 			return 1;
   1287 		mutex_spin_exit(old);
   1288 	}
   1289 }
   1290 
   1291 u_int
   1292 lwp_unsleep(lwp_t *l, bool cleanup)
   1293 {
   1294 
   1295 	KASSERT(mutex_owned(l->l_mutex));
   1296 
   1297 	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
   1298 }
   1299 
   1300 
   1301 /*
    1302  * Handle exceptions for mi_userret().  Called if any of the flags in
    1303  * the LW_USERRET set is set.
   1304  */
   1305 void
   1306 lwp_userret(struct lwp *l)
   1307 {
   1308 	struct proc *p;
   1309 	void (*hook)(void);
   1310 	int sig;
   1311 
   1312 	p = l->l_proc;
   1313 
   1314 #ifndef __HAVE_FAST_SOFTINTS
   1315 	/* Run pending soft interrupts. */
   1316 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1317 		softint_overlay();
   1318 #endif
   1319 
   1320 	/*
   1321 	 * It should be safe to do this read unlocked on a multiprocessor
    1322 	 * system.
   1323 	 */
   1324 	while ((l->l_flag & LW_USERRET) != 0) {
   1325 		/*
   1326 		 * Process pending signals first, unless the process
   1327 		 * is dumping core or exiting, where we will instead
   1328 		 * enter the LW_WSUSPEND case below.
   1329 		 */
   1330 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1331 		    LW_PENDSIG) {
   1332 			mutex_enter(p->p_lock);
   1333 			while ((sig = issignal(l)) != 0)
   1334 				postsig(sig);
   1335 			mutex_exit(p->p_lock);
   1336 		}
   1337 
   1338 		/*
   1339 		 * Core-dump or suspend pending.
   1340 		 *
   1341 		 * In case of core dump, suspend ourselves, so that the
   1342 		 * kernel stack and therefore the userland registers saved
   1343 		 * in the trapframe are around for coredump() to write them
   1344 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
   1345 		 * will write the core file out once all other LWPs are
   1346 		 * suspended.
   1347 		 */
   1348 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1349 			mutex_enter(p->p_lock);
   1350 			p->p_nrlwps--;
   1351 			cv_broadcast(&p->p_lwpcv);
   1352 			lwp_lock(l);
   1353 			l->l_stat = LSSUSPENDED;
   1354 			mutex_exit(p->p_lock);
   1355 			mi_switch(l);
   1356 		}
   1357 
   1358 		/* Process is exiting. */
   1359 		if ((l->l_flag & LW_WEXIT) != 0) {
   1360 			lwp_exit(l);
   1361 			KASSERT(0);
   1362 			/* NOTREACHED */
   1363 		}
   1364 
   1365 		/* Call userret hook; used by Linux emulation. */
   1366 		if ((l->l_flag & LW_WUSERRET) != 0) {
   1367 			lwp_lock(l);
   1368 			l->l_flag &= ~LW_WUSERRET;
   1369 			lwp_unlock(l);
   1370 			hook = p->p_userret;
   1371 			p->p_userret = NULL;
   1372 			(*hook)();
   1373 		}
   1374 	}
   1375 }
   1376 
   1377 /*
   1378  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1379  */
   1380 void
   1381 lwp_need_userret(struct lwp *l)
   1382 {
   1383 	KASSERT(lwp_locked(l, NULL));
   1384 
   1385 	/*
   1386 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1387 	 * that the condition will be seen before forcing the LWP to enter
   1388 	 * kernel mode.
   1389 	 */
   1390 	membar_producer();
   1391 	cpu_signotify(l);
   1392 }
   1393 
   1394 /*
   1395  * Add one reference to an LWP.  This will prevent the LWP from
    1396  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1397  */
   1398 void
   1399 lwp_addref(struct lwp *l)
   1400 {
   1401 
   1402 	KASSERT(mutex_owned(l->l_proc->p_lock));
   1403 	KASSERT(l->l_stat != LSZOMB);
   1404 	KASSERT(l->l_refcnt != 0);
   1405 
   1406 	l->l_refcnt++;
   1407 }
   1408 
   1409 /*
   1410  * Remove one reference to an LWP.  If this is the last reference,
   1411  * then we must finalize the LWP's death.
   1412  */
   1413 void
   1414 lwp_delref(struct lwp *l)
   1415 {
   1416 	struct proc *p = l->l_proc;
   1417 
   1418 	mutex_enter(p->p_lock);
   1419 	KASSERT(l->l_stat != LSZOMB);
   1420 	KASSERT(l->l_refcnt > 0);
   1421 	if (--l->l_refcnt == 0)
   1422 		cv_broadcast(&p->p_lwpcv);
   1423 	mutex_exit(p->p_lock);
   1424 }
   1425 
   1426 /*
   1427  * Drain all references to the current LWP.
   1428  */
   1429 void
   1430 lwp_drainrefs(struct lwp *l)
   1431 {
   1432 	struct proc *p = l->l_proc;
   1433 
   1434 	KASSERT(mutex_owned(p->p_lock));
   1435 	KASSERT(l->l_refcnt != 0);
   1436 
   1437 	l->l_refcnt--;
   1438 	while (l->l_refcnt != 0)
   1439 		cv_wait(&p->p_lwpcv, p->p_lock);
   1440 }
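
/*
 * Reference pattern sketch (illustrative; this is how a procfs-style
 * consumer would pin an LWP for inspection):
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);		// pin l across the unlock
 *	mutex_exit(p->p_lock);
 *	... inspect l ...
 *	lwp_delref(l);			// may wake a sleeper in lwp_drainrefs()
 */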
   1441 
   1442 /*
   1443  * lwp_specific_key_create --
   1444  *	Create a key for subsystem lwp-specific data.
   1445  */
   1446 int
   1447 lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1448 {
   1449 
   1450 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
   1451 }
   1452 
   1453 /*
   1454  * lwp_specific_key_delete --
   1455  *	Delete a key for subsystem lwp-specific data.
   1456  */
   1457 void
   1458 lwp_specific_key_delete(specificdata_key_t key)
   1459 {
   1460 
   1461 	specificdata_key_delete(lwp_specificdata_domain, key);
   1462 }
   1463 
   1464 /*
   1465  * lwp_initspecific --
   1466  *	Initialize an LWP's specificdata container.
   1467  */
   1468 void
   1469 lwp_initspecific(struct lwp *l)
   1470 {
   1471 	int error;
   1472 
   1473 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
   1474 	KASSERT(error == 0);
   1475 }
   1476 
   1477 /*
   1478  * lwp_finispecific --
   1479  *	Finalize an LWP's specificdata container.
   1480  */
   1481 void
   1482 lwp_finispecific(struct lwp *l)
   1483 {
   1484 
   1485 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
   1486 }
   1487 
   1488 /*
   1489  * lwp_getspecific --
   1490  *	Return lwp-specific data corresponding to the specified key.
   1491  *
   1492  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
   1493  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
   1494  *	LWP's specifc data, care must be taken to ensure that doing so
   1495  *	would not cause internal data structure inconsistency (i.e. caller
   1496  *	can guarantee that the target LWP is not inside an lwp_getspecific()
   1497  *	or lwp_setspecific() call).
   1498  */
   1499 void *
   1500 lwp_getspecific(specificdata_key_t key)
   1501 {
   1502 
   1503 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1504 						  &curlwp->l_specdataref, key));
   1505 }
   1506 
   1507 void *
   1508 _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
   1509 {
   1510 
   1511 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1512 						  &l->l_specdataref, key));
   1513 }
   1514 
   1515 /*
   1516  * lwp_setspecific --
   1517  *	Set lwp-specific data corresponding to the specified key.
   1518  */
   1519 void
   1520 lwp_setspecific(specificdata_key_t key, void *data)
   1521 {
   1522 
   1523 	specificdata_setspecific(lwp_specificdata_domain,
   1524 				 &curlwp->l_specdataref, key, data);
   1525 }
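
/*
 * Illustrative use of the LWP-specific data API (the key, destructor
 * and data names below are hypothetical):
 *
 *	static specificdata_key_t foo_key;
 *
 *	error = lwp_specific_key_create(&foo_key, foo_dtor);
 *	lwp_setspecific(foo_key, foo_data);	// always on curlwp
 *	foo_data = lwp_getspecific(foo_key);	// curlwp only: unlocked
 */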
   1526 
   1527 /*
   1528  * Allocate a new lwpctl structure for a user LWP.
   1529  */
   1530 int
   1531 lwp_ctl_alloc(vaddr_t *uaddr)
   1532 {
   1533 	lcproc_t *lp;
   1534 	u_int bit, i, offset;
   1535 	struct uvm_object *uao;
   1536 	int error;
   1537 	lcpage_t *lcp;
   1538 	proc_t *p;
   1539 	lwp_t *l;
   1540 
   1541 	l = curlwp;
   1542 	p = l->l_proc;
   1543 
   1544 	if (l->l_lcpage != NULL) {
   1545 		lcp = l->l_lcpage;
   1546 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1547 		return (EINVAL);
   1548 	}
   1549 
   1550 	/* First time around, allocate header structure for the process. */
   1551 	if ((lp = p->p_lwpctl) == NULL) {
   1552 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1553 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1554 		lp->lp_uao = NULL;
   1555 		TAILQ_INIT(&lp->lp_pages);
   1556 		mutex_enter(p->p_lock);
   1557 		if (p->p_lwpctl == NULL) {
   1558 			p->p_lwpctl = lp;
   1559 			mutex_exit(p->p_lock);
   1560 		} else {
   1561 			mutex_exit(p->p_lock);
   1562 			mutex_destroy(&lp->lp_lock);
   1563 			kmem_free(lp, sizeof(*lp));
   1564 			lp = p->p_lwpctl;
   1565 		}
   1566 	}
   1567 
    1568 	/*
    1569 	 * Set up an anonymous memory region to hold the shared pages.
    1570 	 * Map them into the process' address space.  The user vmspace
    1571 	 * gets the first reference on the UAO.
    1572 	 */
   1573 	mutex_enter(&lp->lp_lock);
   1574 	if (lp->lp_uao == NULL) {
   1575 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1576 		lp->lp_cur = 0;
   1577 		lp->lp_max = LWPCTL_UAREA_SZ;
   1578 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1579 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
   1580 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1581 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1582 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1583 		if (error != 0) {
   1584 			uao_detach(lp->lp_uao);
   1585 			lp->lp_uao = NULL;
   1586 			mutex_exit(&lp->lp_lock);
   1587 			return error;
   1588 		}
   1589 	}
   1590 
   1591 	/* Get a free block and allocate for this LWP. */
   1592 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1593 		if (lcp->lcp_nfree != 0)
   1594 			break;
   1595 	}
   1596 	if (lcp == NULL) {
   1597 		/* Nothing available - try to set up a free page. */
   1598 		if (lp->lp_cur == lp->lp_max) {
   1599 			mutex_exit(&lp->lp_lock);
   1600 			return ENOMEM;
   1601 		}
   1602 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1603 		if (lcp == NULL) {
   1604 			mutex_exit(&lp->lp_lock);
   1605 			return ENOMEM;
   1606 		}
   1607 		/*
   1608 		 * Wire the next page down in kernel space.  Since this
   1609 		 * is a new mapping, we must add a reference.
   1610 		 */
   1611 		uao = lp->lp_uao;
   1612 		(*uao->pgops->pgo_reference)(uao);
   1613 		lcp->lcp_kaddr = vm_map_min(kernel_map);
   1614 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1615 		    uao, lp->lp_cur, PAGE_SIZE,
   1616 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1617 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1618 		if (error != 0) {
   1619 			mutex_exit(&lp->lp_lock);
   1620 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1621 			(*uao->pgops->pgo_detach)(uao);
   1622 			return error;
   1623 		}
   1624 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1625 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1626 		if (error != 0) {
   1627 			mutex_exit(&lp->lp_lock);
   1628 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1629 			    lcp->lcp_kaddr + PAGE_SIZE);
   1630 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1631 			return error;
   1632 		}
   1633 		/* Prepare the page descriptor and link into the list. */
   1634 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1635 		lp->lp_cur += PAGE_SIZE;
   1636 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1637 		lcp->lcp_rotor = 0;
   1638 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1639 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1640 	}
   1641 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1642 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1643 			i = 0;
   1644 	}
   1645 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1646 	lcp->lcp_bitmap[i] ^= (1 << bit);
   1647 	lcp->lcp_rotor = i;
   1648 	lcp->lcp_nfree--;
   1649 	l->l_lcpage = lcp;
   1650 	offset = (i << 5) + bit;
   1651 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1652 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1653 	mutex_exit(&lp->lp_lock);
   1654 
   1655 	l->l_lwpctl->lc_curcpu = (short)curcpu()->ci_data.cpu_index;
   1656 
   1657 	return 0;
   1658 }
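
/*
 * Worked example of the bitmap arithmetic above: each lcp_bitmap word
 * tracks 32 lwpctl slots, so for word i == 2 and bit == 5 the slot
 * index is (2 << 5) + 5 == 69; lwp_ctl_free() recovers the word with
 * 69 >> 5 == 2 and the bit with 69 & 31 == 5.
 */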
   1659 
   1660 /*
   1661  * Free an lwpctl structure back to the per-process list.
   1662  */
   1663 void
   1664 lwp_ctl_free(lwp_t *l)
   1665 {
   1666 	lcproc_t *lp;
   1667 	lcpage_t *lcp;
   1668 	u_int map, offset;
   1669 
   1670 	lp = l->l_proc->p_lwpctl;
   1671 	KASSERT(lp != NULL);
   1672 
   1673 	lcp = l->l_lcpage;
   1674 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1675 	KASSERT(offset < LWPCTL_PER_PAGE);
   1676 
   1677 	mutex_enter(&lp->lp_lock);
   1678 	lcp->lcp_nfree++;
   1679 	map = offset >> 5;
   1680 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
   1681 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1682 		lcp->lcp_rotor = map;
   1683 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1684 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1685 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1686 	}
   1687 	mutex_exit(&lp->lp_lock);
   1688 }
   1689 
   1690 /*
   1691  * Process is exiting; tear down lwpctl state.  This can only be safely
   1692  * called by the last LWP in the process.
   1693  */
   1694 void
   1695 lwp_ctl_exit(void)
   1696 {
   1697 	lcpage_t *lcp, *next;
   1698 	lcproc_t *lp;
   1699 	proc_t *p;
   1700 	lwp_t *l;
   1701 
   1702 	l = curlwp;
   1703 	l->l_lwpctl = NULL;
   1704 	l->l_lcpage = NULL;
   1705 	p = l->l_proc;
   1706 	lp = p->p_lwpctl;
   1707 
   1708 	KASSERT(lp != NULL);
   1709 	KASSERT(p->p_nlwps == 1);
   1710 
   1711 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1712 		next = TAILQ_NEXT(lcp, lcp_chain);
   1713 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1714 		    lcp->lcp_kaddr + PAGE_SIZE);
   1715 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1716 	}
   1717 
   1718 	if (lp->lp_uao != NULL) {
   1719 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1720 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1721 	}
   1722 
   1723 	mutex_destroy(&lp->lp_lock);
   1724 	kmem_free(lp, sizeof(*lp));
   1725 	p->p_lwpctl = NULL;
   1726 }
   1727 
   1728 #if defined(DDB)
   1729 void
   1730 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1731 {
   1732 	lwp_t *l;
   1733 
   1734 	LIST_FOREACH(l, &alllwp, l_list) {
   1735 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   1736 
   1737 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   1738 			continue;
   1739 		}
   1740 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   1741 		    (void *)addr, (void *)stack,
   1742 		    (size_t)(addr - stack), l);
   1743 	}
   1744 }
   1745 #endif /* defined(DDB) */
   1746