/*	$NetBSD: kern_lwp.c,v 1.29.6.8 2008/01/21 09:46:06 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit or thread of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp", also known as lwp_t.
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (L_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed,
 *		or it has ceased executing a unit of work and is waiting
 *		to be started again.
 *
 *	LSSUSPENDED:
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or may be
 *	sleeping or idle.  It is expected to take the necessary action to
 *	stop executing or become "running" again within a short timeframe.
 *	The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
 *	Importantly, it indicates that its state is tied to a CPU.
 *
 *	LSZOMB:
 *
 *		Dead or dying: the LWP has released most of its resources
 *		and is a) about to switch away into oblivion or b) has
 *		already switched away.  When it switches away, its few
 *		remaining resources can be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		has switched away or will switch away shortly to allow other
 *		LWPs to run on the CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *
 *		Single LWPs within a process cannot be stopped selectively:
 *		all actions that can stop or continue LWPs occur at the
 *		process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP state may only be set when returning to
 *	user space in userret(), or when sleeping interruptibly.  The
 *	LSSUSPENDED state may only be set in userret().  Before setting
 *	those states, we try to ensure that the LWPs will release all
 *	locks that they hold, and at a minimum try to ensure that the
 *	LWP can be set runnable again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *	            > STOPPED			    > SLEEP
 *	            > SUSPENDED			    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *	            > SLEEP			    > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN		            > SUSPENDED
 *		    > STOPPED                       > STOPPED
 *		    > SUSPENDED
 *
 *	Other state transitions are possible with kernel threads (e.g.
 *	ONPROC -> IDL), but only happen under tightly controlled
 *	circumstances where the side effects are understood.
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin lock pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held,
 *	and may cause the LWP's lock pointer to change.  Manipulation of
 *	the general lock is not performed directly, but through calls to
 *	lwp_lock(), lwp_relock() and similar.
 *
 *	States and their associated locks:
 *
 *	LSONPROC, LSZOMB:
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock.
 *
 *	LSIDL, LSRUN:
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This may be a per-CPU lock, depending on the scheduler.
 *
 *	LSSLEEP:
 *
 *		Covered by a lock associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue lock.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the lock is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleepq_t::sq_mutex ->
 *			tschain_t::tc_mutex ->
 *			    spc::spc_mutex
 *
 *	Each process has a scheduler state lock (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process-wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
 */
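
/*
 * To make the locking protocol above concrete, a minimal sketch of a
 * transition into one of the counted states follows.  This is an
 * illustration only, not code from this file; the real transition
 * paths (for example, lwp_suspend() below) have more work to do:
 *
 *	mutex_enter(&p->p_smutex);	   (process-wide state, counters)
 *	lwp_lock(l);			   (the LWP's general lock)
 *	l->l_stat = LSSUSPENDED;	   (one of the counted states)
 *	p->p_nrlwps--;			   (LWP left the "running" set)
 *	lwp_unlock(l);			   (l_mutex may now have changed)
 *	mutex_exit(&p->p_smutex);
 */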

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.29.6.8 2008/01/21 09:46:06 yamt Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/user.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/pset.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);

POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr, IPL_NONE);

static pool_cache_t lwp_cache;
static specificdata_domain_t lwp_specificdata_domain;

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(&t->l_proc->p_smutex));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	return (error);
}
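
/*
 * Illustrative caller sketch for lwp_suspend(), mirroring its locking
 * contract above (cf. the _lwp_suspend() system call); "t" is a target
 * LWP found by the caller:
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);	   (drops t's lock)
 *	mutex_exit(&p->p_smutex);
 */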

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;
	lwpid_t curlid;
	bool exiting;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nlwpwait++;
	l->l_waitingfor = lid;
	curlid = l->l_lid;
	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		error = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			/*
			 * If a specific wait and the target is waiting on
			 * us, then avoid deadlock.  This also traps LWPs
			 * that try to wait on themselves.
			 *
			 * Note that this does not handle more complicated
			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
			 * can still be killed so it is not a major problem.
			 */
			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
				error = EDEADLK;
				break;
			}
			if (l2 == l)
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += exiting;
				continue;
			}
			if (lid != 0) {
				if (l2->l_lid != lid)
					continue;
				/*
				 * Mark this LWP as the first waiter, if there
				 * is no other.
				 */
				if (l2->l_waiter == 0)
					l2->l_waiter = curlid;
			} else if (l2->l_waiter != 0) {
				/*
				 * It already has a waiter - so don't
				 * collect it.  If the waiter doesn't
				 * grab it we'll get another chance
				 * later.
				 */
				nfound++;
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			/*
			 * We're no longer waiting.  Reset the "first waiter"
			 * pointer on the target, in case it was us.
			 */
			l->l_waitingfor = 0;
			l2->l_waiter = 0;
			p->p_nlwpwait--;
			if (departed)
				*departed = l2->l_lid;
			sched_lwp_collect(l2);

			/* lwp_free() releases the proc lock. */
			lwp_free(l2, false, false);
			mutex_enter(&p->p_smutex);
			return 0;
		}

		if (error != 0)
			break;
		if (nfound == 0) {
			error = ESRCH;
			break;
		}

		/*
		 * The kernel is careful to ensure that it can not deadlock
		 * when exiting - just keep waiting.
		 */
		if (exiting) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}

		/*
		 * If all other LWPs are waiting for exits or suspends
		 * and the supply of zombies and potential zombies is
		 * exhausted, then we are about to deadlock.
		 *
		 * If the process is exiting (and this LWP is not the one
		 * that is coordinating the exit) then bail out now.
		 */
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
			error = EDEADLK;
			break;
		}

		/*
		 * Sit around and wait for something to happen.  We'll be
		 * awoken if any of the conditions examined change: if an
		 * LWP exits, is collected, or is detached.
		 */
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	/*
	 * We didn't find any LWPs to collect, we may have received a
	 * signal, or some other condition has caused us to bail out.
	 *
	 * If waiting on a specific LWP, clear the waiters marker: some
	 * other LWP may want it.  Then, kick all the remaining waiters
	 * so that they can re-check for zombies and for deadlock.
	 */
	if (lid != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2->l_lid == lid) {
				if (l2->l_waiter == curlid)
					l2->l_waiter = 0;
				break;
			}
		}
	}
	p->p_nlwpwait--;
	l->l_waitingfor = 0;
	cv_broadcast(&p->p_lwpcv);

	return error;
}
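
/*
 * Illustrative caller sketch for lwp_wait1(), following its locking
 * contract (cf. the _lwp_wait() system call): p_smutex is held across
 * the call and is still held on return, whatever the error:
 *
 *	mutex_enter(&p->p_smutex);
 *	error = lwp_wait1(curlwp, lid, &departed, 0);
 *	mutex_exit(&p->p_smutex);
 */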

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
	   lwp_t **rnewlwpp, int sclass)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, true, false);	/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == -1);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_class = sclass;
	l2->l_kpriority = l1->l_kpriority;
	l2->l_kpribase = PRI_KERNEL;
	l2->l_priority = l1->l_priority;
	l2->l_inheritedprio = -1;
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? LW_INMEM : 0;
	l2->l_pflag = LP_MPSAFE;

	if (p2->p_flag & PK_SYSTEM) {
		/* Mark it as a system LWP and not a candidate for swapping */
		l2->l_flag |= LW_SYSTEM;
	}

	lwp_initspecific(l2);
	sched_lwp_fork(l1, l2);
	lwp_update_creds(l2);
	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&proclist_lock);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&proclist_lock);

	if ((p2->p_flag & PK_SYSTEM) == 0) {
		/* Locking is needed, since LWP is in the list of all LWPs */
		lwp_lock(l2);
		/* Inherit a processor-set */
		l2->l_psid = l1->l_psid;
		/* Inherit an affinity */
		memcpy(&l2->l_affinity, &l1->l_affinity, sizeof(cpuset_t));
		/* Look for a CPU to start */
		l2->l_cpu = sched_takecpu(l2);
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
	}

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
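
/*
 * Illustrative sketch of the create-then-start pattern implied by the
 * contract above (error handling omitted; cf. kthread_create() and the
 * _lwp_create() system call).  The exact wake-up step varies by caller;
 * this variant places the new LWP directly on a run queue.  Here uaddr
 * and inmem are assumed to come from the caller's u-area allocation
 * (e.g. uvm_uarea_alloc()):
 *
 *	error = lwp_create(curlwp, p, uaddr, inmem, 0, NULL, 0,
 *	    func, arg, &l2, curlwp->l_class);
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(l2);
 *	l2->l_stat = LSRUN;
 *	sched_enqueue(l2, false);
 *	p->p_nrlwps++;		   (LSIDL -> LSRUN enters the counted set)
 *	lwp_unlock(l2);
 *	mutex_exit(&p->p_smutex);
 */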

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{

	if (prev != NULL) {
		/*
		 * Normalize the count of spin-mutexes: it was
		 * increased in mi_switch().  Clear the context switch
		 * marker - the switch is now finished for the previous
		 * LWP.
		 */
		curcpu()->ci_mtx_count++;
		membar_exit();
		prev->l_ctxswtch = 0;
	}
	spl0();
	pmap_activate(new);
	LOCKDEBUG_BARRIER(NULL, 0);
	if ((new->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new);
	}
}

/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	bool current;

	current = (l == curlwp);

	KASSERT(current || l->l_stat == LSIDL);

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		KASSERT(current == true);
		/* XXXSMP kernel_lock not held */
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);
	callout_destroy(&l->l_timeout_ch);

	/*
	 * While we can still block, mark the LWP as unswappable to
	 * prevent conflicts with the swapper.
	 */
	if (current)
		uvm_lwp_hold(l);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(&proclist_lock);
	mutex_enter(&proclist_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&proclist_mutex);
	mutex_exit(&proclist_lock);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
			l->l_refcnt++;
			lwp_drainrefs(l);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	if (l->l_name != NULL)
		strcpy(l->l_name, "(zombie)");
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	if (current) {
		pmap_deactivate(l);

		/*
		 * Release the kernel lock, and switch away into
		 * oblivion.
		 */
#ifdef notyet
		/* XXXSMP hold in lwp_userret() */
		KERNEL_UNLOCK_LAST(l);
#else
		KERNEL_UNLOCK_ALL(l, NULL);
#endif
		lwp_exit_switchaway(l);
	}
}

void
lwp_exit_switchaway(struct lwp *l)
{
	struct cpu_info *ci;
	struct lwp *idlelwp;

	/* Unlocked, but is for statistics only. */
	uvmexp.swtch++;

	(void)splsched();
	l->l_flag &= ~LW_RUNNING;
	ci = curcpu();
	idlelwp = ci->ci_data.cpu_idlelwp;
	idlelwp->l_stat = LSONPROC;

	/*
	 * cpu_onproc must be updated with the CPU locked, as
	 * aston() may try to set an AST pending on the LWP (and
	 * it does so with the CPU locked).  Otherwise, the LWP
	 * may be destroyed before the AST can be set, leading
	 * to a use-after-free.
	 */
	spc_lock(ci);
	ci->ci_data.cpu_onproc = idlelwp;
	spc_unlock(ci);
	cpu_switchto(NULL, idlelwp, false);
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
	struct proc *p = l->l_proc;
	ksiginfoq_t kq;

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		bintime_add(&p->p_rtime, &l->l_rtime);
		p->p_pctcpu += l->l_pctcpu;
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;

		/*
		 * Have any LWPs sleeping in lwp_wait() recheck for
		 * deadlock.
		 */
		cv_broadcast(&p->p_lwpcv);
		mutex_exit(&p->p_smutex);
	}

#ifdef MULTIPROCESSOR
	/*
	 * In the unlikely event that the LWP is still on the CPU,
	 * then spin until it has switched away.  We need to release
	 * all locks to avoid deadlock against interrupt handlers on
	 * the target CPU.
	 */
	if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
		int count;
		(void)count; /* XXXgcc */
		KERNEL_UNLOCK_ALL(curlwp, &count);
		while ((l->l_flag & LW_RUNNING) != 0 ||
		    l->l_cpu->ci_curlwp == l)
			SPINLOCK_BACKOFF_HOOK;
		KERNEL_LOCK(count, curlwp);
	}
#endif

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);
	mutex_destroy(&l->l_swaplock);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.  Also, free the scheduler-specific
	 * data.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	if (l->l_lwpctl != NULL)
		lwp_ctl_free(l);
	sched_lwp_exit(l);

	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(turnstile_cache, l->l_ts);
	if (l->l_name != NULL)
		kmem_free(l->l_name, MAXCOMLEN);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	uvm_lwp_exit(l);
	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == -1);
	if (!recycle)
		pool_cache_put(lwp_cache, l);
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		KASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0) {
				continue;
			}
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SZOMB:
	case SDYING:
	case SDEAD:
		if (locking)
			mutex_exit(&p->p_smutex);
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}
   1052  1.29.6.2      yamt 
   1053  1.29.6.2      yamt /*
    1054  1.29.6.8      yamt  * Migrate the LWP to another CPU.  Unlocks the LWP.
   1055  1.29.6.8      yamt  */
   1056  1.29.6.8      yamt void
   1057  1.29.6.8      yamt lwp_migrate(lwp_t *l, struct cpu_info *ci)
   1058  1.29.6.8      yamt {
   1059  1.29.6.8      yamt 	struct schedstate_percpu *spc;
   1060  1.29.6.8      yamt 	KASSERT(lwp_locked(l, NULL));
   1061  1.29.6.8      yamt 
   1062  1.29.6.8      yamt 	if (l->l_cpu == ci) {
   1063  1.29.6.8      yamt 		lwp_unlock(l);
   1064  1.29.6.8      yamt 		return;
   1065  1.29.6.8      yamt 	}
   1066  1.29.6.8      yamt 
   1067  1.29.6.8      yamt 	spc = &ci->ci_schedstate;
   1068  1.29.6.8      yamt 	switch (l->l_stat) {
   1069  1.29.6.8      yamt 	case LSRUN:
   1070  1.29.6.8      yamt 		if (l->l_flag & LW_INMEM) {
   1071  1.29.6.8      yamt 			l->l_target_cpu = ci;
   1072  1.29.6.8      yamt 			break;
   1073  1.29.6.8      yamt 		}
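                             		/* FALLTHROUGH: not resident in memory, handle like LSIDL */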
   1074  1.29.6.8      yamt 	case LSIDL:
   1075  1.29.6.8      yamt 		l->l_cpu = ci;
   1076  1.29.6.8      yamt 		lwp_unlock_to(l, spc->spc_mutex);
   1077  1.29.6.8      yamt 		KASSERT(!mutex_owned(spc->spc_mutex));
   1078  1.29.6.8      yamt 		return;
   1079  1.29.6.8      yamt 	case LSSLEEP:
   1080  1.29.6.8      yamt 		l->l_cpu = ci;
   1081  1.29.6.8      yamt 		break;
   1082  1.29.6.8      yamt 	case LSSTOP:
   1083  1.29.6.8      yamt 	case LSSUSPENDED:
   1084  1.29.6.8      yamt 		if (l->l_wchan != NULL) {
   1085  1.29.6.8      yamt 			l->l_cpu = ci;
   1086  1.29.6.8      yamt 			break;
   1087  1.29.6.8      yamt 		}
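                             		/* FALLTHROUGH: no wait channel, handle like LSONPROC */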
   1088  1.29.6.8      yamt 	case LSONPROC:
   1089  1.29.6.8      yamt 		l->l_target_cpu = ci;
   1090  1.29.6.8      yamt 		break;
   1091  1.29.6.8      yamt 	}
   1092  1.29.6.8      yamt 	lwp_unlock(l);
   1093  1.29.6.8      yamt }
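
                             /*
                              * Illustrative sketch (not part of the original source): a caller
                              * must lock the LWP before lwp_migrate(), which consumes the lock.
                              * The helper name is hypothetical.
                              */
                             #if 0
                             static void
                             example_migrate(lwp_t *l, struct cpu_info *ci)
                             {

                             	lwp_lock(l);		/* lwp_migrate() expects l locked... */
                             	lwp_migrate(l, ci);	/* ...and unlocks it on all paths */
                             }
                             #endif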
   1094  1.29.6.8      yamt 
   1095  1.29.6.8      yamt /*
    1096  1.29.6.8      yamt  * Look up an LWP by process ID and LWP ID.
    1097  1.29.6.8      yamt  * On success, the LWP is returned locked.
   1098  1.29.6.8      yamt  */
   1099  1.29.6.8      yamt struct lwp *
   1100  1.29.6.8      yamt lwp_find2(pid_t pid, lwpid_t lid)
   1101  1.29.6.8      yamt {
   1102  1.29.6.8      yamt 	proc_t *p;
   1103  1.29.6.8      yamt 	lwp_t *l;
   1104  1.29.6.8      yamt 
   1105  1.29.6.8      yamt 	/* Find the process */
   1106  1.29.6.8      yamt 	p = p_find(pid, PFIND_UNLOCK_FAIL);
   1107  1.29.6.8      yamt 	if (p == NULL)
   1108  1.29.6.8      yamt 		return NULL;
   1109  1.29.6.8      yamt 	mutex_enter(&p->p_smutex);
   1110  1.29.6.8      yamt 	mutex_exit(&proclist_lock);
   1111  1.29.6.8      yamt 
   1112  1.29.6.8      yamt 	/* Find the thread */
   1113  1.29.6.8      yamt 	l = lwp_find(p, lid);
   1114  1.29.6.8      yamt 	if (l != NULL)
   1115  1.29.6.8      yamt 		lwp_lock(l);
   1116  1.29.6.8      yamt 	mutex_exit(&p->p_smutex);
   1117  1.29.6.8      yamt 
   1118  1.29.6.8      yamt 	return l;
   1119  1.29.6.8      yamt }
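
                             /*
                              * Illustrative sketch (assumption, not original source): a typical
                              * lwp_find2() caller owns the LWP lock on success and must drop it.
                              */
                             #if 0
                             static int
                             example_probe(pid_t pid, lwpid_t lid)
                             {
                             	lwp_t *l;

                             	if ((l = lwp_find2(pid, lid)) == NULL)
                             		return ESRCH;	/* no such process, or no such LWP */
                             	/* ... inspect l while it is locked ... */
                             	lwp_unlock(l);
                             	return 0;
                             }
                             #endif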
   1120  1.29.6.8      yamt 
   1121  1.29.6.8      yamt /*
    1122  1.29.6.3      yamt  * Look up a live LWP within the specified process, and return it locked.
   1123  1.29.6.3      yamt  *
   1124  1.29.6.3      yamt  * Must be called with p->p_smutex held.
   1125  1.29.6.3      yamt  */
   1126  1.29.6.3      yamt struct lwp *
   1127  1.29.6.3      yamt lwp_find(struct proc *p, int id)
   1128  1.29.6.3      yamt {
   1129  1.29.6.3      yamt 	struct lwp *l;
   1130  1.29.6.3      yamt 
   1131  1.29.6.4      yamt 	KASSERT(mutex_owned(&p->p_smutex));
   1132  1.29.6.3      yamt 
   1133  1.29.6.3      yamt 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
   1134  1.29.6.3      yamt 		if (l->l_lid == id)
   1135  1.29.6.3      yamt 			break;
   1136  1.29.6.3      yamt 	}
   1137  1.29.6.3      yamt 
   1138  1.29.6.3      yamt 	/*
   1139  1.29.6.3      yamt 	 * No need to lock - all of these conditions will
   1140  1.29.6.3      yamt 	 * be visible with the process level mutex held.
   1141  1.29.6.3      yamt 	 */
   1142  1.29.6.3      yamt 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
   1143  1.29.6.3      yamt 		l = NULL;
   1144  1.29.6.3      yamt 
   1145  1.29.6.3      yamt 	return l;
   1146  1.29.6.3      yamt }
   1147  1.29.6.3      yamt 
   1148  1.29.6.3      yamt /*
   1149  1.29.6.2      yamt  * Update an LWP's cached credentials to mirror the process' master copy.
   1150  1.29.6.2      yamt  *
   1151  1.29.6.2      yamt  * This happens early in the syscall path, on user trap, and on LWP
   1152  1.29.6.2      yamt  * creation.  A long-running LWP can also voluntarily choose to update
   1153  1.29.6.2      yamt  * it's credentials by calling this routine.  This may be called from
    1154  1.29.6.2      yamt  * its credentials by calling this routine.  This may be called from
   1155  1.29.6.2      yamt  */
   1156  1.29.6.2      yamt void
   1157  1.29.6.2      yamt lwp_update_creds(struct lwp *l)
   1158  1.29.6.2      yamt {
   1159  1.29.6.2      yamt 	kauth_cred_t oc;
   1160  1.29.6.2      yamt 	struct proc *p;
   1161  1.29.6.2      yamt 
   1162  1.29.6.2      yamt 	p = l->l_proc;
   1163  1.29.6.2      yamt 	oc = l->l_cred;
   1164  1.29.6.2      yamt 
   1165  1.29.6.3      yamt 	mutex_enter(&p->p_mutex);
   1166  1.29.6.2      yamt 	kauth_cred_hold(p->p_cred);
   1167  1.29.6.2      yamt 	l->l_cred = p->p_cred;
   1168  1.29.6.3      yamt 	mutex_exit(&p->p_mutex);
   1169  1.29.6.8      yamt 	if (oc != NULL)
   1170  1.29.6.2      yamt 		kauth_cred_free(oc);
   1171  1.29.6.3      yamt }
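
                             /*
                              * Illustrative sketch: the kind of cheap unlocked test described
                              * above.  This is an assumption about LWP_CACHE_CREDS(), which is
                              * defined elsewhere, not a copy of it.
                              */
                             #if 0
                             #define	EXAMPLE_CACHE_CREDS(l, p)			\
                             	do {						\
                             		if ((l)->l_cred != (p)->p_cred)		\
                             			lwp_update_creds(l);		\
                             	} while (/* CONSTCOND */ 0)
                             #endif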
   1172  1.29.6.3      yamt 
   1173  1.29.6.3      yamt /*
   1174  1.29.6.3      yamt  * Verify that an LWP is locked, and optionally verify that the lock matches
   1175  1.29.6.3      yamt  * one we specify.
   1176  1.29.6.3      yamt  */
   1177  1.29.6.3      yamt int
   1178  1.29.6.3      yamt lwp_locked(struct lwp *l, kmutex_t *mtx)
   1179  1.29.6.3      yamt {
   1180  1.29.6.3      yamt 	kmutex_t *cur = l->l_mutex;
   1181  1.29.6.3      yamt 
   1182  1.29.6.3      yamt 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
   1183  1.29.6.3      yamt }
   1184  1.29.6.3      yamt 
   1185  1.29.6.3      yamt /*
   1186  1.29.6.3      yamt  * Lock an LWP.
   1187  1.29.6.3      yamt  */
   1188  1.29.6.3      yamt void
   1189  1.29.6.3      yamt lwp_lock_retry(struct lwp *l, kmutex_t *old)
   1190  1.29.6.3      yamt {
   1191  1.29.6.3      yamt 
   1192  1.29.6.3      yamt 	/*
   1193  1.29.6.3      yamt 	 * XXXgcc ignoring kmutex_t * volatile on i386
   1194  1.29.6.3      yamt 	 *
   1195  1.29.6.3      yamt 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
   1196  1.29.6.3      yamt 	 */
   1197  1.29.6.3      yamt #if 1
   1198  1.29.6.3      yamt 	while (l->l_mutex != old) {
   1199  1.29.6.3      yamt #else
   1200  1.29.6.3      yamt 	for (;;) {
   1201  1.29.6.3      yamt #endif
   1202  1.29.6.3      yamt 		mutex_spin_exit(old);
   1203  1.29.6.3      yamt 		old = l->l_mutex;
   1204  1.29.6.3      yamt 		mutex_spin_enter(old);
   1205  1.29.6.3      yamt 
   1206  1.29.6.3      yamt 		/*
   1207  1.29.6.3      yamt 		 * mutex_enter() will have posted a read barrier.  Re-test
   1208  1.29.6.3      yamt 		 * l->l_mutex.  If it has changed, we need to try again.
   1209  1.29.6.3      yamt 		 */
   1210  1.29.6.3      yamt #if 1
   1211  1.29.6.3      yamt 	}
   1212  1.29.6.3      yamt #else
   1213  1.29.6.3      yamt 	} while (__predict_false(l->l_mutex != old));
   1214  1.29.6.3      yamt #endif
   1215  1.29.6.3      yamt }
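
                             /*
                              * Illustrative sketch: how a lock routine uses lwp_lock_retry() when
                              * the LWP's lock changes underneath it.  An assumption about the
                              * pattern behind lwp_lock(), which is defined elsewhere.
                              */
                             #if 0
                             static inline void
                             example_lwp_lock(struct lwp *l)
                             {
                             	kmutex_t *old;

                             	mutex_spin_enter(old = l->l_mutex);

                             	/* If the lock changed while we spun on it, chase the new one. */
                             	if (__predict_false(l->l_mutex != old))
                             		lwp_lock_retry(l, old);
                             }
                             #endif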
   1216  1.29.6.3      yamt 
   1217  1.29.6.3      yamt /*
   1218  1.29.6.3      yamt  * Lend a new mutex to an LWP.  The old mutex must be held.
   1219  1.29.6.3      yamt  */
   1220  1.29.6.3      yamt void
   1221  1.29.6.3      yamt lwp_setlock(struct lwp *l, kmutex_t *new)
   1222  1.29.6.3      yamt {
   1223  1.29.6.3      yamt 
   1224  1.29.6.4      yamt 	KASSERT(mutex_owned(l->l_mutex));
   1225  1.29.6.3      yamt 
   1226  1.29.6.7      yamt 	membar_producer();
   1227  1.29.6.3      yamt 	l->l_mutex = new;
   1228  1.29.6.3      yamt }
   1229  1.29.6.3      yamt 
   1230  1.29.6.3      yamt /*
   1231  1.29.6.3      yamt  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
   1232  1.29.6.3      yamt  * must be held.
   1233  1.29.6.3      yamt  */
   1234  1.29.6.3      yamt void
   1235  1.29.6.3      yamt lwp_unlock_to(struct lwp *l, kmutex_t *new)
   1236  1.29.6.3      yamt {
   1237  1.29.6.3      yamt 	kmutex_t *old;
   1238  1.29.6.3      yamt 
   1239  1.29.6.4      yamt 	KASSERT(mutex_owned(l->l_mutex));
   1240  1.29.6.3      yamt 
   1241  1.29.6.3      yamt 	old = l->l_mutex;
   1242  1.29.6.7      yamt 	membar_producer();
   1243  1.29.6.3      yamt 	l->l_mutex = new;
   1244  1.29.6.3      yamt 	mutex_spin_exit(old);
   1245  1.29.6.3      yamt }
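
                             /*
                              * Illustrative sketch (hypothetical helper): handing an LWP over to
                              * another subsystem's spin mutex, as lwp_migrate() does above with
                              * spc_mutex.  Afterwards we hold neither lock, so the LWP must not
                              * be touched again here.
                              */
                             #if 0
                             static void
                             example_handoff(struct lwp *l, kmutex_t *new)
                             {

                             	KASSERT(lwp_locked(l, NULL));

                             	/* ... final updates while the old lock is still held ... */
                             	lwp_unlock_to(l, new);
                             	KASSERT(!mutex_owned(new));
                             }
                             #endif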
   1246  1.29.6.3      yamt 
   1247  1.29.6.3      yamt /*
   1248  1.29.6.3      yamt  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
   1249  1.29.6.3      yamt  * locked.
   1250  1.29.6.3      yamt  */
   1251  1.29.6.3      yamt void
   1252  1.29.6.3      yamt lwp_relock(struct lwp *l, kmutex_t *new)
   1253  1.29.6.3      yamt {
   1254  1.29.6.3      yamt 	kmutex_t *old;
   1255  1.29.6.3      yamt 
   1256  1.29.6.4      yamt 	KASSERT(mutex_owned(l->l_mutex));
   1257  1.29.6.3      yamt 
   1258  1.29.6.3      yamt 	old = l->l_mutex;
   1259  1.29.6.3      yamt 	if (old != new) {
   1260  1.29.6.3      yamt 		mutex_spin_enter(new);
   1261  1.29.6.3      yamt 		l->l_mutex = new;
   1262  1.29.6.3      yamt 		mutex_spin_exit(old);
   1263  1.29.6.3      yamt 	}
   1264  1.29.6.4      yamt }
   1265  1.29.6.4      yamt 
   1266  1.29.6.4      yamt int
   1267  1.29.6.4      yamt lwp_trylock(struct lwp *l)
   1268  1.29.6.4      yamt {
   1269  1.29.6.4      yamt 	kmutex_t *old;
   1270  1.29.6.4      yamt 
   1271  1.29.6.4      yamt 	for (;;) {
   1272  1.29.6.4      yamt 		if (!mutex_tryenter(old = l->l_mutex))
   1273  1.29.6.4      yamt 			return 0;
   1274  1.29.6.4      yamt 		if (__predict_true(l->l_mutex == old))
   1275  1.29.6.4      yamt 			return 1;
   1276  1.29.6.4      yamt 		mutex_spin_exit(old);
   1277  1.29.6.4      yamt 	}
   1278  1.29.6.3      yamt }
   1279  1.29.6.3      yamt 
   1280  1.29.6.3      yamt /*
    1281  1.29.6.3      yamt  * Handle exceptions for mi_userret().  Called if any of the flags in
    1282  1.29.6.3      yamt  * the LW_USERRET mask is set.
   1283  1.29.6.3      yamt  */
   1284  1.29.6.3      yamt void
   1285  1.29.6.3      yamt lwp_userret(struct lwp *l)
   1286  1.29.6.3      yamt {
   1287  1.29.6.3      yamt 	struct proc *p;
   1288  1.29.6.3      yamt 	void (*hook)(void);
   1289  1.29.6.3      yamt 	int sig;
   1290  1.29.6.3      yamt 
   1291  1.29.6.3      yamt 	p = l->l_proc;
   1292  1.29.6.3      yamt 
   1293  1.29.6.6      yamt #ifndef __HAVE_FAST_SOFTINTS
   1294  1.29.6.6      yamt 	/* Run pending soft interrupts. */
   1295  1.29.6.6      yamt 	if (l->l_cpu->ci_data.cpu_softints != 0)
   1296  1.29.6.6      yamt 		softint_overlay();
   1297  1.29.6.6      yamt #endif
   1298  1.29.6.6      yamt 
   1299  1.29.6.3      yamt 	/*
   1300  1.29.6.3      yamt 	 * It should be safe to do this read unlocked on a multiprocessor
    1301  1.29.6.3      yamt 	 * system.
   1302  1.29.6.3      yamt 	 */
   1303  1.29.6.3      yamt 	while ((l->l_flag & LW_USERRET) != 0) {
   1304  1.29.6.3      yamt 		/*
   1305  1.29.6.3      yamt 		 * Process pending signals first, unless the process
    1306  1.29.6.4      yamt 		 * is dumping core or exiting, in which case we will
    1307  1.29.6.4      yamt 		 * instead enter the LW_WSUSPEND case below.
   1308  1.29.6.3      yamt 		 */
   1309  1.29.6.4      yamt 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
   1310  1.29.6.4      yamt 		    LW_PENDSIG) {
   1311  1.29.6.3      yamt 			mutex_enter(&p->p_smutex);
   1312  1.29.6.3      yamt 			while ((sig = issignal(l)) != 0)
   1313  1.29.6.3      yamt 				postsig(sig);
   1314  1.29.6.3      yamt 			mutex_exit(&p->p_smutex);
   1315  1.29.6.3      yamt 		}
   1316  1.29.6.3      yamt 
   1317  1.29.6.3      yamt 		/*
   1318  1.29.6.3      yamt 		 * Core-dump or suspend pending.
   1319  1.29.6.3      yamt 		 *
   1320  1.29.6.3      yamt 		 * In case of core dump, suspend ourselves, so that the
   1321  1.29.6.3      yamt 		 * kernel stack and therefore the userland registers saved
   1322  1.29.6.3      yamt 		 * in the trapframe are around for coredump() to write them
   1323  1.29.6.3      yamt 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
   1324  1.29.6.3      yamt 		 * will write the core file out once all other LWPs are
   1325  1.29.6.3      yamt 		 * suspended.
   1326  1.29.6.3      yamt 		 */
   1327  1.29.6.3      yamt 		if ((l->l_flag & LW_WSUSPEND) != 0) {
   1328  1.29.6.3      yamt 			mutex_enter(&p->p_smutex);
   1329  1.29.6.3      yamt 			p->p_nrlwps--;
   1330  1.29.6.3      yamt 			cv_broadcast(&p->p_lwpcv);
   1331  1.29.6.3      yamt 			lwp_lock(l);
   1332  1.29.6.3      yamt 			l->l_stat = LSSUSPENDED;
   1333  1.29.6.3      yamt 			mutex_exit(&p->p_smutex);
   1334  1.29.6.4      yamt 			mi_switch(l);
   1335  1.29.6.3      yamt 		}
   1336  1.29.6.3      yamt 
   1337  1.29.6.3      yamt 		/* Process is exiting. */
   1338  1.29.6.3      yamt 		if ((l->l_flag & LW_WEXIT) != 0) {
   1339  1.29.6.3      yamt 			lwp_exit(l);
   1340  1.29.6.3      yamt 			KASSERT(0);
   1341  1.29.6.3      yamt 			/* NOTREACHED */
   1342  1.29.6.3      yamt 		}
   1343  1.29.6.3      yamt 
   1344  1.29.6.3      yamt 		/* Call userret hook; used by Linux emulation. */
   1345  1.29.6.3      yamt 		if ((l->l_flag & LW_WUSERRET) != 0) {
   1346  1.29.6.3      yamt 			lwp_lock(l);
   1347  1.29.6.3      yamt 			l->l_flag &= ~LW_WUSERRET;
   1348  1.29.6.3      yamt 			lwp_unlock(l);
   1349  1.29.6.3      yamt 			hook = p->p_userret;
   1350  1.29.6.3      yamt 			p->p_userret = NULL;
   1351  1.29.6.3      yamt 			(*hook)();
   1352  1.29.6.3      yamt 		}
   1353  1.29.6.3      yamt 	}
   1354  1.29.6.3      yamt }
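
                             /*
                              * Illustrative sketch: the unlocked test that return-to-user code is
                              * expected to make before calling lwp_userret().  An assumption about
                              * mi_userret(), which lives elsewhere.
                              */
                             #if 0
                             static inline void
                             example_userret(struct lwp *l)
                             {

                             	if (__predict_false((l->l_flag & LW_USERRET) != 0))
                             		lwp_userret(l);
                             }
                             #endif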
   1355  1.29.6.3      yamt 
   1356  1.29.6.3      yamt /*
   1357  1.29.6.3      yamt  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
   1358  1.29.6.3      yamt  */
   1359  1.29.6.3      yamt void
   1360  1.29.6.3      yamt lwp_need_userret(struct lwp *l)
   1361  1.29.6.3      yamt {
   1362  1.29.6.4      yamt 	KASSERT(lwp_locked(l, NULL));
   1363  1.29.6.3      yamt 
   1364  1.29.6.3      yamt 	/*
   1365  1.29.6.3      yamt 	 * Since the tests in lwp_userret() are done unlocked, make sure
   1366  1.29.6.3      yamt 	 * that the condition will be seen before forcing the LWP to enter
   1367  1.29.6.3      yamt 	 * kernel mode.
   1368  1.29.6.3      yamt 	 */
   1369  1.29.6.7      yamt 	membar_producer();
   1370  1.29.6.3      yamt 	cpu_signotify(l);
   1371  1.29.6.3      yamt }
   1372  1.29.6.3      yamt 
   1373  1.29.6.3      yamt /*
   1374  1.29.6.3      yamt  * Add one reference to an LWP.  This will prevent the LWP from
    1375  1.29.6.3      yamt  * exiting, thus keeping the lwp structure and PCB around to inspect.
   1376  1.29.6.3      yamt  */
   1377  1.29.6.3      yamt void
   1378  1.29.6.3      yamt lwp_addref(struct lwp *l)
   1379  1.29.6.3      yamt {
   1380  1.29.6.3      yamt 
   1381  1.29.6.4      yamt 	KASSERT(mutex_owned(&l->l_proc->p_smutex));
   1382  1.29.6.3      yamt 	KASSERT(l->l_stat != LSZOMB);
   1383  1.29.6.3      yamt 	KASSERT(l->l_refcnt != 0);
   1384  1.29.6.3      yamt 
   1385  1.29.6.3      yamt 	l->l_refcnt++;
   1386  1.29.6.3      yamt }
   1387  1.29.6.3      yamt 
   1388  1.29.6.3      yamt /*
   1389  1.29.6.3      yamt  * Remove one reference to an LWP.  If this is the last reference,
   1390  1.29.6.3      yamt  * then we must finalize the LWP's death.
   1391  1.29.6.3      yamt  */
   1392  1.29.6.3      yamt void
   1393  1.29.6.3      yamt lwp_delref(struct lwp *l)
   1394  1.29.6.3      yamt {
   1395  1.29.6.3      yamt 	struct proc *p = l->l_proc;
   1396  1.29.6.3      yamt 
   1397  1.29.6.3      yamt 	mutex_enter(&p->p_smutex);
   1398  1.29.6.5      yamt 	KASSERT(l->l_stat != LSZOMB);
   1399  1.29.6.5      yamt 	KASSERT(l->l_refcnt > 0);
   1400  1.29.6.3      yamt 	if (--l->l_refcnt == 0)
   1401  1.29.6.6      yamt 		cv_broadcast(&p->p_lwpcv);
   1402  1.29.6.3      yamt 	mutex_exit(&p->p_smutex);
   1403  1.29.6.3      yamt }
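
                             /*
                              * Illustrative sketch (hypothetical caller): holding an LWP across a
                              * potentially blocking operation.  The reference is taken under
                              * p_smutex and prevents final exit until lwp_delref().
                              */
                             #if 0
                             static void
                             example_inspect(struct lwp *l)
                             {
                             	struct proc *p = l->l_proc;

                             	mutex_enter(&p->p_smutex);
                             	lwp_addref(l);
                             	mutex_exit(&p->p_smutex);

                             	/* ... examine the LWP's PCB, possibly sleeping ... */

                             	lwp_delref(l);		/* may wake a waiter in lwp_drainrefs() */
                             }
                             #endif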
   1404  1.29.6.3      yamt 
   1405  1.29.6.3      yamt /*
   1406  1.29.6.3      yamt  * Drain all references to the current LWP.
   1407  1.29.6.3      yamt  */
   1408  1.29.6.3      yamt void
   1409  1.29.6.3      yamt lwp_drainrefs(struct lwp *l)
   1410  1.29.6.3      yamt {
   1411  1.29.6.3      yamt 	struct proc *p = l->l_proc;
   1412  1.29.6.3      yamt 
   1413  1.29.6.4      yamt 	KASSERT(mutex_owned(&p->p_smutex));
   1414  1.29.6.3      yamt 	KASSERT(l->l_refcnt != 0);
   1415  1.29.6.3      yamt 
   1416  1.29.6.3      yamt 	l->l_refcnt--;
   1417  1.29.6.3      yamt 	while (l->l_refcnt != 0)
   1418  1.29.6.6      yamt 		cv_wait(&p->p_lwpcv, &p->p_smutex);
   1419  1.29.6.2      yamt }
   1420  1.29.6.2      yamt 
   1421  1.29.6.2      yamt /*
   1422  1.29.6.2      yamt  * lwp_specific_key_create --
   1423  1.29.6.2      yamt  *	Create a key for subsystem lwp-specific data.
   1424  1.29.6.2      yamt  */
   1425  1.29.6.2      yamt int
   1426  1.29.6.2      yamt lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1427  1.29.6.2      yamt {
   1428  1.29.6.2      yamt 
   1429  1.29.6.2      yamt 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
   1430  1.29.6.2      yamt }
   1431  1.29.6.2      yamt 
   1432  1.29.6.2      yamt /*
   1433  1.29.6.2      yamt  * lwp_specific_key_delete --
   1434  1.29.6.2      yamt  *	Delete a key for subsystem lwp-specific data.
   1435  1.29.6.2      yamt  */
   1436  1.29.6.2      yamt void
   1437  1.29.6.2      yamt lwp_specific_key_delete(specificdata_key_t key)
   1438  1.29.6.2      yamt {
   1439  1.29.6.2      yamt 
   1440  1.29.6.2      yamt 	specificdata_key_delete(lwp_specificdata_domain, key);
   1441  1.29.6.2      yamt }
   1442  1.29.6.2      yamt 
   1443  1.29.6.2      yamt /*
   1444  1.29.6.2      yamt  * lwp_initspecific --
   1445  1.29.6.2      yamt  *	Initialize an LWP's specificdata container.
   1446  1.29.6.2      yamt  */
   1447  1.29.6.2      yamt void
   1448  1.29.6.2      yamt lwp_initspecific(struct lwp *l)
   1449  1.29.6.2      yamt {
   1450  1.29.6.2      yamt 	int error;
   1451  1.29.6.2      yamt 
   1452  1.29.6.2      yamt 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
   1453  1.29.6.2      yamt 	KASSERT(error == 0);
   1454  1.29.6.2      yamt }
   1455  1.29.6.2      yamt 
   1456  1.29.6.2      yamt /*
   1457  1.29.6.2      yamt  * lwp_finispecific --
   1458  1.29.6.2      yamt  *	Finalize an LWP's specificdata container.
   1459  1.29.6.2      yamt  */
   1460  1.29.6.2      yamt void
   1461  1.29.6.2      yamt lwp_finispecific(struct lwp *l)
   1462  1.29.6.2      yamt {
   1463  1.29.6.2      yamt 
   1464  1.29.6.2      yamt 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
   1465  1.29.6.2      yamt }
   1466  1.29.6.2      yamt 
   1467  1.29.6.2      yamt /*
   1468  1.29.6.2      yamt  * lwp_getspecific --
   1469  1.29.6.2      yamt  *	Return lwp-specific data corresponding to the specified key.
   1470  1.29.6.2      yamt  *
   1471  1.29.6.2      yamt  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
   1472  1.29.6.2      yamt  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
    1473  1.29.6.2      yamt  *	LWP's specific data, care must be taken to ensure that doing so
   1474  1.29.6.2      yamt  *	would not cause internal data structure inconsistency (i.e. caller
   1475  1.29.6.2      yamt  *	can guarantee that the target LWP is not inside an lwp_getspecific()
   1476  1.29.6.2      yamt  *	or lwp_setspecific() call).
   1477  1.29.6.2      yamt  */
   1478  1.29.6.2      yamt void *
   1479  1.29.6.2      yamt lwp_getspecific(specificdata_key_t key)
   1480  1.29.6.2      yamt {
   1481  1.29.6.2      yamt 
   1482  1.29.6.2      yamt 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1483  1.29.6.2      yamt 						  &curlwp->l_specdataref, key));
   1484  1.29.6.2      yamt }
   1485  1.29.6.2      yamt 
   1486  1.29.6.2      yamt void *
   1487  1.29.6.2      yamt _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
   1488  1.29.6.2      yamt {
   1489  1.29.6.2      yamt 
   1490  1.29.6.2      yamt 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
   1491  1.29.6.2      yamt 						  &l->l_specdataref, key));
   1492  1.29.6.2      yamt }
   1493  1.29.6.2      yamt 
   1494  1.29.6.2      yamt /*
   1495  1.29.6.2      yamt  * lwp_setspecific --
   1496  1.29.6.2      yamt  *	Set lwp-specific data corresponding to the specified key.
   1497  1.29.6.2      yamt  */
   1498  1.29.6.2      yamt void
   1499  1.29.6.2      yamt lwp_setspecific(specificdata_key_t key, void *data)
   1500  1.29.6.2      yamt {
   1501  1.29.6.2      yamt 
   1502  1.29.6.2      yamt 	specificdata_setspecific(lwp_specificdata_domain,
   1503  1.29.6.2      yamt 				 &curlwp->l_specdataref, key, data);
   1504  1.29.6.2      yamt }
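
                             /*
                              * Illustrative sketch (hypothetical subsystem): keeping one private
                              * pointer per LWP with the interface above.  The key, destructor and
                              * payload are invented for the example.
                              */
                             #if 0
                             static specificdata_key_t example_key;

                             static void
                             example_dtor(void *arg)
                             {

                             	kmem_free(arg, sizeof(int));
                             }

                             static void
                             example_use(void)
                             {
                             	int *count;

                             	/* Setup, done once: lwp_specific_key_create(&example_key, example_dtor) */
                             	if ((count = lwp_getspecific(example_key)) == NULL) {
                             		count = kmem_alloc(sizeof(*count), KM_SLEEP);
                             		*count = 0;
                             		lwp_setspecific(example_key, count);
                             	}
                             	(*count)++;	/* curlwp's own data: no locking required */
                             }
                             #endif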
   1505  1.29.6.6      yamt 
   1506  1.29.6.6      yamt /*
   1507  1.29.6.6      yamt  * Allocate a new lwpctl structure for a user LWP.
   1508  1.29.6.6      yamt  */
   1509  1.29.6.6      yamt int
   1510  1.29.6.6      yamt lwp_ctl_alloc(vaddr_t *uaddr)
   1511  1.29.6.6      yamt {
   1512  1.29.6.6      yamt 	lcproc_t *lp;
   1513  1.29.6.6      yamt 	u_int bit, i, offset;
   1514  1.29.6.6      yamt 	struct uvm_object *uao;
   1515  1.29.6.6      yamt 	int error;
   1516  1.29.6.6      yamt 	lcpage_t *lcp;
   1517  1.29.6.6      yamt 	proc_t *p;
   1518  1.29.6.6      yamt 	lwp_t *l;
   1519  1.29.6.6      yamt 
   1520  1.29.6.6      yamt 	l = curlwp;
   1521  1.29.6.6      yamt 	p = l->l_proc;
   1522  1.29.6.6      yamt 
   1523  1.29.6.7      yamt 	if (l->l_lcpage != NULL) {
   1524  1.29.6.7      yamt 		lcp = l->l_lcpage;
   1525  1.29.6.7      yamt 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
   1526  1.29.6.6      yamt 		return (EINVAL);
   1527  1.29.6.7      yamt 	}
   1528  1.29.6.6      yamt 
   1529  1.29.6.6      yamt 	/* First time around, allocate header structure for the process. */
   1530  1.29.6.6      yamt 	if ((lp = p->p_lwpctl) == NULL) {
   1531  1.29.6.6      yamt 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
   1532  1.29.6.6      yamt 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
   1533  1.29.6.6      yamt 		lp->lp_uao = NULL;
   1534  1.29.6.6      yamt 		TAILQ_INIT(&lp->lp_pages);
   1535  1.29.6.6      yamt 		mutex_enter(&p->p_mutex);
   1536  1.29.6.6      yamt 		if (p->p_lwpctl == NULL) {
   1537  1.29.6.6      yamt 			p->p_lwpctl = lp;
   1538  1.29.6.6      yamt 			mutex_exit(&p->p_mutex);
   1539  1.29.6.6      yamt 		} else {
   1540  1.29.6.6      yamt 			mutex_exit(&p->p_mutex);
   1541  1.29.6.6      yamt 			mutex_destroy(&lp->lp_lock);
   1542  1.29.6.6      yamt 			kmem_free(lp, sizeof(*lp));
   1543  1.29.6.6      yamt 			lp = p->p_lwpctl;
   1544  1.29.6.6      yamt 		}
   1545  1.29.6.6      yamt 	}
   1546  1.29.6.6      yamt 
    1547  1.29.6.6      yamt 	/*
    1548  1.29.6.6      yamt 	 * Set up an anonymous memory region to hold the shared pages.
    1549  1.29.6.6      yamt 	 * Map them into the process' address space.  The user vmspace
    1550  1.29.6.6      yamt 	 * gets the first reference on the UAO.
    1551  1.29.6.6      yamt 	 */
   1552  1.29.6.6      yamt 	mutex_enter(&lp->lp_lock);
   1553  1.29.6.6      yamt 	if (lp->lp_uao == NULL) {
   1554  1.29.6.6      yamt 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
   1555  1.29.6.6      yamt 		lp->lp_cur = 0;
   1556  1.29.6.6      yamt 		lp->lp_max = LWPCTL_UAREA_SZ;
   1557  1.29.6.6      yamt 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
   1558  1.29.6.6      yamt 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
   1559  1.29.6.6      yamt 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
   1560  1.29.6.6      yamt 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
   1561  1.29.6.6      yamt 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
   1562  1.29.6.6      yamt 		if (error != 0) {
   1563  1.29.6.6      yamt 			uao_detach(lp->lp_uao);
   1564  1.29.6.6      yamt 			lp->lp_uao = NULL;
   1565  1.29.6.6      yamt 			mutex_exit(&lp->lp_lock);
   1566  1.29.6.6      yamt 			return error;
   1567  1.29.6.6      yamt 		}
   1568  1.29.6.6      yamt 	}
   1569  1.29.6.6      yamt 
   1570  1.29.6.6      yamt 	/* Get a free block and allocate for this LWP. */
   1571  1.29.6.6      yamt 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
   1572  1.29.6.6      yamt 		if (lcp->lcp_nfree != 0)
   1573  1.29.6.6      yamt 			break;
   1574  1.29.6.6      yamt 	}
   1575  1.29.6.6      yamt 	if (lcp == NULL) {
   1576  1.29.6.6      yamt 		/* Nothing available - try to set up a free page. */
   1577  1.29.6.6      yamt 		if (lp->lp_cur == lp->lp_max) {
   1578  1.29.6.6      yamt 			mutex_exit(&lp->lp_lock);
   1579  1.29.6.6      yamt 			return ENOMEM;
   1580  1.29.6.6      yamt 		}
   1581  1.29.6.6      yamt 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
   1582  1.29.6.6      yamt 		if (lcp == NULL) {
   1583  1.29.6.6      yamt 			mutex_exit(&lp->lp_lock);
   1584  1.29.6.6      yamt 			return ENOMEM;
   1585  1.29.6.6      yamt 		}
   1586  1.29.6.6      yamt 		/*
   1587  1.29.6.6      yamt 		 * Wire the next page down in kernel space.  Since this
   1588  1.29.6.6      yamt 		 * is a new mapping, we must add a reference.
   1589  1.29.6.6      yamt 		 */
   1590  1.29.6.6      yamt 		uao = lp->lp_uao;
   1591  1.29.6.6      yamt 		(*uao->pgops->pgo_reference)(uao);
   1592  1.29.6.6      yamt 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
   1593  1.29.6.6      yamt 		    uao, lp->lp_cur, PAGE_SIZE,
   1594  1.29.6.6      yamt 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
   1595  1.29.6.6      yamt 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
   1596  1.29.6.6      yamt 		if (error != 0) {
   1597  1.29.6.6      yamt 			mutex_exit(&lp->lp_lock);
   1598  1.29.6.6      yamt 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1599  1.29.6.6      yamt 			(*uao->pgops->pgo_detach)(uao);
   1600  1.29.6.6      yamt 			return error;
   1601  1.29.6.6      yamt 		}
   1602  1.29.6.8      yamt 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
   1603  1.29.6.8      yamt 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
   1604  1.29.6.8      yamt 		if (error != 0) {
   1605  1.29.6.8      yamt 			mutex_exit(&lp->lp_lock);
   1606  1.29.6.8      yamt 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1607  1.29.6.8      yamt 			    lcp->lcp_kaddr + PAGE_SIZE);
   1608  1.29.6.8      yamt 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1609  1.29.6.8      yamt 			return error;
   1610  1.29.6.8      yamt 		}
   1611  1.29.6.6      yamt 		/* Prepare the page descriptor and link into the list. */
   1612  1.29.6.6      yamt 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
   1613  1.29.6.6      yamt 		lp->lp_cur += PAGE_SIZE;
   1614  1.29.6.6      yamt 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
   1615  1.29.6.6      yamt 		lcp->lcp_rotor = 0;
   1616  1.29.6.6      yamt 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
   1617  1.29.6.6      yamt 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1618  1.29.6.6      yamt 	}
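                             	/*
                             	 * Scan for a bitmap word with a free slot.  The loop always
                             	 * terminates, since lcp_nfree != 0 implies a non-zero word.
                             	 */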
   1619  1.29.6.6      yamt 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
   1620  1.29.6.6      yamt 		if (++i >= LWPCTL_BITMAP_ENTRIES)
   1621  1.29.6.6      yamt 			i = 0;
   1622  1.29.6.6      yamt 	}
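                             	/*
                             	 * Each bitmap word covers 32 slots, so the slot number is
                             	 * (word << 5) + bit, and the slot lives at
                             	 * slot * sizeof(lwpctl_t) from the page base, both in the
                             	 * kernel and the user mapping.
                             	 */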
   1623  1.29.6.6      yamt 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
   1624  1.29.6.6      yamt 	lcp->lcp_bitmap[i] ^= (1 << bit);
   1625  1.29.6.6      yamt 	lcp->lcp_rotor = i;
   1626  1.29.6.6      yamt 	lcp->lcp_nfree--;
   1627  1.29.6.6      yamt 	l->l_lcpage = lcp;
   1628  1.29.6.6      yamt 	offset = (i << 5) + bit;
   1629  1.29.6.6      yamt 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
   1630  1.29.6.6      yamt 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
   1631  1.29.6.6      yamt 	mutex_exit(&lp->lp_lock);
   1632  1.29.6.6      yamt 
   1633  1.29.6.6      yamt 	l->l_lwpctl->lc_curcpu = (short)curcpu()->ci_data.cpu_index;
   1634  1.29.6.6      yamt 
   1635  1.29.6.6      yamt 	return 0;
   1636  1.29.6.6      yamt }
   1637  1.29.6.6      yamt 
   1638  1.29.6.6      yamt /*
   1639  1.29.6.6      yamt  * Free an lwpctl structure back to the per-process list.
   1640  1.29.6.6      yamt  */
   1641  1.29.6.6      yamt void
   1642  1.29.6.6      yamt lwp_ctl_free(lwp_t *l)
   1643  1.29.6.6      yamt {
   1644  1.29.6.6      yamt 	lcproc_t *lp;
   1645  1.29.6.6      yamt 	lcpage_t *lcp;
   1646  1.29.6.6      yamt 	u_int map, offset;
   1647  1.29.6.6      yamt 
   1648  1.29.6.6      yamt 	lp = l->l_proc->p_lwpctl;
   1649  1.29.6.6      yamt 	KASSERT(lp != NULL);
   1650  1.29.6.6      yamt 
   1651  1.29.6.6      yamt 	lcp = l->l_lcpage;
   1652  1.29.6.6      yamt 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
   1653  1.29.6.6      yamt 	KASSERT(offset < LWPCTL_PER_PAGE);
   1654  1.29.6.6      yamt 
   1655  1.29.6.6      yamt 	mutex_enter(&lp->lp_lock);
   1656  1.29.6.6      yamt 	lcp->lcp_nfree++;
   1657  1.29.6.6      yamt 	map = offset >> 5;
   1658  1.29.6.6      yamt 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
   1659  1.29.6.6      yamt 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
   1660  1.29.6.6      yamt 		lcp->lcp_rotor = map;
   1661  1.29.6.6      yamt 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
   1662  1.29.6.6      yamt 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
   1663  1.29.6.6      yamt 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
   1664  1.29.6.6      yamt 	}
   1665  1.29.6.6      yamt 	mutex_exit(&lp->lp_lock);
   1666  1.29.6.6      yamt }
   1667  1.29.6.6      yamt 
   1668  1.29.6.6      yamt /*
   1669  1.29.6.6      yamt  * Process is exiting; tear down lwpctl state.  This can only be safely
   1670  1.29.6.6      yamt  * called by the last LWP in the process.
   1671  1.29.6.6      yamt  */
   1672  1.29.6.6      yamt void
   1673  1.29.6.6      yamt lwp_ctl_exit(void)
   1674  1.29.6.6      yamt {
   1675  1.29.6.6      yamt 	lcpage_t *lcp, *next;
   1676  1.29.6.6      yamt 	lcproc_t *lp;
   1677  1.29.6.6      yamt 	proc_t *p;
   1678  1.29.6.6      yamt 	lwp_t *l;
   1679  1.29.6.6      yamt 
   1680  1.29.6.6      yamt 	l = curlwp;
   1681  1.29.6.6      yamt 	l->l_lwpctl = NULL;
   1682  1.29.6.6      yamt 	p = l->l_proc;
   1683  1.29.6.6      yamt 	lp = p->p_lwpctl;
   1684  1.29.6.6      yamt 
   1685  1.29.6.6      yamt 	KASSERT(lp != NULL);
   1686  1.29.6.6      yamt 	KASSERT(p->p_nlwps == 1);
   1687  1.29.6.6      yamt 
   1688  1.29.6.6      yamt 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
   1689  1.29.6.6      yamt 		next = TAILQ_NEXT(lcp, lcp_chain);
   1690  1.29.6.6      yamt 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
   1691  1.29.6.6      yamt 		    lcp->lcp_kaddr + PAGE_SIZE);
   1692  1.29.6.6      yamt 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
   1693  1.29.6.6      yamt 	}
   1694  1.29.6.6      yamt 
   1695  1.29.6.6      yamt 	if (lp->lp_uao != NULL) {
   1696  1.29.6.6      yamt 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
   1697  1.29.6.6      yamt 		    lp->lp_uva + LWPCTL_UAREA_SZ);
   1698  1.29.6.6      yamt 	}
   1699  1.29.6.6      yamt 
   1700  1.29.6.6      yamt 	mutex_destroy(&lp->lp_lock);
   1701  1.29.6.6      yamt 	kmem_free(lp, sizeof(*lp));
   1702  1.29.6.6      yamt 	p->p_lwpctl = NULL;
   1703  1.29.6.6      yamt }
   1704  1.29.6.8      yamt 
   1705  1.29.6.8      yamt #if defined(DDB)
   1706  1.29.6.8      yamt void
   1707  1.29.6.8      yamt lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1708  1.29.6.8      yamt {
   1709  1.29.6.8      yamt 	lwp_t *l;
   1710  1.29.6.8      yamt 
   1711  1.29.6.8      yamt 	LIST_FOREACH(l, &alllwp, l_list) {
   1712  1.29.6.8      yamt 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
   1713  1.29.6.8      yamt 
   1714  1.29.6.8      yamt 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
   1715  1.29.6.8      yamt 			continue;
   1716  1.29.6.8      yamt 		}
   1717  1.29.6.8      yamt 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
   1718  1.29.6.8      yamt 		    (void *)addr, (void *)stack,
   1719  1.29.6.8      yamt 		    (size_t)(addr - stack), l);
   1720  1.29.6.8      yamt 	}
   1721  1.29.6.8      yamt }
   1722  1.29.6.8      yamt #endif /* defined(DDB) */
   1723