/*	$NetBSD: kern_lwp.c,v 1.40.2.2 2006/10/21 15:20:46 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.40.2.2 2006/10/21 15:20:46 ad Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

struct lwplist	alllwp;
kmutex_t	alllwp_mutex;
kmutex_t	lwp_mutex;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

/*
 * Halt or suspend an LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_halt(struct lwp *curl, struct lwp *t, int state)
{
	struct proc *p = t->l_proc;
	int error;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (L_WEXIT | L_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
		p->p_nrlwps--;
		t->l_stat = state;
		remrunqueue(t);
		break;
	case LSONPROC:
		p->p_nrlwps--;
		t->l_stat = state;
		if (t != curl) {
#ifdef MULTIPROCESSOR
			cpu_need_resched(t->l_cpu);
#elif defined(DIAGNOSTIC)
			panic("lwp_halt: onproc but not self");
#endif
		}
		break;
	case LSSLEEP:
		p->p_nrlwps--;
		/* FALLTHROUGH */
	case LSSUSPENDED:
	case LSSTOP:
		/* XXXAD What about restarting stopped -> suspended?? */
		t->l_stat = state;
		break;
	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		break;
	}

	lwp_swaplock(t, &lwp_mutex);

	return (error);
}
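
#if 0
/*
 * Illustrative sketch, not part of this file's build: how a caller might
 * suspend another LWP in its own process with lwp_halt(), following the
 * locking protocol described above.  The function name, the target 'lid'
 * argument and the error handling are hypothetical.
 */
static int
example_suspend_lwp(struct lwp *curl, lwpid_t lid)
{
	struct proc *p = curl->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == lid)
			break;
	if (t == NULL) {
		mutex_exit(&p->p_smutex);
		return (ESRCH);
	}
	lwp_lock(t);
	error = lwp_halt(curl, t, LSSUSPENDED);	/* unlocks t */
	mutex_exit(&p->p_smutex);

	return (error);
}
#endif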

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	LOCK_ASSERT(mutex_owned(l->l_mutex));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	if (l->l_wchan == NULL) {
		/*
		 * LWP was runnable before being suspended.  setrunnable()
		 * will release the lock.
		 */
		setrunnable(l);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_proc->p_nrlwps++;
		l->l_stat = LSSLEEP;
		lwp_unlock(l);
	}
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error, wpri;
	static const char waitstr1[] = "lwpwait";
	static const char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/*
	 * Check for deadlock:
	 *
	 * 1) If all other LWPs are waiting for exits or suspended.
	 * 2) If we are trying to wait on ourself.
	 *
	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
	 * waits, not any-LWP waits) and detect that sort of deadlock, but
	 * we don't have a good place to store the lwp that is being waited
	 * for. wchan is already filled with &p->p_nlwps, and putting the
	 * lwp address in there for deadlock tracing would require exiting
	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
	 * get threads sleeping on any LWP exiting.
	 */
	if (lwp_lastlive(p->p_nlwpwait) || lid == l->l_lid)
		return (EDEADLK);

	p->p_nlwpwait++;
	wpri = PWAIT;
	if ((flags & LWPWAIT_EXITCONTROL) == 0)
		wpri |= PCATCH;
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;
		nfound++;
		if (l2->l_stat != LSZOMB)
			continue;

		if (departed)
			*departed = l2->l_lid;

		LIST_REMOVE(l2, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		p->p_nlwpwait--;
		/* XXX decrement limits */
		pool_put(&lwp_pool, l2);
		return (0);
	}

	if (nfound == 0) {
		p->p_nlwpwait--;
		return (ESRCH);
	}

	if ((error = mtsleep(&p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0, &p->p_smutex)) != 0)
		return (error);

	goto loop;
}
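
#if 0
/*
 * Illustrative sketch, not part of this file's build: waiting for any
 * undetached LWP in the current process to exit, in the way that an
 * _lwp_wait(2)-style caller might use lwp_wait1().  The function name is
 * hypothetical, and it is assumed here that p_smutex is still held when
 * lwp_wait1() returns.
 */
static int
example_wait_any(struct lwp *l, lwpid_t *departed)
{
	struct proc *p = l->l_proc;
	int error;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, 0, departed, 0);	/* lid == 0: any LWP */
	mutex_exit(&p->p_smutex);

	return (error);
}
#endif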

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	       (unsigned) ((caddr_t)&l2->l_endzero -
			   (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	       (unsigned) ((caddr_t)&l2->l_endcopy -
			   (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	if (p2->p_flag & P_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= L_SYSTEM | L_INMEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);
	l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
	l2->l_mutex = &lwp_mutex;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	mutex_enter(&p2->p_smutex);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	mutex_exit(&p2->p_smutex);

	mutex_enter(&alllwp_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&alllwp_mutex);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
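
#if 0
/*
 * Illustrative sketch, not part of this file's build: creating an LWP in
 * process 'p' and making it runnable.  uvm_uarea_alloc() and setrunqueue()
 * are assumed to be available as elsewhere in the kernel at this time, and
 * the p_nrlwps/run queue bookkeeping follows the conventions used in this
 * file; the function name is hypothetical.
 */
static int
example_create_lwp(struct lwp *l1, struct proc *p, void (*func)(void *),
    void *arg)
{
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	int error;

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0))
		return (ENOMEM);

	error = newlwp(l1, p, uaddr, inmem, 0, NULL, 0, func, arg, &l2);
	if (error != 0)
		return (error);

	/* The new LWP is LSIDL; lock it and make it runnable. */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	p->p_nrlwps++;
	l2->l_stat = LSRUN;
	setrunqueue(l2);
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	return (0);
}
#endif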

/*
 * Exit the calling LWP.  If it is the last live LWP in the process, the
 * whole process exits.  This will call cpu_exit, which will call
 * cpu_switch, so this can only be used meaningfully if you're willing to
 * switch away.  Calling with l!=curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 */
	mutex_enter(&p->p_smutex);
	p->p_nzlwps++;
	if ((p->p_nlwps - p->p_nzlwps) == (p->p_stat == LSONPROC)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	mutex_exit(&p->p_smutex);

	/*
	 * Remove the LWP from the global list, from the parent process and
	 * then mark it as dead.  Nothing should be able to find or update
	 * it past this point.
	 */
	mutex_enter(&alllwp_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&alllwp_mutex);

	/*
	 * Mark us as dead (almost a zombie) and bin any pending signals
	 * that remain undelivered.
	 *
	 * XXX We should put whole-process signals back onto the process's
	 * pending set and find someone else to deliver them.
	 */
	mutex_enter(&p->p_smutex);
	lwp_lock(l);
	if ((l->l_flag & L_DETACHED) != 0) {
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		curlwp = NULL;
		l->l_proc = NULL;
	}
	l->l_stat = LSDEAD;
	lwp_swaplock(l, &lwp_mutex);
	sigclear(&l->l_sigpend, NULL);
	mutex_exit(&p->p_smutex);

	/*
	 * Release our cached credentials and collate accounting flags.
	 */
	kauth_cred_free(l->l_cred);
	mutex_enter(&p->p_crmutex);
	p->p_acflag |= l->l_acflag;
	mutex_exit(&p->p_crmutex);

	/*
	 * Verify that we hold no locks other than the kernel mutex, and
	 * release our turnstile.  We can no longer acquire sleep locks
	 * past this point.
	 */
	LOCKDEBUG_BARRIER(&kernel_mutex, 0);
	pool_cache_put(&turnstile_cache, l->l_ts);

	/*
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif
	pmap_deactivate(l);

	/*
	 * Release the kernel lock, and switch away into oblivion.
	 */
	KERNEL_PROC_UNLOCK(l);
	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{

	KERNEL_LOCK(LK_EXCLUSIVE);

	/*
	 * Free the VM resources we're still holding on to.
	 */
	uvm_lwp_exit(l);

	if (l->l_flag & L_DETACHED) {
		/* Nobody waits for detached LWPs. */
		pool_put(&lwp_pool, l);
		KERNEL_UNLOCK();
	} else {
		KERNEL_UNLOCK();
		lwp_lock(l);
		l->l_stat = LSZOMB;
		lwp_unlock(l);
		wakeup(&l->l_proc->p_nlwps);
	}
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * Must be called with p->p_smutex held, and will return the LWP locked.
 * If 'locking' is false, no locking or lock checks are performed.  This
 * is intended for use by DDB.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking)
		LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
		lwp_lock(l);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (locking)
				lwp_lock(l);
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
			if (locking)
				lwp_unlock(l);
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		if (locking)
			lwp_lock(l);
		return l;
	case SZOMB:
		/* Doesn't really matter... */
		if (nrlwps)
			*nrlwps = 0;
		l = LIST_FIRST(&p->p_lwps);
		if (locking)
			lwp_lock(l);
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
		if (locking)
			mutex_exit(&p->p_smutex);
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in SIDL process %d (%s)",
		    p->p_pid, p->p_comm);
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}
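
#if 0
/*
 * Illustrative sketch, not part of this file's build: using
 * proc_representative_lwp() to answer a per-process query, then dropping
 * the LWP lock again.  The function name is hypothetical.
 */
static lwpid_t
example_representative_lid(struct proc *p)
{
	struct lwp *l;
	lwpid_t lid;

	mutex_enter(&p->p_smutex);
	l = proc_representative_lwp(p, NULL, 1);	/* returned locked */
	lid = l->l_lid;
	lwp_unlock(l);
	mutex_exit(&p->p_smutex);

	return (lid);
}
#endif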

/*
 * Look up a live LWP within the specified process, and return it locked.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_byid(struct proc *p, int id)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	if (l != NULL) {
		lwp_lock(l);
		if (l->l_stat == LSIDL || l->l_stat == LSZOMB ||
		    l->l_stat == LSDEAD) {
			lwp_unlock(l);
			l = NULL;
		}
	}

	return l;
}
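
#if 0
/*
 * Illustrative sketch, not part of this file's build: resuming a
 * suspended LWP by id, combining lwp_byid() with lwp_continue() above.
 * The function name is hypothetical.
 */
static int
example_continue_lwp(struct proc *p, lwpid_t lid)
{
	struct lwp *t;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_byid(p, lid)) == NULL) {	/* returned locked if found */
		mutex_exit(&p->p_smutex);
		return (ESRCH);
	}
	lwp_continue(t);			/* unlocks t */
	mutex_exit(&p->p_smutex);

	return (0);
}
#endif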

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_crmutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_crmutex);
	if (oc != NULL)
		kauth_cred_free(oc);
}
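
#if 0
/*
 * Illustrative sketch, not part of this file's build: the inexpensive
 * check that LWP_CACHE_CREDS() is described above as making before it
 * calls lwp_update_creds(); the real macro lives elsewhere.
 */
static inline void
example_cache_creds(struct lwp *l, struct proc *p)
{

	if (__predict_false(l->l_cred != p->p_cred))
		lwp_update_creds(l);
}
#endif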

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *omutex = l->l_mutex;

	return mutex_owned(l->l_mutex) && (mtx == omutex || mtx == NULL);
}

/*
 * Retry acquiring an LWP's mutex after it has changed.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *omutex)
{

	do {
		mutex_exit(omutex);
		mutex_enter(omutex = l->l_mutex);
	} while (l->l_mutex != omutex);
}
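
#if 0
/*
 * Illustrative sketch, not part of this file's build: the acquire/retry
 * pattern that an lwp_lock()-style caller uses together with
 * lwp_lock_retry().  The LWP's mutex can be changed by lwp_setlock()
 * while we block on it, so re-check after acquiring it.
 */
static void
example_lwp_lock(struct lwp *l)
{
	kmutex_t *mtx = l->l_mutex;

	mutex_enter(mtx);
	if (__predict_false(l->l_mutex != mtx))
		lwp_lock_retry(l, mtx);
}
#endif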

/*
 * Lend a new mutex to an LWP, and release the old mutex.
 *
 * Must be called with the LWP locked.  The new mutex must be held, and must
 * have been acquired before the LWP was locked.
 */
void
lwp_swaplock_linked(struct lwp *l, kmutex_t *new)
{
	kmutex_t *omutex;

	LOCK_ASSERT(mutex_owned(l->l_mutex));
	LOCK_ASSERT(mutex_owned(new));

	omutex = l->l_mutex;
	lwp_setlock(l, new);
	mutex_exit_linked(omutex, new);
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.
 *
 * Must be called with the LWP locked.  The new mutex (if held) must have
 * been acquired after the LWP was locked.
 */
void
lwp_swaplock(struct lwp *l, kmutex_t *new)
{
	kmutex_t *omutex;

	LOCK_ASSERT(mutex_owned(l->l_mutex));

	omutex = l->l_mutex;
	lwp_setlock(l, new);
	mutex_exit(omutex);
}

/*
 * Handle exceptions for mi_userret().  Called if L_USERRET is set.
 *
 * Must be called with the LWP locked.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	int sig, flag;

	p = l->l_proc;
	flag = l->l_flag;

#ifdef MULTIPROCESSOR
	LOCK_ASSERT(lwp_locked(l, NULL));
	lwp_unlock(l);
#endif

	/* Signals must be processed first. */
	if ((flag & L_PENDSIG) != 0) {
		mutex_enter(&p->p_smutex);
		while ((sig = issignal(l)) != 0)
			postsig(sig);
		mutex_exit(&p->p_smutex);
	}

	if ((flag & L_WCORE) != 0) {
		/*
		 * Suspend ourselves, so that the kernel stack and therefore
		 * the userland registers saved in the trapframe are around
		 * for coredump() to write them out.  We issue a wakeup() on
		 * p->p_nrlwps so that sigexit() will write the core file out
		 * once all other LWPs are suspended.
		 */
		KERNEL_PROC_LOCK(l);
		mutex_enter(&p->p_smutex);
		p->p_nrlwps--;
		wakeup(&p->p_nrlwps);
		lwp_lock(l);
		l->l_flag &= ~L_DETACHED;
		l->l_stat = LSSUSPENDED;
		mutex_exit_linked(&p->p_smutex, l->l_mutex);
		mi_switch(l, NULL);
		lwp_exit(l);
		/* NOTREACHED */
	}

	if ((flag & L_WEXIT) != 0) {
		KERNEL_PROC_LOCK(l);
		lwp_exit(l);
		/* NOTREACHED */
	}

#ifdef MULTIPROCESSOR
	lwp_lock(l);
#endif
}
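
#if 0
/*
 * Illustrative sketch, not part of this file's build: the kind of check a
 * mi_userret()-style path makes before calling lwp_userret().  The exact
 * flag mask tested here (L_PENDSIG | L_WCORE | L_WEXIT, the flags handled
 * above) and the function name are assumptions.
 */
static inline void
example_userret(struct lwp *l)
{

	lwp_lock(l);
	if ((l->l_flag & (L_PENDSIG | L_WCORE | L_WEXIT)) != 0)
		lwp_userret(l);		/* returns with the LWP locked */
	lwp_unlock(l);
}
#endif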

/*
 * Return non-zero if this is the last live LWP in the process.  Called when
 * exiting, dumping core, waiting for other LWPs to exit, etc.  Accepts a
 * 'bias' value for deadlock detection.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_lastlive(int bias)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSTOP);

	return p->p_nrlwps - bias - (l->l_stat == LSONPROC) == 0;
}