/*	$NetBSD: kern_lwp.c,v 1.40.2.3 2006/10/24 21:10:21 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.40.2.3 2006/10/24 21:10:21 ad Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;
kmutex_t alllwp_mutex;
kmutex_t lwp_mutex;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

/*
 * Set an LWP halted or suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_halt(struct lwp *curl, struct lwp *t, int state)
{
	struct proc *p = t->l_proc;
	int error;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend
	 * anyone else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (L_WEXIT | L_WCORE)) != 0)
		return (EDEADLK);

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
		p->p_nrlwps--;
		t->l_stat = state;
		remrunqueue(t);
		break;
	case LSONPROC:
		p->p_nrlwps--;
		t->l_stat = state;
		if (t != curl) {
#ifdef MULTIPROCESSOR
			cpu_need_resched(t->l_cpu);
#elif defined(DIAGNOSTIC)
			panic("lwp_halt: onproc but not self");
#endif
		}
		break;
	case LSSLEEP:
		p->p_nrlwps--;
		/* FALLTHROUGH */
	case LSSUSPENDED:
	case LSSTOP:
		/* XXXAD What about restarting stopped -> suspended?? */
		t->l_stat = state;
		break;
	case LSIDL:
	case LSZOMB:
		error = EINTR;	/* It's what Solaris does..... */
		break;
	}

	lwp_setlock_unlock(t, &lwp_mutex);

	return (error);
}
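
/*
 * Illustrative caller sketch (not part of the original file): the locking
 * contract documented above can be exercised roughly as follows, e.g. from
 * a suspend-style syscall.  The target LWP "t" and the error handling are
 * assumptions.
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(t);
 *	error = lwp_halt(curlwp, t, LSSUSPENDED);
 *	(t is now unlocked; p_smutex is still held by the caller)
 *	mutex_exit(&p->p_smutex);
 */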

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	LOCK_ASSERT(mutex_owned(l->l_mutex));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	if (l->l_wchan == NULL) {
		/*
		 * LWP was runnable before being suspended.  setrunnable()
		 * will release the lock.
		 */
		setrunnable(l);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_proc->p_nrlwps++;
		l->l_stat = LSSLEEP;
		lwp_unlock(l);
	}
}
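
/*
 * Illustrative sketch (not from the original file): resuming every
 * suspended LWP in a process might look roughly like this; the loop and
 * the locking order follow the contract documented above.
 *
 *	mutex_enter(&p->p_smutex);
 *	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 *		lwp_lock(l);
 *		lwp_continue(l);	(releases the LWP lock)
 *	}
 *	mutex_exit(&p->p_smutex);
 */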

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error, wpri;
	static const char waitstr1[] = "lwpwait";
	static const char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/*
	 * Check for deadlock:
	 *
	 * 1) If all other LWPs are waiting for exits or suspended.
	 * 2) If we are trying to wait on ourself.
	 *
	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
	 * waits, not any-LWP waits) and detect that sort of deadlock, but
	 * we don't have a good place to store the lwp that is being waited
	 * for.  wchan is already filled with &p->p_nlwps, and putting the
	 * lwp address in there for deadlock tracing would require exiting
	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
	 * get threads sleeping on any LWP exiting.
	 */
	if (lwp_lastlive(p->p_nlwpwait) || lid == l->l_lid)
		return (EDEADLK);

	p->p_nlwpwait++;
	wpri = PWAIT;
	if ((flags & LWPWAIT_EXITCONTROL) == 0)
		wpri |= PCATCH;
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;
		nfound++;
		if (l2->l_stat != LSZOMB)
			continue;

		if (departed)
			*departed = l2->l_lid;

		LIST_REMOVE(l2, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		p->p_nlwpwait--;
		/* XXX decrement limits */
		pool_put(&lwp_pool, l2);
		return (0);
	}

	if (nfound == 0) {
		p->p_nlwpwait--;
		return (ESRCH);
	}

	if ((error = mtsleep(&p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0, &p->p_smutex)) != 0)
		return (error);

	goto loop;
}
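
/*
 * Illustrative caller sketch (not part of the original file): a wait-style
 * syscall would be expected to wrap lwp_wait1() roughly like this; the
 * variable names "target_lid" and "departed" are assumptions.
 *
 *	mutex_enter(&p->p_smutex);
 *	error = lwp_wait1(l, target_lid, &departed, 0);
 *	mutex_exit(&p->p_smutex);
 */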

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	    (unsigned) ((caddr_t)&l2->l_endzero -
	    (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	    (unsigned) ((caddr_t)&l2->l_endcopy -
	    (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
	l2->l_mutex = &sched_mutex;
#else
	/*
	 * zero child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
	l2->l_mutex = &lwp_mutex;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	if (p2->p_flag & P_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= L_SYSTEM | L_INMEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);
	l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
	l2->l_omutex = NULL;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((p2->p_flag & P_SA) == 0) {
		l2->l_sigpend = &l2->l_sigstore.ss_pend;
		l2->l_sigmask = &l2->l_sigstore.ss_mask;
		l2->l_sigstk = &l2->l_sigstore.ss_stk;
		*l2->l_sigmask = *l1->l_sigmask;
		CIRCLEQ_INIT(&l2->l_sigpend->sp_info);
		sigemptyset(&l2->l_sigpend->sp_set);
	} else {
		l2->l_sigpend = &p2->p_sigstore.ss_pend;
		l2->l_sigmask = &p2->p_sigstore.ss_mask;
		l2->l_sigstk = &p2->p_sigstore.ss_stk;
	}

	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&alllwp_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&alllwp_mutex);

	/* XXXAD verify */
	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
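
/*
 * Illustrative caller sketch (not from the original file): a creator such
 * as an _lwp_create()-style syscall would be expected to allocate the
 * u-area, call newlwp(), and then move the new LWP out of LSIDL itself.
 * "start_fn", the u-area setup, setrunqueue() and the omitted p_smutex /
 * p_nrlwps accounting are assumptions here.
 *
 *	error = newlwp(l, p, uaddr, inmem, 0, NULL, 0, start_fn, arg, &l2);
 *	if (error == 0) {
 *		lwp_lock(l2);
 *		l2->l_stat = LSRUN;
 *		setrunqueue(l2);
 *		lwp_unlock(l2);
 *	}
 */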

/*
 * Exit an LWP.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch away.
 * Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 */
	mutex_enter(&p->p_smutex);
	p->p_nzlwps++;
	if ((p->p_nlwps - p->p_nzlwps) == (p->p_stat == LSONPROC)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	mutex_exit(&p->p_smutex);

	/*
	 * Remove the LWP from the global list, from the parent process and
	 * then mark it as dead.  Nothing should be able to find or update
	 * it past this point.
	 */
	mutex_enter(&alllwp_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&alllwp_mutex);

	/*
	 * Mark us as dead (almost a zombie) and bin any pending signals
	 * that remain undelivered.
	 *
	 * XXX We should put whole-process signals back onto the process's
	 * pending set and find someone else to deliver them.
	 */
	mutex_enter(&p->p_smutex);
	lwp_lock(l);
	if ((l->l_flag & L_DETACHED) != 0) {
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		curlwp = NULL;
		l->l_proc = NULL;
	}
	l->l_stat = LSDEAD;
	lwp_setlock_unlock(l, &lwp_mutex);
	if ((p->p_flag & P_SA) == 0)
		sigclear(l->l_sigpend, NULL);
	mutex_exit(&p->p_smutex);

	/*
	 * Release our cached credentials and collate accounting flags.
	 */
	kauth_cred_free(l->l_cred);
	mutex_enter(&p->p_crmutex);
	p->p_acflag |= l->l_acflag;
	mutex_exit(&p->p_crmutex);

	/*
	 * Verify that we hold no locks other than the kernel mutex, and
	 * release our turnstile.  We can no longer acquire sleep locks
	 * past this point.
	 */
	LOCKDEBUG_BARRIER(&kernel_mutex, 0);
	pool_cache_put(&turnstile_cache, l->l_ts);

	/*
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif
	pmap_deactivate(l);

	/*
	 * Release the kernel lock, and switch away into oblivion.
	 */
	KERNEL_PROC_UNLOCK(l);
	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{

	KERNEL_LOCK(LK_EXCLUSIVE);

	/*
	 * Free the VM resources we're still holding on to.
	 */
	uvm_lwp_exit(l);

	if (l->l_flag & L_DETACHED) {
		/* Nobody waits for detached LWPs. */
		pool_put(&lwp_pool, l);
		KERNEL_UNLOCK();
	} else {
		KERNEL_UNLOCK();
		l->l_stat = LSZOMB;
		mb_write();
		wakeup(&l->l_proc->p_nlwps);
	}
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * Must be called with p->p_smutex held, and will return the LWP locked.
 * If 'locking' is false, no locking or lock checks are performed.  This
 * is intended for use by DDB.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking)
		LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC ||
			    l->l_stat == LSRUN);
		if (locking)
			lwp_lock(l);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (locking)
				lwp_lock(l);
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
			if (locking)
				lwp_unlock(l);
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		if (locking)
			lwp_lock(l);
		return l;
	case SZOMB:
		/* Doesn't really matter... */
		if (nrlwps)
			*nrlwps = 0;
		l = LIST_FIRST(&p->p_lwps);
		if (locking)
			lwp_lock(l);
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
		if (locking)
			mutex_exit(&p->p_smutex);
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in SIDL process %d (%s)",
		    p->p_pid, p->p_comm);
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}
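
/*
 * Illustrative sketch (not from the original file): a consumer that wants
 * per-"process" information, for example a sysctl or procfs handler, would
 * be expected to use this roughly as follows when locking is requested.
 * "nrlwps" is an assumed local variable.
 *
 *	mutex_enter(&p->p_smutex);
 *	l = proc_representative_lwp(p, &nrlwps, 1);
 *	(examine l, which is returned locked)
 *	lwp_unlock(l);
 *	mutex_exit(&p->p_smutex);
 */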

/*
 * Look up a live LWP within the specified process, and return it locked.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_byid(struct proc *p, int id)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	if (l != NULL) {
		lwp_lock(l);
		if (l->l_stat == LSIDL || l->l_stat == LSZOMB ||
		    l->l_stat == LSDEAD) {
			lwp_unlock(l);
			l = NULL;
		}
	}

	return l;
}
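
/*
 * Illustrative caller sketch (not part of the original file): a syscall
 * operating on another LWP in the same process would be expected to look
 * it up like this; "target" is an assumed variable name.
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_byid(p, target)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	(t is returned locked; operate on it, then lwp_unlock(t))
 *	mutex_exit(&p->p_smutex);
 */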

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_crmutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_crmutex);
	if (oc != NULL)
		kauth_cred_free(oc);
}
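
/*
 * Illustrative sketch (not from the original file): given the comment
 * above, the LWP_CACHE_CREDS() fast-path check is presumably equivalent
 * to something like the following.
 *
 *	if (__predict_false(l->l_cred != p->p_cred))
 *		lwp_update_creds(l);
 */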

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	kmutex_t *cur = l->l_mutex;

	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
#else
	return mutex_owned(l->l_mutex);
#endif
}

/*
 * Lock an LWP.
 */
void
lwp_lock(struct lwp *l)
{
#ifdef MULTIPROCESSOR
	kmutex_t *old;

	for (;;) {
		mutex_enter(old = l->l_mutex);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
		if (__predict_true(l->l_mutex == old)) {
			LOCK_ASSERT(l->l_omutex == NULL);
			return;
		}

		mutex_exit(old);
	}
#else
	mutex_enter(l->l_mutex);
#endif
}
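
/*
 * Illustrative sketch (not part of the original file): the re-test in
 * lwp_lock() guards against the interleaving below, where another CPU
 * changes the LWP's lock while we are blocked on the old one.
 *
 *	CPU A (lwp_lock)		CPU B (holds l->l_mutex)
 *	old = l->l_mutex;
 *	mutex_enter(old);  (blocks)	lwp_setlock(l, new);
 *					mutex_exit(old);
 *	(acquires old)
 *	l->l_mutex != old, so drop old and retry against the new mutex
 */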

/*
 * Unlock an LWP.  If the LWP has been relocked, release the new mutex
 * first, then the old mutex.
 */
void
lwp_unlock(struct lwp *l)
{
#ifdef MULTIPROCESSOR
	kmutex_t *old;

	LOCK_ASSERT(mutex_owned(l->l_mutex));

	if (__predict_true((old = l->l_omutex) == NULL)) {
		mutex_exit(l->l_mutex);
		return;
	}

	l->l_omutex = NULL;
	mutex_exit(l->l_mutex);
	mutex_exit(old);
#else
	LOCK_ASSERT(mutex_owned(l->l_mutex));

	mutex_exit(l->l_mutex);
#endif
}

/*
 * Lend a new mutex to an LWP.  Both the old and new mutexes must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{
	LOCK_ASSERT(mutex_owned(l->l_mutex));
	LOCK_ASSERT(mutex_owned(new));
	LOCK_ASSERT(l->l_omutex == NULL);

#ifdef MULTIPROCESSOR
	mb_write();
	l->l_mutex = new;
#endif
}
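
/*
 * Illustrative sketch (not from the original file): handing an LWP from
 * one lock to another with lwp_setlock() leaves releasing the old mutex
 * to the caller, roughly:
 *
 *	old = l->l_mutex;
 *	mutex_enter(new);		(both locks now held)
 *	lwp_setlock(l, new);
 *	mutex_exit(old);		(the LWP stays locked by "new")
 */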

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_setlock_unlock(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	LOCK_ASSERT(mutex_owned(l->l_mutex));
	LOCK_ASSERT(l->l_omutex == NULL);

	old = l->l_mutex;
#ifdef MULTIPROCESSOR
	mb_write();
	l->l_mutex = new;
#endif
	mutex_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{

	LOCK_ASSERT(mutex_owned(l->l_mutex));
	LOCK_ASSERT(l->l_omutex == NULL);

#ifdef MULTIPROCESSOR
	mutex_enter(new);
	l->l_omutex = l->l_mutex;
	mb_write();
	l->l_mutex = new;
#endif
}
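
/*
 * Illustrative sketch (not part of the original file): after lwp_relock()
 * the old mutex is parked in l_omutex, and the next lwp_unlock() is what
 * releases both, so a typical sequence is ("some_queue_mutex" is an
 * assumed name):
 *
 *	lwp_lock(l);			(LWP locked by its current mutex)
 *	lwp_relock(l, &some_queue_mutex);
 *	...				(LWP now locked by the new mutex)
 *	lwp_unlock(l);			(drops the new mutex, then the old)
 */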

/*
 * Handle exceptions for mi_userret().  Called if L_USERRET is set.
 *
 * Must be called with the LWP locked.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	int sig, flag;

	p = l->l_proc;
	flag = l->l_flag;

#ifdef MULTIPROCESSOR
	LOCK_ASSERT(lwp_locked(l, NULL));
	lwp_unlock(l);
#endif

	/* Signals must be processed first. */
	if ((flag & L_PENDSIG) != 0) {
		mutex_enter(&p->p_smutex);
		while ((sig = issignal(l)) != 0)
			postsig(sig);
		mutex_exit(&p->p_smutex);
	}

	if ((flag & L_WCORE) != 0) {
		/*
		 * Suspend ourselves, so that the kernel stack and therefore
		 * the userland registers saved in the trapframe are around
		 * for coredump() to write them out.  We issue a wakeup() on
		 * p->p_nrlwps so that sigexit() will write the core file out
		 * once all other LWPs are suspended.
		 */
		KERNEL_PROC_LOCK(l);
		mutex_enter(&p->p_smutex);
		p->p_nrlwps--;
		wakeup(&p->p_nrlwps);
		lwp_lock(l);
		l->l_flag &= ~L_DETACHED;
		l->l_stat = LSSUSPENDED;
		mutex_exit_linked(&p->p_smutex, l->l_mutex);
		mi_switch(l, NULL);
		lwp_exit(l);
		/* NOTREACHED */
	}

	if ((flag & L_WEXIT) != 0) {
		KERNEL_PROC_LOCK(l);
		lwp_exit(l);
		/* NOTREACHED */
	}

#ifdef MULTIPROCESSOR
	lwp_lock(l);
#endif
}
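
/*
 * Illustrative sketch (not part of the original file): per the contract
 * above, the MI user-return path is presumably expected to do something
 * along these lines before dropping back to user mode.
 *
 *	if ((l->l_flag & L_USERRET) != 0) {
 *		lwp_lock(l);
 *		lwp_userret(l);
 *		lwp_unlock(l);
 *	}
 */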

/*
 * Return non-zero if this is the last live LWP in the process.  Called when
 * exiting, dumping core, waiting for other LWPs to exit, etc.  Accepts a
 * 'bias' value for deadlock detection.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_lastlive(int bias)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSTOP);

	return p->p_nrlwps - bias - (l->l_stat == LSONPROC) == 0;
}
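
/*
 * Illustrative worked example (not part of the original file): with
 * p_nrlwps == 4, a bias of 1 (e.g. one LWP blocked in lwp_wait1(), which
 * passes p_nlwpwait as the bias), and the caller itself LSONPROC, the
 * expression evaluates as 4 - 1 - 1 == 2, so lwp_lastlive() returns false.
 * With p_nrlwps == 2 under the same conditions it is 2 - 1 - 1 == 0, and
 * the function returns true.
 */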