/*	$NetBSD: kern_lwp.c,v 1.61.2.16 2007/08/18 05:41:49 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit (or thread) of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp".
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing in the kernel simultaneously.
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed.
 *		Whoever created the new LWP can be expected to set it to
 *		another state shortly.
 *
 *	LSSUSPENDED
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, may be
 *	sleeping, idle, or on a run queue.  It is expected to take the
 *	necessary action to stop executing or become "running" again within
 *	a short timeframe.
 *
 *	LSZOMB
 *
 *		Dead: the LWP has released most of its resources and is
 *		about to switch away into oblivion.  When it switches away,
 *		its few remaining resources will be collected.
 *
 *	LSSLEEP
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		will switch away shortly to allow other LWPs to run on the
 *		CPU.
 *
 *	LSSTOP
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *		Single LWPs within a process cannot be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP and LSSUSPENDED states may only be set
 *	when returning to user space in userret(), or when sleeping
 *	interruptibly.  Before setting those states, we try to ensure
 *	that the LWPs will release all kernel locks that they hold,
 *	and at a minimum try to ensure that the LWP can be set runnable
 *	again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *		    > STOPPED			    > SLEEP
 *		    > SUSPENDED			    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *		    > SLEEP			    > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN			    > SUSPENDED
 *		    > STOPPED			    > STOPPED
 *		    > SUSPENDED
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin mutex pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held.  In
 *	a multiprocessor kernel, state transitions may cause the LWP's lock
 *	pointer to change.  On uniprocessor kernels, most scheduler and
 *	synchronisation objects such as sleep queues and LWPs are protected
 *	by only one mutex (spc_mutex on the single CPU).  In that case, LWPs'
 *	lock pointers never change and always reference that spc_mutex.  Note
 *	that in a multiprocessor kernel each CPU has its own spc_mutex.
 *	(spc_mutex here refers to l->l_cpu->ci_schedstate.spc_mutex.)
 *
 *	Manipulation of the general lock is not performed directly, but
 *	through calls to lwp_lock(), lwp_relock() and similar.
 *
 *	States and their associated locks:
 *
 *	LSIDL, LSZOMB, LSONPROC
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock.
 *
 *	LSRUN
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This may be a per-CPU lock, depending on the scheduler.
 *
 *	LSSLEEP
 *
 *		Covered by a mutex associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue mutex.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the mutex is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleepq_t::sq_mutex ->
 *			tschain_t::tc_mutex ->
 *			    spc::spc_mutex
 *
 *	Each process has a scheduler state mutex (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
 */
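
/*
 * An illustrative sketch of the rules above (an editor's example, not a
 * kernel code path): l_stat is only stable while the LWP's general lock
 * is held, and the identity of that lock follows the state.
 */
#if 0
	lwp_lock(l);			/* takes whatever l->l_mutex points at */
	switch (l->l_stat) {		/* stable while the general lock is held */
	case LSSLEEP:
		/* Here l->l_mutex is the sleep queue's sq_mutex. */
		break;
	case LSONPROC:
		/* Here l->l_mutex is the CPU's spc_lwplock. */
		break;
	}
	lwp_unlock(l);
#endif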

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.61.2.16 2007/08/18 05:41:49 yamt Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;

POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr, IPL_NONE);

static specificdata_domain_t lwp_specificdata_domain;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(&t->l_proc->p_smutex));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	/*
	 * XXXLWP Wait for:
	 *
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */

	return (error);
}
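
/*
 * An editor's sketch (not part of this file) of how the suspension
 * described above looks from userland, using NetBSD's _lwp_suspend(2)
 * and _lwp_continue(2); 'target' is a hypothetical sibling LWP and
 * error handling is minimal.
 */
#if 0
#include <lwp.h>
#include <stdio.h>

static void
toggle(lwpid_t target)
{

	if (_lwp_suspend(target) != 0) {	/* target heads for LSSUSPENDED */
		perror("_lwp_suspend");
		return;
	}
	/* ... the target is now halted; inspect it here ... */
	if (_lwp_continue(target) != 0)		/* back to LSRUN via setrunnable() */
		perror("_lwp_continue");
}
#endif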

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}
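
/*
 * A sketch of the calling convention above (editor's example; 't' is a
 * hypothetical target LWP found while p_smutex is held):
 */
#if 0
	mutex_enter(&p->p_smutex);
	lwp_lock(t);
	lwp_continue(t);		/* consumes the LWP lock */
	mutex_exit(&p->p_smutex);
#endif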

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;
	lwpid_t curlid;
	bool exiting;

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nlwpwait++;
	l->l_waitingfor = lid;
	curlid = l->l_lid;
	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		error = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			/*
			 * If a specific wait and the target is waiting on
			 * us, then avoid deadlock.  This also traps LWPs
			 * that try to wait on themselves.
			 *
			 * Note that this does not handle more complicated
			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
			 * can still be killed so it is not a major problem.
			 */
			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
				error = EDEADLK;
				break;
			}
			if (l2 == l)
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += exiting;
				continue;
			}
			if (lid != 0) {
				if (l2->l_lid != lid)
					continue;
				/*
				 * Mark this LWP as the first waiter, if there
				 * is no other.
				 */
				if (l2->l_waiter == 0)
					l2->l_waiter = curlid;
			} else if (l2->l_waiter != 0) {
				/*
				 * It already has a waiter - so don't
				 * collect it.  If the waiter doesn't
				 * grab it we'll get another chance
				 * later.
				 */
				nfound++;
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			/*
			 * We're no longer waiting.  Reset the "first waiter"
			 * pointer on the target, in case it was us.
			 */
			l->l_waitingfor = 0;
			l2->l_waiter = 0;
			p->p_nlwpwait--;
			if (departed)
				*departed = l2->l_lid;

			/* lwp_free() releases the proc lock. */
			lwp_free(l2, false, false);
			mutex_enter(&p->p_smutex);
			return 0;
		}

		if (error != 0)
			break;
		if (nfound == 0) {
			error = ESRCH;
			break;
		}

		/*
		 * The kernel is careful to ensure that it can not deadlock
		 * when exiting - just keep waiting.
		 */
		if (exiting) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}

		/*
		 * If all other LWPs are waiting for exits or suspends
		 * and the supply of zombies and potential zombies is
		 * exhausted, then we are about to deadlock.
		 *
		 * If the process is exiting (and this LWP is not the one
		 * that is coordinating the exit) then bail out now.
		 */
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
			error = EDEADLK;
			break;
		}

		/*
		 * Sit around and wait for something to happen.  We'll be
		 * awoken if any of the conditions examined change: if an
		 * LWP exits, is collected, or is detached.
		 */
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	/*
	 * We didn't find any LWPs to collect: we may have received a
	 * signal, or some other condition may have caused us to bail out.
	 *
	 * If waiting on a specific LWP, clear the waiters marker: some
	 * other LWP may want it.  Then, kick all the remaining waiters
	 * so that they can re-check for zombies and for deadlock.
	 */
	if (lid != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2->l_lid == lid) {
				if (l2->l_waiter == curlid)
					l2->l_waiter = 0;
				break;
			}
		}
	}
	p->p_nlwpwait--;
	l->l_waitingfor = 0;
	cv_broadcast(&p->p_lwpcv);

	return error;
}
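
/*
 * A sketch of lwp_wait1()'s calling convention (editor's example,
 * loosely following the _lwp_wait() system call; 'lid' and 'departed'
 * are hypothetical locals): p_smutex is held across the call.
 */
#if 0
	mutex_enter(&p->p_smutex);
	error = lwp_wait1(curlwp, lid, &departed, 0);	/* may sleep on p_lwpcv */
	mutex_exit(&p->p_smutex);
#endif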

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, bool inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, true, false);	/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_get(&lwp_pool, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == -1);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_priority = l1->l_priority;
	l2->l_usrpri = l1->l_usrpri;
	l2->l_inheritedprio = -1;
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? LW_INMEM : 0;
	lwp_initspecific(l2);
	sched_lwp_fork(l2);

	if (p2->p_flag & PK_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= LW_SYSTEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch, CALLOUT_MPSAFE);
	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	KERNEL_LOCK(1, curlwp);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);
	KERNEL_UNLOCK_ONE(curlwp);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&proclist_lock);
	mutex_enter(&proclist_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&proclist_mutex);
	mutex_exit(&proclist_lock);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
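
/*
 * An editor's sketch of a typical caller, loosely modelled on the
 * _lwp_create() path: allocate a u-area, create the LWP in LSIDL, and
 * then move it to LSRUN by hand.  uvm_uarea_alloc(), sched_enqueue()
 * and start_fn/arg are assumptions borrowed from surrounding sources,
 * not guarantees made by this file.
 */
#if 0
	vaddr_t uaddr;
	bool inmem;
	struct lwp *l2;
	int error;

	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0)
		return ENOMEM;
	error = newlwp(curlwp, p, uaddr, inmem, 0, NULL, 0, start_fn,
	    arg, &l2);
	if (error != 0)
		return error;

	/* The new LWP is LSIDL; make it runnable ourselves. */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	p->p_nrlwps++;
	l2->l_stat = LSRUN;
	sched_enqueue(l2, false);
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);
#endif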

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (which implies splsched), or at splsched
 * if there is no previous LWP.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{

	curlwp = new;
	if (prev != NULL) {
		lwp_unlock(prev);
	}
	spl0();
	pmap_activate(new);
	LOCKDEBUG_BARRIER(NULL, 0);
	if ((new->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new);
	}
}

/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	bool current;

	current = (l == curlwp);

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
	KASSERT(current || l->l_stat == LSIDL);

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		KASSERT(current == true);
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);
	callout_destroy(&l->l_tsleep_ch);

	/*
	 * While we can still block, mark the LWP as unswappable to
	 * prevent conflicts with the swapper.
	 */
	if (current)
		uvm_lwp_hold(l);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(&proclist_lock);
	mutex_enter(&proclist_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&proclist_mutex);
	mutex_exit(&proclist_lock);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
			l->l_refcnt++;
			lwp_drainrefs(l);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	if (current) {
		pmap_deactivate(l);

		/*
		 * Release the kernel lock, and switch away into
		 * oblivion.
		 */
#ifdef notyet
		/* XXXSMP hold in lwp_userret() */
		KERNEL_UNLOCK_LAST(l);
#else
		KERNEL_UNLOCK_ALL(l, NULL);
#endif
		lwp_exit_switchaway(l);
	}
}

void
lwp_exit_switchaway(struct lwp *l)
{
	struct cpu_info *ci;
	struct lwp *idlelwp;

	/* Unlocked, but is for statistics only. */
	uvmexp.swtch++;

	(void)splsched();
	l->l_flag &= ~LW_RUNNING;
	ci = curcpu();
	idlelwp = ci->ci_data.cpu_idlelwp;
	idlelwp->l_stat = LSONPROC;
	cpu_switchto(NULL, idlelwp, false);
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
	struct proc *p = l->l_proc;
	ksiginfoq_t kq;

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
		p->p_pctcpu += l->l_pctcpu;
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;

		/*
		 * Have any LWPs sleeping in lwp_wait() recheck for
		 * deadlock.
		 */
		cv_broadcast(&p->p_lwpcv);
		mutex_exit(&p->p_smutex);
	}

#ifdef MULTIPROCESSOR
	/*
	 * In the unlikely event that the LWP is still on the CPU,
	 * then spin until it has switched away.  We need to release
	 * all locks to avoid deadlock against interrupt handlers on
	 * the target CPU.
	 */
	if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
		int count;
		(void)count; /* XXXgcc */
		KERNEL_UNLOCK_ALL(curlwp, &count);
		while ((l->l_flag & LW_RUNNING) != 0 ||
		    l->l_cpu->ci_curlwp == l)
			SPINLOCK_BACKOFF_HOOK;
		KERNEL_LOCK(count, curlwp);
	}
#endif

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);
	mutex_destroy(&l->l_swaplock);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.  Also, free the scheduler specific
	 * data.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(&turnstile_cache, l->l_ts);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	uvm_lwp_exit(l);
	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == -1);
	sched_lwp_exit(l);
	if (!recycle)
		pool_put(&lwp_pool, l);
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		KASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC ||
			    l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0) {
				continue;
			}
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SZOMB:
	case SDYING:
	case SDEAD:
		if (locking)
			mutex_exit(&p->p_smutex);
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Look up a live LWP within the specified process.  The LWP is not
 * locked on return.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}
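
/*
 * A sketch of the usual lookup pattern (editor's example; see
 * lwp_addref()/lwp_delref() below): pin the LWP with a reference while
 * p_smutex is held so that it cannot reach lwp_free() while in use.
 */
#if 0
	struct lwp *t;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, lid)) != NULL)
		lwp_addref(t);
	mutex_exit(&p->p_smutex);

	if (t != NULL) {
		/* ... inspect t safely here ... */
		lwp_delref(t);
	}
#endif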

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_mutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_mutex);
	if (oc != NULL)
		kauth_cred_free(oc);
}

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}

/*
 * Lock an LWP.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	for (;;) {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif
}
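
/*
 * The loop above can be modelled in portable C11: an object's lock
 * pointer may be re-pointed by whoever holds the current lock, so the
 * locker must re-check the pointer after acquiring it.  This is an
 * editor's standalone sketch, not kernel code.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>

struct guarded {
	_Atomic(pthread_mutex_t *) lock;	/* may change while we chase it */
};

static pthread_mutex_t *
guarded_lock(struct guarded *g)
{
	pthread_mutex_t *m;

	for (;;) {
		m = atomic_load(&g->lock);
		pthread_mutex_lock(m);
		/*
		 * Re-test: only the holder of the current lock may
		 * re-point g->lock, so if it is unchanged we own it.
		 */
		if (atomic_load(&g->lock) == m)
			return m;
		pthread_mutex_unlock(m);
	}
}
#endif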

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	KASSERT(mutex_owned(l->l_mutex));

	mb_write();
	l->l_mutex = new;
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	mb_write();
	l->l_mutex = new;
	mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
}

int
lwp_trylock(struct lwp *l)
{
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
}
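
/*
 * A sketch of lwp_trylock() use (editor's example): a caller that
 * cannot honour the documented lock order uses the try-lock and backs
 * off on failure rather than spinning with its own locks held.
 */
#if 0
	if (lwp_trylock(l)) {
		/* l is now locked; l_stat is stable. */
		lwp_unlock(l);
	} else {
		/* Back off, drop our own locks, and retry later. */
	}
#endif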

/*
 * Handle exceptions for mi_userret().  Called if one of the flags in
 * LW_USERRET is set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	void (*hook)(void);
	int sig;

	p = l->l_proc;

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 */
	while ((l->l_flag & LW_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core or exiting, where we will instead
		 * enter the LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
		    LW_PENDSIG) {
			mutex_enter(&p->p_smutex);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(&p->p_smutex);
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			mutex_enter(&p->p_smutex);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			mutex_exit(&p->p_smutex);
			mi_switch(l);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			KERNEL_LOCK(1, l);
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* Call userret hook; used by Linux emulation. */
		if ((l->l_flag & LW_WUSERRET) != 0) {
			lwp_lock(l);
			l->l_flag &= ~LW_WUSERRET;
			lwp_unlock(l);
			hook = p->p_userret;
			p->p_userret = NULL;
			(*hook)();
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	mb_write();
	cpu_signotify(l);
}
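
/*
 * The mb_write() above pairs with the unlocked flag tests in
 * lwp_userret().  The same publish/observe pattern in portable C11
 * looks like this editor's standalone sketch (a release store stands
 * in for mb_write(), an acquire load for the unlocked test):
 */
#if 0
#include <stdatomic.h>

#define FLAG_WORK	0x01

static _Atomic unsigned flags;

static void
post_work(void)
{

	atomic_fetch_or_explicit(&flags, FLAG_WORK, memory_order_release);
	/* ... then poke the consumer, as cpu_signotify() does ... */
}

static int
work_pending(void)
{

	return (atomic_load_explicit(&flags, memory_order_acquire) &
	    FLAG_WORK) != 0;
}
#endif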

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around for inspection.
 */
void
lwp_addref(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt > 0);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_refcv);
	mutex_exit(&p->p_smutex);
}

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_refcv, &p->p_smutex);
}

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key, data);
}
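
/*
 * A sketch of the lwp-specific data API above (editor's example with
 * hypothetical names): a subsystem creates one key, then reads and
 * writes only the current LWP's slot.
 */
#if 0
static specificdata_key_t example_key;

static void
example_dtor(void *data)
{

	kmem_free(data, sizeof(int));
}

void
example_init(void)
{
	int error;

	error = lwp_specific_key_create(&example_key, example_dtor);
	KASSERT(error == 0);
}

void
example_set(int value)
{
	int *ip;

	ip = kmem_alloc(sizeof(*ip), KM_SLEEP);
	*ip = value;
	lwp_setspecific(example_key, ip);	/* curlwp's slot only */
}
#endif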