/*	$NetBSD: kern_lwp.c,v 1.61 2007/03/04 20:59:00 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit (or thread) of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp".
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing in the kernel simultaneously.
 *
 *	Note that LWPs differ from kernel threads (kthreads): kernel threads
 *	belong to distinct system processes, which have no user space
 *	component and may themselves contain one or more LWPs.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed.
 *		Whoever created the new LWP can be expected to set it to
 *		another state shortly.
 *
 *	LSSUSPENDED
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, may be
 *	sleeping, idle, or on a run queue.  It is expected to take the
 *	necessary action to stop executing or become "running" again within
 *	a short timeframe.
 *
 *	LSZOMB
 *
 *		Dead: the LWP has released most of its resources and is
 *		about to switch away into oblivion.  When it switches away,
 *		its few remaining resources will be collected.
 *
 *	LSSLEEP
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		will switch away shortly to allow other LWPs to run on the
 *		CPU.
 *
 *	LSSTOP
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *		Single LWPs within a process can not be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP and LSSUSPENDED states may only be set
 *	when returning to user space in userret(), or when sleeping
 *	interruptibly.  Before setting those states, we try to ensure
 *	that the LWPs will release all kernel locks that they hold,
 *	and at a minimum try to ensure that the LWP can be set runnable
 *	again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC            ONPROC -----> RUN
 *	            > STOPPED                       > SLEEP
 *	            > SUSPENDED                     > STOPPED
 *	                                            > SUSPENDED
 *	                                            > ZOMB
 *
 *	 STOPPED ---> RUN               SUSPENDED --> RUN
 *	            > SLEEP                         > SLEEP
 *
 *	 SLEEP -----> ONPROC            IDL --------> RUN
 *	            > RUN                           > SUSPENDED
 *	            > STOPPED                       > STOPPED
 *	            > SUSPENDED
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin mutex pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held.  In
 *	a multiprocessor kernel, state transitions may cause the LWP's lock
 *	pointer to change.  On uniprocessor kernels, most scheduler and
 *	synchronisation objects such as sleep queues and LWPs are protected
 *	by only one mutex (sched_mutex).  In this case, LWPs' lock pointers
 *	never change and always reference sched_mutex.
 *
 *	Manipulation of the general lock is not performed directly, but
 *	through calls to lwp_lock(), lwp_relock() and similar.
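 *
 *	As a minimal illustration (a sketch for orientation, not compiled
 *	code), a caller that wants a consistent view of an LWP's state
 *	brackets the access with the general lock:
 *
 *		lwp_lock(l);
 *		stat = l->l_stat;
 *		wchan = l->l_wchan;
 *		lwp_unlock(l);
 *
 *	Because the lock pointer can change while the caller is spinning,
 *	lwp_lock() re-checks l->l_mutex after acquiring it and retries if
 *	the pointer has moved; see lwp_lock_retry() below.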
 *
 *	States and their associated locks:
 *
 *	LSIDL, LSZOMB
 *
 *		Always covered by sched_mutex.
 *
 *	LSONPROC, LSRUN
 *
 *		Always covered by sched_mutex, which protects the run queues
 *		and other miscellaneous items.  If the scheduler is changed
 *		to use per-CPU run queues, this may become a per-CPU mutex.
 *
 *	LSSLEEP
 *
 *		Covered by a mutex associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue mutex.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the mutex is sched_mutex.
 *
 *	The lock order is as follows:
 *
 *		sleepq_t::sq_mutex  |---> sched_mutex
 *		tschain_t::tc_mutex |
 *
 *	Each process has a scheduler state mutex (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
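 *
 *	As an illustrative sketch (mirroring what lwp_userret() does below
 *	when LW_WSUSPEND is pending), moving the current LWP into
 *	LSSUSPENDED therefore looks like:
 *
 *		mutex_enter(&p->p_smutex);
 *		p->p_nrlwps--;
 *		lwp_lock(l);
 *		l->l_stat = LSSUSPENDED;
 *		mutex_exit(&p->p_smutex);
 *		mi_switch(l, NULL);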
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.61 2007/03/04 20:59:00 ad Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;

POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

static specificdata_domain_t lwp_specificdata_domain;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	LOCK_ASSERT(mutex_owned(&t->l_proc->p_smutex));
	LOCK_ASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend
	 * anyone else or deadlock could occur.  We won't return to
	 * user space.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR;	/* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	/*
	 * XXXLWP Wait for:
	 *
	 * o process exiting
	 * o target LWP suspended
	 * o target LWP not suspended and LW_WSUSPEND clear
	 * o target LWP exited
	 */

	return (error);
}
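
/*
 * For orientation, a sketch (hypothetical and simplified) of how a caller
 * such as an _lwp_suspend()-style system call might drive the function
 * above; the target lookup and error handling here are assumptions for
 * illustration:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, target_lid)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(l, t);	(unlocks t before returning)
 *	mutex_exit(&p->p_smutex);
 */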

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/*
	 * We try to check for deadlock:
	 *
	 * 1) If all other LWPs are waiting for exits or suspended.
	 * 2) If we are trying to wait on ourself.
	 *
	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
	 * waits, not any-LWP waits) and detect that sort of deadlock, but
	 * we don't have a good place to store the lwp that is being waited
	 * for.  wchan is already filled with &p->p_nlwps, and putting the
	 * lwp address in there for deadlock tracing would require exiting
	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
	 * get threads sleeping on any LWP exiting.
	 */
	if (lid == l->l_lid)
		return EDEADLK;

	p->p_nlwpwait++;

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, 0, 0);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2 == l || (lid != 0 && l2->l_lid != lid))
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += ((flags & LWPWAIT_EXITCONTROL) != 0);
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			if (departed)
				*departed = l2->l_lid;
			lwp_free(l2, 0, 0);
			mutex_enter(&p->p_smutex);
			p->p_nlwpwait--;
			return 0;
		}

		if (nfound == 0) {
			error = ESRCH;
			break;
		}
		if ((flags & LWPWAIT_EXITCONTROL) != 0) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps <= p->p_nlwpwait + p->p_ndlwps) {
			error = EDEADLK;
			break;
		}
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	p->p_nlwpwait--;
	return error;
}
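
/*
 * A sketch of how a caller uses this (hypothetical, modelled on an
 * _lwp_wait()-style system call; the variable names and the copyout
 * destination are assumptions for illustration):
 *
 *	lwpid_t departed = 0;
 *
 *	mutex_enter(&p->p_smutex);
 *	error = lwp_wait1(l, target_lid, &departed, 0);
 *	mutex_exit(&p->p_smutex);
 *	if (error == 0 && departed_out != NULL)
 *		error = copyout(&departed, departed_out, sizeof(departed));
 */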

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, bool inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, 1, 0);	/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_get(&lwp_pool, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == MAXPRI);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_priority = l1->l_priority;
	l2->l_usrpri = l1->l_usrpri;
	l2->l_inheritedprio = MAXPRI;
	l2->l_mutex = &sched_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? LW_INMEM : 0;
	lwp_initspecific(l2);

	if (p2->p_flag & PK_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= LW_SYSTEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&proclist_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&proclist_mutex);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
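
/*
 * An illustrative sketch of creating and starting an LWP with the above
 * (hypothetical, modelled loosely on an _lwp_create()-style caller; the
 * u-area allocation helper and startfunc/arg names are assumptions):
 *
 *	bool inmem;
 *	vaddr_t uaddr;
 *	struct lwp *l2;
 *
 *	inmem = uvm_uarea_alloc(&uaddr);
 *	if (uaddr == 0)
 *		return ENOMEM;
 *	error = newlwp(l, p, uaddr, inmem, 0, NULL, 0, startfunc, arg, &l2);
 *
 *	The new LWP is LSIDL; per the contract above the caller then makes
 *	it runnable, adjusting p_nrlwps under p_smutex:
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(l2);
 *	p->p_nrlwps++;
 *	l2->l_stat = LSRUN;
 *	setrunqueue(l2);
 *	lwp_unlock(l2);
 *	mutex_exit(&p->p_smutex);
 */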

/*
 * Quit the process.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch away.
 * Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(&proclist_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&proclist_mutex);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, 0, 0);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif
	pmap_deactivate(l);

	/*
	 * Release the kernel lock, signal another LWP to collect us,
	 * and switch away into oblivion.
	 */
#ifdef notyet
	/* XXXSMP hold in lwp_userret() */
	KERNEL_UNLOCK_LAST(l);
#else
	KERNEL_UNLOCK_ALL(l, NULL);
#endif

	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the dead LWP's
 * resources to be freed (i.e., once we've switched to the idle PCB for the
 * current CPU).
 */
void
lwp_exit2(struct lwp *l)
{
	/* XXXSMP re-enable preemption */
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, int recycle, int last)
{
	struct proc *p = l->l_proc;
	ksiginfoq_t kq;

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;
		mutex_exit(&p->p_smutex);

#ifdef MULTIPROCESSOR
		/*
		 * In the unlikely event that the LWP is still on the CPU,
		 * then spin until it has switched away.  We need to release
		 * all locks to avoid deadlock against interrupt handlers on
		 * the target CPU.
		 */
		if (l->l_cpu->ci_curlwp == l) {
			int count;
			KERNEL_UNLOCK_ALL(curlwp, &count);
			while (l->l_cpu->ci_curlwp == l)
				SPINLOCK_BACKOFF_HOOK;
			KERNEL_LOCK(count, curlwp);
		}
#endif
	}

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	KERNEL_LOCK(1, curlwp);		/* XXXSMP */
	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(&turnstile_cache, l->l_ts);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	uvm_lwp_exit(l);
	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == MAXPRI);
	if (!recycle)
		pool_put(&lwp_pool, l);
	KERNEL_UNLOCK_ONE(curlwp);	/* XXXSMP */
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		LOCK_ASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC ||
			    l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SZOMB:
	case SDYING:
	case SDEAD:
		if (locking)
			mutex_exit(&p->p_smutex);
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Look up a live LWP within the specified process.  The LWP is returned
 * unlocked; the caller must lock it if required.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_mutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_mutex);
	if (oc != NULL) {
		KERNEL_LOCK(1, l);	/* XXXSMP */
		kauth_cred_free(oc);
		KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
	}
}
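
/*
 * Illustrative sketch only: a syscall-entry path would normally refresh
 * the cache through the LWP_CACHE_CREDS() wrapper mentioned above, rather
 * than calling lwp_update_creds() unconditionally, since the wrapper
 * first compares the cached and master pointers:
 *
 *	LWP_CACHE_CREDS(l, l->l_proc);
 *	... use l->l_cred for access control checks ...
 */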

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
#else
	return mutex_owned(cur);
#endif
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
 * Lock an LWP.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	for (;;) {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif
}
#endif

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	LOCK_ASSERT(mutex_owned(l->l_mutex));

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	mb_write();
	l->l_mutex = new;
#else
	(void)new;
#endif
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	LOCK_ASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	mb_write();
	l->l_mutex = new;
#else
	(void)new;
#endif
	mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;
#endif

	LOCK_ASSERT(mutex_owned(l->l_mutex));

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
#else
	(void)new;
#endif
}

int
lwp_trylock(struct lwp *l)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
#else
	return mutex_tryenter(l->l_mutex);
#endif
}

/*
 * Handle exceptions for mi_userret().  Called if any flag in the
 * LW_USERRET mask is set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	void (*hook)(void);
	int sig;

	p = l->l_proc;

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 */
	while ((l->l_flag & LW_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core or exiting, where we will instead
		 * enter the LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
		    LW_PENDSIG) {
			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() below */
			mutex_enter(&p->p_smutex);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(&p->p_smutex);
			KERNEL_UNLOCK_LAST(l);	/* XXXSMP */
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			mutex_enter(&p->p_smutex);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			mutex_exit(&p->p_smutex);
			mi_switch(l, NULL);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			KERNEL_LOCK(1, l);
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* Call userret hook; used by Linux emulation. */
		if ((l->l_flag & LW_WUSERRET) != 0) {
			lwp_lock(l);
			l->l_flag &= ~LW_WUSERRET;
			lwp_unlock(l);
			hook = p->p_userret;
			p->p_userret = NULL;
			(*hook)();
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	mb_write();
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the LWP structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_refcv);
	mutex_exit(&p->p_smutex);
}

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_refcv, &p->p_smutex);
}
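
/*
 * A sketch of the intended reference-counting usage (hypothetical, in the
 * style of a procfs-like inspector; the lookup and error handling are
 * assumptions for illustration):
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((l = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	lwp_addref(l);
 *	mutex_exit(&p->p_smutex);
 *
 *	... inspect l and its PCB; the reference keeps them from being
 *	freed by lwp_exit()/lwp_free() ...
 *
 *	lwp_delref(l);
 */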

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key, data);
}
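
/*
 * A sketch of subsystem usage of the lwp-specific data interface above
 * (hypothetical subsystem; the key, destructor and payload names are
 * assumptions for illustration):
 *
 *	static specificdata_key_t example_key;
 *
 *	(once, at subsystem initialization)
 *	error = lwp_specific_key_create(&example_key, example_dtor);
 *
 *	(in the owning LWP only; see the interlocking note above)
 *	struct example *ep = kmem_alloc(sizeof(*ep), KM_SLEEP);
 *	lwp_setspecific(example_key, ep);
 *	...
 *	ep = lwp_getspecific(example_key);
 *
 *	The destructor runs when the LWP's container is finalized via
 *	lwp_finispecific(), or when the key is deleted.
 */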