1 /* $NetBSD: kern_lwp.c,v 1.65.2.2 2007/09/10 10:55:58 skrll Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams, and Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Overview
41 *
42 * Lightweight processes (LWPs) are the basic unit or thread of
43 * execution within the kernel. The core state of an LWP is described
44 * by "struct lwp", also known as lwp_t.
45 *
 46 * Each LWP is contained within a process (described by "struct proc").
 47 * Every process contains at least one LWP, but may contain more. The
48 * process describes attributes shared among all of its LWPs such as a
49 * private address space, global execution state (stopped, active,
50 * zombie, ...), signal disposition and so on. On a multiprocessor
 51 * machine, multiple LWPs may be executing concurrently in the kernel.
52 *
53 * Execution states
54 *
55 * At any given time, an LWP has overall state that is described by
56 * lwp::l_stat. The states are broken into two sets below. The first
57 * set is guaranteed to represent the absolute, current state of the
58 * LWP:
59 *
60 * LSONPROC
61 *
62 * On processor: the LWP is executing on a CPU, either in the
63 * kernel or in user space.
64 *
65 * LSRUN
66 *
67 * Runnable: the LWP is parked on a run queue, and may soon be
 68 * chosen to run by an idle processor, or by a processor that
 69 * has been asked to preempt a currently running but lower
 70 * priority LWP. If the LWP is not swapped in (LW_INMEM == 0)
71 * then the LWP is not on a run queue, but may be soon.
72 *
73 * LSIDL
74 *
75 * Idle: the LWP has been created but has not yet executed,
76 * or it has ceased executing a unit of work and is waiting
77 * to be started again.
78 *
 79 * LSSUSPENDED
80 *
81 * Suspended: the LWP has had its execution suspended by
82 * another LWP in the same process using the _lwp_suspend()
83 * system call. User-level LWPs also enter the suspended
84 * state when the system is shutting down.
85 *
 86 * The second set represents a "statement of intent" on behalf of the
 87 * LWP. The LWP may in fact be executing on a processor, or may be
 88 * sleeping or idle. It is expected to take the necessary action to
 89 * stop executing or become "running" again within a short timeframe.
 90 * The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
 91 * Importantly, it indicates that its state is tied to a CPU.
92 *
93 * LSZOMB:
94 *
95 * Dead or dying: the LWP has released most of its resources
96 * and is a) about to switch away into oblivion b) has already
97 * switched away. When it switches away, its few remaining
98 * resources can be collected.
99 *
100 * LSSLEEP:
101 *
102 * Sleeping: the LWP has entered itself onto a sleep queue, and
103 * has switched away or will switch away shortly to allow other
104 * LWPs to run on the CPU.
105 *
106 * LSSTOP:
107 *
108 * Stopped: the LWP has been stopped as a result of a job
109 * control signal, or as a result of the ptrace() interface.
110 *
111 * Stopped LWPs may run briefly within the kernel to handle
112 * signals that they receive, but will not return to user space
113 * until their process' state is changed away from stopped.
114 *
115 * Single LWPs within a process can not be set stopped
116 * selectively: all actions that can stop or continue LWPs
117 * occur at the process level.
118 *
119 * State transitions
120 *
121 * Note that the LSSTOP state may only be set when returning to
 122 * user space in userret(), or when sleeping interruptibly. The
123 * LSSUSPENDED state may only be set in userret(). Before setting
124 * those states, we try to ensure that the LWPs will release all
125 * locks that they hold, and at a minimum try to ensure that the
126 * LWP can be set runnable again by a signal.
127 *
128 * LWPs may transition states in the following ways:
129 *
130 * RUN -------> ONPROC ONPROC -----> RUN
131 * > STOPPED > SLEEP
132 * > SUSPENDED > STOPPED
133 * > SUSPENDED
134 * > ZOMB
135 *
136 * STOPPED ---> RUN SUSPENDED --> RUN
137 * > SLEEP > SLEEP
138 *
139 * SLEEP -----> ONPROC IDL --------> RUN
140 * > RUN > SUSPENDED
141 * > STOPPED > STOPPED
142 * > SUSPENDED
143 *
 144 * Other state transitions are possible with kernel threads (e.g.
 145 * ONPROC -> IDL), but only happen under tightly controlled
 146 * circumstances where the side effects are understood.
147 *
148 * Locking
149 *
150 * The majority of fields in 'struct lwp' are covered by a single,
151 * general spin lock pointed to by lwp::l_mutex. The locks covering
152 * each field are documented in sys/lwp.h.
153 *
154 * State transitions must be made with the LWP's general lock held,
155 * and may cause the LWP's lock pointer to change. Manipulation of
156 * the general lock is not performed directly, but through calls to
157 * lwp_lock(), lwp_relock() and similar.
158 *
159 * States and their associated locks:
160 *
161 * LSIDL, LSZOMB, LSONPROC:
162 *
163 * Always covered by spc_lwplock, which protects running LWPs.
164 * This is a per-CPU lock.
165 *
166 * LSRUN:
167 *
168 * Always covered by spc_mutex, which protects the run queues.
169 * This may be a per-CPU lock, depending on the scheduler.
170 *
171 * LSSLEEP:
172 *
173 * Covered by a lock associated with the sleep queue that the
174 * LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
175 *
176 * LSSTOP, LSSUSPENDED:
177 *
178 * If the LWP was previously sleeping (l_wchan != NULL), then
179 * l_mutex references the sleep queue lock. If the LWP was
180 * runnable or on the CPU when halted, or has been removed from
181 * the sleep queue since halted, then the lock is spc_lwplock.
182 *
183 * The lock order is as follows:
184 *
185 * spc::spc_lwplock ->
186 * sleepq_t::sq_mutex ->
187 * tschain_t::tc_mutex ->
188 * spc::spc_mutex
189 *
 190 * Each process has a scheduler state lock (proc::p_smutex), and a
 191 * number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 192 * so on. When an LWP is to be entered into or removed from one of the
 193 * following states, p_smutex must be held and the process wide counters
 194 * adjusted:
195 *
196 * LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
197 *
198 * Note that an LWP is considered running or likely to run soon if in
199 * one of the following states. This affects the value of p_nrlwps:
200 *
201 * LSRUN, LSONPROC, LSSLEEP
202 *
203 * p_smutex does not need to be held when transitioning among these
204 * three states.
205 */
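
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): the usual pattern for examining per-LWP state under the LWP's
 * general lock, as described in the Locking section above.  The function
 * name is hypothetical and the state tested is just an example.
 */
#if 0
static bool
example_lwp_is_sleeping(struct lwp *l)
{
	bool sleeping;

	lwp_lock(l);		/* takes whatever lock l->l_mutex points at */
	sleeping = (l->l_stat == LSSLEEP);
	lwp_unlock(l);		/* drops the lock currently covering the LWP */

	return sleeping;
}
#endif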
206
207 #include <sys/cdefs.h>
208 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.65.2.2 2007/09/10 10:55:58 skrll Exp $");
209
210 #include "opt_multiprocessor.h"
211 #include "opt_lockdebug.h"
212
213 #define _LWP_API_PRIVATE
214
215 #include <sys/param.h>
216 #include <sys/systm.h>
217 #include <sys/cpu.h>
218 #include <sys/pool.h>
219 #include <sys/proc.h>
220 #include <sys/syscallargs.h>
221 #include <sys/syscall_stats.h>
222 #include <sys/kauth.h>
223 #include <sys/sleepq.h>
224 #include <sys/lockdebug.h>
225 #include <sys/kmem.h>
226
227 #include <uvm/uvm_extern.h>
228
229 struct lwplist alllwp;
230
231 POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
232 &pool_allocator_nointr, IPL_NONE);
233 POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
234 &pool_allocator_nointr, IPL_NONE);
235
236 static specificdata_domain_t lwp_specificdata_domain;
237
238 #define LWP_DEBUG
239
240 #ifdef LWP_DEBUG
241 int lwp_debug = 0;
242 #define DPRINTF(x) if (lwp_debug) printf x
243 #else
244 #define DPRINTF(x)
245 #endif
246
247 void
248 lwpinit(void)
249 {
250
251 lwp_specificdata_domain = specificdata_domain_create();
252 KASSERT(lwp_specificdata_domain != NULL);
253 lwp_sys_init();
254 }
255
256 /*
 257 * Set an LWP suspended.
258 *
259 * Must be called with p_smutex held, and the LWP locked. Will unlock the
260 * LWP before return.
261 */
262 int
263 lwp_suspend(struct lwp *curl, struct lwp *t)
264 {
265 int error;
266
267 KASSERT(mutex_owned(&t->l_proc->p_smutex));
268 KASSERT(lwp_locked(t, NULL));
269
270 KASSERT(curl != t || curl->l_stat == LSONPROC);
271
272 /*
273 * If the current LWP has been told to exit, we must not suspend anyone
274 * else or deadlock could occur. We won't return to userspace.
275 */
 276 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
277 lwp_unlock(t);
278 return (EDEADLK);
279 }
280
281 error = 0;
282
283 switch (t->l_stat) {
284 case LSRUN:
285 case LSONPROC:
286 t->l_flag |= LW_WSUSPEND;
287 lwp_need_userret(t);
288 lwp_unlock(t);
289 break;
290
291 case LSSLEEP:
292 t->l_flag |= LW_WSUSPEND;
293
294 /*
295 * Kick the LWP and try to get it to the kernel boundary
296 * so that it will release any locks that it holds.
297 * setrunnable() will release the lock.
298 */
299 if ((t->l_flag & LW_SINTR) != 0)
300 setrunnable(t);
301 else
302 lwp_unlock(t);
303 break;
304
305 case LSSUSPENDED:
306 lwp_unlock(t);
307 break;
308
309 case LSSTOP:
310 t->l_flag |= LW_WSUSPEND;
311 setrunnable(t);
312 break;
313
314 case LSIDL:
315 case LSZOMB:
316 error = EINTR; /* It's what Solaris does..... */
317 lwp_unlock(t);
318 break;
319 }
320
321 return (error);
322 }
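
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): the caller-side protocol for lwp_suspend(), roughly as a system
 * call such as _lwp_suspend() might use it.  The helper name, the use of
 * lwp_find() and the error handling are assumptions for illustration.
 */
#if 0
static int
example_suspend_lwp(struct lwp *curl, lwpid_t target_lid)
{
	struct proc *p = curl->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target_lid)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}
	lwp_lock(t);
	error = lwp_suspend(curl, t);	/* unlocks t before returning */
	mutex_exit(&p->p_smutex);

	return error;
}
#endif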
323
324 /*
325 * Restart a suspended LWP.
326 *
327 * Must be called with p_smutex held, and the LWP locked. Will unlock the
328 * LWP before return.
329 */
330 void
331 lwp_continue(struct lwp *l)
332 {
333
334 KASSERT(mutex_owned(&l->l_proc->p_smutex));
335 KASSERT(lwp_locked(l, NULL));
336
337 DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
338 l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
339 l->l_wchan));
340
341 /* If rebooting or not suspended, then just bail out. */
342 if ((l->l_flag & LW_WREBOOT) != 0) {
343 lwp_unlock(l);
344 return;
345 }
346
347 l->l_flag &= ~LW_WSUSPEND;
348
349 if (l->l_stat != LSSUSPENDED) {
350 lwp_unlock(l);
351 return;
352 }
353
354 /* setrunnable() will release the lock. */
355 setrunnable(l);
356 }
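
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): the caller-side protocol for lwp_continue(), mirroring the
 * lwp_suspend() example above.  The helper name is hypothetical.
 */
#if 0
static void
example_continue_lwp(struct proc *p, lwpid_t target_lid)
{
	struct lwp *t;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target_lid)) != NULL) {
		lwp_lock(t);
		lwp_continue(t);	/* unlocks t before returning */
	}
	mutex_exit(&p->p_smutex);
}
#endif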
357
358 /*
359 * Wait for an LWP within the current process to exit. If 'lid' is
360 * non-zero, we are waiting for a specific LWP.
361 *
362 * Must be called with p->p_smutex held.
363 */
364 int
365 lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
366 {
367 struct proc *p = l->l_proc;
368 struct lwp *l2;
369 int nfound, error;
370 lwpid_t curlid;
371 bool exiting;
372
373 DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
374 p->p_pid, l->l_lid, lid));
375
376 KASSERT(mutex_owned(&p->p_smutex));
377
378 p->p_nlwpwait++;
379 l->l_waitingfor = lid;
380 curlid = l->l_lid;
381 exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
382
383 for (;;) {
384 /*
385 * Avoid a race between exit1() and sigexit(): if the
386 * process is dumping core, then we need to bail out: call
387 * into lwp_userret() where we will be suspended until the
388 * deed is done.
389 */
390 if ((p->p_sflag & PS_WCORE) != 0) {
391 mutex_exit(&p->p_smutex);
392 lwp_userret(l);
393 #ifdef DIAGNOSTIC
394 panic("lwp_wait1");
395 #endif
396 /* NOTREACHED */
397 }
398
399 /*
400 * First off, drain any detached LWP that is waiting to be
401 * reaped.
402 */
403 while ((l2 = p->p_zomblwp) != NULL) {
404 p->p_zomblwp = NULL;
405 lwp_free(l2, false, false);/* releases proc mutex */
406 mutex_enter(&p->p_smutex);
407 }
408
409 /*
410 * Now look for an LWP to collect. If the whole process is
411 * exiting, count detached LWPs as eligible to be collected,
412 * but don't drain them here.
413 */
414 nfound = 0;
415 error = 0;
416 LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
417 /*
418 * If a specific wait and the target is waiting on
419 * us, then avoid deadlock. This also traps LWPs
420 * that try to wait on themselves.
421 *
422 * Note that this does not handle more complicated
423 * cycles, like: t1 -> t2 -> t3 -> t1. The process
424 * can still be killed so it is not a major problem.
425 */
426 if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
427 error = EDEADLK;
428 break;
429 }
430 if (l2 == l)
431 continue;
432 if ((l2->l_prflag & LPR_DETACHED) != 0) {
433 nfound += exiting;
434 continue;
435 }
436 if (lid != 0) {
437 if (l2->l_lid != lid)
438 continue;
439 /*
440 * Mark this LWP as the first waiter, if there
441 * is no other.
442 */
443 if (l2->l_waiter == 0)
444 l2->l_waiter = curlid;
445 } else if (l2->l_waiter != 0) {
446 /*
447 * It already has a waiter - so don't
448 * collect it. If the waiter doesn't
449 * grab it we'll get another chance
450 * later.
451 */
452 nfound++;
453 continue;
454 }
455 nfound++;
456
457 /* No need to lock the LWP in order to see LSZOMB. */
458 if (l2->l_stat != LSZOMB)
459 continue;
460
461 /*
462 * We're no longer waiting. Reset the "first waiter"
463 * pointer on the target, in case it was us.
464 */
465 l->l_waitingfor = 0;
466 l2->l_waiter = 0;
467 p->p_nlwpwait--;
468 if (departed)
469 *departed = l2->l_lid;
470
471 /* lwp_free() releases the proc lock. */
472 lwp_free(l2, false, false);
473 mutex_enter(&p->p_smutex);
474 return 0;
475 }
476
477 if (error != 0)
478 break;
479 if (nfound == 0) {
480 error = ESRCH;
481 break;
482 }
483
484 /*
485 * The kernel is careful to ensure that it can not deadlock
486 * when exiting - just keep waiting.
487 */
488 if (exiting) {
489 KASSERT(p->p_nlwps > 1);
490 cv_wait(&p->p_lwpcv, &p->p_smutex);
491 continue;
492 }
493
494 /*
495 * If all other LWPs are waiting for exits or suspends
496 * and the supply of zombies and potential zombies is
497 * exhausted, then we are about to deadlock.
498 *
499 * If the process is exiting (and this LWP is not the one
500 * that is coordinating the exit) then bail out now.
501 */
502 if ((p->p_sflag & PS_WEXIT) != 0 ||
503 p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
504 error = EDEADLK;
505 break;
506 }
507
508 /*
509 * Sit around and wait for something to happen. We'll be
510 * awoken if any of the conditions examined change: if an
511 * LWP exits, is collected, or is detached.
512 */
513 if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
514 break;
515 }
516
517 /*
518 * We didn't find any LWPs to collect, we may have received a
519 * signal, or some other condition has caused us to bail out.
520 *
521 * If waiting on a specific LWP, clear the waiters marker: some
522 * other LWP may want it. Then, kick all the remaining waiters
523 * so that they can re-check for zombies and for deadlock.
524 */
525 if (lid != 0) {
526 LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
527 if (l2->l_lid == lid) {
528 if (l2->l_waiter == curlid)
529 l2->l_waiter = 0;
530 break;
531 }
532 }
533 }
534 p->p_nlwpwait--;
535 l->l_waitingfor = 0;
536 cv_broadcast(&p->p_lwpcv);
537
538 return error;
539 }
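
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): how a caller such as the _lwp_wait() system call might drive
 * lwp_wait1(), honouring the requirement above that p->p_smutex be held.
 * The helper name is hypothetical.
 */
#if 0
static int
example_wait_for_lwp(struct lwp *l, lwpid_t lid, lwpid_t *departedp)
{
	struct proc *p = l->l_proc;
	int error;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, lid, departedp, 0);
	mutex_exit(&p->p_smutex);

	return error;
}
#endif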
540
541 /*
542 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
543 * The new LWP is created in state LSIDL and must be set running,
544 * suspended, or stopped by the caller.
545 */
546 int
547 newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, bool inmem,
548 int flags, void *stack, size_t stacksize,
549 void (*func)(void *), void *arg, struct lwp **rnewlwpp)
550 {
551 struct lwp *l2, *isfree;
552 turnstile_t *ts;
553
554 /*
555 * First off, reap any detached LWP waiting to be collected.
556 * We can re-use its LWP structure and turnstile.
557 */
558 isfree = NULL;
559 if (p2->p_zomblwp != NULL) {
560 mutex_enter(&p2->p_smutex);
561 if ((isfree = p2->p_zomblwp) != NULL) {
562 p2->p_zomblwp = NULL;
563 lwp_free(isfree, true, false);/* releases proc mutex */
564 } else
565 mutex_exit(&p2->p_smutex);
566 }
567 if (isfree == NULL) {
568 l2 = pool_get(&lwp_pool, PR_WAITOK);
569 memset(l2, 0, sizeof(*l2));
570 l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
571 SLIST_INIT(&l2->l_pi_lenders);
572 } else {
573 l2 = isfree;
574 ts = l2->l_ts;
575 KASSERT(l2->l_inheritedprio == MAXPRI);
576 KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
577 memset(l2, 0, sizeof(*l2));
578 l2->l_ts = ts;
579 }
580
581 l2->l_stat = LSIDL;
582 l2->l_proc = p2;
583 l2->l_refcnt = 1;
584 l2->l_priority = l1->l_priority;
585 l2->l_usrpri = l1->l_usrpri;
586 l2->l_inheritedprio = MAXPRI;
587 l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
588 l2->l_cpu = l1->l_cpu;
589 l2->l_flag = inmem ? LW_INMEM : 0;
590 lwp_initspecific(l2);
591 sched_lwp_fork(l2);
592
593 if (p2->p_flag & PK_SYSTEM) {
594 /*
595 * Mark it as a system process and not a candidate for
596 * swapping.
597 */
598 l2->l_flag |= LW_SYSTEM;
599 }
600
601 lwp_update_creds(l2);
602 callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
603 callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
604 mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
605 cv_init(&l2->l_sigcv, "sigwait");
606 l2->l_syncobj = &sched_syncobj;
607
608 if (rnewlwpp != NULL)
609 *rnewlwpp = l2;
610
611 l2->l_addr = UAREA_TO_USER(uaddr);
612 uvm_lwp_fork(l1, l2, stack, stacksize, func,
613 (arg != NULL) ? arg : l2);
614
615 mutex_enter(&p2->p_smutex);
616
617 if ((flags & LWP_DETACHED) != 0) {
618 l2->l_prflag = LPR_DETACHED;
619 p2->p_ndlwps++;
620 } else
621 l2->l_prflag = 0;
622
623 l2->l_sigmask = l1->l_sigmask;
624 CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
625 sigemptyset(&l2->l_sigpend.sp_set);
626
627 p2->p_nlwpid++;
628 if (p2->p_nlwpid == 0)
629 p2->p_nlwpid++;
630 l2->l_lid = p2->p_nlwpid;
631 LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
632 p2->p_nlwps++;
633
634 mutex_exit(&p2->p_smutex);
635
636 mutex_enter(&proclist_lock);
637 mutex_enter(&proclist_mutex);
638 LIST_INSERT_HEAD(&alllwp, l2, l_list);
639 mutex_exit(&proclist_mutex);
640 mutex_exit(&proclist_lock);
641
642 SYSCALL_TIME_LWP_INIT(l2);
643
644 if (p2->p_emul->e_lwp_fork)
645 (*p2->p_emul->e_lwp_fork)(l1, l2);
646
647 return (0);
648 }
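
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): making an LWP returned by newlwp() runnable.  newlwp() leaves
 * the new LWP in LSIDL, so the creator must lock it, account for it and
 * hand it to the scheduler.  Obtaining the u-area (uaddr/inmem) is omitted
 * here, and sched_enqueue() is assumed to be the scheduler's enqueue hook;
 * both the helper name and that call are assumptions for illustration.
 */
#if 0
static int
example_create_running_lwp(struct lwp *l, vaddr_t uaddr, bool inmem,
    void (*func)(void *), void *arg)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int error;

	error = newlwp(l, p, uaddr, inmem, 0, NULL, 0, func, arg, &l2);
	if (error != 0)
		return error;

	mutex_enter(&p->p_smutex);
	lwp_lock(l2);			/* takes the lock newlwp() installed */
	l2->l_stat = LSRUN;
	p->p_nrlwps++;			/* it now counts as running/runnable */
	sched_enqueue(l2, false);	/* assumed scheduler hook */
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	return 0;
}
#endif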
649
650 /*
651 * Called by MD code when a new LWP begins execution. Must be called
652 * with the previous LWP locked (so at splsched), or if there is no
653 * previous LWP, at splsched.
654 */
655 void
656 lwp_startup(struct lwp *prev, struct lwp *new)
657 {
658
659 curlwp = new;
660 if (prev != NULL) {
661 lwp_unlock(prev);
662 }
663 spl0();
664 pmap_activate(new);
665 LOCKDEBUG_BARRIER(NULL, 0);
666 if ((new->l_pflag & LP_MPSAFE) == 0) {
667 KERNEL_LOCK(1, new);
668 }
669 }
670
671 /*
672 * Exit an LWP.
673 */
674 void
675 lwp_exit(struct lwp *l)
676 {
677 struct proc *p = l->l_proc;
678 struct lwp *l2;
679 bool current;
680
681 current = (l == curlwp);
682
683 DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
684 DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
685 KASSERT(current || l->l_stat == LSIDL);
686
687 /*
688 * Verify that we hold no locks other than the kernel lock.
689 */
690 #ifdef MULTIPROCESSOR
691 LOCKDEBUG_BARRIER(&kernel_lock, 0);
692 #else
693 LOCKDEBUG_BARRIER(NULL, 0);
694 #endif
695
696 /*
697 * If we are the last live LWP in a process, we need to exit the
698 * entire process. We do so with an exit status of zero, because
699 * it's a "controlled" exit, and because that's what Solaris does.
700 *
701 * We are not quite a zombie yet, but for accounting purposes we
702 * must increment the count of zombies here.
703 *
704 * Note: the last LWP's specificdata will be deleted here.
705 */
706 mutex_enter(&p->p_smutex);
707 if (p->p_nlwps - p->p_nzlwps == 1) {
708 KASSERT(current == true);
709 DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
710 p->p_pid, l->l_lid));
711 exit1(l, 0);
712 /* NOTREACHED */
713 }
714 p->p_nzlwps++;
715 mutex_exit(&p->p_smutex);
716
717 if (p->p_emul->e_lwp_exit)
718 (*p->p_emul->e_lwp_exit)(l);
719
720 /* Delete the specificdata while it's still safe to sleep. */
721 specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
722
723 /*
724 * Release our cached credentials.
725 */
726 kauth_cred_free(l->l_cred);
727 callout_destroy(&l->l_timeout_ch);
728
729 /*
730 * While we can still block, mark the LWP as unswappable to
 731 * prevent conflicts with the swapper.
732 */
733 if (current)
734 uvm_lwp_hold(l);
735
736 /*
737 * Remove the LWP from the global list.
738 */
739 mutex_enter(&proclist_lock);
740 mutex_enter(&proclist_mutex);
741 LIST_REMOVE(l, l_list);
742 mutex_exit(&proclist_mutex);
743 mutex_exit(&proclist_lock);
744
745 /*
746 * Get rid of all references to the LWP that others (e.g. procfs)
747 * may have, and mark the LWP as a zombie. If the LWP is detached,
748 * mark it waiting for collection in the proc structure. Note that
 749 * before we can do that, we need to free any other dead, detached
750 * LWP waiting to meet its maker.
751 *
752 * XXXSMP disable preemption.
753 */
754 mutex_enter(&p->p_smutex);
755 lwp_drainrefs(l);
756
757 if ((l->l_prflag & LPR_DETACHED) != 0) {
758 while ((l2 = p->p_zomblwp) != NULL) {
759 p->p_zomblwp = NULL;
760 lwp_free(l2, false, false);/* releases proc mutex */
761 mutex_enter(&p->p_smutex);
762 }
763 p->p_zomblwp = l;
764 }
765
766 /*
767 * If we find a pending signal for the process and we have been
 768 * asked to check for signals, then we lose: arrange to have
769 * all other LWPs in the process check for signals.
770 */
771 if ((l->l_flag & LW_PENDSIG) != 0 &&
772 firstsig(&p->p_sigpend.sp_set) != 0) {
773 LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
774 lwp_lock(l2);
775 l2->l_flag |= LW_PENDSIG;
776 lwp_unlock(l2);
777 }
778 }
779
780 lwp_lock(l);
781 l->l_stat = LSZOMB;
782 lwp_unlock(l);
783 p->p_nrlwps--;
784 cv_broadcast(&p->p_lwpcv);
785 mutex_exit(&p->p_smutex);
786
787 /*
788 * We can no longer block. At this point, lwp_free() may already
789 * be gunning for us. On a multi-CPU system, we may be off p_lwps.
790 *
791 * Free MD LWP resources.
792 */
793 #ifndef __NO_CPU_LWP_FREE
794 cpu_lwp_free(l, 0);
795 #endif
796
797 if (current) {
798 pmap_deactivate(l);
799
800 /*
801 * Release the kernel lock, and switch away into
802 * oblivion.
803 */
804 #ifdef notyet
805 /* XXXSMP hold in lwp_userret() */
806 KERNEL_UNLOCK_LAST(l);
807 #else
808 KERNEL_UNLOCK_ALL(l, NULL);
809 #endif
810 lwp_exit_switchaway(l);
811 }
812 }
813
814 void
815 lwp_exit_switchaway(struct lwp *l)
816 {
817 struct cpu_info *ci;
818 struct lwp *idlelwp;
819
820 /* Unlocked, but is for statistics only. */
821 uvmexp.swtch++;
822
823 (void)splsched();
824 l->l_flag &= ~LW_RUNNING;
825 ci = curcpu();
826 idlelwp = ci->ci_data.cpu_idlelwp;
827 idlelwp->l_stat = LSONPROC;
828 cpu_switchto(NULL, idlelwp);
829 }
830
831 /*
832 * Free a dead LWP's remaining resources.
833 *
834 * XXXLWP limits.
835 */
836 void
837 lwp_free(struct lwp *l, bool recycle, bool last)
838 {
839 struct proc *p = l->l_proc;
840 ksiginfoq_t kq;
841
842 /*
843 * If this was not the last LWP in the process, then adjust
844 * counters and unlock.
845 */
846 if (!last) {
847 /*
848 * Add the LWP's run time to the process' base value.
 849 * This needs to coincide with coming off p_lwps.
850 */
851 timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
852 p->p_pctcpu += l->l_pctcpu;
853 LIST_REMOVE(l, l_sibling);
854 p->p_nlwps--;
855 p->p_nzlwps--;
856 if ((l->l_prflag & LPR_DETACHED) != 0)
857 p->p_ndlwps--;
858
859 /*
860 * Have any LWPs sleeping in lwp_wait() recheck for
861 * deadlock.
862 */
863 cv_broadcast(&p->p_lwpcv);
864 mutex_exit(&p->p_smutex);
865 }
866
867 #ifdef MULTIPROCESSOR
868 /*
869 * In the unlikely event that the LWP is still on the CPU,
870 * then spin until it has switched away. We need to release
871 * all locks to avoid deadlock against interrupt handlers on
872 * the target CPU.
873 */
874 if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
875 int count;
876 (void)count; /* XXXgcc */
877 KERNEL_UNLOCK_ALL(curlwp, &count);
878 while ((l->l_flag & LW_RUNNING) != 0 ||
879 l->l_cpu->ci_curlwp == l)
880 SPINLOCK_BACKOFF_HOOK;
881 KERNEL_LOCK(count, curlwp);
882 }
883 #endif
884
885 /*
886 * Destroy the LWP's remaining signal information.
887 */
888 ksiginfo_queue_init(&kq);
889 sigclear(&l->l_sigpend, NULL, &kq);
890 ksiginfo_queue_drain(&kq);
891 cv_destroy(&l->l_sigcv);
892 mutex_destroy(&l->l_swaplock);
893
894 /*
895 * Free the LWP's turnstile and the LWP structure itself unless the
896 * caller wants to recycle them. Also, free the scheduler specific data.
897 *
898 * We can't return turnstile0 to the pool (it didn't come from it),
899 * so if it comes up just drop it quietly and move on.
900 *
901 * We don't recycle the VM resources at this time.
902 */
903 KERNEL_LOCK(1, curlwp); /* XXXSMP */
904
905 sched_lwp_exit(l);
906
907 if (!recycle && l->l_ts != &turnstile0)
908 pool_cache_put(&turnstile_cache, l->l_ts);
909 #ifndef __NO_CPU_LWP_FREE
910 cpu_lwp_free2(l);
911 #endif
912 uvm_lwp_exit(l);
913 KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
914 KASSERT(l->l_inheritedprio == MAXPRI);
915 if (!recycle)
916 pool_put(&lwp_pool, l);
917 KERNEL_UNLOCK_ONE(curlwp); /* XXXSMP */
918 }
919
920 /*
 921 * Pick an LWP to represent the process for those operations which
 922 * want information about a "process" that is actually associated
 923 * with an LWP.
924 *
925 * If 'locking' is false, no locking or lock checks are performed.
926 * This is intended for use by DDB.
927 *
928 * We don't bother locking the LWP here, since code that uses this
929 * interface is broken by design and an exact match is not required.
930 */
931 struct lwp *
932 proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
933 {
934 struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
935 struct lwp *signalled;
936 int cnt;
937
938 if (locking) {
939 KASSERT(mutex_owned(&p->p_smutex));
940 }
941
942 /* Trivial case: only one LWP */
943 if (p->p_nlwps == 1) {
944 l = LIST_FIRST(&p->p_lwps);
945 if (nrlwps)
946 *nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
947 return l;
948 }
949
950 cnt = 0;
951 switch (p->p_stat) {
952 case SSTOP:
953 case SACTIVE:
954 /* Pick the most live LWP */
955 onproc = running = sleeping = stopped = suspended = NULL;
956 signalled = NULL;
957 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
958 if ((l->l_flag & LW_IDLE) != 0) {
959 continue;
960 }
961 if (l->l_lid == p->p_sigctx.ps_lwp)
962 signalled = l;
963 switch (l->l_stat) {
964 case LSONPROC:
965 onproc = l;
966 cnt++;
967 break;
968 case LSRUN:
969 running = l;
970 cnt++;
971 break;
972 case LSSLEEP:
973 sleeping = l;
974 break;
975 case LSSTOP:
976 stopped = l;
977 break;
978 case LSSUSPENDED:
979 suspended = l;
980 break;
981 }
982 }
983 if (nrlwps)
984 *nrlwps = cnt;
985 if (signalled)
986 l = signalled;
987 else if (onproc)
988 l = onproc;
989 else if (running)
990 l = running;
991 else if (sleeping)
992 l = sleeping;
993 else if (stopped)
994 l = stopped;
995 else if (suspended)
996 l = suspended;
997 else
998 break;
999 return l;
1000 #ifdef DIAGNOSTIC
1001 case SIDL:
1002 case SZOMB:
1003 case SDYING:
1004 case SDEAD:
1005 if (locking)
1006 mutex_exit(&p->p_smutex);
1007 /* We have more than one LWP and we're in SIDL?
1008 * How'd that happen?
1009 */
1010 panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
1011 p->p_pid, p->p_comm, p->p_stat);
1012 break;
1013 default:
1014 if (locking)
1015 mutex_exit(&p->p_smutex);
1016 panic("Process %d (%s) in unknown state %d",
1017 p->p_pid, p->p_comm, p->p_stat);
1018 #endif
1019 }
1020
1021 if (locking)
1022 mutex_exit(&p->p_smutex);
1023 panic("proc_representative_lwp: couldn't find a lwp for process"
1024 " %d (%s)", p->p_pid, p->p_comm);
1025 /* NOTREACHED */
1026 return NULL;
1027 }
1028
1029 /*
 1030 * Look up a live LWP within the specified process.
1031 *
1032 * Must be called with p->p_smutex held.
1033 */
1034 struct lwp *
1035 lwp_find(struct proc *p, int id)
1036 {
1037 struct lwp *l;
1038
1039 KASSERT(mutex_owned(&p->p_smutex));
1040
1041 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1042 if (l->l_lid == id)
1043 break;
1044 }
1045
1046 /*
1047 * No need to lock - all of these conditions will
1048 * be visible with the process level mutex held.
1049 */
1050 if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1051 l = NULL;
1052
1053 return l;
1054 }
1055
1056 /*
1057 * Update an LWP's cached credentials to mirror the process' master copy.
1058 *
1059 * This happens early in the syscall path, on user trap, and on LWP
1060 * creation. A long-running LWP can also voluntarily choose to update
 1061 * its credentials by calling this routine. This may be called from
1062 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1063 */
1064 void
1065 lwp_update_creds(struct lwp *l)
1066 {
1067 kauth_cred_t oc;
1068 struct proc *p;
1069
1070 p = l->l_proc;
1071 oc = l->l_cred;
1072
1073 mutex_enter(&p->p_mutex);
1074 kauth_cred_hold(p->p_cred);
1075 l->l_cred = p->p_cred;
1076 mutex_exit(&p->p_mutex);
1077 if (oc != NULL) {
1078 KERNEL_LOCK(1, l); /* XXXSMP */
1079 kauth_cred_free(oc);
1080 KERNEL_UNLOCK_ONE(l); /* XXXSMP */
1081 }
1082 }
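
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): the kind of check performed by LWP_CACHE_CREDS() on the syscall
 * and trap paths, as described above.  This is a paraphrase for
 * illustration, not the macro's actual definition.
 */
#if 0
	if (__predict_false(l->l_cred != p->p_cred))
		lwp_update_creds(l);
#endif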
1083
1084 /*
1085 * Verify that an LWP is locked, and optionally verify that the lock matches
1086 * one we specify.
1087 */
1088 int
1089 lwp_locked(struct lwp *l, kmutex_t *mtx)
1090 {
1091 kmutex_t *cur = l->l_mutex;
1092
1093 return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1094 }
1095
1096 /*
1097 * Lock an LWP.
1098 */
1099 void
1100 lwp_lock_retry(struct lwp *l, kmutex_t *old)
1101 {
1102
1103 /*
1104 * XXXgcc ignoring kmutex_t * volatile on i386
1105 *
1106 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
1107 */
1108 #if 1
1109 while (l->l_mutex != old) {
1110 #else
1111 for (;;) {
1112 #endif
1113 mutex_spin_exit(old);
1114 old = l->l_mutex;
1115 mutex_spin_enter(old);
1116
1117 /*
1118 * mutex_enter() will have posted a read barrier. Re-test
1119 * l->l_mutex. If it has changed, we need to try again.
1120 */
1121 #if 1
1122 }
1123 #else
1124 } while (__predict_false(l->l_mutex != old));
1125 #endif
1126 }
1127
1128 /*
1129 * Lend a new mutex to an LWP. The old mutex must be held.
1130 */
1131 void
1132 lwp_setlock(struct lwp *l, kmutex_t *new)
1133 {
1134
1135 KASSERT(mutex_owned(l->l_mutex));
1136
1137 mb_write();
1138 l->l_mutex = new;
1139 }
1140
1141 /*
1142 * Lend a new mutex to an LWP, and release the old mutex. The old mutex
1143 * must be held.
1144 */
1145 void
1146 lwp_unlock_to(struct lwp *l, kmutex_t *new)
1147 {
1148 kmutex_t *old;
1149
1150 KASSERT(mutex_owned(l->l_mutex));
1151
1152 old = l->l_mutex;
1153 mb_write();
1154 l->l_mutex = new;
1155 mutex_spin_exit(old);
1156 }
1157
1158 /*
1159 * Acquire a new mutex, and donate it to an LWP. The LWP must already be
1160 * locked.
1161 */
1162 void
1163 lwp_relock(struct lwp *l, kmutex_t *new)
1164 {
1165 kmutex_t *old;
1166
1167 KASSERT(mutex_owned(l->l_mutex));
1168
1169 old = l->l_mutex;
1170 if (old != new) {
1171 mutex_spin_enter(new);
1172 l->l_mutex = new;
1173 mutex_spin_exit(old);
1174 }
1175 }
1176
1177 int
1178 lwp_trylock(struct lwp *l)
1179 {
1180 kmutex_t *old;
1181
1182 for (;;) {
1183 if (!mutex_tryenter(old = l->l_mutex))
1184 return 0;
1185 if (__predict_true(l->l_mutex == old))
1186 return 1;
1187 mutex_spin_exit(old);
1188 }
1189 }
1190
1191 /*
 1192 * Handle exceptions for mi_userret(). Called if any of the flags in the
 1193 * LW_USERRET mask is set.
1194 */
1195 void
1196 lwp_userret(struct lwp *l)
1197 {
1198 struct proc *p;
1199 void (*hook)(void);
1200 int sig;
1201
1202 p = l->l_proc;
1203
1204 /*
1205 * It should be safe to do this read unlocked on a multiprocessor
1206 * system..
1207 */
1208 while ((l->l_flag & LW_USERRET) != 0) {
1209 /*
1210 * Process pending signals first, unless the process
 1211 * is dumping core or exiting, in which case we will instead
 1212 * enter the LW_WSUSPEND case below.
1213 */
1214 if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1215 LW_PENDSIG) {
1216 KERNEL_LOCK(1, l); /* XXXSMP pool_put() below */
1217 mutex_enter(&p->p_smutex);
1218 while ((sig = issignal(l)) != 0)
1219 postsig(sig);
1220 mutex_exit(&p->p_smutex);
1221 KERNEL_UNLOCK_LAST(l); /* XXXSMP */
1222 }
1223
1224 /*
1225 * Core-dump or suspend pending.
1226 *
1227 * In case of core dump, suspend ourselves, so that the
1228 * kernel stack and therefore the userland registers saved
1229 * in the trapframe are around for coredump() to write them
1230 * out. We issue a wakeup on p->p_lwpcv so that sigexit()
1231 * will write the core file out once all other LWPs are
1232 * suspended.
1233 */
1234 if ((l->l_flag & LW_WSUSPEND) != 0) {
1235 mutex_enter(&p->p_smutex);
1236 p->p_nrlwps--;
1237 cv_broadcast(&p->p_lwpcv);
1238 lwp_lock(l);
1239 l->l_stat = LSSUSPENDED;
1240 mutex_exit(&p->p_smutex);
1241 mi_switch(l);
1242 }
1243
1244 /* Process is exiting. */
1245 if ((l->l_flag & LW_WEXIT) != 0) {
1246 KERNEL_LOCK(1, l);
1247 lwp_exit(l);
1248 KASSERT(0);
1249 /* NOTREACHED */
1250 }
1251
1252 /* Call userret hook; used by Linux emulation. */
1253 if ((l->l_flag & LW_WUSERRET) != 0) {
1254 lwp_lock(l);
1255 l->l_flag &= ~LW_WUSERRET;
1256 lwp_unlock(l);
1257 hook = p->p_userret;
1258 p->p_userret = NULL;
1259 (*hook)();
1260 }
1261 }
1262 }
1263
1264 /*
1265 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1266 */
1267 void
1268 lwp_need_userret(struct lwp *l)
1269 {
1270 KASSERT(lwp_locked(l, NULL));
1271
1272 /*
1273 * Since the tests in lwp_userret() are done unlocked, make sure
1274 * that the condition will be seen before forcing the LWP to enter
1275 * kernel mode.
1276 */
1277 mb_write();
1278 cpu_signotify(l);
1279 }
1280
1281 /*
1282 * Add one reference to an LWP. This will prevent the LWP from
 1283 * exiting, thus keeping the LWP structure and PCB around to inspect.
1284 */
1285 void
1286 lwp_addref(struct lwp *l)
1287 {
1288
1289 KASSERT(mutex_owned(&l->l_proc->p_smutex));
1290 KASSERT(l->l_stat != LSZOMB);
1291 KASSERT(l->l_refcnt != 0);
1292
1293 l->l_refcnt++;
1294 }
1295
1296 /*
1297 * Remove one reference to an LWP. If this is the last reference,
1298 * then we must finalize the LWP's death.
1299 */
1300 void
1301 lwp_delref(struct lwp *l)
1302 {
1303 struct proc *p = l->l_proc;
1304
1305 mutex_enter(&p->p_smutex);
1306 if (--l->l_refcnt == 0)
1307 cv_broadcast(&p->p_refcv);
1308 mutex_exit(&p->p_smutex);
1309 }
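
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): holding a reference on an LWP so that it can be examined after
 * p_smutex is dropped, in the style of code such as procfs.  The helper
 * name and the inspection step are placeholders.
 */
#if 0
static void
example_inspect_lwp(struct proc *p, lwpid_t lid)
{
	struct lwp *l;

	mutex_enter(&p->p_smutex);
	if ((l = lwp_find(p, lid)) == NULL) {
		mutex_exit(&p->p_smutex);
		return;
	}
	lwp_addref(l);		/* keeps the LWP and its PCB around */
	mutex_exit(&p->p_smutex);

	/* ... inspect the LWP without p_smutex held ... */

	lwp_delref(l);		/* wakes any waiter in lwp_drainrefs() */
}
#endif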
1310
1311 /*
1312 * Drain all references to the current LWP.
1313 */
1314 void
1315 lwp_drainrefs(struct lwp *l)
1316 {
1317 struct proc *p = l->l_proc;
1318
1319 KASSERT(mutex_owned(&p->p_smutex));
1320 KASSERT(l->l_refcnt != 0);
1321
1322 l->l_refcnt--;
1323 while (l->l_refcnt != 0)
1324 cv_wait(&p->p_refcv, &p->p_smutex);
1325 }
1326
1327 /*
1328 * lwp_specific_key_create --
1329 * Create a key for subsystem lwp-specific data.
1330 */
1331 int
1332 lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
1333 {
1334
1335 return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
1336 }
1337
1338 /*
1339 * lwp_specific_key_delete --
1340 * Delete a key for subsystem lwp-specific data.
1341 */
1342 void
1343 lwp_specific_key_delete(specificdata_key_t key)
1344 {
1345
1346 specificdata_key_delete(lwp_specificdata_domain, key);
1347 }
1348
1349 /*
1350 * lwp_initspecific --
1351 * Initialize an LWP's specificdata container.
1352 */
1353 void
1354 lwp_initspecific(struct lwp *l)
1355 {
1356 int error;
1357
1358 error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
1359 KASSERT(error == 0);
1360 }
1361
1362 /*
1363 * lwp_finispecific --
1364 * Finalize an LWP's specificdata container.
1365 */
1366 void
1367 lwp_finispecific(struct lwp *l)
1368 {
1369
1370 specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
1371 }
1372
1373 /*
1374 * lwp_getspecific --
1375 * Return lwp-specific data corresponding to the specified key.
1376 *
1377 * Note: LWP specific data is NOT INTERLOCKED. An LWP should access
1378 * only its OWN SPECIFIC DATA. If it is necessary to access another
 1379 * LWP's specific data, care must be taken to ensure that doing so
1380 * would not cause internal data structure inconsistency (i.e. caller
1381 * can guarantee that the target LWP is not inside an lwp_getspecific()
1382 * or lwp_setspecific() call).
1383 */
1384 void *
1385 lwp_getspecific(specificdata_key_t key)
1386 {
1387
1388 return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1389 &curlwp->l_specdataref, key));
1390 }
1391
1392 void *
1393 _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
1394 {
1395
1396 return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1397 &l->l_specdataref, key));
1398 }
1399
1400 /*
1401 * lwp_setspecific --
1402 * Set lwp-specific data corresponding to the specified key.
1403 */
1404 void
1405 lwp_setspecific(specificdata_key_t key, void *data)
1406 {
1407
1408 specificdata_setspecific(lwp_specificdata_domain,
1409 &curlwp->l_specdataref, key, data);
1410 }
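
/*
 * Illustrative sketch (not part of the original file; excluded from the
 * build): typical use of the lwp-specific data interfaces above by a
 * hypothetical subsystem.  The key variable, destructor, payload type and
 * function names are assumptions for illustration.
 */
#if 0
static specificdata_key_t example_key;

static void
example_dtor(void *data)
{

	kmem_free(data, sizeof(int));
}

static void
example_subsystem_init(void)
{
	int error;

	error = lwp_specific_key_create(&example_key, example_dtor);
	KASSERT(error == 0);
}

static void
example_mark_curlwp(void)
{
	int *data;

	data = kmem_alloc(sizeof(*data), KM_SLEEP);
	*data = 1;
	lwp_setspecific(example_key, data);	/* visible to curlwp only */
}
#endif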
1411