/*	$NetBSD: kern_lwp.c,v 1.70 2007/09/06 23:58:56 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit or thread of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp", also known as lwp_t.
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (L_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed,
 *		or it has ceased executing a unit of work and is waiting
 *		to be started again.
 *
 *	LSSUSPENDED:
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or may be
 *	sleeping or idle.  It is expected to take the necessary action to
 *	stop executing or become "running" again within a short timeframe.
 *	The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
 *	Importantly, it indicates that its state is tied to a CPU.
 *
 *	LSZOMB:
 *
 *		Dead or dying: the LWP has released most of its resources
 *		and is either a) about to switch away into oblivion, or b)
 *		has already switched away.  When it switches away, its few
 *		remaining resources can be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		has switched away or will switch away shortly to allow other
 *		LWPs to run on the CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *
 *		Single LWPs within a process can not be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP state may only be set when returning to
 *	user space in userret(), or when sleeping interruptibly.  The
 *	LSSUSPENDED state may only be set in userret().  Before setting
 *	those states, we try to ensure that the LWPs will release all
 *	locks that they hold, and at a minimum try to ensure that the
 *	LWP can be set runnable again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *		    > STOPPED			    > SLEEP
 *		    > SUSPENDED			    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *		    > SLEEP			    > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN			    > SUSPENDED
 *		    > STOPPED			    > STOPPED
 *		    > SUSPENDED
 *
 *	Other state transitions are possible with kernel threads (eg
 *	ONPROC -> IDL), but only happen under tightly controlled
 *	circumstances where the side effects are understood.
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin lock pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held,
 *	and may cause the LWP's lock pointer to change.  Manipulation of
 *	the general lock is not performed directly, but through calls to
 *	lwp_lock(), lwp_relock() and similar.
 *
 *	States and their associated locks:
 *
 *	LSIDL, LSZOMB, LSONPROC:
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock.
 *
 *	LSRUN:
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This may be a per-CPU lock, depending on the scheduler.
 *
 *	LSSLEEP:
 *
 *		Covered by a lock associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue lock.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the lock is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleepq_t::sq_mutex ->
 *			tschain_t::tc_mutex ->
 *			    spc::spc_mutex
 *
 *	Each process has a scheduler state lock (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
 */
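
/*
 * For illustration, a minimal sketch of the locking protocol described
 * above, as a hypothetical caller might follow it.  The helper routines
 * are the real ones declared in sys/lwp.h; the surrounding caller is an
 * assumption, not code from this file:
 *
 *	mutex_enter(&p->p_smutex);	// stabilize LWP list and counters
 *	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 *		lwp_lock(l);		// takes whichever lock currently
 *					// backs l->l_mutex
 *		... examine or update per-LWP state ...
 *		lwp_unlock(l);
 *	}
 *	mutex_exit(&p->p_smutex);
 */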

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.70 2007/09/06 23:58:56 ad Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;

POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr, IPL_NONE);

static specificdata_domain_t lwp_specificdata_domain;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(&t->l_proc->p_smutex));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR;	/* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	return (error);
}
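
/*
 * A minimal usage sketch for lwp_suspend(), following the locking rules
 * in the comment above.  The lookup via lwp_find() matches this file;
 * the surrounding caller is hypothetical:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);	// unlocks t before returning
 *	mutex_exit(&p->p_smutex);
 */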

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;
	lwpid_t curlid;
	bool exiting;

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nlwpwait++;
	l->l_waitingfor = lid;
	curlid = l->l_lid;
	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		error = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			/*
			 * If a specific wait and the target is waiting on
			 * us, then avoid deadlock.  This also traps LWPs
			 * that try to wait on themselves.
			 *
			 * Note that this does not handle more complicated
			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
			 * can still be killed so it is not a major problem.
			 */
			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
				error = EDEADLK;
				break;
			}
			if (l2 == l)
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += exiting;
				continue;
			}
			if (lid != 0) {
				if (l2->l_lid != lid)
					continue;
				/*
				 * Mark this LWP as the first waiter, if there
				 * is no other.
				 */
				if (l2->l_waiter == 0)
					l2->l_waiter = curlid;
			} else if (l2->l_waiter != 0) {
				/*
				 * It already has a waiter - so don't
				 * collect it.  If the waiter doesn't
				 * grab it we'll get another chance
				 * later.
				 */
				nfound++;
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			/*
			 * We're no longer waiting.  Reset the "first waiter"
			 * pointer on the target, in case it was us.
			 */
			l->l_waitingfor = 0;
			l2->l_waiter = 0;
			p->p_nlwpwait--;
			if (departed)
				*departed = l2->l_lid;

			/* lwp_free() releases the proc lock. */
			lwp_free(l2, false, false);
			mutex_enter(&p->p_smutex);
			return 0;
		}

		if (error != 0)
			break;
		if (nfound == 0) {
			error = ESRCH;
			break;
		}

		/*
		 * The kernel is careful to ensure that it can not deadlock
		 * when exiting - just keep waiting.
		 */
		if (exiting) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}

		/*
		 * If all other LWPs are waiting for exits or suspends
		 * and the supply of zombies and potential zombies is
		 * exhausted, then we are about to deadlock.
		 *
		 * If the process is exiting (and this LWP is not the one
		 * that is coordinating the exit) then bail out now.
		 */
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
			error = EDEADLK;
			break;
		}

		/*
		 * Sit around and wait for something to happen.  We'll be
		 * awoken if any of the conditions examined change: if an
		 * LWP exits, is collected, or is detached.
		 */
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	/*
	 * We didn't find any LWPs to collect, we may have received a
	 * signal, or some other condition has caused us to bail out.
	 *
	 * If waiting on a specific LWP, clear the waiters marker: some
	 * other LWP may want it.  Then, kick all the remaining waiters
	 * so that they can re-check for zombies and for deadlock.
	 */
	if (lid != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2->l_lid == lid) {
				if (l2->l_waiter == curlid)
					l2->l_waiter = 0;
				break;
			}
		}
	}
	p->p_nlwpwait--;
	l->l_waitingfor = 0;
	cv_broadcast(&p->p_lwpcv);

	return error;
}
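
/*
 * A hedged sketch of how a caller such as the _lwp_wait() syscall might
 * drive lwp_wait1().  The wrapper shown here is an assumption, not code
 * from this file:
 *
 *	mutex_enter(&p->p_smutex);
 *	error = lwp_wait1(l, target_lid, &departed, 0);
 *	mutex_exit(&p->p_smutex);
 *	if (error == 0 && departed_ptr != NULL)
 *		error = copyout(&departed, departed_ptr, sizeof(departed));
 */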

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, bool inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, true, false);	/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_get(&lwp_pool, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == MAXPRI);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_priority = l1->l_priority;
	l2->l_usrpri = l1->l_usrpri;
	l2->l_inheritedprio = MAXPRI;
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? LW_INMEM : 0;
	lwp_initspecific(l2);
	sched_lwp_fork(l2);

	if (p2->p_flag & PK_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= LW_SYSTEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&proclist_lock);
	mutex_enter(&proclist_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&proclist_mutex);
	mutex_exit(&proclist_lock);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
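
/*
 * For illustration, a rough sketch of how a caller creates and starts a
 * new LWP, modelled loosely on the _lwp_create() path.  The uarea
 * allocation and error handling here are simplified assumptions:
 *
 *	inmem = uvm_uarea_alloc(&uaddr);
 *	if (__predict_false(uaddr == 0))
 *		return ENOMEM;
 *	error = newlwp(curlwp, p, uaddr, inmem, 0, NULL, 0,
 *	    start_func, arg, &l2);
 *	if (error == 0) {
 *		mutex_enter(&p->p_smutex);
 *		lwp_lock(l2);
 *		setrunnable(l2);	// LSIDL -> LSRUN; releases l2's lock
 *		mutex_exit(&p->p_smutex);
 *	}
 */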

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{

	curlwp = new;
	if (prev != NULL) {
		lwp_unlock(prev);
	}
	spl0();
	pmap_activate(new);
	LOCKDEBUG_BARRIER(NULL, 0);
	if ((new->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new);
	}
}

/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	bool current;

	current = (l == curlwp);

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
	KASSERT(current || l->l_stat == LSIDL);

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		KASSERT(current == true);
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);
	callout_destroy(&l->l_timeout_ch);

	/*
	 * While we can still block, mark the LWP as unswappable to
	 * prevent conflicts with the swapper.
	 */
	if (current)
		uvm_lwp_hold(l);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(&proclist_lock);
	mutex_enter(&proclist_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&proclist_mutex);
	mutex_exit(&proclist_lock);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	if (current) {
		pmap_deactivate(l);

		/*
		 * Release the kernel lock, and switch away into
		 * oblivion.
		 */
#ifdef notyet
		/* XXXSMP hold in lwp_userret() */
		KERNEL_UNLOCK_LAST(l);
#else
		KERNEL_UNLOCK_ALL(l, NULL);
#endif
		lwp_exit_switchaway(l);
	}
}

void
lwp_exit_switchaway(struct lwp *l)
{
	struct cpu_info *ci;
	struct lwp *idlelwp;

	/* Unlocked, but this is for statistics only. */
	uvmexp.swtch++;

	(void)splsched();
	l->l_flag &= ~LW_RUNNING;
	ci = curcpu();
	idlelwp = ci->ci_data.cpu_idlelwp;
	idlelwp->l_stat = LSONPROC;
	cpu_switchto(NULL, idlelwp);
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
	struct proc *p = l->l_proc;
	ksiginfoq_t kq;

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
		p->p_pctcpu += l->l_pctcpu;
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;

		/*
		 * Have any LWPs sleeping in lwp_wait() recheck for
		 * deadlock.
		 */
		cv_broadcast(&p->p_lwpcv);
		mutex_exit(&p->p_smutex);
	}

#ifdef MULTIPROCESSOR
	/*
	 * In the unlikely event that the LWP is still on the CPU,
	 * then spin until it has switched away.  We need to release
	 * all locks to avoid deadlock against interrupt handlers on
	 * the target CPU.
	 */
	if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
		int count;
		(void)count; /* XXXgcc */
		KERNEL_UNLOCK_ALL(curlwp, &count);
		while ((l->l_flag & LW_RUNNING) != 0 ||
		    l->l_cpu->ci_curlwp == l)
			SPINLOCK_BACKOFF_HOOK;
		KERNEL_LOCK(count, curlwp);
	}
#endif

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);
	mutex_destroy(&l->l_swaplock);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.  Also, free the scheduler specific
	 * data.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	KERNEL_LOCK(1, curlwp);		/* XXXSMP */

	sched_lwp_exit(l);

	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(&turnstile_cache, l->l_ts);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	uvm_lwp_exit(l);
	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == MAXPRI);
	if (!recycle)
		pool_put(&lwp_pool, l);
	KERNEL_UNLOCK_ONE(curlwp);	/* XXXSMP */
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		KASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0) {
				continue;
			}
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SZOMB:
	case SDYING:
	case SDEAD:
		if (locking)
			mutex_exit(&p->p_smutex);
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Look up a live LWP within the specified process.  The returned LWP is
 * not locked; it is stable only while the caller continues to hold the
 * process level mutex.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}
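
/*
 * A minimal caller-side sketch for lwp_find().  The caller is
 * hypothetical; locking the result is the caller's job, as noted above:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, lid)) != NULL) {
 *		lwp_lock(t);
 *		... inspect or adjust t ...
 *		lwp_unlock(t);
 *	}
 *	mutex_exit(&p->p_smutex);
 */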

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_mutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_mutex);
	if (oc != NULL) {
		KERNEL_LOCK(1, l);	/* XXXSMP */
		kauth_cred_free(oc);
		KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
	}
}
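
/*
 * For reference, the usual way into the routine above is via the
 * LWP_CACHE_CREDS() macro from sys/lwp.h, which conceptually does:
 *
 *	if (l->l_cred != p->p_cred)
 *		lwp_update_creds(l);
 *
 * so the common case (credentials unchanged) stays cheap and lock-free.
 */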

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}

/*
 * Lock an LWP.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	for (;;) {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif
}
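
/*
 * A sketch of the fast path that lwp_lock_retry() backs up.  lwp_lock()
 * itself lives in sys/lwp.h; this is an illustrative approximation of
 * its shape, not a copy:
 *
 *	old = l->l_mutex;
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))
 *		lwp_lock_retry(l, old);	// lock pointer changed while we
 *					// were acquiring it
 */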

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	KASSERT(mutex_owned(l->l_mutex));

	mb_write();
	l->l_mutex = new;
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	mb_write();
	l->l_mutex = new;
	mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
}

int
lwp_trylock(struct lwp *l)
{
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
}

/*
 * Handle exceptions for mi_userret().  Called if any of the flags in
 * LW_USERRET is set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	void (*hook)(void);
	int sig;

	p = l->l_proc;

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 */
	while ((l->l_flag & LW_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core or exiting, where we will instead
		 * enter the LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
		    LW_PENDSIG) {
			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() below */
			mutex_enter(&p->p_smutex);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(&p->p_smutex);
			KERNEL_UNLOCK_LAST(l);	/* XXXSMP */
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			mutex_enter(&p->p_smutex);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			mutex_exit(&p->p_smutex);
			mi_switch(l);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			KERNEL_LOCK(1, l);
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* Call userret hook; used by Linux emulation. */
		if ((l->l_flag & LW_WUSERRET) != 0) {
			lwp_lock(l);
			l->l_flag &= ~LW_WUSERRET;
			lwp_unlock(l);
			hook = p->p_userret;
			p->p_userret = NULL;
			(*hook)();
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	mb_write();
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_refcv);
	mutex_exit(&p->p_smutex);
}
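
/*
 * A typical (hypothetical) consumer pattern for the reference interface,
 * e.g. something procfs-like that must inspect an LWP across a sleep:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(&p->p_smutex);
 *		return ESRCH;
 *	}
 *	lwp_addref(t);			// t can no longer be freed
 *	mutex_exit(&p->p_smutex);
 *	... inspect t, possibly sleeping ...
 *	lwp_delref(t);			// may finalize t's death
 */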

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_refcv, &p->p_smutex);
}

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key, data);
}
}
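
/*
 * A hedged usage sketch for the lwp-specific data interface.  The
 * subsystem, key variable, destructor and payload type shown here are
 * hypothetical:
 *
 *	static specificdata_key_t example_key;
 *
 *	// once, at subsystem initialization; the dtor may be NULL
 *	error = lwp_specific_key_create(&example_key, example_dtor);
 *
 *	// from the owning LWP only, per the interlocking note above
 *	lwp_setspecific(example_key,
 *	    kmem_alloc(sizeof(struct example), KM_SLEEP));
 *	ep = lwp_getspecific(example_key);
 */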