/*	$NetBSD: kern_lwp.c,v 1.60 2007/02/26 09:20:53 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit (or thread) of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp".
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing in the kernel simultaneously.
 *
 *	Note that LWPs differ from kernel threads (kthreads): kernel threads
 *	are distinct processes (system processes) with no user space
 *	component, and may themselves contain one or more LWPs.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed.
 *		Whoever created the new LWP can be expected to set it to
 *		another state shortly.
 *
 *	LSSUSPENDED:
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, may be
 *	sleeping, idle, or on a run queue.  It is expected to take the
 *	necessary action to stop executing or become "running" again within
 *	a short timeframe.
 *
 *	LSZOMB:
 *
 *		Dead: the LWP has released most of its resources and is
 *		about to switch away into oblivion.  When it switches away,
 *		its few remaining resources will be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		will switch away shortly to allow other LWPs to run on the
 *		CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *		Single LWPs within a process cannot be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP and LSSUSPENDED states may only be set
 *	when returning to user space in userret(), or when sleeping
 *	interruptibly.  Before setting those states, we try to ensure
 *	that the LWPs will release all kernel locks that they hold,
 *	and at a minimum try to ensure that the LWP can be set runnable
 *	again by a signal.
 *
 *	LWPs may transition states in the following ways (an illustrative
 *	encoding of this table appears after lwpinit() below):
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *		    > STOPPED			    > SLEEP
 *		    > SUSPENDED			    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *		    > SLEEP			    > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN			    > SUSPENDED
 *		    > STOPPED			    > STOPPED
 *		    > SUSPENDED
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin mutex pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held.  In
 *	a multiprocessor kernel, state transitions may cause the LWP's lock
 *	pointer to change.  On uniprocessor kernels, most scheduler and
 *	synchronisation objects such as sleep queues and LWPs are protected
 *	by only one mutex (sched_mutex).  In this case, LWPs' lock pointers
 *	will never change and will always reference sched_mutex.
 *
 *	Manipulation of the general lock is not performed directly, but
 *	through calls to lwp_lock(), lwp_relock() and similar.
 *
 *	States and their associated locks:
 *
 *	LSIDL, LSZOMB
 *
 *		Always covered by sched_mutex.
 *
 *	LSONPROC, LSRUN:
 *
 *		Always covered by sched_mutex, which protects the run queues
 *		and other miscellaneous items.  If the scheduler is changed
 *		to use per-CPU run queues, this may become a per-CPU mutex.
 *
 *	LSSLEEP:
 *
 *		Covered by a mutex associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue mutex.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the mutex is sched_mutex.
 *
 *	The lock order is as follows:
 *
 *		sleepq_t::sq_mutex  |---> sched_mutex
 *		tschain_t::tc_mutex |
 *
 *	Each process has a scheduler state mutex (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.60 2007/02/26 09:20:53 yamt Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;

POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

static specificdata_domain_t lwp_specificdata_domain;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
}
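
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): the state transition table from the overview comment above,
 * encoded as a predicate.  The kernel contains no such helper; it is
 * shown only to make the legal transitions explicit, e.g. for use in
 * assertions.
 */
static inline bool
lwp_state_transition_ok(int from, int to)
{

	switch (from) {
	case LSRUN:
		return to == LSONPROC || to == LSSTOP || to == LSSUSPENDED;
	case LSONPROC:
		return to == LSRUN || to == LSSLEEP || to == LSSTOP ||
		    to == LSSUSPENDED || to == LSZOMB;
	case LSSTOP:
	case LSSUSPENDED:
		return to == LSRUN || to == LSSLEEP;
	case LSSLEEP:
		return to == LSONPROC || to == LSRUN || to == LSSTOP ||
		    to == LSSUSPENDED;
	case LSIDL:
		return to == LSRUN || to == LSSUSPENDED || to == LSSTOP;
	default:
		return false;
	}
}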

/*
 * Set an LWP suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	LOCK_ASSERT(mutex_owned(&t->l_proc->p_smutex));
	LOCK_ASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	/*
	 * XXXLWP Wait for:
	 *
	 * o process exiting
	 * o target LWP suspended
	 * o target LWP not suspended and LW_WSUSPEND clear
	 * o target LWP exited
	 */

	return (error);
}

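/*
 * Illustrative sketch (an editorial addition): how a caller would
 * typically drive lwp_suspend() for a target LWP in process 'p',
 * following the locking protocol documented above.  The name
 * example_suspend_lid() is hypothetical; the _lwp_suspend() system
 * call is the real consumer of this interface.
 */
static int
example_suspend_lid(struct proc *p, lwpid_t lid)
{
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, lid)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}
	lwp_lock(t);
	error = lwp_suspend(curlwp, t);	/* unlocks 't' on return */
	mutex_exit(&p->p_smutex);
	return error;
}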

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	/*
	 * We try to check for deadlock:
	 *
	 * 1) If all other LWPs are waiting for exits or suspended.
	 * 2) If we are trying to wait on ourself.
	 *
	 * XXX we'd like to check for a cycle of waiting LWPs (specific LID
	 * waits, not any-LWP waits) and detect that sort of deadlock, but
	 * we don't have a good place to store the lwp that is being waited
	 * for.  wchan is already filled with &p->p_nlwps, and putting the
	 * lwp address in there for deadlock tracing would require exiting
	 * LWPs to call wakeup on both their own address and &p->p_nlwps, to
	 * get threads sleeping on any LWP exiting.
	 */
	if (lid == l->l_lid)
		return EDEADLK;

	p->p_nlwpwait++;

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, 0, 0);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2 == l || (lid != 0 && l2->l_lid != lid))
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += ((flags & LWPWAIT_EXITCONTROL) != 0);
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			if (departed)
				*departed = l2->l_lid;
			lwp_free(l2, 0, 0);
			mutex_enter(&p->p_smutex);
			p->p_nlwpwait--;
			return 0;
		}

		if (nfound == 0) {
			error = ESRCH;
			break;
		}
		if ((flags & LWPWAIT_EXITCONTROL) != 0) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps <= p->p_nlwpwait + p->p_ndlwps) {
			error = EDEADLK;
			break;
		}
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	p->p_nlwpwait--;
	return error;
}

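/*
 * Illustrative sketch (an editorial addition): the calling convention
 * for lwp_wait1(), modelled on the _lwp_wait() system call.  The caller
 * takes p_smutex, lwp_wait1() may drop and retake it while sleeping,
 * and the caller releases it afterwards.  example_wait_any() is a
 * hypothetical name.
 */
static int
example_wait_any(struct lwp *l, lwpid_t *departed)
{
	struct proc *p = l->l_proc;
	int error;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, 0, departed, 0);	/* lid == 0: any LWP */
	mutex_exit(&p->p_smutex);
	return error;
}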

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, bool inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, 1, 0);	/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_get(&lwp_pool, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == MAXPRI);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_priority = l1->l_priority;
	l2->l_usrpri = l1->l_usrpri;
	l2->l_inheritedprio = MAXPRI;
	l2->l_mutex = &sched_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? LW_INMEM : 0;
	lwp_initspecific(l2);

	if (p2->p_flag & PK_SYSTEM) {
		/*
		 * Mark it as a system process and not a candidate for
		 * swapping.
		 */
		l2->l_flag |= LW_SYSTEM;
	}

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(&proclist_mutex);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(&proclist_mutex);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}

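/*
 * Illustrative sketch (an editorial addition): creating an LWP and
 * making it runnable, loosely following what _lwp_create() does.  The
 * names example_start() and example_spawn() are hypothetical, the
 * uvm_uarea_alloc() signature is assumed from this era of the tree,
 * and error unwinding is omitted for brevity.
 */
static void
example_start(void *arg)
{

	/* Hypothetical entry point; a real one sets up userret state. */
	lwp_exit(curlwp);
}

static int
example_spawn(struct lwp *l, void *arg, struct lwp **new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	int error;

	inmem = uvm_uarea_alloc(&uaddr);	/* U-area for the new LWP */
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = newlwp(l, p, uaddr, inmem, 0, NULL, 0, example_start,
	    arg, &l2);
	if (error != 0)
		return error;

	/* The new LWP is LSIDL; lock it and let it run. */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	setrunnable(l2);		/* releases the LWP lock */
	mutex_exit(&p->p_smutex);

	*new_lwp = l2;
	return 0;
}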

/*
 * Exit an LWP.  This will call cpu_exit(), which will call cpu_switch(),
 * so this can only be used meaningfully if you're willing to switch away.
 * Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(&proclist_mutex);
	LIST_REMOVE(l, l_list);
	mutex_exit(&proclist_mutex);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, 0, 0);	/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif
	pmap_deactivate(l);

	/*
	 * Release the kernel lock, signal another LWP to collect us,
	 * and switch away into oblivion.
	 */
#ifdef notyet
	/* XXXSMP hold in lwp_userret() */
	KERNEL_UNLOCK_LAST(l);
#else
	KERNEL_UNLOCK_ALL(l, NULL);
#endif

	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the dead LWP's
 * resources to be freed (i.e., once we've switched to the idle PCB for the
 * current CPU).
 */
void
lwp_exit2(struct lwp *l)
{
	/* XXXSMP re-enable preemption */
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, int recycle, int last)
{
	struct proc *p = l->l_proc;
	ksiginfoq_t kq;

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;
		mutex_exit(&p->p_smutex);

#ifdef MULTIPROCESSOR
		/*
		 * In the unlikely event that the LWP is still on the CPU,
		 * then spin until it has switched away.  We need to release
		 * all locks to avoid deadlock against interrupt handlers on
		 * the target CPU.
		 */
		if (l->l_cpu->ci_curlwp == l) {
			int count;
			KERNEL_UNLOCK_ALL(curlwp, &count);
			while (l->l_cpu->ci_curlwp == l)
				SPINLOCK_BACKOFF_HOOK;
			KERNEL_LOCK(count, curlwp);
		}
#endif
	}

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	KERNEL_LOCK(1, curlwp);		/* XXXSMP */
	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(&turnstile_cache, l->l_ts);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	uvm_lwp_exit(l);
	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == MAXPRI);
	if (!recycle)
		pool_put(&lwp_pool, l);
	KERNEL_UNLOCK_ONE(curlwp);	/* XXXSMP */
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		LOCK_ASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC ||
			    l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SZOMB:
	case SDYING:
	case SDEAD:
		if (locking)
			mutex_exit(&p->p_smutex);
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Look up a live LWP within the specified process.  The LWP is not
 * locked on return; callers that need it locked must lock it themselves.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_mutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_exit(&p->p_mutex);
	if (oc != NULL) {
		KERNEL_LOCK(1, l);	/* XXXSMP */
		kauth_cred_free(oc);
		KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
	}
}

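/*
 * Illustrative sketch (an editorial addition): the check-then-update
 * pattern described above, as LWP_CACHE_CREDS() performs it on the
 * syscall path.  example_cache_creds() is a hypothetical name.
 */
static inline void
example_cache_creds(struct lwp *l, struct proc *p)
{

	/* Only take p_mutex if the cached credentials are stale. */
	if (__predict_false(l->l_cred != p->p_cred))
		lwp_update_creds(l);
}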

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
#else
	return mutex_owned(cur);
#endif
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
 * Lock an LWP.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	for (;;) {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif
}
#endif

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	LOCK_ASSERT(mutex_owned(l->l_mutex));

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	mb_write();
	l->l_mutex = new;
#else
	(void)new;
#endif
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	LOCK_ASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	mb_write();
	l->l_mutex = new;
#else
	(void)new;
#endif
	mutex_spin_exit(old);
}

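/*
 * Illustrative sketch (an editorial addition): how a synchronisation
 * object hands an LWP's lock over to its own mutex, in the style of a
 * sleep queue.  Once l_mutex points at the object's mutex, locking the
 * object also locks every LWP parked on it.  struct example_queue and
 * example_park() are hypothetical; eq_mutex is assumed to have been
 * initialized as a spin mutex.
 */
struct example_queue {
	kmutex_t	eq_mutex;	/* covers the queue and parked LWPs */
};

static void
example_park(struct example_queue *eq, struct lwp *l)
{

	/* Lock order per the comment above: object mutex, then LWP. */
	mutex_spin_enter(&eq->eq_mutex);
	lwp_lock(l);				/* acquires l->l_mutex */
	/* ... enqueue 'l' on the object here ... */
	lwp_unlock_to(l, &eq->eq_mutex);	/* l_mutex = &eq->eq_mutex */
	mutex_spin_exit(&eq->eq_mutex);
}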

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;
#endif

	LOCK_ASSERT(mutex_owned(l->l_mutex));

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
#else
	(void)new;
#endif
}

int
lwp_trylock(struct lwp *l)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
#else
	return mutex_tryenter(l->l_mutex);
#endif
}

/*
 * Handle exceptions for mi_userret().  Called if a member of LW_USERRET is
 * set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	void (*hook)(void);
	int sig;

	p = l->l_proc;

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 */
	while ((l->l_flag & LW_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core, where we will instead enter the
		 * LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE)) == LW_PENDSIG) {
			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() below */
			mutex_enter(&p->p_smutex);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(&p->p_smutex);
			KERNEL_UNLOCK_LAST(l);	/* XXXSMP */
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			mutex_enter(&p->p_smutex);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			mutex_exit(&p->p_smutex);
			mi_switch(l, NULL);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			KERNEL_LOCK(1, l);
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* Call userret hook; used by Linux emulation. */
		if ((l->l_flag & LW_WUSERRET) != 0) {
			lwp_lock(l);
			l->l_flag &= ~LW_WUSERRET;
			lwp_unlock(l);
			hook = p->p_userret;
			p->p_userret = NULL;
			(*hook)();
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	mb_write();
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keep the lwp structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

	LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_refcv);
	mutex_exit(&p->p_smutex);
}

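/*
 * Illustrative sketch (an editorial addition): the reference pattern a
 * subsystem such as procfs would use to keep an LWP's structure and PCB
 * alive while inspecting it.  example_inspect() is a hypothetical name.
 */
static void
example_inspect(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	lwp_addref(l);			/* LWP can no longer fully exit */
	mutex_exit(&p->p_smutex);

	/* ... examine l->l_addr, l->l_stat, etc. ... */

	lwp_delref(l);			/* may wake an exiting LWP */
}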

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_refcv, &p->p_smutex);
}

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 * Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 * only its OWN SPECIFIC DATA.  If it is necessary to access another
 * LWP's specific data, care must be taken to ensure that doing so
 * would not cause internal data structure inconsistency (i.e. caller
 * can guarantee that the target LWP is not inside an lwp_getspecific()
 * or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key, data);
}
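
/*
 * Illustrative sketch (an editorial addition): how a subsystem would use
 * the lwp-specific data interface above.  The key, destructor and helper
 * names are hypothetical.
 */
static specificdata_key_t example_key;

static void
example_dtor(void *data)
{

	/* Called for each LWP's datum when the key is deleted. */
	kmem_free(data, sizeof(int));
}

static void
example_specificdata_use(void)
{
	int *countp;

	/* Once, at subsystem initialization. */
	if (lwp_specific_key_create(&example_key, example_dtor) != 0)
		return;

	/* Per-LWP: attach data to curlwp, then read it back. */
	countp = kmem_alloc(sizeof(*countp), KM_SLEEP);
	*countp = 0;
	lwp_setspecific(example_key, countp);

	countp = lwp_getspecific(example_key);
	(*countp)++;
}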