kern_lwp.c revision 1.78
1 1.78 ad /* $NetBSD: kern_lwp.c,v 1.78 2007/11/12 23:11:59 ad Exp $ */
2 1.2 thorpej
3 1.2 thorpej /*-
4 1.52 ad * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.52 ad * by Nathan J. Williams, and Andrew Doran.
9 1.2 thorpej *
10 1.2 thorpej * Redistribution and use in source and binary forms, with or without
11 1.2 thorpej * modification, are permitted provided that the following conditions
12 1.2 thorpej * are met:
13 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
14 1.2 thorpej * notice, this list of conditions and the following disclaimer.
15 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
17 1.2 thorpej * documentation and/or other materials provided with the distribution.
18 1.2 thorpej * 3. All advertising materials mentioning features or use of this software
19 1.2 thorpej * must display the following acknowledgement:
20 1.2 thorpej * This product includes software developed by the NetBSD
21 1.2 thorpej * Foundation, Inc. and its contributors.
22 1.2 thorpej * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.2 thorpej * contributors may be used to endorse or promote products derived
24 1.2 thorpej * from this software without specific prior written permission.
25 1.2 thorpej *
26 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.2 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
37 1.2 thorpej */
38 1.9 lukem
39 1.52 ad /*
40 1.52 ad * Overview
41 1.52 ad *
42 1.66 ad * Lightweight processes (LWPs) are the basic unit or thread of
43 1.52 ad * execution within the kernel. The core state of an LWP is described
44 1.66 ad * by "struct lwp", also known as lwp_t.
45 1.52 ad *
46 1.52 ad * Each LWP is contained within a process (described by "struct proc").
47 1.52 ad * Every process contains at least one LWP, but may contain more. The
48 1.52 ad * process describes attributes shared among all of its LWPs such as a
49 1.52 ad * private address space, global execution state (stopped, active,
50 1.52 ad * zombie, ...), signal disposition and so on. On a multiprocessor
51 1.66 ad * machine, multiple LWPs may be executing concurrently in the kernel.
52 1.52 ad *
53 1.52 ad * Execution states
54 1.52 ad *
55 1.52 ad * At any given time, an LWP has overall state that is described by
56 1.52 ad * lwp::l_stat. The states are broken into two sets below. The first
57 1.52 ad * set is guaranteed to represent the absolute, current state of the
58 1.52 ad * LWP:
59 1.52 ad *
60 1.52 ad * LSONPROC
61 1.52 ad *
62 1.52 ad * On processor: the LWP is executing on a CPU, either in the
63 1.52 ad * kernel or in user space.
64 1.52 ad *
65 1.52 ad * LSRUN
66 1.52 ad *
67 1.52 ad * Runnable: the LWP is parked on a run queue, and may soon be
68 1.52 ad * chosen to run by an idle processor, or by a processor that
69 1.52 ad * has been asked to preempt a currently running but lower
70 1.52 ad * priority LWP. If the LWP is not swapped in (LW_INMEM == 0)
71 1.52 ad * then the LWP is not on a run queue, but may be soon.
72 1.52 ad *
73 1.52 ad * LSIDL
74 1.52 ad *
75 1.66 ad * Idle: the LWP has been created but has not yet executed,
76 1.66 ad * or it has ceased executing a unit of work and is waiting
77 1.66 ad * to be started again.
78 1.52 ad *
79 1.52 ad * LSSUSPENDED:
80 1.52 ad *
81 1.52 ad * Suspended: the LWP has had its execution suspended by
82 1.52 ad * another LWP in the same process using the _lwp_suspend()
83 1.52 ad * system call. User-level LWPs also enter the suspended
84 1.52 ad * state when the system is shutting down.
85 1.52 ad *
86 1.52 ad * The second set represents a "statement of intent" on behalf of the
87 1.52 ad * LWP. The LWP may in fact be executing on a processor, or may be
88 1.66 ad * sleeping or idle. It is expected to take the necessary action to
89 1.66 ad * stop executing or become "running" again within a short timeframe.
90 1.66 ad * The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
91 1.66 ad * Importantly, it indicates that its state is tied to a CPU.
92 1.52 ad *
93 1.52 ad * LSZOMB:
94 1.52 ad *
95 1.66 ad * Dead or dying: the LWP has released most of its resources
96 1.66 ad * and is either a) about to switch away into oblivion, or
97 1.66 ad * b) has already switched away. When it switches away, its
98 1.66 ad * few remaining resources can be collected.
99 1.52 ad *
100 1.52 ad * LSSLEEP:
101 1.52 ad *
102 1.52 ad * Sleeping: the LWP has entered itself onto a sleep queue, and
103 1.66 ad * has switched away or will switch away shortly to allow other
104 1.66 ad * LWPs to run on the CPU.
105 1.52 ad *
106 1.52 ad * LSSTOP:
107 1.52 ad *
108 1.52 ad * Stopped: the LWP has been stopped as a result of a job
109 1.52 ad * control signal, or as a result of the ptrace() interface.
110 1.66 ad *
111 1.52 ad * Stopped LWPs may run briefly within the kernel to handle
112 1.52 ad * signals that they receive, but will not return to user space
113 1.52 ad * until their process' state is changed away from stopped.
114 1.66 ad *
115 1.52 ad * Single LWPs within a process can not be set stopped
116 1.52 ad * selectively: all actions that can stop or continue LWPs
117 1.52 ad * occur at the process level.
118 1.52 ad *
119 1.52 ad * State transitions
120 1.52 ad *
121 1.66 ad * Note that the LSSTOP state may only be set when returning to
122 1.66 ad * user space in userret(), or when sleeping interruptibly. The
123 1.66 ad * LSSUSPENDED state may only be set in userret(). Before setting
124 1.66 ad * those states, we try to ensure that the LWPs will release all
125 1.66 ad * locks that they hold, and at a minimum try to ensure that the
126 1.66 ad * LWP can be set runnable again by a signal.
127 1.52 ad *
128 1.52 ad * LWPs may transition states in the following ways:
129 1.52 ad *
130 1.52 ad *	RUN -------> ONPROC        ONPROC -----> RUN
131 1.52 ad *	           > STOPPED                   > SLEEP
132 1.52 ad *	           > SUSPENDED                 > STOPPED
133 1.52 ad *	                                       > SUSPENDED
134 1.52 ad *	                                       > ZOMB
135 1.52 ad *
136 1.52 ad *	STOPPED ---> RUN           SUSPENDED --> RUN
137 1.52 ad *	           > SLEEP                     > SLEEP
138 1.52 ad *
139 1.52 ad *	SLEEP -----> ONPROC        IDL --------> RUN
140 1.52 ad *	           > RUN                       > SUSPENDED
141 1.52 ad *	           > STOPPED                   > STOPPED
142 1.52 ad *	           > SUSPENDED
143 1.52 ad *
144 1.66 ad * Other state transitions are possible with kernel threads (e.g.
145 1.66 ad * ONPROC -> IDL), but only happen under tightly controlled
146 1.66 ad * circumstances where the side effects are understood.
147 1.66 ad *
148 1.52 ad * Locking
149 1.52 ad *
150 1.52 ad * The majority of fields in 'struct lwp' are covered by a single,
151 1.66 ad * general spin lock pointed to by lwp::l_mutex. The locks covering
152 1.52 ad * each field are documented in sys/lwp.h.
153 1.52 ad *
154 1.66 ad * State transitions must be made with the LWP's general lock held,
155 1.66 ad * and may cause the LWP's lock pointer to change. Manipulation of
156 1.66 ad * the general lock is not performed directly, but through calls to
157 1.66 ad * lwp_lock(), lwp_relock() and similar.
158 1.52 ad *
159 1.52 ad * States and their associated locks:
160 1.52 ad *
161 1.74 rmind * LSONPROC, LSZOMB:
162 1.52 ad *
163 1.64 yamt * Always covered by spc_lwplock, which protects running LWPs.
164 1.64 yamt * This is a per-CPU lock.
165 1.52 ad *
166 1.74 rmind * LSIDL, LSRUN:
167 1.52 ad *
168 1.64 yamt * Always covered by spc_mutex, which protects the run queues.
169 1.64 yamt * This may be a per-CPU lock, depending on the scheduler.
170 1.52 ad *
171 1.52 ad * LSSLEEP:
172 1.52 ad *
173 1.66 ad * Covered by a lock associated with the sleep queue that the
174 1.52 ad * LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
175 1.52 ad *
176 1.52 ad * LSSTOP, LSSUSPENDED:
177 1.52 ad *
178 1.52 ad * If the LWP was previously sleeping (l_wchan != NULL), then
179 1.66 ad * l_mutex references the sleep queue lock. If the LWP was
180 1.52 ad * runnable or on the CPU when halted, or has been removed from
181 1.66 ad * the sleep queue since halted, then the lock is spc_lwplock.
182 1.52 ad *
183 1.52 ad * The lock order is as follows:
184 1.52 ad *
185 1.64 yamt * spc::spc_lwplock ->
186 1.64 yamt * sleepq_t::sq_mutex ->
187 1.64 yamt * tschain_t::tc_mutex ->
188 1.64 yamt * spc::spc_mutex
189 1.52 ad *
190 1.66 ad * Each process has a scheduler state lock (proc::p_smutex), and a
191 1.52 ad * number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
192 1.52 ad * so on. When an LWP is to be entered into or removed from one of the
193 1.52 ad * following states, p_smutex must be held and the process-wide counters
194 1.52 ad * adjusted:
195 1.52 ad *
196 1.52 ad * LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
197 1.52 ad *
198 1.52 ad * Note that an LWP is considered running or likely to run soon if in
199 1.52 ad * one of the following states. This affects the value of p_nrlwps:
200 1.52 ad *
201 1.52 ad * LSRUN, LSONPROC, LSSLEEP
202 1.52 ad *
203 1.52 ad * p_smutex does not need to be held when transitioning among these
204 1.52 ad * three states.
205 1.52 ad */
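
/*
 * A minimal sketch of the pattern implied by the locking rules above:
 * take the general lock, test, and release.  lwp_lock() chases the
 * lock pointer, which may change across a state transition:
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSSLEEP) {
 *		... l_mutex now refers to the sleep queue lock ...
 *	}
 *	lwp_unlock(l);
 */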
206 1.52 ad
207 1.9 lukem #include <sys/cdefs.h>
208 1.78 ad __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.78 2007/11/12 23:11:59 ad Exp $");
209 1.8 martin
210 1.8 martin #include "opt_multiprocessor.h"
211 1.52 ad #include "opt_lockdebug.h"
212 1.2 thorpej
213 1.47 hannken #define _LWP_API_PRIVATE
214 1.47 hannken
215 1.2 thorpej #include <sys/param.h>
216 1.2 thorpej #include <sys/systm.h>
217 1.64 yamt #include <sys/cpu.h>
218 1.2 thorpej #include <sys/pool.h>
219 1.2 thorpej #include <sys/proc.h>
220 1.2 thorpej #include <sys/syscallargs.h>
221 1.57 dsl #include <sys/syscall_stats.h>
222 1.37 ad #include <sys/kauth.h>
223 1.52 ad #include <sys/sleepq.h>
224 1.52 ad #include <sys/lockdebug.h>
225 1.52 ad #include <sys/kmem.h>
226 1.75 ad #include <sys/intr.h>
227 1.78 ad #include <sys/lwpctl.h>
228 1.2 thorpej
229 1.2 thorpej #include <uvm/uvm_extern.h>
230 1.2 thorpej
231 1.77 matt struct lwplist alllwp = LIST_HEAD_INITIALIZER(alllwp);
232 1.52 ad
233 1.52 ad POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
234 1.62 ad &pool_allocator_nointr, IPL_NONE);
235 1.41 thorpej POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
236 1.62 ad &pool_allocator_nointr, IPL_NONE);
237 1.41 thorpej
238 1.41 thorpej static specificdata_domain_t lwp_specificdata_domain;
239 1.41 thorpej
240 1.41 thorpej void
241 1.41 thorpej lwpinit(void)
242 1.41 thorpej {
243 1.41 thorpej
244 1.41 thorpej lwp_specificdata_domain = specificdata_domain_create();
245 1.41 thorpej KASSERT(lwp_specificdata_domain != NULL);
246 1.52 ad lwp_sys_init();
247 1.41 thorpej }
248 1.41 thorpej
249 1.52 ad /*
250 1.52 ad * Set an LWP suspended.
251 1.52 ad *
252 1.52 ad * Must be called with p_smutex held, and the LWP locked. Will unlock the
253 1.52 ad * LWP before return.
254 1.52 ad */
255 1.2 thorpej int
256 1.52 ad lwp_suspend(struct lwp *curl, struct lwp *t)
257 1.2 thorpej {
258 1.52 ad int error;
259 1.2 thorpej
260 1.63 ad KASSERT(mutex_owned(&t->l_proc->p_smutex));
261 1.63 ad KASSERT(lwp_locked(t, NULL));
262 1.33 chs
263 1.52 ad KASSERT(curl != t || curl->l_stat == LSONPROC);
264 1.2 thorpej
265 1.52 ad /*
266 1.52 ad * If the current LWP has been told to exit, we must not suspend anyone
267 1.52 ad * else or deadlock could occur. We won't return to userspace.
268 1.2 thorpej */
269 1.56 pavel 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
270 1.52 ad lwp_unlock(t);
271 1.52 ad return (EDEADLK);
272 1.2 thorpej }
273 1.2 thorpej
274 1.52 ad error = 0;
275 1.2 thorpej
276 1.52 ad switch (t->l_stat) {
277 1.52 ad case LSRUN:
278 1.52 ad case LSONPROC:
279 1.56 pavel t->l_flag |= LW_WSUSPEND;
280 1.52 ad lwp_need_userret(t);
281 1.52 ad lwp_unlock(t);
282 1.52 ad break;
283 1.2 thorpej
284 1.52 ad case LSSLEEP:
285 1.56 pavel t->l_flag |= LW_WSUSPEND;
286 1.2 thorpej
287 1.2 thorpej /*
288 1.52 ad * Kick the LWP and try to get it to the kernel boundary
289 1.52 ad * so that it will release any locks that it holds.
290 1.52 ad * setrunnable() will release the lock.
291 1.2 thorpej */
292 1.56 pavel if ((t->l_flag & LW_SINTR) != 0)
293 1.52 ad setrunnable(t);
294 1.52 ad else
295 1.52 ad lwp_unlock(t);
296 1.52 ad break;
297 1.2 thorpej
298 1.52 ad case LSSUSPENDED:
299 1.52 ad lwp_unlock(t);
300 1.52 ad break;
301 1.17 manu
302 1.52 ad case LSSTOP:
303 1.56 pavel t->l_flag |= LW_WSUSPEND;
304 1.52 ad setrunnable(t);
305 1.52 ad break;
306 1.2 thorpej
307 1.52 ad case LSIDL:
308 1.52 ad case LSZOMB:
309 1.52 ad error = EINTR; /* It's what Solaris does..... */
310 1.52 ad lwp_unlock(t);
311 1.52 ad break;
312 1.2 thorpej }
313 1.2 thorpej
314 1.69 rmind return (error);
315 1.2 thorpej }
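
/*
 * A minimal sketch of the expected call pattern (cf. the _lwp_suspend()
 * system call): the target is looked up and locked with p_smutex held,
 * and lwp_suspend() releases the LWP lock before returning:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((t = lwp_find(p, lid)) != NULL) {
 *		lwp_lock(t);
 *		error = lwp_suspend(l, t);
 *	} else
 *		error = ESRCH;
 *	mutex_exit(&p->p_smutex);
 */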
316 1.2 thorpej
317 1.52 ad /*
318 1.52 ad * Restart a suspended LWP.
319 1.52 ad *
320 1.52 ad * Must be called with p_smutex held, and the LWP locked. Will unlock the
321 1.52 ad * LWP before return.
322 1.52 ad */
323 1.2 thorpej void
324 1.2 thorpej lwp_continue(struct lwp *l)
325 1.2 thorpej {
326 1.2 thorpej
327 1.63 ad KASSERT(mutex_owned(&l->l_proc->p_smutex));
328 1.63 ad KASSERT(lwp_locked(l, NULL));
329 1.52 ad
330 1.52 ad /* If rebooting or not suspended, then just bail out. */
331 1.56 pavel if ((l->l_flag & LW_WREBOOT) != 0) {
332 1.52 ad lwp_unlock(l);
333 1.2 thorpej return;
334 1.10 fvdl }
335 1.2 thorpej
336 1.56 pavel l->l_flag &= ~LW_WSUSPEND;
337 1.2 thorpej
338 1.52 ad if (l->l_stat != LSSUSPENDED) {
339 1.52 ad lwp_unlock(l);
340 1.52 ad return;
341 1.2 thorpej }
342 1.2 thorpej
343 1.52 ad /* setrunnable() will release the lock. */
344 1.52 ad setrunnable(l);
345 1.2 thorpej }
346 1.2 thorpej
347 1.52 ad /*
348 1.52 ad * Wait for an LWP within the current process to exit. If 'lid' is
349 1.52 ad * non-zero, we are waiting for a specific LWP.
350 1.52 ad *
351 1.52 ad * Must be called with p->p_smutex held.
352 1.52 ad */
353 1.2 thorpej int
354 1.2 thorpej lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
355 1.2 thorpej {
356 1.2 thorpej struct proc *p = l->l_proc;
357 1.52 ad struct lwp *l2;
358 1.52 ad int nfound, error;
359 1.63 ad lwpid_t curlid;
360 1.63 ad bool exiting;
361 1.2 thorpej
362 1.63 ad KASSERT(mutex_owned(&p->p_smutex));
363 1.52 ad
364 1.52 ad p->p_nlwpwait++;
365 1.63 ad l->l_waitingfor = lid;
366 1.63 ad curlid = l->l_lid;
367 1.63 ad exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
368 1.52 ad
369 1.52 ad for (;;) {
370 1.52 ad /*
371 1.52 ad * Avoid a race between exit1() and sigexit(): if the
372 1.52 ad * process is dumping core, then we need to bail out: call
373 1.52 ad * into lwp_userret() where we will be suspended until the
374 1.52 ad * deed is done.
375 1.52 ad */
376 1.52 ad if ((p->p_sflag & PS_WCORE) != 0) {
377 1.52 ad mutex_exit(&p->p_smutex);
378 1.52 ad lwp_userret(l);
379 1.52 ad #ifdef DIAGNOSTIC
380 1.52 ad panic("lwp_wait1");
381 1.52 ad #endif
382 1.52 ad /* NOTREACHED */
383 1.52 ad }
384 1.52 ad
385 1.52 ad /*
386 1.52 ad * First off, drain any detached LWP that is waiting to be
387 1.52 ad * reaped.
388 1.52 ad */
389 1.52 ad while ((l2 = p->p_zomblwp) != NULL) {
390 1.52 ad p->p_zomblwp = NULL;
391 1.63 ad lwp_free(l2, false, false);/* releases proc mutex */
392 1.52 ad mutex_enter(&p->p_smutex);
393 1.52 ad }
394 1.52 ad
395 1.52 ad /*
396 1.52 ad * Now look for an LWP to collect. If the whole process is
397 1.52 ad * exiting, count detached LWPs as eligible to be collected,
398 1.52 ad * but don't drain them here.
399 1.52 ad */
400 1.52 ad nfound = 0;
401 1.63 ad error = 0;
402 1.52 ad LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
403 1.63 ad /*
404 1.63 ad * If a specific wait and the target is waiting on
405 1.63 ad * us, then avoid deadlock. This also traps LWPs
406 1.63 ad * that try to wait on themselves.
407 1.63 ad *
408 1.63 ad * Note that this does not handle more complicated
409 1.63 ad * cycles, like: t1 -> t2 -> t3 -> t1. The process
410 1.63 ad * can still be killed so it is not a major problem.
411 1.63 ad */
412 1.63 ad if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
413 1.63 ad error = EDEADLK;
414 1.63 ad break;
415 1.63 ad }
416 1.63 ad if (l2 == l)
417 1.52 ad continue;
418 1.52 ad if ((l2->l_prflag & LPR_DETACHED) != 0) {
419 1.63 ad nfound += exiting;
420 1.63 ad continue;
421 1.63 ad }
422 1.63 ad if (lid != 0) {
423 1.63 ad if (l2->l_lid != lid)
424 1.63 ad continue;
425 1.63 ad /*
426 1.63 ad * Mark this LWP as the first waiter, if there
427 1.63 ad * is no other.
428 1.63 ad */
429 1.63 ad if (l2->l_waiter == 0)
430 1.63 ad l2->l_waiter = curlid;
431 1.63 ad } else if (l2->l_waiter != 0) {
432 1.63 ad /*
433 1.63 ad * It already has a waiter - so don't
434 1.63 ad * collect it. If the waiter doesn't
435 1.63 ad * grab it we'll get another chance
436 1.63 ad * later.
437 1.63 ad */
438 1.63 ad nfound++;
439 1.52 ad continue;
440 1.52 ad }
441 1.52 ad nfound++;
442 1.2 thorpej
443 1.52 ad /* No need to lock the LWP in order to see LSZOMB. */
444 1.52 ad if (l2->l_stat != LSZOMB)
445 1.52 ad continue;
446 1.2 thorpej
447 1.63 ad /*
448 1.63 ad * We're no longer waiting. Reset the "first waiter"
449 1.63 ad * pointer on the target, in case it was us.
450 1.63 ad */
451 1.63 ad l->l_waitingfor = 0;
452 1.63 ad l2->l_waiter = 0;
453 1.63 ad p->p_nlwpwait--;
454 1.2 thorpej if (departed)
455 1.2 thorpej *departed = l2->l_lid;
456 1.75 ad sched_lwp_collect(l2);
457 1.63 ad
458 1.63 ad /* lwp_free() releases the proc lock. */
459 1.63 ad lwp_free(l2, false, false);
460 1.52 ad mutex_enter(&p->p_smutex);
461 1.52 ad return 0;
462 1.52 ad }
463 1.2 thorpej
464 1.63 ad if (error != 0)
465 1.63 ad break;
466 1.52 ad if (nfound == 0) {
467 1.52 ad error = ESRCH;
468 1.52 ad break;
469 1.52 ad }
470 1.63 ad
471 1.63 ad /*
472 1.63 ad * The kernel is careful to ensure that it can not deadlock
473 1.63 ad * when exiting - just keep waiting.
474 1.63 ad */
475 1.63 ad if (exiting) {
476 1.52 ad KASSERT(p->p_nlwps > 1);
477 1.52 ad cv_wait(&p->p_lwpcv, &p->p_smutex);
478 1.52 ad continue;
479 1.52 ad }
480 1.63 ad
481 1.63 ad /*
482 1.63 ad * If all other LWPs are waiting for exits or suspends
483 1.63 ad * and the supply of zombies and potential zombies is
484 1.63 ad * exhausted, then we are about to deadlock.
485 1.63 ad *
486 1.63 ad * If the process is exiting (and this LWP is not the one
487 1.63 ad * that is coordinating the exit) then bail out now.
488 1.63 ad */
489 1.52 ad if ((p->p_sflag & PS_WEXIT) != 0 ||
490 1.63 ad p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
491 1.52 ad error = EDEADLK;
492 1.52 ad break;
493 1.2 thorpej }
494 1.63 ad
495 1.63 ad /*
496 1.63 ad * Sit around and wait for something to happen. We'll be
497 1.63 ad * awoken if any of the conditions examined change: if an
498 1.63 ad * LWP exits, is collected, or is detached.
499 1.63 ad */
500 1.52 ad if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
501 1.52 ad break;
502 1.2 thorpej }
503 1.2 thorpej
504 1.63 ad /*
505 1.63 ad 	 * We didn't find any LWPs to collect; we may have received a
506 1.63 ad 	 * signal, or some other condition may have caused us to bail out.
507 1.63 ad *
508 1.63 ad * If waiting on a specific LWP, clear the waiters marker: some
509 1.63 ad * other LWP may want it. Then, kick all the remaining waiters
510 1.63 ad * so that they can re-check for zombies and for deadlock.
511 1.63 ad */
512 1.63 ad if (lid != 0) {
513 1.63 ad LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
514 1.63 ad if (l2->l_lid == lid) {
515 1.63 ad if (l2->l_waiter == curlid)
516 1.63 ad l2->l_waiter = 0;
517 1.63 ad break;
518 1.63 ad }
519 1.63 ad }
520 1.63 ad }
521 1.52 ad p->p_nlwpwait--;
522 1.63 ad l->l_waitingfor = 0;
523 1.63 ad cv_broadcast(&p->p_lwpcv);
524 1.63 ad
525 1.52 ad return error;
526 1.2 thorpej }
527 1.2 thorpej
528 1.52 ad /*
529 1.52 ad * Create a new LWP within process 'p2', using LWP 'l1' as a template.
530 1.52 ad * The new LWP is created in state LSIDL and must be set running,
531 1.52 ad * suspended, or stopped by the caller.
532 1.52 ad */
533 1.2 thorpej int
534 1.75 ad lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
535 1.75 ad void *stack, size_t stacksize, void (*func)(void *), void *arg,
536 1.75 ad lwp_t **rnewlwpp, int sclass)
537 1.2 thorpej {
538 1.52 ad struct lwp *l2, *isfree;
539 1.52 ad turnstile_t *ts;
540 1.2 thorpej
541 1.52 ad /*
542 1.52 ad * First off, reap any detached LWP waiting to be collected.
543 1.52 ad * We can re-use its LWP structure and turnstile.
544 1.52 ad */
545 1.52 ad isfree = NULL;
546 1.52 ad if (p2->p_zomblwp != NULL) {
547 1.52 ad mutex_enter(&p2->p_smutex);
548 1.52 ad if ((isfree = p2->p_zomblwp) != NULL) {
549 1.52 ad p2->p_zomblwp = NULL;
550 1.63 ad lwp_free(isfree, true, false);/* releases proc mutex */
551 1.52 ad } else
552 1.52 ad mutex_exit(&p2->p_smutex);
553 1.52 ad }
554 1.52 ad if (isfree == NULL) {
555 1.52 ad l2 = pool_get(&lwp_pool, PR_WAITOK);
556 1.52 ad memset(l2, 0, sizeof(*l2));
557 1.76 ad l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
558 1.60 yamt SLIST_INIT(&l2->l_pi_lenders);
559 1.52 ad } else {
560 1.52 ad l2 = isfree;
561 1.52 ad ts = l2->l_ts;
562 1.75 ad KASSERT(l2->l_inheritedprio == -1);
563 1.60 yamt KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
564 1.52 ad memset(l2, 0, sizeof(*l2));
565 1.52 ad l2->l_ts = ts;
566 1.52 ad }
567 1.2 thorpej
568 1.2 thorpej l2->l_stat = LSIDL;
569 1.2 thorpej l2->l_proc = p2;
570 1.52 ad l2->l_refcnt = 1;
571 1.75 ad l2->l_class = sclass;
572 1.75 ad l2->l_kpriority = l1->l_kpriority;
573 1.52 ad l2->l_priority = l1->l_priority;
574 1.75 ad l2->l_inheritedprio = -1;
575 1.64 yamt l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
576 1.52 ad l2->l_cpu = l1->l_cpu;
577 1.56 pavel l2->l_flag = inmem ? LW_INMEM : 0;
578 1.41 thorpej
579 1.56 pavel if (p2->p_flag & PK_SYSTEM) {
580 1.52 ad /*
581 1.52 ad * Mark it as a system process and not a candidate for
582 1.52 ad * swapping.
583 1.52 ad */
584 1.56 pavel l2->l_flag |= LW_SYSTEM;
585 1.73 rmind } else {
586 1.73 rmind /* Look for a CPU to start */
587 1.73 rmind l2->l_cpu = sched_takecpu(l2);
588 1.73 rmind l2->l_mutex = l2->l_cpu->ci_schedstate.spc_mutex;
589 1.52 ad }
590 1.2 thorpej
591 1.73 rmind lwp_initspecific(l2);
592 1.75 ad sched_lwp_fork(l1, l2);
593 1.37 ad lwp_update_creds(l2);
594 1.70 ad callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
595 1.70 ad callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
596 1.65 ad mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
597 1.52 ad cv_init(&l2->l_sigcv, "sigwait");
598 1.52 ad l2->l_syncobj = &sched_syncobj;
599 1.2 thorpej
600 1.2 thorpej if (rnewlwpp != NULL)
601 1.2 thorpej *rnewlwpp = l2;
602 1.2 thorpej
603 1.36 yamt l2->l_addr = UAREA_TO_USER(uaddr);
604 1.2 thorpej uvm_lwp_fork(l1, l2, stack, stacksize, func,
605 1.2 thorpej (arg != NULL) ? arg : l2);
606 1.2 thorpej
607 1.52 ad mutex_enter(&p2->p_smutex);
608 1.52 ad
609 1.52 ad if ((flags & LWP_DETACHED) != 0) {
610 1.52 ad l2->l_prflag = LPR_DETACHED;
611 1.52 ad p2->p_ndlwps++;
612 1.52 ad } else
613 1.52 ad l2->l_prflag = 0;
614 1.52 ad
615 1.52 ad l2->l_sigmask = l1->l_sigmask;
616 1.52 ad CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
617 1.52 ad sigemptyset(&l2->l_sigpend.sp_set);
618 1.52 ad
619 1.53 yamt p2->p_nlwpid++;
620 1.53 yamt if (p2->p_nlwpid == 0)
621 1.53 yamt p2->p_nlwpid++;
622 1.53 yamt l2->l_lid = p2->p_nlwpid;
623 1.2 thorpej LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
624 1.2 thorpej p2->p_nlwps++;
625 1.2 thorpej
626 1.52 ad mutex_exit(&p2->p_smutex);
627 1.52 ad
628 1.65 ad mutex_enter(&proclist_lock);
629 1.52 ad mutex_enter(&proclist_mutex);
630 1.2 thorpej LIST_INSERT_HEAD(&alllwp, l2, l_list);
631 1.52 ad mutex_exit(&proclist_mutex);
632 1.65 ad mutex_exit(&proclist_lock);
633 1.2 thorpej
634 1.57 dsl SYSCALL_TIME_LWP_INIT(l2);
635 1.57 dsl
636 1.16 manu if (p2->p_emul->e_lwp_fork)
637 1.16 manu (*p2->p_emul->e_lwp_fork)(l1, l2);
638 1.16 manu
639 1.2 thorpej return (0);
640 1.2 thorpej }
641 1.2 thorpej
642 1.2 thorpej /*
643 1.64 yamt * Called by MD code when a new LWP begins execution. Must be called
644 1.64 yamt * with the previous LWP locked (so at splsched), or if there is no
645 1.64 yamt * previous LWP, at splsched.
646 1.64 yamt */
647 1.64 yamt void
648 1.64 yamt lwp_startup(struct lwp *prev, struct lwp *new)
649 1.64 yamt {
650 1.64 yamt
651 1.64 yamt if (prev != NULL) {
652 1.64 yamt lwp_unlock(prev);
653 1.64 yamt }
654 1.64 yamt spl0();
655 1.64 yamt pmap_activate(new);
656 1.64 yamt LOCKDEBUG_BARRIER(NULL, 0);
657 1.65 ad if ((new->l_pflag & LP_MPSAFE) == 0) {
658 1.65 ad KERNEL_LOCK(1, new);
659 1.65 ad }
660 1.64 yamt }
661 1.64 yamt
662 1.64 yamt /*
663 1.65 ad * Exit an LWP.
664 1.2 thorpej */
665 1.2 thorpej void
666 1.2 thorpej lwp_exit(struct lwp *l)
667 1.2 thorpej {
668 1.2 thorpej struct proc *p = l->l_proc;
669 1.52 ad struct lwp *l2;
670 1.65 ad bool current;
671 1.65 ad
672 1.65 ad current = (l == curlwp);
673 1.2 thorpej
674 1.65 ad KASSERT(current || l->l_stat == LSIDL);
675 1.2 thorpej
676 1.52 ad /*
677 1.52 ad * Verify that we hold no locks other than the kernel lock.
678 1.52 ad */
679 1.52 ad #ifdef MULTIPROCESSOR
680 1.52 ad LOCKDEBUG_BARRIER(&kernel_lock, 0);
681 1.52 ad #else
682 1.52 ad LOCKDEBUG_BARRIER(NULL, 0);
683 1.52 ad #endif
684 1.16 manu
685 1.2 thorpej /*
686 1.52 ad * If we are the last live LWP in a process, we need to exit the
687 1.52 ad * entire process. We do so with an exit status of zero, because
688 1.52 ad * it's a "controlled" exit, and because that's what Solaris does.
689 1.52 ad *
690 1.52 ad * We are not quite a zombie yet, but for accounting purposes we
691 1.52 ad * must increment the count of zombies here.
692 1.45 thorpej *
693 1.45 thorpej * Note: the last LWP's specificdata will be deleted here.
694 1.2 thorpej */
695 1.52 ad mutex_enter(&p->p_smutex);
696 1.52 ad if (p->p_nlwps - p->p_nzlwps == 1) {
697 1.65 ad KASSERT(current == true);
698 1.2 thorpej exit1(l, 0);
699 1.19 jdolecek /* NOTREACHED */
700 1.2 thorpej }
701 1.52 ad p->p_nzlwps++;
702 1.52 ad mutex_exit(&p->p_smutex);
703 1.52 ad
704 1.52 ad if (p->p_emul->e_lwp_exit)
705 1.52 ad (*p->p_emul->e_lwp_exit)(l);
706 1.2 thorpej
707 1.45 thorpej /* Delete the specificdata while it's still safe to sleep. */
708 1.45 thorpej specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
709 1.45 thorpej
710 1.52 ad /*
711 1.52 ad * Release our cached credentials.
712 1.52 ad */
713 1.37 ad kauth_cred_free(l->l_cred);
714 1.70 ad callout_destroy(&l->l_timeout_ch);
715 1.65 ad
716 1.65 ad /*
717 1.65 ad * While we can still block, mark the LWP as unswappable to
718 1.65 ad 	 * prevent conflicts with the swapper.
719 1.65 ad */
720 1.65 ad if (current)
721 1.65 ad uvm_lwp_hold(l);
722 1.37 ad
723 1.52 ad /*
724 1.52 ad * Remove the LWP from the global list.
725 1.52 ad */
726 1.65 ad mutex_enter(&proclist_lock);
727 1.52 ad mutex_enter(&proclist_mutex);
728 1.52 ad LIST_REMOVE(l, l_list);
729 1.52 ad mutex_exit(&proclist_mutex);
730 1.65 ad mutex_exit(&proclist_lock);
731 1.19 jdolecek
732 1.52 ad /*
733 1.52 ad * Get rid of all references to the LWP that others (e.g. procfs)
734 1.52 ad * may have, and mark the LWP as a zombie. If the LWP is detached,
735 1.52 ad * mark it waiting for collection in the proc structure. Note that
736 1.52 ad 	 * before we can do that, we need to free any other dead, detached
737 1.52 ad * LWP waiting to meet its maker.
738 1.52 ad *
739 1.52 ad * XXXSMP disable preemption.
740 1.52 ad */
741 1.52 ad mutex_enter(&p->p_smutex);
742 1.52 ad lwp_drainrefs(l);
743 1.31 yamt
744 1.52 ad if ((l->l_prflag & LPR_DETACHED) != 0) {
745 1.52 ad while ((l2 = p->p_zomblwp) != NULL) {
746 1.52 ad p->p_zomblwp = NULL;
747 1.63 ad lwp_free(l2, false, false);/* releases proc mutex */
748 1.52 ad mutex_enter(&p->p_smutex);
749 1.72 ad l->l_refcnt++;
750 1.72 ad lwp_drainrefs(l);
751 1.52 ad }
752 1.52 ad p->p_zomblwp = l;
753 1.52 ad }
754 1.31 yamt
755 1.52 ad /*
756 1.52 ad * If we find a pending signal for the process and we have been
757 1.52 ad 	 * asked to check for signals, then we lose: arrange to have
758 1.52 ad * all other LWPs in the process check for signals.
759 1.52 ad */
760 1.56 pavel if ((l->l_flag & LW_PENDSIG) != 0 &&
761 1.52 ad firstsig(&p->p_sigpend.sp_set) != 0) {
762 1.52 ad LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
763 1.52 ad lwp_lock(l2);
764 1.56 pavel l2->l_flag |= LW_PENDSIG;
765 1.52 ad lwp_unlock(l2);
766 1.52 ad }
767 1.31 yamt }
768 1.31 yamt
769 1.52 ad lwp_lock(l);
770 1.52 ad l->l_stat = LSZOMB;
771 1.52 ad lwp_unlock(l);
772 1.2 thorpej p->p_nrlwps--;
773 1.52 ad cv_broadcast(&p->p_lwpcv);
774 1.78 ad if (l->l_lwpctl != NULL)
775 1.78 ad l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
776 1.52 ad mutex_exit(&p->p_smutex);
777 1.52 ad
778 1.52 ad /*
779 1.52 ad * We can no longer block. At this point, lwp_free() may already
780 1.52 ad * be gunning for us. On a multi-CPU system, we may be off p_lwps.
781 1.52 ad *
782 1.52 ad * Free MD LWP resources.
783 1.52 ad */
784 1.52 ad #ifndef __NO_CPU_LWP_FREE
785 1.52 ad cpu_lwp_free(l, 0);
786 1.52 ad #endif
787 1.2 thorpej
788 1.65 ad if (current) {
789 1.65 ad pmap_deactivate(l);
790 1.65 ad
791 1.65 ad /*
792 1.65 ad * Release the kernel lock, and switch away into
793 1.65 ad * oblivion.
794 1.65 ad */
795 1.52 ad #ifdef notyet
796 1.65 ad /* XXXSMP hold in lwp_userret() */
797 1.65 ad KERNEL_UNLOCK_LAST(l);
798 1.52 ad #else
799 1.65 ad KERNEL_UNLOCK_ALL(l, NULL);
800 1.52 ad #endif
801 1.65 ad lwp_exit_switchaway(l);
802 1.65 ad }
803 1.2 thorpej }
804 1.2 thorpej
805 1.2 thorpej void
806 1.64 yamt lwp_exit_switchaway(struct lwp *l)
807 1.2 thorpej {
808 1.64 yamt struct cpu_info *ci;
809 1.64 yamt struct lwp *idlelwp;
810 1.64 yamt
811 1.64 yamt /* Unlocked, but is for statistics only. */
812 1.64 yamt uvmexp.swtch++;
813 1.64 yamt
814 1.64 yamt (void)splsched();
815 1.64 yamt l->l_flag &= ~LW_RUNNING;
816 1.64 yamt ci = curcpu();
817 1.64 yamt idlelwp = ci->ci_data.cpu_idlelwp;
818 1.64 yamt idlelwp->l_stat = LSONPROC;
819 1.75 ad
820 1.75 ad /*
821 1.75 ad * cpu_onproc must be updated with the CPU locked, as
822 1.75 ad 	 * aston() may try to set an AST pending on the LWP (and
823 1.75 ad 	 * it does so with the CPU locked). Otherwise, the LWP
824 1.75 ad 	 * may be destroyed before the AST can be set, leading
825 1.75 ad 	 * to a use-after-free.
826 1.75 ad */
827 1.75 ad spc_lock(ci);
828 1.75 ad ci->ci_data.cpu_onproc = idlelwp;
829 1.75 ad spc_unlock(ci);
830 1.75 ad cpu_switchto(NULL, idlelwp, false);
831 1.52 ad }
832 1.52 ad
833 1.52 ad /*
834 1.52 ad * Free a dead LWP's remaining resources.
835 1.52 ad *
836 1.52 ad * XXXLWP limits.
837 1.52 ad */
838 1.52 ad void
839 1.63 ad lwp_free(struct lwp *l, bool recycle, bool last)
840 1.52 ad {
841 1.52 ad struct proc *p = l->l_proc;
842 1.52 ad ksiginfoq_t kq;
843 1.52 ad
844 1.52 ad /*
845 1.52 ad * If this was not the last LWP in the process, then adjust
846 1.52 ad * counters and unlock.
847 1.52 ad */
848 1.52 ad if (!last) {
849 1.52 ad /*
850 1.52 ad * Add the LWP's run time to the process' base value.
851 1.52 ad 	 * This needs to coincide with coming off p_lwps.
852 1.52 ad */
853 1.52 ad timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
854 1.64 yamt p->p_pctcpu += l->l_pctcpu;
855 1.52 ad LIST_REMOVE(l, l_sibling);
856 1.52 ad p->p_nlwps--;
857 1.52 ad p->p_nzlwps--;
858 1.52 ad if ((l->l_prflag & LPR_DETACHED) != 0)
859 1.52 ad p->p_ndlwps--;
860 1.63 ad
861 1.63 ad /*
862 1.63 ad * Have any LWPs sleeping in lwp_wait() recheck for
863 1.63 ad * deadlock.
864 1.63 ad */
865 1.63 ad cv_broadcast(&p->p_lwpcv);
866 1.52 ad mutex_exit(&p->p_smutex);
867 1.63 ad }
868 1.52 ad
869 1.52 ad #ifdef MULTIPROCESSOR
870 1.63 ad /*
871 1.63 ad * In the unlikely event that the LWP is still on the CPU,
872 1.63 ad * then spin until it has switched away. We need to release
873 1.63 ad * all locks to avoid deadlock against interrupt handlers on
874 1.63 ad * the target CPU.
875 1.63 ad */
876 1.64 yamt if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
877 1.63 ad int count;
878 1.64 yamt (void)count; /* XXXgcc */
879 1.63 ad KERNEL_UNLOCK_ALL(curlwp, &count);
880 1.64 yamt while ((l->l_flag & LW_RUNNING) != 0 ||
881 1.64 yamt l->l_cpu->ci_curlwp == l)
882 1.63 ad SPINLOCK_BACKOFF_HOOK;
883 1.63 ad KERNEL_LOCK(count, curlwp);
884 1.63 ad }
885 1.52 ad #endif
886 1.52 ad
887 1.52 ad /*
888 1.52 ad * Destroy the LWP's remaining signal information.
889 1.52 ad */
890 1.52 ad ksiginfo_queue_init(&kq);
891 1.52 ad sigclear(&l->l_sigpend, NULL, &kq);
892 1.52 ad ksiginfo_queue_drain(&kq);
893 1.52 ad cv_destroy(&l->l_sigcv);
894 1.65 ad mutex_destroy(&l->l_swaplock);
895 1.2 thorpej
896 1.19 jdolecek /*
897 1.52 ad * Free the LWP's turnstile and the LWP structure itself unless the
898 1.64 yamt 	 * caller wants to recycle them. Also free the scheduler-specific data.
899 1.52 ad *
900 1.52 ad * We can't return turnstile0 to the pool (it didn't come from it),
901 1.52 ad * so if it comes up just drop it quietly and move on.
902 1.52 ad *
903 1.52 ad * We don't recycle the VM resources at this time.
904 1.19 jdolecek */
905 1.55 ad KERNEL_LOCK(1, curlwp); /* XXXSMP */
906 1.64 yamt
907 1.78 ad if (l->l_lwpctl != NULL)
908 1.78 ad lwp_ctl_free(l);
909 1.64 yamt sched_lwp_exit(l);
910 1.64 yamt
911 1.52 ad if (!recycle && l->l_ts != &turnstile0)
912 1.76 ad pool_cache_put(turnstile_cache, l->l_ts);
913 1.52 ad #ifndef __NO_CPU_LWP_FREE
914 1.52 ad cpu_lwp_free2(l);
915 1.52 ad #endif
916 1.19 jdolecek uvm_lwp_exit(l);
917 1.60 yamt KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
918 1.75 ad KASSERT(l->l_inheritedprio == -1);
919 1.52 ad if (!recycle)
920 1.19 jdolecek pool_put(&lwp_pool, l);
921 1.55 ad KERNEL_UNLOCK_ONE(curlwp); /* XXXSMP */
922 1.2 thorpej }
923 1.2 thorpej
924 1.2 thorpej /*
925 1.2 thorpej * Pick an LWP to represent the process for those operations which
926 1.2 thorpej * want information about a "process" that is actually associated
927 1.2 thorpej * with an LWP.
928 1.52 ad *
929 1.52 ad * If 'locking' is false, no locking or lock checks are performed.
930 1.52 ad * This is intended for use by DDB.
931 1.52 ad *
932 1.52 ad * We don't bother locking the LWP here, since code that uses this
933 1.52 ad * interface is broken by design and an exact match is not required.
934 1.2 thorpej */
935 1.2 thorpej struct lwp *
936 1.52 ad proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
937 1.2 thorpej {
938 1.2 thorpej struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
939 1.27 matt struct lwp *signalled;
940 1.52 ad int cnt;
941 1.52 ad
942 1.52 ad if (locking) {
943 1.63 ad KASSERT(mutex_owned(&p->p_smutex));
944 1.52 ad }
945 1.2 thorpej
946 1.2 thorpej /* Trivial case: only one LWP */
947 1.52 ad if (p->p_nlwps == 1) {
948 1.52 ad l = LIST_FIRST(&p->p_lwps);
949 1.52 ad if (nrlwps)
950 1.68 tnn *nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
951 1.52 ad return l;
952 1.52 ad }
953 1.2 thorpej
954 1.52 ad cnt = 0;
955 1.2 thorpej switch (p->p_stat) {
956 1.2 thorpej case SSTOP:
957 1.2 thorpej case SACTIVE:
958 1.2 thorpej /* Pick the most live LWP */
959 1.2 thorpej onproc = running = sleeping = stopped = suspended = NULL;
960 1.27 matt signalled = NULL;
961 1.2 thorpej LIST_FOREACH(l, &p->p_lwps, l_sibling) {
962 1.64 yamt if ((l->l_flag & LW_IDLE) != 0) {
963 1.64 yamt continue;
964 1.64 yamt }
965 1.27 matt if (l->l_lid == p->p_sigctx.ps_lwp)
966 1.27 matt signalled = l;
967 1.2 thorpej switch (l->l_stat) {
968 1.2 thorpej case LSONPROC:
969 1.2 thorpej onproc = l;
970 1.52 ad cnt++;
971 1.2 thorpej break;
972 1.2 thorpej case LSRUN:
973 1.2 thorpej running = l;
974 1.52 ad cnt++;
975 1.2 thorpej break;
976 1.2 thorpej case LSSLEEP:
977 1.2 thorpej sleeping = l;
978 1.2 thorpej break;
979 1.2 thorpej case LSSTOP:
980 1.2 thorpej stopped = l;
981 1.2 thorpej break;
982 1.2 thorpej case LSSUSPENDED:
983 1.2 thorpej suspended = l;
984 1.2 thorpej break;
985 1.2 thorpej }
986 1.2 thorpej }
987 1.52 ad if (nrlwps)
988 1.52 ad *nrlwps = cnt;
989 1.27 matt if (signalled)
990 1.52 ad l = signalled;
991 1.52 ad else if (onproc)
992 1.52 ad l = onproc;
993 1.52 ad else if (running)
994 1.52 ad l = running;
995 1.52 ad else if (sleeping)
996 1.52 ad l = sleeping;
997 1.52 ad else if (stopped)
998 1.52 ad l = stopped;
999 1.52 ad else if (suspended)
1000 1.52 ad l = suspended;
1001 1.52 ad else
1002 1.52 ad break;
1003 1.52 ad return l;
1004 1.2 thorpej #ifdef DIAGNOSTIC
1005 1.2 thorpej case SIDL:
1006 1.52 ad case SZOMB:
1007 1.52 ad case SDYING:
1008 1.52 ad case SDEAD:
1009 1.52 ad if (locking)
1010 1.52 ad mutex_exit(&p->p_smutex);
1011 1.2 thorpej /* We have more than one LWP and we're in SIDL?
1012 1.2 thorpej * How'd that happen?
1013 1.2 thorpej */
1014 1.52 ad panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
1015 1.52 ad p->p_pid, p->p_comm, p->p_stat);
1016 1.52 ad break;
1017 1.2 thorpej default:
1018 1.52 ad if (locking)
1019 1.52 ad mutex_exit(&p->p_smutex);
1020 1.2 thorpej panic("Process %d (%s) in unknown state %d",
1021 1.2 thorpej p->p_pid, p->p_comm, p->p_stat);
1022 1.2 thorpej #endif
1023 1.2 thorpej }
1024 1.2 thorpej
1025 1.52 ad if (locking)
1026 1.52 ad mutex_exit(&p->p_smutex);
1027 1.2 thorpej panic("proc_representative_lwp: couldn't find a lwp for process"
1028 1.2 thorpej " %d (%s)", p->p_pid, p->p_comm);
1029 1.2 thorpej /* NOTREACHED */
1030 1.2 thorpej return NULL;
1031 1.2 thorpej }
1032 1.37 ad
1033 1.37 ad /*
1034 1.52 ad * Look up a live LWP within the specified process, and return it locked.
1035 1.52 ad *
1036 1.52 ad * Must be called with p->p_smutex held.
1037 1.52 ad */
1038 1.52 ad struct lwp *
1039 1.52 ad lwp_find(struct proc *p, int id)
1040 1.52 ad {
1041 1.52 ad struct lwp *l;
1042 1.52 ad
1043 1.63 ad KASSERT(mutex_owned(&p->p_smutex));
1044 1.52 ad
1045 1.52 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1046 1.52 ad if (l->l_lid == id)
1047 1.52 ad break;
1048 1.52 ad }
1049 1.52 ad
1050 1.52 ad /*
1051 1.52 ad * No need to lock - all of these conditions will
1052 1.52 ad * be visible with the process level mutex held.
1053 1.52 ad */
1054 1.52 ad if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1055 1.52 ad l = NULL;
1056 1.52 ad
1057 1.52 ad return l;
1058 1.52 ad }
1059 1.52 ad
1060 1.52 ad /*
1061 1.37 ad * Update an LWP's cached credentials to mirror the process' master copy.
1062 1.37 ad *
1063 1.37 ad * This happens early in the syscall path, on user trap, and on LWP
1064 1.37 ad * creation. A long-running LWP can also voluntarily choose to update
1065 1.37 ad * its credentials by calling this routine. This may be called from
1066 1.37 ad * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1067 1.37 ad */
1068 1.37 ad void
1069 1.37 ad lwp_update_creds(struct lwp *l)
1070 1.37 ad {
1071 1.37 ad kauth_cred_t oc;
1072 1.37 ad struct proc *p;
1073 1.37 ad
1074 1.37 ad p = l->l_proc;
1075 1.37 ad oc = l->l_cred;
1076 1.37 ad
1077 1.52 ad mutex_enter(&p->p_mutex);
1078 1.37 ad kauth_cred_hold(p->p_cred);
1079 1.37 ad l->l_cred = p->p_cred;
1080 1.52 ad mutex_exit(&p->p_mutex);
1081 1.52 ad if (oc != NULL) {
1082 1.52 ad KERNEL_LOCK(1, l); /* XXXSMP */
1083 1.37 ad kauth_cred_free(oc);
1084 1.52 ad KERNEL_UNLOCK_ONE(l); /* XXXSMP */
1085 1.52 ad }
1086 1.52 ad }
1087 1.52 ad
1088 1.52 ad /*
1089 1.52 ad * Verify that an LWP is locked, and optionally verify that the lock matches
1090 1.52 ad * one we specify.
1091 1.52 ad */
1092 1.52 ad int
1093 1.52 ad lwp_locked(struct lwp *l, kmutex_t *mtx)
1094 1.52 ad {
1095 1.52 ad kmutex_t *cur = l->l_mutex;
1096 1.52 ad
1097 1.52 ad return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1098 1.52 ad }
1099 1.52 ad
1100 1.52 ad /*
1101 1.52 ad * Lock an LWP.
1102 1.52 ad */
1103 1.52 ad void
1104 1.52 ad lwp_lock_retry(struct lwp *l, kmutex_t *old)
1105 1.52 ad {
1106 1.52 ad
1107 1.52 ad /*
1108 1.52 ad * XXXgcc ignoring kmutex_t * volatile on i386
1109 1.52 ad *
1110 1.52 ad * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
1111 1.52 ad */
1112 1.52 ad #if 1
1113 1.52 ad while (l->l_mutex != old) {
1114 1.52 ad #else
1115 1.52 ad for (;;) {
1116 1.52 ad #endif
1117 1.52 ad mutex_spin_exit(old);
1118 1.52 ad old = l->l_mutex;
1119 1.52 ad mutex_spin_enter(old);
1120 1.52 ad
1121 1.52 ad /*
1122 1.52 ad * mutex_enter() will have posted a read barrier. Re-test
1123 1.52 ad * l->l_mutex. If it has changed, we need to try again.
1124 1.52 ad */
1125 1.52 ad #if 1
1126 1.52 ad }
1127 1.52 ad #else
1128 1.52 ad } while (__predict_false(l->l_mutex != old));
1129 1.52 ad #endif
1130 1.52 ad }
1131 1.52 ad
1132 1.52 ad /*
1133 1.52 ad * Lend a new mutex to an LWP. The old mutex must be held.
1134 1.52 ad */
1135 1.52 ad void
1136 1.52 ad lwp_setlock(struct lwp *l, kmutex_t *new)
1137 1.52 ad {
1138 1.52 ad
1139 1.63 ad KASSERT(mutex_owned(l->l_mutex));
1140 1.52 ad
1141 1.52 ad mb_write();
1142 1.52 ad l->l_mutex = new;
1143 1.52 ad }
1144 1.52 ad
1145 1.52 ad /*
1146 1.52 ad * Lend a new mutex to an LWP, and release the old mutex. The old mutex
1147 1.52 ad * must be held.
1148 1.52 ad */
1149 1.52 ad void
1150 1.52 ad lwp_unlock_to(struct lwp *l, kmutex_t *new)
1151 1.52 ad {
1152 1.52 ad kmutex_t *old;
1153 1.52 ad
1154 1.63 ad KASSERT(mutex_owned(l->l_mutex));
1155 1.52 ad
1156 1.52 ad old = l->l_mutex;
1157 1.52 ad mb_write();
1158 1.52 ad l->l_mutex = new;
1159 1.52 ad mutex_spin_exit(old);
1160 1.52 ad }
1161 1.52 ad
1162 1.52 ad /*
1163 1.52 ad * Acquire a new mutex, and donate it to an LWP. The LWP must already be
1164 1.52 ad * locked.
1165 1.52 ad */
1166 1.52 ad void
1167 1.52 ad lwp_relock(struct lwp *l, kmutex_t *new)
1168 1.52 ad {
1169 1.52 ad kmutex_t *old;
1170 1.52 ad
1171 1.63 ad KASSERT(mutex_owned(l->l_mutex));
1172 1.52 ad
1173 1.52 ad old = l->l_mutex;
1174 1.52 ad if (old != new) {
1175 1.52 ad mutex_spin_enter(new);
1176 1.52 ad l->l_mutex = new;
1177 1.52 ad mutex_spin_exit(old);
1178 1.52 ad }
1179 1.52 ad }
1180 1.52 ad
1181 1.60 yamt int
1182 1.60 yamt lwp_trylock(struct lwp *l)
1183 1.60 yamt {
1184 1.60 yamt kmutex_t *old;
1185 1.60 yamt
1186 1.60 yamt for (;;) {
1187 1.60 yamt if (!mutex_tryenter(old = l->l_mutex))
1188 1.60 yamt return 0;
1189 1.60 yamt if (__predict_true(l->l_mutex == old))
1190 1.60 yamt return 1;
1191 1.60 yamt mutex_spin_exit(old);
1192 1.60 yamt }
1193 1.60 yamt }
1194 1.60 yamt
1195 1.52 ad /*
1196 1.56 pavel * Handle exceptions for mi_userret(). Called if a member of LW_USERRET is
1197 1.52 ad * set.
1198 1.52 ad */
1199 1.52 ad void
1200 1.52 ad lwp_userret(struct lwp *l)
1201 1.52 ad {
1202 1.52 ad struct proc *p;
1203 1.54 ad void (*hook)(void);
1204 1.52 ad int sig;
1205 1.52 ad
1206 1.52 ad p = l->l_proc;
1207 1.52 ad
1208 1.75 ad #ifndef __HAVE_FAST_SOFTINTS
1209 1.75 ad /* Run pending soft interrupts. */
1210 1.75 ad if (l->l_cpu->ci_data.cpu_softints != 0)
1211 1.75 ad softint_overlay();
1212 1.75 ad #endif
1213 1.75 ad
1214 1.52 ad /*
1215 1.52 ad * It should be safe to do this read unlocked on a multiprocessor
1216 1.52 ad 	 * system.
1217 1.52 ad */
1218 1.56 pavel while ((l->l_flag & LW_USERRET) != 0) {
1219 1.52 ad /*
1220 1.52 ad * Process pending signals first, unless the process
1221 1.61 ad * is dumping core or exiting, where we will instead
1222 1.61 ad 		 * enter the LW_WSUSPEND case below.
1223 1.52 ad */
1224 1.61 ad if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1225 1.61 ad LW_PENDSIG) {
1226 1.52 ad KERNEL_LOCK(1, l); /* XXXSMP pool_put() below */
1227 1.52 ad mutex_enter(&p->p_smutex);
1228 1.52 ad while ((sig = issignal(l)) != 0)
1229 1.52 ad postsig(sig);
1230 1.52 ad mutex_exit(&p->p_smutex);
1231 1.52 ad KERNEL_UNLOCK_LAST(l); /* XXXSMP */
1232 1.52 ad }
1233 1.52 ad
1234 1.52 ad /*
1235 1.52 ad * Core-dump or suspend pending.
1236 1.52 ad *
1237 1.52 ad * In case of core dump, suspend ourselves, so that the
1238 1.52 ad * kernel stack and therefore the userland registers saved
1239 1.52 ad * in the trapframe are around for coredump() to write them
1240 1.52 ad * out. We issue a wakeup on p->p_lwpcv so that sigexit()
1241 1.52 ad * will write the core file out once all other LWPs are
1242 1.52 ad * suspended.
1243 1.52 ad */
1244 1.56 pavel if ((l->l_flag & LW_WSUSPEND) != 0) {
1245 1.52 ad mutex_enter(&p->p_smutex);
1246 1.52 ad p->p_nrlwps--;
1247 1.52 ad cv_broadcast(&p->p_lwpcv);
1248 1.52 ad lwp_lock(l);
1249 1.52 ad l->l_stat = LSSUSPENDED;
1250 1.52 ad mutex_exit(&p->p_smutex);
1251 1.64 yamt mi_switch(l);
1252 1.52 ad }
1253 1.52 ad
1254 1.52 ad /* Process is exiting. */
1255 1.56 pavel if ((l->l_flag & LW_WEXIT) != 0) {
1256 1.52 ad KERNEL_LOCK(1, l);
1257 1.52 ad lwp_exit(l);
1258 1.52 ad KASSERT(0);
1259 1.52 ad /* NOTREACHED */
1260 1.52 ad }
1261 1.54 ad
1262 1.54 ad /* Call userret hook; used by Linux emulation. */
1263 1.56 pavel if ((l->l_flag & LW_WUSERRET) != 0) {
1264 1.54 ad lwp_lock(l);
1265 1.56 pavel l->l_flag &= ~LW_WUSERRET;
1266 1.54 ad lwp_unlock(l);
1267 1.54 ad hook = p->p_userret;
1268 1.54 ad p->p_userret = NULL;
1269 1.54 ad (*hook)();
1270 1.54 ad }
1271 1.52 ad }
1272 1.52 ad }
1273 1.52 ad
1274 1.52 ad /*
1275 1.52 ad * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1276 1.52 ad */
1277 1.52 ad void
1278 1.52 ad lwp_need_userret(struct lwp *l)
1279 1.52 ad {
1280 1.63 ad KASSERT(lwp_locked(l, NULL));
1281 1.52 ad
1282 1.52 ad /*
1283 1.52 ad * Since the tests in lwp_userret() are done unlocked, make sure
1284 1.52 ad * that the condition will be seen before forcing the LWP to enter
1285 1.52 ad * kernel mode.
1286 1.52 ad */
1287 1.52 ad mb_write();
1288 1.52 ad cpu_signotify(l);
1289 1.52 ad }
1290 1.52 ad
1291 1.52 ad /*
1292 1.52 ad * Add one reference to an LWP. This will prevent the LWP from
1293 1.52 ad * exiting, thus keeping the LWP structure and PCB around to inspect.
1294 1.52 ad */
1295 1.52 ad void
1296 1.52 ad lwp_addref(struct lwp *l)
1297 1.52 ad {
1298 1.52 ad
1299 1.63 ad KASSERT(mutex_owned(&l->l_proc->p_smutex));
1300 1.52 ad KASSERT(l->l_stat != LSZOMB);
1301 1.52 ad KASSERT(l->l_refcnt != 0);
1302 1.52 ad
1303 1.52 ad l->l_refcnt++;
1304 1.52 ad }
1305 1.52 ad
1306 1.52 ad /*
1307 1.52 ad * Remove one reference to an LWP. If this is the last reference,
1308 1.52 ad * then we must finalize the LWP's death.
1309 1.52 ad */
1310 1.52 ad void
1311 1.52 ad lwp_delref(struct lwp *l)
1312 1.52 ad {
1313 1.52 ad struct proc *p = l->l_proc;
1314 1.52 ad
1315 1.52 ad mutex_enter(&p->p_smutex);
1316 1.72 ad KASSERT(l->l_stat != LSZOMB);
1317 1.72 ad KASSERT(l->l_refcnt > 0);
1318 1.52 ad if (--l->l_refcnt == 0)
1319 1.76 ad cv_broadcast(&p->p_lwpcv);
1320 1.52 ad mutex_exit(&p->p_smutex);
1321 1.52 ad }
1322 1.52 ad
1323 1.52 ad /*
1324 1.52 ad * Drain all references to the current LWP.
1325 1.52 ad */
1326 1.52 ad void
1327 1.52 ad lwp_drainrefs(struct lwp *l)
1328 1.52 ad {
1329 1.52 ad struct proc *p = l->l_proc;
1330 1.52 ad
1331 1.63 ad KASSERT(mutex_owned(&p->p_smutex));
1332 1.52 ad KASSERT(l->l_refcnt != 0);
1333 1.52 ad
1334 1.52 ad l->l_refcnt--;
1335 1.52 ad while (l->l_refcnt != 0)
1336 1.76 ad cv_wait(&p->p_lwpcv, &p->p_smutex);
1337 1.37 ad }
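
/*
 * A sketch of the reference protocol for callers (e.g. procfs) that
 * must inspect an LWP without it being freed underneath them; 'lid'
 * here is hypothetical:
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(&p->p_smutex);
 *	if (l != NULL) {
 *		... inspect the LWP and its PCB ...
 *		lwp_delref(l);
 *	}
 */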
1338 1.41 thorpej
1339 1.41 thorpej /*
1340 1.41 thorpej * lwp_specific_key_create --
1341 1.41 thorpej * Create a key for subsystem lwp-specific data.
1342 1.41 thorpej */
1343 1.41 thorpej int
1344 1.41 thorpej lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
1345 1.41 thorpej {
1346 1.41 thorpej
1347 1.45 thorpej return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
1348 1.41 thorpej }
1349 1.41 thorpej
1350 1.41 thorpej /*
1351 1.41 thorpej * lwp_specific_key_delete --
1352 1.41 thorpej * Delete a key for subsystem lwp-specific data.
1353 1.41 thorpej */
1354 1.41 thorpej void
1355 1.41 thorpej lwp_specific_key_delete(specificdata_key_t key)
1356 1.41 thorpej {
1357 1.41 thorpej
1358 1.41 thorpej specificdata_key_delete(lwp_specificdata_domain, key);
1359 1.41 thorpej }
1360 1.41 thorpej
1361 1.45 thorpej /*
1362 1.45 thorpej * lwp_initspecific --
1363 1.45 thorpej * Initialize an LWP's specificdata container.
1364 1.45 thorpej */
1365 1.42 christos void
1366 1.42 christos lwp_initspecific(struct lwp *l)
1367 1.42 christos {
1368 1.42 christos int error;
1369 1.45 thorpej
1370 1.42 christos error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
1371 1.42 christos KASSERT(error == 0);
1372 1.42 christos }
1373 1.42 christos
1374 1.41 thorpej /*
1375 1.45 thorpej * lwp_finispecific --
1376 1.45 thorpej * Finalize an LWP's specificdata container.
1377 1.45 thorpej */
1378 1.45 thorpej void
1379 1.45 thorpej lwp_finispecific(struct lwp *l)
1380 1.45 thorpej {
1381 1.45 thorpej
1382 1.45 thorpej specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
1383 1.45 thorpej }
1384 1.45 thorpej
1385 1.45 thorpej /*
1386 1.41 thorpej * lwp_getspecific --
1387 1.41 thorpej * Return lwp-specific data corresponding to the specified key.
1388 1.41 thorpej *
1389 1.41 thorpej * Note: LWP specific data is NOT INTERLOCKED. An LWP should access
1390 1.41 thorpej * only its OWN SPECIFIC DATA. If it is necessary to access another
1391 1.41 thorpej * LWP's specific data, care must be taken to ensure that doing so
1392 1.41 thorpej * would not cause internal data structure inconsistency (i.e. caller
1393 1.41 thorpej * can guarantee that the target LWP is not inside an lwp_getspecific()
1394 1.41 thorpej * or lwp_setspecific() call).
1395 1.41 thorpej */
1396 1.41 thorpej void *
1397 1.44 thorpej lwp_getspecific(specificdata_key_t key)
1398 1.41 thorpej {
1399 1.41 thorpej
1400 1.41 thorpej return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1401 1.44 thorpej &curlwp->l_specdataref, key));
1402 1.41 thorpej }
1403 1.41 thorpej
1404 1.47 hannken void *
1405 1.47 hannken _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
1406 1.47 hannken {
1407 1.47 hannken
1408 1.47 hannken return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1409 1.47 hannken &l->l_specdataref, key));
1410 1.47 hannken }
1411 1.47 hannken
1412 1.41 thorpej /*
1413 1.41 thorpej * lwp_setspecific --
1414 1.41 thorpej * Set lwp-specific data corresponding to the specified key.
1415 1.41 thorpej */
1416 1.41 thorpej void
1417 1.45 thorpej lwp_setspecific(specificdata_key_t key, void *data)
1418 1.41 thorpej {
1419 1.41 thorpej
1420 1.41 thorpej specificdata_setspecific(lwp_specificdata_domain,
1421 1.44 thorpej &curlwp->l_specdataref, key, data);
1422 1.41 thorpej }
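
/*
 * A sketch of typical use of the lwp-specific data interface; the key,
 * destructor and data shown are hypothetical.  Note that, per the
 * comment on lwp_getspecific(), an LWP accesses only its own data:
 *
 *	static specificdata_key_t example_key;
 *
 *	error = lwp_specific_key_create(&example_key, example_dtor);
 *	...
 *	lwp_setspecific(example_key, data);
 *	data = lwp_getspecific(example_key);
 *	...
 *	lwp_specific_key_delete(example_key);
 */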
1423 1.78 ad
1424 1.78 ad /*
1425 1.78 ad * Allocate a new lwpctl structure for a user LWP.
1426 1.78 ad */
1427 1.78 ad int
1428 1.78 ad lwp_ctl_alloc(vaddr_t *uaddr)
1429 1.78 ad {
1430 1.78 ad lcproc_t *lp;
1431 1.78 ad u_int bit, i, offset;
1432 1.78 ad struct uvm_object *uao;
1433 1.78 ad int error;
1434 1.78 ad lcpage_t *lcp;
1435 1.78 ad proc_t *p;
1436 1.78 ad lwp_t *l;
1437 1.78 ad
1438 1.78 ad l = curlwp;
1439 1.78 ad p = l->l_proc;
1440 1.78 ad
1441 1.78 ad if (l->l_lcpage != NULL)
1442 1.78 ad return (EINVAL);
1443 1.78 ad
1444 1.78 ad /* First time around, allocate header structure for the process. */
1445 1.78 ad if ((lp = p->p_lwpctl) == NULL) {
1446 1.78 ad lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1447 1.78 ad mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1448 1.78 ad lp->lp_uao = NULL;
1449 1.78 ad TAILQ_INIT(&lp->lp_pages);
1450 1.78 ad mutex_enter(&p->p_mutex);
1451 1.78 ad if (p->p_lwpctl == NULL) {
1452 1.78 ad p->p_lwpctl = lp;
1453 1.78 ad mutex_exit(&p->p_mutex);
1454 1.78 ad } else {
1455 1.78 ad mutex_exit(&p->p_mutex);
1456 1.78 ad mutex_destroy(&lp->lp_lock);
1457 1.78 ad kmem_free(lp, sizeof(*lp));
1458 1.78 ad lp = p->p_lwpctl;
1459 1.78 ad }
1460 1.78 ad }
1461 1.78 ad
1462 1.78 ad /*
1463 1.78 ad * Set up an anonymous memory region to hold the shared pages.
1464 1.78 ad * Map them into the process' address space. The user vmspace
1465 1.78 ad * gets the first reference on the UAO.
1466 1.78 ad */
1467 1.78 ad mutex_enter(&lp->lp_lock);
1468 1.78 ad if (lp->lp_uao == NULL) {
1469 1.78 ad lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1470 1.78 ad lp->lp_cur = 0;
1471 1.78 ad lp->lp_max = LWPCTL_UAREA_SZ;
1472 1.78 ad lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1473 1.78 ad (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
1474 1.78 ad error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1475 1.78 ad LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1476 1.78 ad UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1477 1.78 ad if (error != 0) {
1478 1.78 ad uao_detach(lp->lp_uao);
1479 1.78 ad lp->lp_uao = NULL;
1480 1.78 ad mutex_exit(&lp->lp_lock);
1481 1.78 ad return error;
1482 1.78 ad }
1483 1.78 ad }
1484 1.78 ad
1485 1.78 ad /* Get a free block and allocate for this LWP. */
1486 1.78 ad TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1487 1.78 ad if (lcp->lcp_nfree != 0)
1488 1.78 ad break;
1489 1.78 ad }
1490 1.78 ad if (lcp == NULL) {
1491 1.78 ad /* Nothing available - try to set up a free page. */
1492 1.78 ad if (lp->lp_cur == lp->lp_max) {
1493 1.78 ad mutex_exit(&lp->lp_lock);
1494 1.78 ad return ENOMEM;
1495 1.78 ad }
1496 1.78 ad lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1497 1.78 ad if (lcp == NULL)
1498 1.78 ad return ENOMEM;
1499 1.78 ad /*
1500 1.78 ad * Wire the next page down in kernel space. Since this
1501 1.78 ad * is a new mapping, we must add a reference.
1502 1.78 ad */
1503 1.78 ad uao = lp->lp_uao;
1504 1.78 ad (*uao->pgops->pgo_reference)(uao);
1505 1.78 ad error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1506 1.78 ad uao, lp->lp_cur, PAGE_SIZE,
1507 1.78 ad UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1508 1.78 ad UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1509 1.78 ad if (error == 0)
1510 1.78 ad error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1511 1.78 ad lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1512 1.78 ad if (error != 0) {
1513 1.78 ad mutex_exit(&lp->lp_lock);
1514 1.78 ad kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1515 1.78 ad (*uao->pgops->pgo_detach)(uao);
1516 1.78 ad return error;
1517 1.78 ad }
1518 1.78 ad /* Prepare the page descriptor and link into the list. */
1519 1.78 ad lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1520 1.78 ad lp->lp_cur += PAGE_SIZE;
1521 1.78 ad lcp->lcp_nfree = LWPCTL_PER_PAGE;
1522 1.78 ad lcp->lcp_rotor = 0;
1523 1.78 ad memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1524 1.78 ad TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1525 1.78 ad }
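	/*
	 * Scan for a bitmap word with a free slot, starting from the
	 * rotor; a set bit marks a free slot, 32 slots per word.
	 */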
1526 1.78 ad for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1527 1.78 ad if (++i >= LWPCTL_BITMAP_ENTRIES)
1528 1.78 ad i = 0;
1529 1.78 ad }
1530 1.78 ad bit = ffs(lcp->lcp_bitmap[i]) - 1;
1531 1.78 ad lcp->lcp_bitmap[i] ^= (1 << bit);
1532 1.78 ad lcp->lcp_rotor = i;
1533 1.78 ad lcp->lcp_nfree--;
1534 1.78 ad l->l_lcpage = lcp;
1535 1.78 ad offset = (i << 5) + bit;
1536 1.78 ad l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1537 1.78 ad *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1538 1.78 ad mutex_exit(&lp->lp_lock);
1539 1.78 ad
1540 1.78 ad l->l_lwpctl->lc_curcpu = (short)curcpu()->ci_data.cpu_index;
1541 1.78 ad
1542 1.78 ad return 0;
1543 1.78 ad }
1544 1.78 ad
1545 1.78 ad /*
1546 1.78 ad * Free an lwpctl structure back to the per-process list.
1547 1.78 ad */
1548 1.78 ad void
1549 1.78 ad lwp_ctl_free(lwp_t *l)
1550 1.78 ad {
1551 1.78 ad lcproc_t *lp;
1552 1.78 ad lcpage_t *lcp;
1553 1.78 ad u_int map, offset;
1554 1.78 ad
1555 1.78 ad lp = l->l_proc->p_lwpctl;
1556 1.78 ad KASSERT(lp != NULL);
1557 1.78 ad
1558 1.78 ad lcp = l->l_lcpage;
1559 1.78 ad offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1560 1.78 ad KASSERT(offset < LWPCTL_PER_PAGE);
1561 1.78 ad
1562 1.78 ad mutex_enter(&lp->lp_lock);
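	/* Return the slot to the page: set its bit in the 32-bit word. */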
1563 1.78 ad lcp->lcp_nfree++;
1564 1.78 ad map = offset >> 5;
1565 1.78 ad lcp->lcp_bitmap[map] |= (1 << (offset & 31));
1566 1.78 ad if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1567 1.78 ad lcp->lcp_rotor = map;
1568 1.78 ad if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1569 1.78 ad TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1570 1.78 ad TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1571 1.78 ad }
1572 1.78 ad mutex_exit(&lp->lp_lock);
1573 1.78 ad }
1574 1.78 ad
1575 1.78 ad /*
1576 1.78 ad * Process is exiting; tear down lwpctl state. This can only be safely
1577 1.78 ad * called by the last LWP in the process.
1578 1.78 ad */
1579 1.78 ad void
1580 1.78 ad lwp_ctl_exit(void)
1581 1.78 ad {
1582 1.78 ad lcpage_t *lcp, *next;
1583 1.78 ad lcproc_t *lp;
1584 1.78 ad proc_t *p;
1585 1.78 ad lwp_t *l;
1586 1.78 ad
1587 1.78 ad l = curlwp;
1588 1.78 ad l->l_lwpctl = NULL;
1589 1.78 ad p = l->l_proc;
1590 1.78 ad lp = p->p_lwpctl;
1591 1.78 ad
1592 1.78 ad KASSERT(lp != NULL);
1593 1.78 ad KASSERT(p->p_nlwps == 1);
1594 1.78 ad
1595 1.78 ad for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
1596 1.78 ad next = TAILQ_NEXT(lcp, lcp_chain);
1597 1.78 ad uvm_unmap(kernel_map, lcp->lcp_kaddr,
1598 1.78 ad lcp->lcp_kaddr + PAGE_SIZE);
1599 1.78 ad kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1600 1.78 ad }
1601 1.78 ad
1602 1.78 ad if (lp->lp_uao != NULL) {
1603 1.78 ad uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
1604 1.78 ad lp->lp_uva + LWPCTL_UAREA_SZ);
1605 1.78 ad }
1606 1.78 ad
1607 1.78 ad mutex_destroy(&lp->lp_lock);
1608 1.78 ad kmem_free(lp, sizeof(*lp));
1609 1.78 ad p->p_lwpctl = NULL;
1610 1.78 ad }