/*	$NetBSD: kern_lwp.c,v 1.102 2008/04/24 15:35:29 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit or thread of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp", also known as lwp_t.
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
 *		then the LWP is not on a run queue, but may be soon.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed,
 *		or it has ceased executing a unit of work and is waiting
 *		to be started again.
 *
 *	LSSUSPENDED
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or may be
 *	sleeping or idle.  It is expected to take the necessary action to
 *	stop executing or become "running" again within a short timeframe.
 *	The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
 *	Importantly, it indicates that its state is tied to a CPU.
 *
 *	LSZOMB
 *
 *		Dead or dying: the LWP has released most of its resources
 *		and is: a) about to switch away into oblivion, or b) has
 *		already switched away.  When it switches away, its few
 *		remaining resources can be collected.
 *
 *	LSSLEEP
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		has switched away or will switch away shortly to allow other
 *		LWPs to run on the CPU.
 *
 *	LSSTOP
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *
 *		Single LWPs within a process can not be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP state may only be set when returning to
 *	user space in userret(), or when sleeping interruptibly.  The
 *	LSSUSPENDED state may only be set in userret().  Before setting
 *	those states, we try to ensure that the LWPs will release all
 *	locks that they hold, and at a minimum try to ensure that the
 *	LWP can be set runnable again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *		    > STOPPED			    > SLEEP
 *		    > SUSPENDED			    > STOPPED
 *						    > SUSPENDED
 *						    > ZOMB
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *		    > SLEEP			    > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *		    > RUN			    > SUSPENDED
 *		    > STOPPED			    > STOPPED
 *		    > SUSPENDED
 *
 *	Other state transitions are possible with kernel threads (eg
 *	ONPROC -> IDL), but only happen under tightly controlled
 *	circumstances where the side effects are understood.
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin lock pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held,
 *	and may cause the LWP's lock pointer to change.  Manipulation of
 *	the general lock is not performed directly, but through calls to
 *	lwp_lock(), lwp_relock() and similar.
 *
 *	States and their associated locks:
 *
 *	LSONPROC, LSZOMB:
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock.
 *
 *	LSIDL, LSRUN:
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This is a per-CPU lock.
 *
 *	LSSLEEP:
 *
 *		Covered by a lock associated with the sleep queue that the
 *		LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue lock.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the lock is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleepq_t::sq_mutex ->
 *			tschain_t::tc_mutex ->
 *			    spc::spc_mutex
 *
 *	Each process has a scheduler state lock (proc::p_smutex), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_smutex must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_smutex does not need to be held when transitioning among these
 *	three states.
 */
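
/*
 * Illustrative sketch (not part of the original file): the rules above
 * mean that examining an LWP's state safely always follows the same
 * pattern: hold p_smutex where process-wide state is involved, then
 * take the LWP's current lock via lwp_lock(), which copes with l_mutex
 * changing underneath us.  The helper name here is hypothetical.
 *
 *	static bool
 *	example_lwp_is_stopped(struct proc *p, struct lwp *l)
 *	{
 *		bool stopped;
 *
 *		KASSERT(mutex_owned(&p->p_smutex));
 *		lwp_lock(l);
 *		stopped = (l->l_stat == LSSTOP);
 *		lwp_unlock(l);
 *		return stopped;
 *	}
 */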

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.102 2008/04/24 15:35:29 ad Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/user.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/pset.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);

POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr, IPL_NONE);

static pool_cache_t lwp_cache;
static specificdata_domain_t lwp_specificdata_domain;

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
	lwp_sys_init();
	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(&t->l_proc->p_smutex));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	return (error);
}
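
/*
 * Usage sketch (illustrative only, not taken from this file): suspending
 * every other LWP in the current process.  The caller takes p_smutex once
 * and the per-LWP lock for each target; lwp_suspend() drops the LWP lock
 * itself.
 *
 *	struct proc *p = curproc;
 *	struct lwp *t;
 *
 *	mutex_enter(&p->p_smutex);
 *	LIST_FOREACH(t, &p->p_lwps, l_sibling) {
 *		if (t == curlwp)
 *			continue;
 *		lwp_lock(t);
 *		(void)lwp_suspend(curlwp, t);
 *	}
 *	mutex_exit(&p->p_smutex);
 */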

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_smutex held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_smutex held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;
	lwpid_t curlid;
	bool exiting;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nlwpwait++;
	l->l_waitingfor = lid;
	curlid = l->l_lid;
	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(&p->p_smutex);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);/* releases proc mutex */
			mutex_enter(&p->p_smutex);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		error = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			/*
			 * If a specific wait and the target is waiting on
			 * us, then avoid deadlock.  This also traps LWPs
			 * that try to wait on themselves.
			 *
			 * Note that this does not handle more complicated
			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
			 * can still be killed so it is not a major problem.
			 */
			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
				error = EDEADLK;
				break;
			}
			if (l2 == l)
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += exiting;
				continue;
			}
			if (lid != 0) {
				if (l2->l_lid != lid)
					continue;
				/*
				 * Mark this LWP as the first waiter, if there
				 * is no other.
				 */
				if (l2->l_waiter == 0)
					l2->l_waiter = curlid;
			} else if (l2->l_waiter != 0) {
				/*
				 * It already has a waiter - so don't
				 * collect it.  If the waiter doesn't
				 * grab it we'll get another chance
				 * later.
				 */
				nfound++;
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			/*
			 * We're no longer waiting.  Reset the "first waiter"
			 * pointer on the target, in case it was us.
			 */
			l->l_waitingfor = 0;
			l2->l_waiter = 0;
			p->p_nlwpwait--;
			if (departed)
				*departed = l2->l_lid;
			sched_lwp_collect(l2);

			/* lwp_free() releases the proc lock. */
			lwp_free(l2, false, false);
			mutex_enter(&p->p_smutex);
			return 0;
		}

		if (error != 0)
			break;
		if (nfound == 0) {
			error = ESRCH;
			break;
		}

		/*
		 * The kernel is careful to ensure that it can not deadlock
		 * when exiting - just keep waiting.
		 */
		if (exiting) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			continue;
		}

		/*
		 * If all other LWPs are waiting for exits or suspends
		 * and the supply of zombies and potential zombies is
		 * exhausted, then we are about to deadlock.
		 *
		 * If the process is exiting (and this LWP is not the one
		 * that is coordinating the exit) then bail out now.
		 */
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
			error = EDEADLK;
			break;
		}

		/*
		 * Sit around and wait for something to happen.  We'll be
		 * awoken if any of the conditions examined change: if an
		 * LWP exits, is collected, or is detached.
		 */
		if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
			break;
	}

	/*
	 * We didn't find any LWPs to collect, we may have received a
	 * signal, or some other condition has caused us to bail out.
	 *
	 * If waiting on a specific LWP, clear the waiters marker: some
	 * other LWP may want it.  Then, kick all the remaining waiters
	 * so that they can re-check for zombies and for deadlock.
	 */
	if (lid != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2->l_lid == lid) {
				if (l2->l_waiter == curlid)
					l2->l_waiter = 0;
				break;
			}
		}
	}
	p->p_nlwpwait--;
	l->l_waitingfor = 0;
	cv_broadcast(&p->p_lwpcv);

	return error;
}
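
/*
 * Caller sketch (illustrative only): this is roughly how a system call
 * such as _lwp_wait() would drive lwp_wait1().  The argument handling
 * is simplified and the function name is invented.
 *
 *	int
 *	example_lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departedp)
 *	{
 *		struct proc *p = l->l_proc;
 *		int error;
 *
 *		mutex_enter(&p->p_smutex);
 *		error = lwp_wait1(l, lid, departedp, 0);
 *		mutex_exit(&p->p_smutex);
 *		return error;
 *	}
 */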

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    lwp_t **rnewlwpp, int sclass)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(&p2->p_smutex);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, true, false);/* releases proc mutex */
		} else
			mutex_exit(&p2->p_smutex);
	}
	if (isfree == NULL) {
		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == -1);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_class = sclass;
	l2->l_kpriority = l1->l_kpriority;
	l2->l_kpribase = PRI_KERNEL;
	l2->l_priority = l1->l_priority;
	l2->l_inheritedprio = -1;
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	l2->l_flag = inmem ? LW_INMEM : 0;
	l2->l_pflag = LP_MPSAFE;
	l2->l_fd = p2->p_fd;

	if (p2->p_flag & PK_SYSTEM) {
		/* Mark it as a system LWP and not a candidate for swapping */
		l2->l_flag |= LW_SYSTEM;
	}

	lwp_initspecific(l2);
	sched_lwp_fork(l1, l2);
	lwp_update_creds(l2);
	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(&p2->p_smutex);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;

	mutex_exit(&p2->p_smutex);

	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(proc_lock);

	if ((p2->p_flag & PK_SYSTEM) == 0) {
		/* Locking is needed, since LWP is in the list of all LWPs */
		lwp_lock(l2);
		/* Inherit a processor-set */
		l2->l_psid = l1->l_psid;
		/* Inherit an affinity */
		memcpy(&l2->l_affinity, &l1->l_affinity, sizeof(cpuset_t));
		/* Look for a CPU to start */
		l2->l_cpu = sched_takecpu(l2);
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
	}

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
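
/*
 * Caller sketch (illustrative only; the names, the u-area allocation and
 * the flags shown here are simplifications, and error handling is
 * elided).  A new LWP arrives in LSIDL, so the caller must make it
 * runnable afterwards; see sys__lwp_create() for the authoritative
 * sequence.
 *
 *	vaddr_t uaddr;
 *	bool inmem;
 *	lwp_t *l2;
 *	int error;
 *
 *	inmem = uvm_uarea_alloc(&uaddr);
 *	error = lwp_create(curlwp, curproc, uaddr, inmem, LWP_DETACHED,
 *	    NULL, 0, start_fn, NULL, &l2, curlwp->l_class);
 *	if (error == 0) {
 *		mutex_enter(&curproc->p_smutex);
 *		lwp_lock(l2);
 *		... transition l2 from LSIDL to LSRUN ...
 *		mutex_exit(&curproc->p_smutex);
 *	}
 */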

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{

	if (prev != NULL) {
		/*
		 * Normalize the count of the spin-mutexes, it was
		 * increased in mi_switch().  Unmark the state of
		 * context switch - it is finished for previous LWP.
		 */
		curcpu()->ci_mtx_count++;
		membar_exit();
		prev->l_ctxswtch = 0;
	}
	spl0();
	pmap_activate(new);
	LOCKDEBUG_BARRIER(NULL, 0);
	if ((new->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new);
	}
}

/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	bool current;

	current = (l == curlwp);

	KASSERT(current || l->l_stat == LSIDL);

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
#ifdef MULTIPROCESSOR
	LOCKDEBUG_BARRIER(&kernel_lock, 0);
#else
	LOCKDEBUG_BARRIER(NULL, 0);
#endif

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(&p->p_smutex);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		KASSERT(current == true);
		/* XXXSMP kernel_lock not held */
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(&p->p_smutex);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);
	callout_destroy(&l->l_timeout_ch);

	/*
	 * While we can still block, mark the LWP as unswappable to
	 * prevent conflicts with the swapper.
	 */
	if (current)
		uvm_lwp_hold(l);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(proc_lock);
	LIST_REMOVE(l, l_list);
	mutex_exit(proc_lock);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 *
	 * XXXSMP disable preemption.
	 */
	mutex_enter(&p->p_smutex);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);/* releases proc mutex */
			mutex_enter(&p->p_smutex);
			l->l_refcnt++;
			lwp_drainrefs(l);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	if (l->l_name != NULL)
		strcpy(l->l_name, "(zombie)");
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
	mutex_exit(&p->p_smutex);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	if (current) {
		pmap_deactivate(l);

		/*
		 * Release the kernel lock, and switch away into
		 * oblivion.
		 */
#ifdef notyet
		/* XXXSMP hold in lwp_userret() */
		KERNEL_UNLOCK_LAST(l);
#else
		KERNEL_UNLOCK_ALL(l, NULL);
#endif
		lwp_exit_switchaway(l);
	}
}

void
lwp_exit_switchaway(struct lwp *l)
{
	struct cpu_info *ci;
	struct lwp *idlelwp;

	(void)splsched();
	l->l_flag &= ~LW_RUNNING;
	ci = curcpu();
	ci->ci_data.cpu_nswtch++;
	idlelwp = ci->ci_data.cpu_idlelwp;
	idlelwp->l_stat = LSONPROC;

	/*
	 * cpu_onproc must be updated with the CPU locked, as
	 * aston() may try to set an AST pending on the LWP (and
	 * it does so with the CPU locked).  Otherwise, the LWP
	 * may be destroyed before the AST can be set, leading
	 * to a use-after-free.
	 */
	spc_lock(ci);
	ci->ci_data.cpu_onproc = idlelwp;
	spc_unlock(ci);
	cpu_switchto(NULL, idlelwp, false);
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
	struct proc *p = l->l_proc;
	struct rusage *ru;
	ksiginfoq_t kq;

	KASSERT(l != curlwp);

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		bintime_add(&p->p_rtime, &l->l_rtime);
		p->p_pctcpu += l->l_pctcpu;
		ru = &p->p_stats->p_ru;
		ruadd(ru, &l->l_ru);
		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
		ru->ru_nivcsw += l->l_nivcsw;
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;

		/*
		 * Have any LWPs sleeping in lwp_wait() recheck for
		 * deadlock.
		 */
		cv_broadcast(&p->p_lwpcv);
		mutex_exit(&p->p_smutex);
	}

#ifdef MULTIPROCESSOR
	/*
	 * In the unlikely event that the LWP is still on the CPU,
	 * then spin until it has switched away.  We need to release
	 * all locks to avoid deadlock against interrupt handlers on
	 * the target CPU.
	 */
	if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
		int count;
		(void)count; /* XXXgcc */
		KERNEL_UNLOCK_ALL(curlwp, &count);
		while ((l->l_flag & LW_RUNNING) != 0 ||
		    l->l_cpu->ci_curlwp == l)
			SPINLOCK_BACKOFF_HOOK;
		KERNEL_LOCK(count, curlwp);
	}
#endif

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);
	mutex_destroy(&l->l_swaplock);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.  Also, free the scheduler specific
	 * data.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	if (l->l_lwpctl != NULL)
		lwp_ctl_free(l);
	sched_lwp_exit(l);

	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(turnstile_cache, l->l_ts);
	if (l->l_name != NULL)
		kmem_free(l->l_name, MAXCOMLEN);
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free2(l);
#endif
	KASSERT((l->l_flag & LW_INMEM) != 0);
	uvm_lwp_exit(l);
	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == -1);
	if (!recycle)
		pool_cache_put(lwp_cache, l);
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 *
 * If 'locking' is false, no locking or lock checks are performed.
 * This is intended for use by DDB.
 *
 * We don't bother locking the LWP here, since code that uses this
 * interface is broken by design and an exact match is not required.
 */
struct lwp *
proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;
	int cnt;

	if (locking) {
		KASSERT(mutex_owned(&p->p_smutex));
	}

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1) {
		l = LIST_FIRST(&p->p_lwps);
		if (nrlwps)
			*nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
		return l;
	}

	cnt = 0;
	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0) {
				continue;
			}
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				cnt++;
				break;
			case LSRUN:
				running = l;
				cnt++;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (nrlwps)
			*nrlwps = cnt;
		if (signalled)
			l = signalled;
		else if (onproc)
			l = onproc;
		else if (running)
			l = running;
		else if (sleeping)
			l = sleeping;
		else if (stopped)
			l = stopped;
		else if (suspended)
			l = suspended;
		else
			break;
		return l;
#ifdef DIAGNOSTIC
	case SIDL:
	case SZOMB:
	case SDYING:
	case SDEAD:
		if (locking)
			mutex_exit(&p->p_smutex);
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
		    p->p_pid, p->p_comm, p->p_stat);
		break;
	default:
		if (locking)
			mutex_exit(&p->p_smutex);
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	if (locking)
		mutex_exit(&p->p_smutex);
	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Migrate the LWP to another CPU.  Unlocks the LWP.
 */
void
lwp_migrate(lwp_t *l, struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	KASSERT(lwp_locked(l, NULL));

	if (l->l_cpu == ci) {
		lwp_unlock(l);
		return;
	}

	spc = &ci->ci_schedstate;
	switch (l->l_stat) {
	case LSRUN:
		if (l->l_flag & LW_INMEM) {
			l->l_target_cpu = ci;
			break;
		}
		/* FALLTHROUGH */
	case LSIDL:
		l->l_cpu = ci;
		lwp_unlock_to(l, spc->spc_mutex);
		KASSERT(!mutex_owned(spc->spc_mutex));
		return;
	case LSSLEEP:
		l->l_cpu = ci;
		break;
	case LSSTOP:
	case LSSUSPENDED:
		if (l->l_wchan != NULL) {
			l->l_cpu = ci;
			break;
		}
		/* FALLTHROUGH */
	case LSONPROC:
		l->l_target_cpu = ci;
		break;
	}
	lwp_unlock(l);
}

/*
 * Find the LWP in the process.  Arguments may be zero, in which case
 * the calling process and the first LWP in the list will be used.
 * On success, returns the LWP locked.
 */
struct lwp *
lwp_find2(pid_t pid, lwpid_t lid)
{
	proc_t *p;
	lwp_t *l;

	/* Find the process */
	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
	if (p == NULL)
		return NULL;
	mutex_enter(&p->p_smutex);
	if (pid != 0) {
		/* Case of p_find */
		mutex_exit(proc_lock);
	}

	/* Find the thread */
	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
	if (l != NULL)
		lwp_lock(l);
	mutex_exit(&p->p_smutex);

	return l;
}
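
/*
 * Usage sketch (illustrative only): looking up an LWP by pid/lid pair.
 * On success the LWP is returned locked, so the caller must drop its
 * lock when done.
 *
 *	lwp_t *l;
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		... inspect l under its lock ...
 *		lwp_unlock(l);
 *	}
 */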

/*
 * Look up a live LWP within the specified process.  The LWP is not
 * locked on return; the caller must lock it if needed.
 *
 * Must be called with p->p_smutex held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(&p->p_mutex);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	mutex_enter(&p->p_smutex);
	l->l_prflag &= ~LPR_CRMOD;
	mutex_exit(&p->p_smutex);
	mutex_exit(&p->p_mutex);
	if (oc != NULL)
		kauth_cred_free(oc);
}

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}

/*
 * Lock an LWP.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	for (;;) {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif
}
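
/*
 * For reference, this retry loop backs the lwp_lock() fast path.  The
 * sketch below is paraphrased from the inline in sys/lwp.h (treat the
 * exact form as an assumption): speculatively take the current lock,
 * then re-check l_mutex in case the lock was changed while we were
 * spinning on it.
 *
 *	static inline void
 *	lwp_lock(lwp_t *l)
 *	{
 *		kmutex_t *old;
 *
 *		mutex_spin_enter(old = l->l_mutex);
 *		if (__predict_false(l->l_mutex != old))
 *			lwp_lock_retry(l, old);
 *	}
 */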

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	KASSERT(mutex_owned(l->l_mutex));

	membar_producer();
	l->l_mutex = new;
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	membar_producer();
	l->l_mutex = new;
	mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
}

int
lwp_trylock(struct lwp *l)
{
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}
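
/*
 * For orientation (paraphrased from sys/lwp.h around this revision, so
 * treat the exact membership as an assumption): LW_USERRET is a mask of
 * the per-LWP flags that demand attention on the way back to user
 * space, roughly
 *
 *	LW_WEXIT | LW_PENDSIG | LW_WREBOOT | LW_WSUSPEND | LW_WCORE |
 *	LW_WUSERRET
 *
 * which corresponds to the cases handled by lwp_userret() below.
 */
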
/*
 * Handle exceptions for mi_userret().  Called if any of the flags in
 * the LW_USERRET mask is set in l_flag.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	void (*hook)(void);
	int sig;

	p = l->l_proc;

#ifndef __HAVE_FAST_SOFTINTS
	/* Run pending soft interrupts. */
	if (l->l_cpu->ci_data.cpu_softints != 0)
		softint_overlay();
#endif

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 */
	while ((l->l_flag & LW_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core or exiting, where we will instead
		 * enter the LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
		    LW_PENDSIG) {
			mutex_enter(&p->p_smutex);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(&p->p_smutex);
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			mutex_enter(&p->p_smutex);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			mutex_exit(&p->p_smutex);
			mi_switch(l);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* Call userret hook; used by Linux emulation. */
		if ((l->l_flag & LW_WUSERRET) != 0) {
			lwp_lock(l);
			l->l_flag &= ~LW_WUSERRET;
			lwp_unlock(l);
			hook = p->p_userret;
			p->p_userret = NULL;
			(*hook)();
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	membar_producer();
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

	KASSERT(mutex_owned(&l->l_proc->p_smutex));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt > 0);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);
}

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_lwpcv, &p->p_smutex);
}
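
/*
 * Usage sketch (illustrative only, in the style of procfs): pinning an
 * LWP so it can be inspected after the process lock is dropped.
 *
 *	mutex_enter(&p->p_smutex);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(&p->p_smutex);
 *
 *	if (l != NULL) {
 *		... inspect l->l_stat, l->l_addr, etc ...
 *		lwp_delref(l);
 *	}
 */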

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &curlwp->l_specdataref, key, data);
}
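
/*
 * Subsystem usage sketch (illustrative only; the "foo" names are
 * invented): a subsystem creates one key at init time, then hangs
 * per-LWP state off it.  The destructor runs when an LWP's container
 * is finalized.
 *
 *	static specificdata_key_t foo_key;
 *
 *	static void
 *	foo_dtor(void *arg)
 *	{
 *		kmem_free(arg, sizeof(struct foo_state));
 *	}
 *
 *	void
 *	foo_init(void)
 *	{
 *		(void)lwp_specific_key_create(&foo_key, foo_dtor);
 *	}
 *
 *	struct foo_state *
 *	foo_curstate(void)
 *	{
 *		struct foo_state *fs;
 *
 *		if ((fs = lwp_getspecific(foo_key)) == NULL) {
 *			fs = kmem_zalloc(sizeof(*fs), KM_SLEEP);
 *			lwp_setspecific(foo_key, fs);
 *		}
 *		return fs;
 *	}
 */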

/*
 * Allocate a new lwpctl structure for a user LWP.
 */
int
lwp_ctl_alloc(vaddr_t *uaddr)
{
	lcproc_t *lp;
	u_int bit, i, offset;
	struct uvm_object *uao;
	int error;
	lcpage_t *lcp;
	proc_t *p;
	lwp_t *l;

	l = curlwp;
	p = l->l_proc;

	if (l->l_lcpage != NULL) {
		lcp = l->l_lcpage;
		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
		return (EINVAL);
	}

	/* First time around, allocate header structure for the process. */
	if ((lp = p->p_lwpctl) == NULL) {
		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
		lp->lp_uao = NULL;
		TAILQ_INIT(&lp->lp_pages);
		mutex_enter(&p->p_mutex);
		if (p->p_lwpctl == NULL) {
			p->p_lwpctl = lp;
			mutex_exit(&p->p_mutex);
		} else {
			mutex_exit(&p->p_mutex);
			mutex_destroy(&lp->lp_lock);
			kmem_free(lp, sizeof(*lp));
			lp = p->p_lwpctl;
		}
	}

	/*
	 * Set up an anonymous memory region to hold the shared pages.
	 * Map them into the process' address space.  The user vmspace
	 * gets the first reference on the UAO.
	 */
	mutex_enter(&lp->lp_lock);
	if (lp->lp_uao == NULL) {
		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
		lp->lp_cur = 0;
		lp->lp_max = LWPCTL_UAREA_SZ;
		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
		if (error != 0) {
			uao_detach(lp->lp_uao);
			lp->lp_uao = NULL;
			mutex_exit(&lp->lp_lock);
			return error;
		}
	}

	/* Get a free block and allocate for this LWP. */
	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
		if (lcp->lcp_nfree != 0)
			break;
	}
	if (lcp == NULL) {
		/* Nothing available - try to set up a free page. */
		if (lp->lp_cur == lp->lp_max) {
			mutex_exit(&lp->lp_lock);
			return ENOMEM;
		}
		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
		if (lcp == NULL) {
			mutex_exit(&lp->lp_lock);
			return ENOMEM;
		}
		/*
		 * Wire the next page down in kernel space.  Since this
		 * is a new mapping, we must add a reference.
		 */
		uao = lp->lp_uao;
		(*uao->pgops->pgo_reference)(uao);
		lcp->lcp_kaddr = vm_map_min(kernel_map);
		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
		    uao, lp->lp_cur, PAGE_SIZE,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
		if (error != 0) {
			mutex_exit(&lp->lp_lock);
			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
			(*uao->pgops->pgo_detach)(uao);
			return error;
		}
		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
		if (error != 0) {
			mutex_exit(&lp->lp_lock);
			uvm_unmap(kernel_map, lcp->lcp_kaddr,
			    lcp->lcp_kaddr + PAGE_SIZE);
			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
			return error;
		}
		/* Prepare the page descriptor and link into the list. */
		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
		lp->lp_cur += PAGE_SIZE;
		lcp->lcp_nfree = LWPCTL_PER_PAGE;
		lcp->lcp_rotor = 0;
		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
	}
	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
		if (++i >= LWPCTL_BITMAP_ENTRIES)
			i = 0;
	}
	bit = ffs(lcp->lcp_bitmap[i]) - 1;
	lcp->lcp_bitmap[i] ^= (1 << bit);
	lcp->lcp_rotor = i;
	lcp->lcp_nfree--;
	l->l_lcpage = lcp;
	offset = (i << 5) + bit;
	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
	mutex_exit(&lp->lp_lock);

	l->l_lwpctl->lc_curcpu = (short)curcpu()->ci_data.cpu_index;

	return 0;
}
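
/*
 * Note on the bitmap arithmetic above (a sketch, assuming 32-bit bitmap
 * words as implied by the "<< 5" and "& 31" used in this file): each
 * word tracks 32 lwpctl slots, a set bit meaning "free".  A slot's
 * page-wide index therefore splits as
 *
 *	offset = (word_index << 5) + bit_index;
 *	word_index = offset >> 5;
 *	bit_index = offset & 31;
 *
 * so, for example, word 2, bit 7 names slot 71, and lwp_ctl_free()
 * below maps slot 71 back to word 2, bit 7.
 */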

/*
 * Free an lwpctl structure back to the per-process list.
 */
void
lwp_ctl_free(lwp_t *l)
{
	lcproc_t *lp;
	lcpage_t *lcp;
	u_int map, offset;

	lp = l->l_proc->p_lwpctl;
	KASSERT(lp != NULL);

	lcp = l->l_lcpage;
	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
	KASSERT(offset < LWPCTL_PER_PAGE);

	mutex_enter(&lp->lp_lock);
	lcp->lcp_nfree++;
	map = offset >> 5;
	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
		lcp->lcp_rotor = map;
	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
	}
	mutex_exit(&lp->lp_lock);
}

/*
 * Process is exiting; tear down lwpctl state.  This can only be safely
 * called by the last LWP in the process.
 */
void
lwp_ctl_exit(void)
{
	lcpage_t *lcp, *next;
	lcproc_t *lp;
	proc_t *p;
	lwp_t *l;

	l = curlwp;
	l->l_lwpctl = NULL;
	l->l_lcpage = NULL;
	p = l->l_proc;
	lp = p->p_lwpctl;

	KASSERT(lp != NULL);
	KASSERT(p->p_nlwps == 1);

	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
		next = TAILQ_NEXT(lcp, lcp_chain);
		uvm_unmap(kernel_map, lcp->lcp_kaddr,
		    lcp->lcp_kaddr + PAGE_SIZE);
		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
	}

	if (lp->lp_uao != NULL) {
		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
		    lp->lp_uva + LWPCTL_UAREA_SZ);
	}

	mutex_destroy(&lp->lp_lock);
	kmem_free(lp, sizeof(*lp));
	p->p_lwpctl = NULL;
}

#if defined(DDB)
void
lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	lwp_t *l;

	LIST_FOREACH(l, &alllwp, l_list) {
		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);

		if (addr < stack || stack + KSTACK_SIZE <= addr) {
			continue;
		}
		(*pr)("%p is %p+%zu, LWP %p's stack\n",
		    (void *)addr, (void *)stack,
		    (size_t)(addr - stack), l);
	}
}
#endif /* defined(DDB) */