/*	$NetBSD: kern_lwp.c,v 1.116.2.3 2008/07/18 16:37:49 simonb Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *      Lightweight processes (LWPs) are the basic unit or thread of
 *      execution within the kernel.  The core state of an LWP is described
 *      by "struct lwp", also known as lwp_t.
 *
 *      Each LWP is contained within a process (described by "struct proc").
 *      Every process contains at least one LWP, but may contain more.  The
 *      process describes attributes shared among all of its LWPs such as a
 *      private address space, global execution state (stopped, active,
 *      zombie, ...), signal disposition and so on.  On a multiprocessor
 *      machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *      At any given time, an LWP has overall state that is described by
 *      lwp::l_stat.  The states are broken into two sets below.  The first
 *      set is guaranteed to represent the absolute, current state of the
 *      LWP:
 *
 *      LSONPROC
 *
 *              On processor: the LWP is executing on a CPU, either in the
 *              kernel or in user space.
 *
 *      LSRUN
 *
 *              Runnable: the LWP is parked on a run queue, and may soon be
 *              chosen to run by an idle processor, or by a processor that
 *              has been asked to preempt a currently running but lower
 *              priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
 *              then the LWP is not on a run queue, but may be soon.
 *
 *      LSIDL
 *
 *              Idle: the LWP has been created but has not yet executed,
 *              or it has ceased executing a unit of work and is waiting
 *              to be started again.
 *
 *      LSSUSPENDED:
 *
 *              Suspended: the LWP has had its execution suspended by
 *              another LWP in the same process using the _lwp_suspend()
 *              system call.  User-level LWPs also enter the suspended
 *              state when the system is shutting down.
 *
 *      The second set represents a "statement of intent" on behalf of the
 *      LWP.  The LWP may in fact be executing on a processor, or may be
 *      sleeping or idle.  It is expected to take the necessary action to
 *      stop executing or become "running" again within a short timeframe.
 *      The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
 *      Importantly, it indicates that its state is tied to a CPU.
 *
 *      LSZOMB:
 *
 *              Dead or dying: the LWP has released most of its resources
 *              and is either a) about to switch away into oblivion, or
 *              b) has already switched away.  When it switches away, its
 *              few remaining resources can be collected.
 *
 *      LSSLEEP:
 *
 *              Sleeping: the LWP has entered itself onto a sleep queue, and
 *              has switched away or will switch away shortly to allow other
 *              LWPs to run on the CPU.
 *
 *      LSSTOP:
 *
 *              Stopped: the LWP has been stopped as a result of a job
 *              control signal, or as a result of the ptrace() interface.
 *
 *              Stopped LWPs may run briefly within the kernel to handle
 *              signals that they receive, but will not return to user space
 *              until their process' state is changed away from stopped.
 *
 *              Single LWPs within a process can not be set stopped
 *              selectively: all actions that can stop or continue LWPs
 *              occur at the process level.
 *
 * State transitions
 *
 *      Note that the LSSTOP state may only be set when returning to
 *      user space in userret(), or when sleeping interruptibly.  The
 *      LSSUSPENDED state may only be set in userret().  Before setting
 *      those states, we try to ensure that the LWPs will release all
 *      locks that they hold, and at a minimum try to ensure that the
 *      LWP can be set runnable again by a signal.
 *
 *      LWPs may transition states in the following ways:
 *
 *       RUN -------> ONPROC            ONPROC -----> RUN
 *                  > STOPPED                       > SLEEP
 *                  > SUSPENDED                     > STOPPED
 *                                                  > SUSPENDED
 *                                                  > ZOMB
 *
 *       STOPPED ---> RUN               SUSPENDED --> RUN
 *                  > SLEEP                         > SLEEP
 *
 *       SLEEP -----> ONPROC            IDL --------> RUN
 *                  > RUN                           > SUSPENDED
 *                  > STOPPED                       > STOPPED
 *                  > SUSPENDED
 *
 *      Other state transitions are possible with kernel threads (e.g.
 *      ONPROC -> IDL), but only happen under tightly controlled
 *      circumstances in which the side effects are understood.
 *
 * Migration
 *
 *      Migration of threads from one CPU to another may be performed
 *      internally by the scheduler via the sched_takecpu() or
 *      sched_catchlwp() functions.  The general-purpose lwp_migrate()
 *      function should be used for any other case.  Kernel subsystems
 *      must be aware that an LWP's CPU may change while the LWP is
 *      not locked.
 *
 * Locking
 *
 *      The majority of fields in 'struct lwp' are covered by a single,
 *      general spin lock pointed to by lwp::l_mutex.  The locks covering
 *      each field are documented in sys/lwp.h.
 *
 *      State transitions must be made with the LWP's general lock held,
 *      and may cause the LWP's lock pointer to change.  Manipulation of
 *      the general lock is not performed directly, but through calls to
 *      lwp_lock(), lwp_relock() and similar.
 *
 *      States and their associated locks:
 *
 *      LSONPROC, LSZOMB:
 *
 *              Always covered by spc_lwplock, which protects running LWPs.
 *              This is a per-CPU lock.
 *
 *      LSIDL, LSRUN:
 *
 *              Always covered by spc_mutex, which protects the run queues.
 *              This is a per-CPU lock.
 *
 *      LSSLEEP:
 *
 *              Covered by a lock associated with the sleep queue that the
 *              LWP resides on.
 *
 *      LSSTOP, LSSUSPENDED:
 *
 *              If the LWP was previously sleeping (l_wchan != NULL), then
 *              l_mutex references the sleep queue lock.  If the LWP was
 *              runnable or on the CPU when halted, or has been removed from
 *              the sleep queue since halted, then the lock is spc_lwplock.
 *
 *      The lock order is as follows:
 *
 *              spc::spc_lwplock ->
 *                  sleeptab::st_mutex ->
 *                      tschain_t::tc_mutex ->
 *                          spc::spc_mutex
 *
 *      Each process has a scheduler state lock (proc::p_lock), and a
 *      number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *      so on.  When an LWP is to be entered into or removed from one of the
 *      following states, p_lock must be held and the process wide counters
 *      adjusted:
 *
 *              LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *      Note that an LWP is considered running or likely to run soon if in
 *      one of the following states.  This affects the value of p_nrlwps:
 *
 *              LSRUN, LSONPROC, LSSLEEP
 *
 *      p_lock does not need to be held when transitioning among these
 *      three states.
 */
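
/*
 * Example (a minimal sketch, not code from this file): because a state
 * transition may change which mutex l_mutex points at, callers always
 * go through the lwp_lock()/lwp_unlock() wrappers rather than taking
 * the mutex directly:
 *
 *      lwp_lock(l);                    (lock whatever l_mutex points at)
 *      ...change l->l_stat, etc...     (transition while locked)
 *      lwp_unlock(l);                  (unlock the, possibly new, mutex)
 */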

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.116.2.3 2008/07/18 16:37:49 simonb Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/user.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/pset.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct lwplist alllwp = LIST_HEAD_INITIALIZER(alllwp);

POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr, IPL_NONE);

static pool_cache_t lwp_cache;
static specificdata_domain_t lwp_specificdata_domain;

void
lwpinit(void)
{

        lwp_specificdata_domain = specificdata_domain_create();
        KASSERT(lwp_specificdata_domain != NULL);
        lwp_sys_init();
        lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
            "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
        int error;

        KASSERT(mutex_owned(t->l_proc->p_lock));
        KASSERT(lwp_locked(t, NULL));

        KASSERT(curl != t || curl->l_stat == LSONPROC);

        /*
         * If the current LWP has been told to exit, we must not suspend
         * anyone else or deadlock could occur.  We won't return to userspace.
         */
        if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
                lwp_unlock(t);
                return (EDEADLK);
        }

        error = 0;

        switch (t->l_stat) {
        case LSRUN:
        case LSONPROC:
                t->l_flag |= LW_WSUSPEND;
                lwp_need_userret(t);
                lwp_unlock(t);
                break;

        case LSSLEEP:
                t->l_flag |= LW_WSUSPEND;

                /*
                 * Kick the LWP and try to get it to the kernel boundary
                 * so that it will release any locks that it holds.
                 * setrunnable() will release the lock.
                 */
                if ((t->l_flag & LW_SINTR) != 0)
                        setrunnable(t);
                else
                        lwp_unlock(t);
                break;

        case LSSUSPENDED:
                lwp_unlock(t);
                break;

        case LSSTOP:
                t->l_flag |= LW_WSUSPEND;
                setrunnable(t);
                break;

        case LSIDL:
        case LSZOMB:
                error = EINTR; /* It's what Solaris does..... */
                lwp_unlock(t);
                break;
        }

        return (error);
}
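
/*
 * Example (a hypothetical caller, for illustration only): the
 * _lwp_suspend() system call path takes p_lock and the target's LWP
 * lock, then lets lwp_suspend() consume the LWP lock:
 *
 *      mutex_enter(p->p_lock);
 *      lwp_lock(t);
 *      error = lwp_suspend(curlwp, t);         (unlocks t)
 *      mutex_exit(p->p_lock);
 */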

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

        KASSERT(mutex_owned(l->l_proc->p_lock));
        KASSERT(lwp_locked(l, NULL));

        /* If rebooting or not suspended, then just bail out. */
        if ((l->l_flag & LW_WREBOOT) != 0) {
                lwp_unlock(l);
                return;
        }

        l->l_flag &= ~LW_WSUSPEND;

        if (l->l_stat != LSSUSPENDED) {
                lwp_unlock(l);
                return;
        }

        /* setrunnable() will release the lock. */
        setrunnable(l);
}

/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_lock held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
        struct proc *p = l->l_proc;
        struct lwp *l2;
        int nfound, error;
        lwpid_t curlid;
        bool exiting;

        KASSERT(mutex_owned(p->p_lock));

        p->p_nlwpwait++;
        l->l_waitingfor = lid;
        curlid = l->l_lid;
        exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);

        for (;;) {
                /*
                 * Avoid a race between exit1() and sigexit(): if the
                 * process is dumping core, then we need to bail out: call
                 * into lwp_userret() where we will be suspended until the
                 * deed is done.
                 */
                if ((p->p_sflag & PS_WCORE) != 0) {
                        mutex_exit(p->p_lock);
                        lwp_userret(l);
#ifdef DIAGNOSTIC
                        panic("lwp_wait1");
#endif
                        /* NOTREACHED */
                }

                /*
                 * First off, drain any detached LWP that is waiting to be
                 * reaped.
                 */
                while ((l2 = p->p_zomblwp) != NULL) {
                        p->p_zomblwp = NULL;
                        lwp_free(l2, false, false);/* releases proc mutex */
                        mutex_enter(p->p_lock);
                }

                /*
                 * Now look for an LWP to collect.  If the whole process is
                 * exiting, count detached LWPs as eligible to be collected,
                 * but don't drain them here.
                 */
                nfound = 0;
                error = 0;
                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                        /*
                         * If a specific wait and the target is waiting on
                         * us, then avoid deadlock.  This also traps LWPs
                         * that try to wait on themselves.
                         *
                         * Note that this does not handle more complicated
                         * cycles, like: t1 -> t2 -> t3 -> t1.  The process
                         * can still be killed so it is not a major problem.
                         */
                        if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
                                error = EDEADLK;
                                break;
                        }
                        if (l2 == l)
                                continue;
                        if ((l2->l_prflag & LPR_DETACHED) != 0) {
                                nfound += exiting;
                                continue;
                        }
                        if (lid != 0) {
                                if (l2->l_lid != lid)
                                        continue;
                                /*
                                 * Mark this LWP as the first waiter, if there
                                 * is no other.
                                 */
                                if (l2->l_waiter == 0)
                                        l2->l_waiter = curlid;
                        } else if (l2->l_waiter != 0) {
                                /*
                                 * It already has a waiter - so don't
                                 * collect it.  If the waiter doesn't
                                 * grab it we'll get another chance
                                 * later.
                                 */
                                nfound++;
                                continue;
                        }
                        nfound++;

                        /* No need to lock the LWP in order to see LSZOMB. */
                        if (l2->l_stat != LSZOMB)
                                continue;

                        /*
                         * We're no longer waiting.  Reset the "first waiter"
                         * pointer on the target, in case it was us.
                         */
                        l->l_waitingfor = 0;
                        l2->l_waiter = 0;
                        p->p_nlwpwait--;
                        if (departed)
                                *departed = l2->l_lid;
                        sched_lwp_collect(l2);

                        /* lwp_free() releases the proc lock. */
                        lwp_free(l2, false, false);
                        mutex_enter(p->p_lock);
                        return 0;
                }

                if (error != 0)
                        break;
                if (nfound == 0) {
                        error = ESRCH;
                        break;
                }

                /*
                 * The kernel is careful to ensure that it can not deadlock
                 * when exiting - just keep waiting.
                 */
                if (exiting) {
                        KASSERT(p->p_nlwps > 1);
                        cv_wait(&p->p_lwpcv, p->p_lock);
                        continue;
                }

                /*
                 * If all other LWPs are waiting for exits or suspends
                 * and the supply of zombies and potential zombies is
                 * exhausted, then we are about to deadlock.
                 *
                 * If the process is exiting (and this LWP is not the one
                 * that is coordinating the exit) then bail out now.
                 */
                if ((p->p_sflag & PS_WEXIT) != 0 ||
                    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
                        error = EDEADLK;
                        break;
                }

                /*
                 * Sit around and wait for something to happen.  We'll be
                 * awoken if any of the conditions examined change: if an
                 * LWP exits, is collected, or is detached.
                 */
                if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
                        break;
        }

        /*
         * We didn't find any LWPs to collect, we may have received a
         * signal, or some other condition has caused us to bail out.
         *
         * If waiting on a specific LWP, clear the waiters marker: some
         * other LWP may want it.  Then, kick all the remaining waiters
         * so that they can re-check for zombies and for deadlock.
         */
        if (lid != 0) {
                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                        if (l2->l_lid == lid) {
                                if (l2->l_waiter == curlid)
                                        l2->l_waiter = 0;
                                break;
                        }
                }
        }
        p->p_nlwpwait--;
        l->l_waitingfor = 0;
        cv_broadcast(&p->p_lwpcv);

        return error;
}
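
/*
 * Example (a sketch, assuming the usual _lwp_wait() syscall shape):
 * lwp_wait1() is entered and exited with p_lock held, so a caller
 * looks roughly like:
 *
 *      mutex_enter(p->p_lock);
 *      error = lwp_wait1(l, target_lid, &departed, 0);
 *      mutex_exit(p->p_lock);
 */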

/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    lwp_t **rnewlwpp, int sclass)
{
        struct lwp *l2, *isfree;
        turnstile_t *ts;

        KASSERT(l1 == curlwp || l1->l_proc == &proc0);

        /*
         * First off, reap any detached LWP waiting to be collected.
         * We can re-use its LWP structure and turnstile.
         */
        isfree = NULL;
        if (p2->p_zomblwp != NULL) {
                mutex_enter(p2->p_lock);
                if ((isfree = p2->p_zomblwp) != NULL) {
                        p2->p_zomblwp = NULL;
                        lwp_free(isfree, true, false);/* releases proc mutex */
                } else
                        mutex_exit(p2->p_lock);
        }
        if (isfree == NULL) {
                l2 = pool_cache_get(lwp_cache, PR_WAITOK);
                memset(l2, 0, sizeof(*l2));
                l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
                SLIST_INIT(&l2->l_pi_lenders);
        } else {
                l2 = isfree;
                ts = l2->l_ts;
                KASSERT(l2->l_inheritedprio == -1);
                KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
                memset(l2, 0, sizeof(*l2));
                l2->l_ts = ts;
        }

        l2->l_stat = LSIDL;
        l2->l_proc = p2;
        l2->l_refcnt = 1;
        l2->l_class = sclass;

        /*
         * If vfork(), we want the LWP to run fast and on the same CPU
         * as its parent, so that it can reuse the VM context and cache
         * footprint on the local CPU.
         */
        l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
        l2->l_kpribase = PRI_KERNEL;
        l2->l_priority = l1->l_priority;
        l2->l_inheritedprio = -1;
        l2->l_flag = inmem ? LW_INMEM : 0;
        l2->l_pflag = LP_MPSAFE;
        l2->l_fd = p2->p_fd;
        TAILQ_INIT(&l2->l_ld_locks);

        if (p2->p_flag & PK_SYSTEM) {
                /* Mark it as a system LWP and not a candidate for swapping */
                l2->l_flag |= LW_SYSTEM;
        }

        kpreempt_disable();
        l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
        l2->l_cpu = l1->l_cpu;
        kpreempt_enable();

        lwp_initspecific(l2);
        sched_lwp_fork(l1, l2);
        lwp_update_creds(l2);
        callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
        callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
        mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&l2->l_sigcv, "sigwait");
        l2->l_syncobj = &sched_syncobj;

        if (rnewlwpp != NULL)
                *rnewlwpp = l2;

        l2->l_addr = UAREA_TO_USER(uaddr);
        uvm_lwp_fork(l1, l2, stack, stacksize, func,
            (arg != NULL) ? arg : l2);

        mutex_enter(p2->p_lock);

        if ((flags & LWP_DETACHED) != 0) {
                l2->l_prflag = LPR_DETACHED;
                p2->p_ndlwps++;
        } else
                l2->l_prflag = 0;

        l2->l_sigmask = l1->l_sigmask;
        CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
        sigemptyset(&l2->l_sigpend.sp_set);

        p2->p_nlwpid++;
        if (p2->p_nlwpid == 0)
                p2->p_nlwpid++;
        l2->l_lid = p2->p_nlwpid;
        LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
        p2->p_nlwps++;

        mutex_exit(p2->p_lock);

        mutex_enter(proc_lock);
        LIST_INSERT_HEAD(&alllwp, l2, l_list);
        mutex_exit(proc_lock);

        if ((p2->p_flag & PK_SYSTEM) == 0) {
                /* Locking is needed, since LWP is in the list of all LWPs */
                lwp_lock(l2);
                /* Inherit a processor-set */
                l2->l_psid = l1->l_psid;
                /* Inherit an affinity */
                if (l1->l_flag & LW_AFFINITY) {
                        proc_t *p = l1->l_proc;

                        mutex_enter(p->p_lock);
                        if (l1->l_flag & LW_AFFINITY) {
                                kcpuset_use(l1->l_affinity);
                                l2->l_affinity = l1->l_affinity;
                                l2->l_flag |= LW_AFFINITY;
                        }
                        mutex_exit(p->p_lock);
                }
                /* Look for a CPU to start */
                l2->l_cpu = sched_takecpu(l2);
                lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
        }

        SYSCALL_TIME_LWP_INIT(l2);

        if (p2->p_emul->e_lwp_fork)
                (*p2->p_emul->e_lwp_fork)(l1, l2);

        return (0);
}
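
/*
 * Example (a hedged sketch of a typical caller, not code from this
 * file): a newly created LWP is in LSIDL and must be set running
 * explicitly, roughly as the _lwp_create() syscall does:
 *
 *      error = lwp_create(curlwp, p, uaddr, true, 0, NULL, 0,
 *          my_start_func, NULL, &l2, SCHED_OTHER);
 *      if (error == 0) {
 *              mutex_enter(p->p_lock);
 *              lwp_lock(l2);
 *              l2->l_stat = LSRUN;
 *              sched_enqueue(l2, false);
 *              lwp_unlock(l2);
 *              mutex_exit(p->p_lock);
 *      }
 *
 * "my_start_func" is a placeholder; SCHED_OTHER stands in for the
 * desired scheduling class.
 */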

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{

        KASSERT(kpreempt_disabled());
        if (prev != NULL) {
                /*
                 * Normalize the count of the spin-mutexes, it was
                 * increased in mi_switch().  Unmark the state of
                 * context switch - it is finished for previous LWP.
                 */
                curcpu()->ci_mtx_count++;
                membar_exit();
                prev->l_ctxswtch = 0;
        }
        KPREEMPT_DISABLE(new);
        spl0();
        pmap_activate(new);
        LOCKDEBUG_BARRIER(NULL, 0);
        KPREEMPT_ENABLE(new);
        if ((new->l_pflag & LP_MPSAFE) == 0) {
                KERNEL_LOCK(1, new);
        }
}

/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
        struct proc *p = l->l_proc;
        struct lwp *l2;
        bool current;

        current = (l == curlwp);

        KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));

        /*
         * Verify that we hold no locks other than the kernel lock.
         */
        LOCKDEBUG_BARRIER(&kernel_lock, 0);

        /*
         * If we are the last live LWP in a process, we need to exit the
         * entire process.  We do so with an exit status of zero, because
         * it's a "controlled" exit, and because that's what Solaris does.
         *
         * We are not quite a zombie yet, but for accounting purposes we
         * must increment the count of zombies here.
         *
         * Note: the last LWP's specificdata will be deleted here.
         */
        mutex_enter(p->p_lock);
        if (p->p_nlwps - p->p_nzlwps == 1) {
                KASSERT(current == true);
                /* XXXSMP kernel_lock not held */
                exit1(l, 0);
                /* NOTREACHED */
        }
        p->p_nzlwps++;
        mutex_exit(p->p_lock);

        if (p->p_emul->e_lwp_exit)
                (*p->p_emul->e_lwp_exit)(l);

        /* Delete the specificdata while it's still safe to sleep. */
        specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

        /*
         * Release our cached credentials.
         */
        kauth_cred_free(l->l_cred);
        callout_destroy(&l->l_timeout_ch);

        /*
         * While we can still block, mark the LWP as unswappable to
         * prevent conflicts with the swapper.
         */
        if (current)
                uvm_lwp_hold(l);

        /*
         * Remove the LWP from the global list.
         */
        mutex_enter(proc_lock);
        LIST_REMOVE(l, l_list);
        mutex_exit(proc_lock);

        /*
         * Get rid of all references to the LWP that others (e.g. procfs)
         * may have, and mark the LWP as a zombie.  If the LWP is detached,
         * mark it waiting for collection in the proc structure.  Note that
         * before we can do that, we need to free any other dead, detached
         * LWP waiting to meet its maker.
         */
        mutex_enter(p->p_lock);
        lwp_drainrefs(l);

        if ((l->l_prflag & LPR_DETACHED) != 0) {
                while ((l2 = p->p_zomblwp) != NULL) {
                        p->p_zomblwp = NULL;
                        lwp_free(l2, false, false);/* releases proc mutex */
                        mutex_enter(p->p_lock);
                        l->l_refcnt++;
                        lwp_drainrefs(l);
                }
                p->p_zomblwp = l;
        }

        /*
         * If we find a pending signal for the process and we have been
         * asked to check for signals, then we lose: arrange to have
         * all other LWPs in the process check for signals.
         */
        if ((l->l_flag & LW_PENDSIG) != 0 &&
            firstsig(&p->p_sigpend.sp_set) != 0) {
                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                        lwp_lock(l2);
                        l2->l_flag |= LW_PENDSIG;
                        lwp_unlock(l2);
                }
        }

        lwp_lock(l);
        l->l_stat = LSZOMB;
        if (l->l_name != NULL)
                strcpy(l->l_name, "(zombie)");
        if (l->l_flag & LW_AFFINITY)
                l->l_flag &= ~LW_AFFINITY;
        lwp_unlock(l);
        p->p_nrlwps--;
        cv_broadcast(&p->p_lwpcv);
        if (l->l_lwpctl != NULL)
                l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
        mutex_exit(p->p_lock);

        /* Safe without lock since LWP is in zombie state */
        if (l->l_affinity) {
                kcpuset_unuse(l->l_affinity, NULL);
                l->l_affinity = NULL;
        }

        /*
         * We can no longer block.  At this point, lwp_free() may already
         * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
         *
         * Free MD LWP resources.
         */
#ifndef __NO_CPU_LWP_FREE
        cpu_lwp_free(l, 0);
#endif

        if (current) {
                pmap_deactivate(l);

                /*
                 * Release the kernel lock, and switch away into
                 * oblivion.
                 */
#ifdef notyet
                /* XXXSMP hold in lwp_userret() */
                KERNEL_UNLOCK_LAST(l);
#else
                KERNEL_UNLOCK_ALL(l, NULL);
#endif
                lwp_exit_switchaway(l);
        }
}

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
        struct proc *p = l->l_proc;
        struct rusage *ru;
        ksiginfoq_t kq;

        KASSERT(l != curlwp);

        /*
         * If this was not the last LWP in the process, then adjust
         * counters and unlock.
         */
        if (!last) {
                /*
                 * Add the LWP's run time to the process' base value.
                 * This needs to coincide with coming off p_lwps.
                 */
                bintime_add(&p->p_rtime, &l->l_rtime);
                p->p_pctcpu += l->l_pctcpu;
                ru = &p->p_stats->p_ru;
                ruadd(ru, &l->l_ru);
                ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
                ru->ru_nivcsw += l->l_nivcsw;
                LIST_REMOVE(l, l_sibling);
                p->p_nlwps--;
                p->p_nzlwps--;
                if ((l->l_prflag & LPR_DETACHED) != 0)
                        p->p_ndlwps--;

                /*
                 * Have any LWPs sleeping in lwp_wait() recheck for
                 * deadlock.
                 */
                cv_broadcast(&p->p_lwpcv);
                mutex_exit(p->p_lock);
        }

#ifdef MULTIPROCESSOR
        /*
         * In the unlikely event that the LWP is still on the CPU,
         * then spin until it has switched away.  We need to release
         * all locks to avoid deadlock against interrupt handlers on
         * the target CPU.
         */
        if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
                int count;
                (void)count; /* XXXgcc */
                KERNEL_UNLOCK_ALL(curlwp, &count);
                while ((l->l_pflag & LP_RUNNING) != 0 ||
                    l->l_cpu->ci_curlwp == l)
                        SPINLOCK_BACKOFF_HOOK;
                KERNEL_LOCK(count, curlwp);
        }
#endif

        /*
         * Destroy the LWP's remaining signal information.
         */
        ksiginfo_queue_init(&kq);
        sigclear(&l->l_sigpend, NULL, &kq);
        ksiginfo_queue_drain(&kq);
        cv_destroy(&l->l_sigcv);
        mutex_destroy(&l->l_swaplock);

        /*
         * Free the LWP's turnstile and the LWP structure itself unless the
         * caller wants to recycle them.  Also, free the scheduler specific
         * data.
         *
         * We can't return turnstile0 to the pool (it didn't come from it),
         * so if it comes up just drop it quietly and move on.
         *
         * We don't recycle the VM resources at this time.
         */
        if (l->l_lwpctl != NULL)
                lwp_ctl_free(l);
        sched_lwp_exit(l);

        if (!recycle && l->l_ts != &turnstile0)
                pool_cache_put(turnstile_cache, l->l_ts);
        if (l->l_name != NULL)
                kmem_free(l->l_name, MAXCOMLEN);
#ifndef __NO_CPU_LWP_FREE
        cpu_lwp_free2(l);
#endif
        KASSERT((l->l_flag & LW_INMEM) != 0);
        uvm_lwp_exit(l);
        KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
        KASSERT(l->l_inheritedprio == -1);
        if (!recycle)
                pool_cache_put(lwp_cache, l);
}

/*
 * Migrate the LWP to another CPU.  Unlocks the LWP.
 */
void
lwp_migrate(lwp_t *l, struct cpu_info *tci)
{
        struct schedstate_percpu *tspc;
        int lstat = l->l_stat;

        KASSERT(lwp_locked(l, NULL));
        KASSERT(tci != NULL);

        /* If LWP is still on the CPU, it must be handled like LSONPROC */
        if ((l->l_pflag & LP_RUNNING) != 0) {
                lstat = LSONPROC;
        }

        /*
         * The destination CPU could have been changed while a previous
         * migration was still in progress.
         */
        if (l->l_target_cpu != NULL) {
                l->l_target_cpu = tci;
                lwp_unlock(l);
                return;
        }

        /* Nothing to do if trying to migrate to the same CPU */
        if (l->l_cpu == tci) {
                lwp_unlock(l);
                return;
        }

        KASSERT(l->l_target_cpu == NULL);
        tspc = &tci->ci_schedstate;
        switch (lstat) {
        case LSRUN:
                if (l->l_flag & LW_INMEM) {
                        l->l_target_cpu = tci;
                        lwp_unlock(l);
                        return;
                }
                /* FALLTHROUGH: a swapped-out LWP is handled like LSIDL */
        case LSIDL:
                l->l_cpu = tci;
                lwp_unlock_to(l, tspc->spc_mutex);
                return;
        case LSSLEEP:
                l->l_cpu = tci;
                break;
        case LSSTOP:
        case LSSUSPENDED:
                l->l_cpu = tci;
                if (l->l_wchan == NULL) {
                        lwp_unlock_to(l, tspc->spc_lwplock);
                        return;
                }
                break;
        case LSONPROC:
                l->l_target_cpu = tci;
                spc_lock(l->l_cpu);
                cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
                spc_unlock(l->l_cpu);
                break;
        }
        lwp_unlock(l);
}
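
/*
 * Example (a minimal sketch; "target_ci" is a placeholder): callers
 * hold the LWP lock and let lwp_migrate() release it:
 *
 *      lwp_lock(l);
 *      lwp_migrate(l, target_ci);      (unlocks l)
 */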

/*
 * Look up an LWP by process and LWP ID.  Either argument may be zero,
 * in which case the calling process and/or the first LWP in the process
 * are used.  On success, returns with the proc locked.
 */
struct lwp *
lwp_find2(pid_t pid, lwpid_t lid)
{
        proc_t *p;
        lwp_t *l;

        /* Find the process */
        p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
        if (p == NULL)
                return NULL;
        mutex_enter(p->p_lock);
        if (pid != 0) {
                /* Case of p_find */
                mutex_exit(proc_lock);
        }

        /* Find the thread */
        l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
        if (l == NULL) {
                mutex_exit(p->p_lock);
        }

        return l;
}
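
/*
 * Example (hypothetical usage, for illustration only): operate on a
 * thread in another process, remembering that p_lock is held on
 * success and must be dropped by the caller:
 *
 *      if ((l = lwp_find2(pid, lid)) == NULL)
 *              return ESRCH;
 *      ...inspect l...
 *      mutex_exit(l->l_proc->p_lock);
 */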

/*
 * Look up a live LWP within the specified process, and return it locked.
 *
 * Must be called with p->p_lock held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
        struct lwp *l;

        KASSERT(mutex_owned(p->p_lock));

        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                if (l->l_lid == id)
                        break;
        }

        /*
         * No need to lock - all of these conditions will
         * be visible with the process level mutex held.
         */
        if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
                l = NULL;

        return l;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
        kauth_cred_t oc;
        struct proc *p;

        p = l->l_proc;
        oc = l->l_cred;

        mutex_enter(p->p_lock);
        kauth_cred_hold(p->p_cred);
        l->l_cred = p->p_cred;
        l->l_prflag &= ~LPR_CRMOD;
        mutex_exit(p->p_lock);
        if (oc != NULL)
                kauth_cred_free(oc);
}

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
        kmutex_t *cur = l->l_mutex;

        return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}

/*
 * Lock an LWP.
 */
kmutex_t *
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

        /*
         * XXXgcc ignoring kmutex_t * volatile on i386
         *
         * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
         */
#if 1
        while (l->l_mutex != old) {
#else
        for (;;) {
#endif
                mutex_spin_exit(old);
                old = l->l_mutex;
                mutex_spin_enter(old);

                /*
                 * mutex_enter() will have posted a read barrier.  Re-test
                 * l->l_mutex.  If it has changed, we need to try again.
                 */
#if 1
        }
#else
        } while (__predict_false(l->l_mutex != old));
#endif

        return old;
}
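
/*
 * For reference, a paraphrased sketch of the fast path that ends up
 * here (the real inline lives in sys/lwp.h; this is illustrative
 * only): take the mutex currently pointed at, then retry if the
 * pointer changed underneath us:
 *
 *      kmutex_t *old = l->l_mutex;
 *
 *      mutex_spin_enter(old);
 *      if (__predict_false(l->l_mutex != old))
 *              lwp_lock_retry(l, old);
 */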

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

        KASSERT(mutex_owned(l->l_mutex));

        membar_exit();
        l->l_mutex = new;
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
        kmutex_t *old;

        KASSERT(mutex_owned(l->l_mutex));

        old = l->l_mutex;
        membar_exit();
        l->l_mutex = new;
        mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
        kmutex_t *old;

        KASSERT(mutex_owned(l->l_mutex));

        old = l->l_mutex;
        if (old != new) {
                mutex_spin_enter(new);
                l->l_mutex = new;
                mutex_spin_exit(old);
        }
}

int
lwp_trylock(struct lwp *l)
{
        kmutex_t *old;

        for (;;) {
                if (!mutex_tryenter(old = l->l_mutex))
                        return 0;
                if (__predict_true(l->l_mutex == old))
                        return 1;
                mutex_spin_exit(old);
        }
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

        KASSERT(mutex_owned(l->l_mutex));

        return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}


/*
 * Handle exceptions for mi_userret().  Called if a member of LW_USERRET is
 * set.
 */
void
lwp_userret(struct lwp *l)
{
        struct proc *p;
        void (*hook)(void);
        int sig;

        KASSERT(l == curlwp);
        KASSERT(l->l_stat == LSONPROC);
        p = l->l_proc;

#ifndef __HAVE_FAST_SOFTINTS
        /* Run pending soft interrupts. */
        if (l->l_cpu->ci_data.cpu_softints != 0)
                softint_overlay();
#endif

        /*
         * It should be safe to do this read unlocked on a multiprocessor
         * system.
         */
        while ((l->l_flag & LW_USERRET) != 0) {
                /*
                 * Process pending signals first, unless the process
                 * is dumping core or exiting, where we will instead
                 * enter the LW_WSUSPEND case below.
                 */
                if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
                    LW_PENDSIG) {
                        mutex_enter(p->p_lock);
                        while ((sig = issignal(l)) != 0)
                                postsig(sig);
                        mutex_exit(p->p_lock);
                }

                /*
                 * Core-dump or suspend pending.
                 *
                 * In case of core dump, suspend ourselves, so that the
                 * kernel stack and therefore the userland registers saved
                 * in the trapframe are around for coredump() to write them
                 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
                 * will write the core file out once all other LWPs are
                 * suspended.
                 */
                if ((l->l_flag & LW_WSUSPEND) != 0) {
                        mutex_enter(p->p_lock);
                        p->p_nrlwps--;
                        cv_broadcast(&p->p_lwpcv);
                        lwp_lock(l);
                        l->l_stat = LSSUSPENDED;
                        lwp_unlock(l);
                        mutex_exit(p->p_lock);
                        lwp_lock(l);
                        mi_switch(l);
                }

                /* Process is exiting. */
                if ((l->l_flag & LW_WEXIT) != 0) {
                        lwp_exit(l);
                        KASSERT(0);
                        /* NOTREACHED */
                }

                /* Call userret hook; used by Linux emulation. */
                if ((l->l_flag & LW_WUSERRET) != 0) {
                        lwp_lock(l);
                        l->l_flag &= ~LW_WUSERRET;
                        lwp_unlock(l);
                        hook = p->p_userret;
                        p->p_userret = NULL;
                        (*hook)();
                }
        }
}
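
/*
 * For reference (a paraphrased sketch of the mi_userret() side, for
 * illustration only): the return-to-user path tests the flag set and
 * calls in while any of the LW_USERRET conditions is pending:
 *
 *      if (__predict_false((l->l_flag & LW_USERRET) != 0))
 *              lwp_userret(l);
 */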

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
        KASSERT(lwp_locked(l, NULL));

        /*
         * Since the tests in lwp_userret() are done unlocked, make sure
         * that the condition will be seen before forcing the LWP to enter
         * kernel mode.
         */
        membar_producer();
        cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

        KASSERT(mutex_owned(l->l_proc->p_lock));
        KASSERT(l->l_stat != LSZOMB);
        KASSERT(l->l_refcnt != 0);

        l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
        struct proc *p = l->l_proc;

        mutex_enter(p->p_lock);
        KASSERT(l->l_stat != LSZOMB);
        KASSERT(l->l_refcnt > 0);
        if (--l->l_refcnt == 0)
                cv_broadcast(&p->p_lwpcv);
        mutex_exit(p->p_lock);
}
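
/*
 * Example (hypothetical, procfs-style usage): hold a reference while
 * inspecting an LWP without keeping p_lock held the whole time:
 *
 *      mutex_enter(p->p_lock);
 *      lwp_addref(l);
 *      mutex_exit(p->p_lock);
 *      ...inspect l...
 *      lwp_delref(l);
 */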

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
        struct proc *p = l->l_proc;

        KASSERT(mutex_owned(p->p_lock));
        KASSERT(l->l_refcnt != 0);

        l->l_refcnt--;
        while (l->l_refcnt != 0)
                cv_wait(&p->p_lwpcv, p->p_lock);
}

/*
 * lwp_specific_key_create --
 *      Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

        return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *      Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

        specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *      Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
        int error;

        error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
        KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *      Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

        specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *      Return lwp-specific data corresponding to the specified key.
 *
 *      Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *      only its OWN SPECIFIC DATA.  If it is necessary to access another
 *      LWP's specific data, care must be taken to ensure that doing so
 *      would not cause internal data structure inconsistency (i.e. caller
 *      can guarantee that the target LWP is not inside an lwp_getspecific()
 *      or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

        return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
            &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

        return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
            &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *      Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

        specificdata_setspecific(lwp_specificdata_domain,
            &curlwp->l_specdataref, key, data);
}
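
/*
 * Example (a hypothetical subsystem, for illustration only; "my_key",
 * "my_dtor" and "my_data" are placeholders):
 *
 *      static specificdata_key_t my_key;
 *
 *      error = lwp_specific_key_create(&my_key, my_dtor);
 *      ...
 *      lwp_setspecific(my_key, my_data);       (curlwp only)
 *      my_data = lwp_getspecific(my_key);
 */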

/*
 * Allocate a new lwpctl structure for a user LWP.
 */
int
lwp_ctl_alloc(vaddr_t *uaddr)
{
        lcproc_t *lp;
        u_int bit, i, offset;
        struct uvm_object *uao;
        int error;
        lcpage_t *lcp;
        proc_t *p;
        lwp_t *l;

        l = curlwp;
        p = l->l_proc;

        if (l->l_lcpage != NULL) {
                lcp = l->l_lcpage;
                *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
                return (EINVAL);
        }

        /* First time around, allocate header structure for the process. */
        if ((lp = p->p_lwpctl) == NULL) {
                lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
                mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
                lp->lp_uao = NULL;
                TAILQ_INIT(&lp->lp_pages);
                mutex_enter(p->p_lock);
                if (p->p_lwpctl == NULL) {
                        p->p_lwpctl = lp;
                        mutex_exit(p->p_lock);
                } else {
                        mutex_exit(p->p_lock);
                        mutex_destroy(&lp->lp_lock);
                        kmem_free(lp, sizeof(*lp));
                        lp = p->p_lwpctl;
                }
        }

        /*
         * Set up an anonymous memory region to hold the shared pages.
         * Map them into the process' address space.  The user vmspace
         * gets the first reference on the UAO.
         */
        mutex_enter(&lp->lp_lock);
        if (lp->lp_uao == NULL) {
                lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
                lp->lp_cur = 0;
                lp->lp_max = LWPCTL_UAREA_SZ;
                lp->lp_uva = p->p_emul->e_vm_default_addr(p,
                     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
                error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
                    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
                    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
                if (error != 0) {
                        uao_detach(lp->lp_uao);
                        lp->lp_uao = NULL;
                        mutex_exit(&lp->lp_lock);
                        return error;
                }
        }

        /* Get a free block and allocate for this LWP. */
        TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
                if (lcp->lcp_nfree != 0)
                        break;
        }
        if (lcp == NULL) {
                /* Nothing available - try to set up a free page. */
                if (lp->lp_cur == lp->lp_max) {
                        mutex_exit(&lp->lp_lock);
                        return ENOMEM;
                }
                lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
                if (lcp == NULL) {
                        mutex_exit(&lp->lp_lock);
                        return ENOMEM;
                }
                /*
                 * Wire the next page down in kernel space.  Since this
                 * is a new mapping, we must add a reference.
                 */
                uao = lp->lp_uao;
                (*uao->pgops->pgo_reference)(uao);
                lcp->lcp_kaddr = vm_map_min(kernel_map);
                error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
                    uao, lp->lp_cur, PAGE_SIZE,
                    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
                    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
                if (error != 0) {
                        mutex_exit(&lp->lp_lock);
                        kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                        (*uao->pgops->pgo_detach)(uao);
                        return error;
                }
                error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
                    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
                if (error != 0) {
                        mutex_exit(&lp->lp_lock);
                        uvm_unmap(kernel_map, lcp->lcp_kaddr,
                            lcp->lcp_kaddr + PAGE_SIZE);
                        kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                        return error;
                }
                /* Prepare the page descriptor and link into the list. */
                lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
                lp->lp_cur += PAGE_SIZE;
                lcp->lcp_nfree = LWPCTL_PER_PAGE;
                lcp->lcp_rotor = 0;
                memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
                TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
        }
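        /*
         * Find a free slot in the page's bitmap.  (Descriptive note:
         * each 32-bit bitmap word covers 32 lwpctl slots and a set bit
         * means "free", so the slot number computed below is
         * word-index * 32 + bit-index.)
         */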
        for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
                if (++i >= LWPCTL_BITMAP_ENTRIES)
                        i = 0;
        }
        bit = ffs(lcp->lcp_bitmap[i]) - 1;
        lcp->lcp_bitmap[i] ^= (1 << bit);
        lcp->lcp_rotor = i;
        lcp->lcp_nfree--;
        l->l_lcpage = lcp;
        offset = (i << 5) + bit;
        l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
        *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
        mutex_exit(&lp->lp_lock);

        KPREEMPT_DISABLE(l);
        l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
        KPREEMPT_ENABLE(l);

        return 0;
}

/*
 * Free an lwpctl structure back to the per-process list.
 */
void
lwp_ctl_free(lwp_t *l)
{
        lcproc_t *lp;
        lcpage_t *lcp;
        u_int map, offset;

        lp = l->l_proc->p_lwpctl;
        KASSERT(lp != NULL);

        lcp = l->l_lcpage;
        offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
        KASSERT(offset < LWPCTL_PER_PAGE);

        mutex_enter(&lp->lp_lock);
        lcp->lcp_nfree++;
        map = offset >> 5;
        lcp->lcp_bitmap[map] |= (1 << (offset & 31));
        if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
                lcp->lcp_rotor = map;
        if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
                TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
                TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
        }
        mutex_exit(&lp->lp_lock);
}

/*
 * Process is exiting; tear down lwpctl state.  This can only be safely
 * called by the last LWP in the process.
 */
void
lwp_ctl_exit(void)
{
        lcpage_t *lcp, *next;
        lcproc_t *lp;
        proc_t *p;
        lwp_t *l;

        l = curlwp;
        l->l_lwpctl = NULL;
        l->l_lcpage = NULL;
        p = l->l_proc;
        lp = p->p_lwpctl;

        KASSERT(lp != NULL);
        KASSERT(p->p_nlwps == 1);

        for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
                next = TAILQ_NEXT(lcp, lcp_chain);
                uvm_unmap(kernel_map, lcp->lcp_kaddr,
                    lcp->lcp_kaddr + PAGE_SIZE);
                kmem_free(lcp, LWPCTL_LCPAGE_SZ);
        }

        if (lp->lp_uao != NULL) {
                uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
                    lp->lp_uva + LWPCTL_UAREA_SZ);
        }

        mutex_destroy(&lp->lp_lock);
        kmem_free(lp, sizeof(*lp));
        p->p_lwpctl = NULL;
}

#if defined(DDB)
void
lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
        lwp_t *l;

        LIST_FOREACH(l, &alllwp, l_list) {
                uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);

                if (addr < stack || stack + KSTACK_SIZE <= addr) {
                        continue;
                }
                (*pr)("%p is %p+%zu, LWP %p's stack\n",
                    (void *)addr, (void *)stack,
                    (size_t)(addr - stack), l);
        }
}
#endif /* defined(DDB) */