1 /* $NetBSD: kern_lwp.c,v 1.194 2018/07/04 18:15:27 kamil Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams, and Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * Lightweight processes (LWPs) are the basic unit or thread of
36 * execution within the kernel. The core state of an LWP is described
37 * by "struct lwp", also known as lwp_t.
38 *
39 * Each LWP is contained within a process (described by "struct proc").
40 * Every process contains at least one LWP, but may contain more. The
41 * process describes attributes shared among all of its LWPs such as a
42 * private address space, global execution state (stopped, active,
43 * zombie, ...), signal disposition and so on. On a multiprocessor
44 * machine, multiple LWPs may be executing concurrently in the kernel.
45 *
46 * Execution states
47 *
48 * At any given time, an LWP has overall state that is described by
49 * lwp::l_stat. The states are broken into two sets below. The first
50 * set is guaranteed to represent the absolute, current state of the
51 * LWP:
52 *
53 * LSONPROC
54 *
55 * On processor: the LWP is executing on a CPU, either in the
56 * kernel or in user space.
57 *
58 * LSRUN
59 *
60 * Runnable: the LWP is parked on a run queue, and may soon be
61 * chosen to run by an idle processor, or by a processor that
62 * has been asked to preempt a currently running but lower
63 * priority LWP.
64 *
65 * LSIDL
66 *
67 * Idle: the LWP has been created but has not yet executed,
68 * or it has ceased executing a unit of work and is waiting
69 * to be started again.
70 *
71 * LSSUSPENDED:
72 *
73 * Suspended: the LWP has had its execution suspended by
74 * another LWP in the same process using the _lwp_suspend()
75 * system call. User-level LWPs also enter the suspended
76 * state when the system is shutting down.
77 *
78 * The second set represents a "statement of intent" on behalf of the
79 * LWP. The LWP may in fact be executing on a processor, or may be
80 * sleeping or idle. It is expected to take the necessary action to
81 * stop executing or become "running" again within a short timeframe.
82 * The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
83 * Importantly, it indicates that its state is tied to a CPU.
84 *
85 * LSZOMB:
86 *
87 * Dead or dying: the LWP has released most of its resources
88 * and is about to switch away into oblivion, or has already
89 * switched away. When it switches away, its few remaining
90 * resources can be collected.
91 *
92 * LSSLEEP:
93 *
94 * Sleeping: the LWP has entered itself onto a sleep queue, and
95 * has switched away or will switch away shortly to allow other
96 * LWPs to run on the CPU.
97 *
98 * LSSTOP:
99 *
100 * Stopped: the LWP has been stopped as a result of a job
101 * control signal, or as a result of the ptrace() interface.
102 *
103 * Stopped LWPs may run briefly within the kernel to handle
104 * signals that they receive, but will not return to user space
105 * until their process' state is changed away from stopped.
106 *
107 * Single LWPs within a process can not be set stopped
108 * selectively: all actions that can stop or continue LWPs
109 * occur at the process level.
110 *
111 * State transitions
112 *
113 * Note that the LSSTOP state may only be set when returning to
114 * user space in userret(), or when sleeping interruptibly. The
115 * LSSUSPENDED state may only be set in userret(). Before setting
116 * those states, we try to ensure that the LWPs will release all
117 * locks that they hold, and at a minimum try to ensure that the
118 * LWP can be set runnable again by a signal.
119 *
120 * LWPs may transition states in the following ways:
121 *
122 * RUN -------> ONPROC ONPROC -----> RUN
123 * > SLEEP
124 * > STOPPED
125 * > SUSPENDED
126 * > ZOMB
127 * > IDL (special cases)
128 *
129 * STOPPED ---> RUN SUSPENDED --> RUN
130 * > SLEEP
131 *
132 * SLEEP -----> ONPROC IDL --------> RUN
133 * > RUN > SUSPENDED
134 * > STOPPED > STOPPED
135 * > ONPROC (special cases)
136 *
137 * Some state transitions are only possible with kernel threads (e.g.
138 * ONPROC -> IDL) and happen under tightly controlled circumstances
139 * free of unwanted side effects.
140 *
141 * Migration
142 *
143 * Migration of threads from one CPU to another may be performed
144 * internally by the scheduler via the sched_takecpu() or sched_catchlwp()
145 * functions. The universal lwp_migrate() function should be used for
146 * any other cases. Subsystems in the kernel must be aware that an
147 * LWP's CPU may change while the LWP is not locked.
148 *
149 * Locking
150 *
151 * The majority of fields in 'struct lwp' are covered by a single,
152 * general spin lock pointed to by lwp::l_mutex. The locks covering
153 * each field are documented in sys/lwp.h.
154 *
155 * State transitions must be made with the LWP's general lock held,
156 * and may cause the LWP's lock pointer to change. Manipulation of
157 * the general lock is not performed directly, but through calls to
158 * lwp_lock(), lwp_unlock() and others. It should be noted that the
159 * adaptive locks are not allowed to be released while the LWP's lock
160 * is being held (unlike for other spin-locks).
161 *
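 * For example, asking a running LWP in the same process to suspend
 * looks roughly like this (a sketch of the LSRUN/LSONPROC case in
 * lwp_suspend() below; the caller already holds p_lock):
 *
 *     lwp_lock(t);
 *     t->l_flag |= LW_WSUSPEND;
 *     lwp_need_userret(t);
 *     lwp_unlock(t);
 *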
162 * States and their associated locks:
163 *
164 * LSONPROC, LSZOMB:
165 *
166 * Always covered by spc_lwplock, which protects running LWPs.
167 * This is a per-CPU lock and matches lwp::l_cpu.
168 *
169 * LSIDL, LSRUN:
170 *
171 * Always covered by spc_mutex, which protects the run queues.
172 * This is a per-CPU lock and matches lwp::l_cpu.
173 *
174 * LSSLEEP:
175 *
176 * Covered by a lock associated with the sleep queue that the
177 * LWP resides on. Matches lwp::l_sleepq::sq_mutex.
178 *
179 * LSSTOP, LSSUSPENDED:
180 *
181 * If the LWP was previously sleeping (l_wchan != NULL), then
182 * l_mutex references the sleep queue lock. If the LWP was
183 * runnable or on the CPU when halted, or has been removed from
184 * the sleep queue since halted, then the lock is spc_lwplock.
185 *
186 * The lock order is as follows:
187 *
188 * spc::spc_lwplock ->
189 * sleeptab::st_mutex ->
190 * tschain_t::tc_mutex ->
191 * spc::spc_mutex
192 *
193 * Each process has a scheduler state lock (proc::p_lock), and a
194 * number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
195 * so on. When an LWP is to be entered into or removed from one of the
196 * following states, p_lock must be held and the process wide counters
197 * adjusted:
198 *
199 * LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
200 *
201 * (But not always for kernel threads. There are some special cases
202 * as mentioned above. See kern_softint.c.)
203 *
204 * Note that an LWP is considered running or likely to run soon if in
205 * one of the following states. This affects the value of p_nrlwps:
206 *
207 * LSRUN, LSONPROC, LSSLEEP
208 *
209 * p_lock does not need to be held when transitioning among these
210 * three states, hence p_lock is rarely taken for state transitions.
211 */
212
213 #include <sys/cdefs.h>
214 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.194 2018/07/04 18:15:27 kamil Exp $");
215
216 #include "opt_ddb.h"
217 #include "opt_lockdebug.h"
218 #include "opt_dtrace.h"
219
220 #define _LWP_API_PRIVATE
221
222 #include <sys/param.h>
223 #include <sys/systm.h>
224 #include <sys/cpu.h>
225 #include <sys/pool.h>
226 #include <sys/proc.h>
227 #include <sys/syscallargs.h>
228 #include <sys/syscall_stats.h>
229 #include <sys/kauth.h>
230 #include <sys/pserialize.h>
231 #include <sys/sleepq.h>
232 #include <sys/lockdebug.h>
233 #include <sys/kmem.h>
234 #include <sys/pset.h>
235 #include <sys/intr.h>
236 #include <sys/lwpctl.h>
237 #include <sys/atomic.h>
238 #include <sys/filedesc.h>
239 #include <sys/dtrace_bsd.h>
240 #include <sys/sdt.h>
241 #include <sys/xcall.h>
242 #include <sys/uidinfo.h>
243 #include <sys/sysctl.h>
244
245 #include <uvm/uvm_extern.h>
246 #include <uvm/uvm_object.h>
247
248 static pool_cache_t lwp_cache __read_mostly;
249 struct lwplist alllwp __cacheline_aligned;
250
251 static void lwp_dtor(void *, void *);
252
253 /* DTrace proc provider probes */
254 SDT_PROVIDER_DEFINE(proc);
255
256 SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
257 SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
258 SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
259
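/*
 * Statically-initialized turnstile and LWP for the first kernel thread:
 * lwp0 is proc0's initial LWP (lid 1, named "swapper").  It exists before
 * the LWP pool cache is set up, so it cannot come from lwp_create().
 */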
260 struct turnstile turnstile0;
261 struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
262 #ifdef LWP0_CPU_INFO
263 .l_cpu = LWP0_CPU_INFO,
264 #endif
265 #ifdef LWP0_MD_INITIALIZER
266 .l_md = LWP0_MD_INITIALIZER,
267 #endif
268 .l_proc = &proc0,
269 .l_lid = 1,
270 .l_flag = LW_SYSTEM,
271 .l_stat = LSONPROC,
272 .l_ts = &turnstile0,
273 .l_syncobj = &sched_syncobj,
274 .l_refcnt = 1,
275 .l_priority = PRI_USER + NPRI_USER - 1,
276 .l_inheritedprio = -1,
277 .l_class = SCHED_OTHER,
278 .l_psid = PS_NONE,
279 .l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
280 .l_name = __UNCONST("swapper"),
281 .l_fd = &filedesc0,
282 };
283
284 static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
285
286 /*
287 * sysctl helper routine for kern.maxlwp. Ensures that the new
288 * values are not too low or too high.
289 */
290 static int
291 sysctl_kern_maxlwp(SYSCTLFN_ARGS)
292 {
293 int error, nmaxlwp;
294 struct sysctlnode node;
295
296 nmaxlwp = maxlwp;
297 node = *rnode;
298 node.sysctl_data = &nmaxlwp;
299 error = sysctl_lookup(SYSCTLFN_CALL(&node));
300 if (error || newp == NULL)
301 return error;
302
303 if (nmaxlwp < 0 || nmaxlwp >= 65536)
304 return EINVAL;
305 if (nmaxlwp > cpu_maxlwp())
306 return EINVAL;
307 maxlwp = nmaxlwp;
308
309 return 0;
310 }
311
312 static void
313 sysctl_kern_lwp_setup(void)
314 {
315 struct sysctllog *clog = NULL;
316
317 sysctl_createv(&clog, 0, NULL, NULL,
318 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
319 CTLTYPE_INT, "maxlwp",
320 SYSCTL_DESCR("Maximum number of simultaneous threads"),
321 sysctl_kern_maxlwp, 0, NULL, 0,
322 CTL_KERN, CTL_CREATE, CTL_EOL);
323 }
324
325 void
326 lwpinit(void)
327 {
328
329 LIST_INIT(&alllwp);
330 lwpinit_specificdata();
331 lwp_sys_init();
332 lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
333 "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);
334
335 maxlwp = cpu_maxlwp();
336 sysctl_kern_lwp_setup();
337 }
338
339 void
340 lwp0_init(void)
341 {
342 struct lwp *l = &lwp0;
343
344 KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
345 KASSERT(l->l_lid == proc0.p_nlwpid);
346
347 LIST_INSERT_HEAD(&alllwp, l, l_list);
348
349 callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
350 callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
351 cv_init(&l->l_sigcv, "sigwait");
352 cv_init(&l->l_waitcv, "vfork");
353
354 kauth_cred_hold(proc0.p_cred);
355 l->l_cred = proc0.p_cred;
356
357 kdtrace_thread_ctor(NULL, l);
358 lwp_initspecific(l);
359
360 SYSCALL_TIME_LWP_INIT(l);
361 }
362
363 static void
364 lwp_dtor(void *arg, void *obj)
365 {
366 lwp_t *l = obj;
367 uint64_t where;
368 (void)l;
369
370 /*
371 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
372 * calls will exit before the memory of the LWP is returned to the pool,
373 * where the KVA of the LWP structure might be freed and re-used for
374 * other purposes. Kernel preemption is disabled around mutex_oncpu()
375 * and rw_oncpu() callers, therefore a cross-call to all CPUs will do
376 * the job. Also, the value of l->l_cpu must still be valid at this point.
377 */
378 KASSERT(l->l_cpu != NULL);
379 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
380 xc_wait(where);
381 }
382
383 /*
384 * Set an LWP suspended.
385 *
386 * Must be called with p_lock held, and the LWP locked. Will unlock the
387 * LWP before return.
388 */
389 int
390 lwp_suspend(struct lwp *curl, struct lwp *t)
391 {
392 int error;
393
394 KASSERT(mutex_owned(t->l_proc->p_lock));
395 KASSERT(lwp_locked(t, NULL));
396
397 KASSERT(curl != t || curl->l_stat == LSONPROC);
398
399 /*
400 * If the current LWP has been told to exit, we must not suspend anyone
401 * else or deadlock could occur. We won't return to userspace.
402 */
403 if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
404 lwp_unlock(t);
405 return (EDEADLK);
406 }
407
408 error = 0;
409
410 switch (t->l_stat) {
411 case LSRUN:
412 case LSONPROC:
413 t->l_flag |= LW_WSUSPEND;
414 lwp_need_userret(t);
415 lwp_unlock(t);
416 break;
417
418 case LSSLEEP:
419 t->l_flag |= LW_WSUSPEND;
420
421 /*
422 * Kick the LWP and try to get it to the kernel boundary
423 * so that it will release any locks that it holds.
424 * setrunnable() will release the lock.
425 */
426 if ((t->l_flag & LW_SINTR) != 0)
427 setrunnable(t);
428 else
429 lwp_unlock(t);
430 break;
431
432 case LSSUSPENDED:
433 lwp_unlock(t);
434 break;
435
436 case LSSTOP:
437 t->l_flag |= LW_WSUSPEND;
438 setrunnable(t);
439 break;
440
441 case LSIDL:
442 case LSZOMB:
443 error = EINTR; /* It's what Solaris does..... */
444 lwp_unlock(t);
445 break;
446 }
447
448 return (error);
449 }
450
451 /*
452 * Restart a suspended LWP.
453 *
454 * Must be called with p_lock held, and the LWP locked. Will unlock the
455 * LWP before return.
456 */
457 void
458 lwp_continue(struct lwp *l)
459 {
460
461 KASSERT(mutex_owned(l->l_proc->p_lock));
462 KASSERT(lwp_locked(l, NULL));
463
464 /* If rebooting or not suspended, then just bail out. */
465 if ((l->l_flag & LW_WREBOOT) != 0) {
466 lwp_unlock(l);
467 return;
468 }
469
470 l->l_flag &= ~LW_WSUSPEND;
471
472 if (l->l_stat != LSSUSPENDED) {
473 lwp_unlock(l);
474 return;
475 }
476
477 /* setrunnable() will release the lock. */
478 setrunnable(l);
479 }
480
481 /*
482 * Restart a stopped LWP.
483 *
484 * Must be called with p_lock held, and the LWP NOT locked. Will unlock the
485 * LWP before return.
486 */
487 void
488 lwp_unstop(struct lwp *l)
489 {
490 struct proc *p = l->l_proc;
491
492 KASSERT(mutex_owned(proc_lock));
493 KASSERT(mutex_owned(p->p_lock));
494
495 lwp_lock(l);
496
497 /* If not stopped, then just bail out. */
498 if (l->l_stat != LSSTOP) {
499 lwp_unlock(l);
500 return;
501 }
502
503 p->p_stat = SACTIVE;
504 p->p_sflag &= ~PS_STOPPING;
505
506 if (!p->p_waited)
507 p->p_pptr->p_nstopchild--;
508
509 if (l->l_wchan == NULL) {
510 /* setrunnable() will release the lock. */
511 setrunnable(l);
512 } else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) {
513 /* setrunnable() so we can receive the signal */
514 setrunnable(l);
515 } else {
516 l->l_stat = LSSLEEP;
517 p->p_nrlwps++;
518 lwp_unlock(l);
519 }
520 }
521
522 /*
523 * Wait for an LWP within the current process to exit. If 'lid' is
524 * non-zero, we are waiting for a specific LWP.
525 *
526 * Must be called with p->p_lock held.
527 */
528 int
529 lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
530 {
531 const lwpid_t curlid = l->l_lid;
532 proc_t *p = l->l_proc;
533 lwp_t *l2;
534 int error;
535
536 KASSERT(mutex_owned(p->p_lock));
537
538 p->p_nlwpwait++;
539 l->l_waitingfor = lid;
540
541 for (;;) {
542 int nfound;
543
544 /*
545 * Avoid a race between exit1() and sigexit(): if the
546 * process is dumping core, then we need to bail out: call
547 * into lwp_userret() where we will be suspended until the
548 * deed is done.
549 */
550 if ((p->p_sflag & PS_WCORE) != 0) {
551 mutex_exit(p->p_lock);
552 lwp_userret(l);
553 KASSERT(false);
554 }
555
556 /*
557 * First off, drain any detached LWP that is waiting to be
558 * reaped.
559 */
560 while ((l2 = p->p_zomblwp) != NULL) {
561 p->p_zomblwp = NULL;
562 lwp_free(l2, false, false);/* releases proc mutex */
563 mutex_enter(p->p_lock);
564 }
565
566 /*
567 * Now look for an LWP to collect. If the whole process is
568 * exiting, count detached LWPs as eligible to be collected,
569 * but don't drain them here.
570 */
571 nfound = 0;
572 error = 0;
573 LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
574 /*
575 * If a specific wait and the target is waiting on
576 * us, then avoid deadlock. This also traps LWPs
577 * that try to wait on themselves.
578 *
579 * Note that this does not handle more complicated
580 * cycles, like: t1 -> t2 -> t3 -> t1. The process
581 * can still be killed so it is not a major problem.
582 */
583 if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
584 error = EDEADLK;
585 break;
586 }
587 if (l2 == l)
588 continue;
589 if ((l2->l_prflag & LPR_DETACHED) != 0) {
590 nfound += exiting;
591 continue;
592 }
593 if (lid != 0) {
594 if (l2->l_lid != lid)
595 continue;
596 /*
597 * Mark this LWP as the first waiter, if there
598 * is no other.
599 */
600 if (l2->l_waiter == 0)
601 l2->l_waiter = curlid;
602 } else if (l2->l_waiter != 0) {
603 /*
604 * It already has a waiter - so don't
605 * collect it. If the waiter doesn't
606 * grab it we'll get another chance
607 * later.
608 */
609 nfound++;
610 continue;
611 }
612 nfound++;
613
614 /* No need to lock the LWP in order to see LSZOMB. */
615 if (l2->l_stat != LSZOMB)
616 continue;
617
618 /*
619 * We're no longer waiting. Reset the "first waiter"
620 * pointer on the target, in case it was us.
621 */
622 l->l_waitingfor = 0;
623 l2->l_waiter = 0;
624 p->p_nlwpwait--;
625 if (departed)
626 *departed = l2->l_lid;
627 sched_lwp_collect(l2);
628
629 /* lwp_free() releases the proc lock. */
630 lwp_free(l2, false, false);
631 mutex_enter(p->p_lock);
632 return 0;
633 }
634
635 if (error != 0)
636 break;
637 if (nfound == 0) {
638 error = ESRCH;
639 break;
640 }
641
642 /*
643 * Note: since the lock will be dropped, need to restart on
644 * wakeup to run all LWPs again, e.g. there may be new LWPs.
645 */
646 if (exiting) {
647 KASSERT(p->p_nlwps > 1);
648 cv_wait(&p->p_lwpcv, p->p_lock);
649 error = EAGAIN;
650 break;
651 }
652
653 /*
654 * If all other LWPs are waiting for exits or suspends
655 * and the supply of zombies and potential zombies is
656 * exhausted, then we are about to deadlock.
657 *
658 * If the process is exiting (and this LWP is not the one
659 * that is coordinating the exit) then bail out now.
660 */
661 if ((p->p_sflag & PS_WEXIT) != 0 ||
662 p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
663 error = EDEADLK;
664 break;
665 }
666
667 /*
668 * Sit around and wait for something to happen. We'll be
669 * awoken if any of the conditions examined change: if an
670 * LWP exits, is collected, or is detached.
671 */
672 if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
673 break;
674 }
675
676 /*
677 * We didn't find any LWPs to collect, we may have received a
678 * signal, or some other condition has caused us to bail out.
679 *
680 * If waiting on a specific LWP, clear the waiters marker: some
681 * other LWP may want it. Then, kick all the remaining waiters
682 * so that they can re-check for zombies and for deadlock.
683 */
684 if (lid != 0) {
685 LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
686 if (l2->l_lid == lid) {
687 if (l2->l_waiter == curlid)
688 l2->l_waiter = 0;
689 break;
690 }
691 }
692 }
693 p->p_nlwpwait--;
694 l->l_waitingfor = 0;
695 cv_broadcast(&p->p_lwpcv);
696
697 return error;
698 }
699
700 static lwpid_t
701 lwp_find_free_lid(lwpid_t try_lid, lwp_t * new_lwp, proc_t *p)
702 {
703 #define LID_SCAN (1u << 31)
704 lwp_t *scan, *free_before;
705 lwpid_t nxt_lid;
706
707 /*
708 * We want the first unused lid greater than or equal to
709 * try_lid (modulo 2^31).
710 * (If nothing else, ld.elf_so doesn't want lwpids with the top bit set.)
711 * We must not return 0, and avoiding 'LID_SCAN - 1' makes
712 * the outer test easier.
713 * This would be much easier if the list were sorted in
714 * increasing order.
715 * The list is kept sorted in decreasing order.
716 * This code is only used after a process has generated 2^31 LWPs.
717 *
718 * Code assumes it can always find an id.
719 */
720
721 try_lid &= LID_SCAN - 1;
722 if (try_lid <= 1)
723 try_lid = 2;
724
725 free_before = NULL;
726 nxt_lid = LID_SCAN - 1;
727 LIST_FOREACH(scan, &p->p_lwps, l_sibling) {
728 if (scan->l_lid != nxt_lid) {
729 /* There are available lids before this entry */
730 free_before = scan;
731 if (try_lid > scan->l_lid)
732 break;
733 }
734 if (try_lid == scan->l_lid) {
735 /* The ideal lid is busy, take a higher one */
736 if (free_before != NULL) {
737 try_lid = free_before->l_lid + 1;
738 break;
739 }
740 /* No higher ones, reuse low numbers */
741 try_lid = 2;
742 }
743
744 nxt_lid = scan->l_lid - 1;
745 if (LIST_NEXT(scan, l_sibling) == NULL) {
746 /* The value we have is lower than any existing lwp */
747 LIST_INSERT_AFTER(scan, new_lwp, l_sibling);
748 return try_lid;
749 }
750 }
751
752 LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling);
753 return try_lid;
754 }
755
756 /*
757 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
758 * The new LWP is created in state LSIDL and must be set running,
759 * suspended, or stopped by the caller.
760 */
761 int
762 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
763 void *stack, size_t stacksize, void (*func)(void *), void *arg,
764 lwp_t **rnewlwpp, int sclass, const sigset_t *sigmask,
765 const stack_t *sigstk)
766 {
767 struct lwp *l2, *isfree;
768 turnstile_t *ts;
769 lwpid_t lid;
770
771 KASSERT(l1 == curlwp || l1->l_proc == &proc0);
772
773 /*
774 * Enforce limits, excluding the first lwp and kthreads.
775 */
776 if (p2->p_nlwps != 0 && p2 != &proc0) {
777 uid_t uid = kauth_cred_getuid(l1->l_cred);
778 int count = chglwpcnt(uid, 1);
779 if (__predict_false(count >
780 p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
781 if (kauth_authorize_process(l1->l_cred,
782 KAUTH_PROCESS_RLIMIT, p2,
783 KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
784 &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
785 != 0) {
786 (void)chglwpcnt(uid, -1);
787 return EAGAIN;
788 }
789 }
790 }
791
792 /*
793 * First off, reap any detached LWP waiting to be collected.
794 * We can re-use its LWP structure and turnstile.
795 */
796 isfree = NULL;
797 if (p2->p_zomblwp != NULL) {
798 mutex_enter(p2->p_lock);
799 if ((isfree = p2->p_zomblwp) != NULL) {
800 p2->p_zomblwp = NULL;
801 lwp_free(isfree, true, false);/* releases proc mutex */
802 } else
803 mutex_exit(p2->p_lock);
804 }
805 if (isfree == NULL) {
806 l2 = pool_cache_get(lwp_cache, PR_WAITOK);
807 memset(l2, 0, sizeof(*l2));
808 l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
809 SLIST_INIT(&l2->l_pi_lenders);
810 } else {
811 l2 = isfree;
812 ts = l2->l_ts;
813 KASSERT(l2->l_inheritedprio == -1);
814 KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
815 memset(l2, 0, sizeof(*l2));
816 l2->l_ts = ts;
817 }
818
819 l2->l_stat = LSIDL;
820 l2->l_proc = p2;
821 l2->l_refcnt = 1;
822 l2->l_class = sclass;
823
824 /*
825 * If vfork(), we want the LWP to run fast and on the same CPU
826 * as its parent, so that it can reuse the VM context and cache
827 * footprint on the local CPU.
828 */
829 l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
830 l2->l_kpribase = PRI_KERNEL;
831 l2->l_priority = l1->l_priority;
832 l2->l_inheritedprio = -1;
833 l2->l_protectprio = -1;
834 l2->l_auxprio = -1;
835 l2->l_flag = 0;
836 l2->l_pflag = LP_MPSAFE;
837 TAILQ_INIT(&l2->l_ld_locks);
838
839 /*
840 * For vfork, borrow parent's lwpctl context if it exists.
841 * This also causes us to return via lwp_userret.
842 */
843 if (flags & LWP_VFORK && l1->l_lwpctl) {
844 l2->l_lwpctl = l1->l_lwpctl;
845 l2->l_flag |= LW_LWPCTL;
846 }
847
848 /*
849 * If not the first LWP in the process, grab a reference to the
850 * descriptor table.
851 */
852 l2->l_fd = p2->p_fd;
853 if (p2->p_nlwps != 0) {
854 KASSERT(l1->l_proc == p2);
855 fd_hold(l2);
856 } else {
857 KASSERT(l1->l_proc != p2);
858 }
859
860 if (p2->p_flag & PK_SYSTEM) {
861 /* Mark it as a system LWP. */
862 l2->l_flag |= LW_SYSTEM;
863 }
864
865 kpreempt_disable();
866 l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
867 l2->l_cpu = l1->l_cpu;
868 kpreempt_enable();
869
870 kdtrace_thread_ctor(NULL, l2);
871 lwp_initspecific(l2);
872 sched_lwp_fork(l1, l2);
873 lwp_update_creds(l2);
874 callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
875 callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
876 cv_init(&l2->l_sigcv, "sigwait");
877 cv_init(&l2->l_waitcv, "vfork");
878 l2->l_syncobj = &sched_syncobj;
879
880 if (rnewlwpp != NULL)
881 *rnewlwpp = l2;
882
883 /*
884 * PCU state needs to be saved before calling uvm_lwp_fork() so that
885 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
886 */
887 pcu_save_all(l1);
888
889 uvm_lwp_setuarea(l2, uaddr);
890 uvm_lwp_fork(l1, l2, stack, stacksize, func, (arg != NULL) ? arg : l2);
891
892 if ((flags & LWP_PIDLID) != 0) {
893 lid = proc_alloc_pid(p2);
894 l2->l_pflag |= LP_PIDLID;
895 } else {
896 lid = 0;
897 }
898
899 mutex_enter(p2->p_lock);
900
901 if ((flags & LWP_DETACHED) != 0) {
902 l2->l_prflag = LPR_DETACHED;
903 p2->p_ndlwps++;
904 } else
905 l2->l_prflag = 0;
906
907 l2->l_sigstk = *sigstk;
908 l2->l_sigmask = *sigmask;
909 TAILQ_INIT(&l2->l_sigpend.sp_info);
910 sigemptyset(&l2->l_sigpend.sp_set);
911
912 if (__predict_true(lid == 0)) {
913 /*
914 * XXX: l_lid values are expected to be unique (for a process);
915 * if LWP_PIDLID is sometimes set this won't be true.
916 * Once 2^31 threads have been allocated we have to
917 * scan to ensure we allocate a unique value.
918 */
919 lid = ++p2->p_nlwpid;
920 if (__predict_false(lid & LID_SCAN)) {
921 lid = lwp_find_free_lid(lid, l2, p2);
922 p2->p_nlwpid = lid | LID_SCAN;
923 /* l2 has been inserted into p_lwps in order */
924 goto skip_insert;
925 }
926 p2->p_nlwpid = lid;
927 }
928 LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
929 skip_insert:
930 l2->l_lid = lid;
931 p2->p_nlwps++;
932 p2->p_nrlwps++;
933
934 KASSERT(l2->l_affinity == NULL);
935
936 if ((p2->p_flag & PK_SYSTEM) == 0) {
937 /* Inherit the affinity mask. */
938 if (l1->l_affinity) {
939 /*
940 * Note that we hold the state lock while inheriting
941 * the affinity to avoid race with sched_setaffinity().
942 */
943 lwp_lock(l1);
944 if (l1->l_affinity) {
945 kcpuset_use(l1->l_affinity);
946 l2->l_affinity = l1->l_affinity;
947 }
948 lwp_unlock(l1);
949 }
950 lwp_lock(l2);
951 /* Inherit a processor-set */
952 l2->l_psid = l1->l_psid;
953 /* Look for a CPU to start */
954 l2->l_cpu = sched_takecpu(l2);
955 lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
956 }
957 mutex_exit(p2->p_lock);
958
959 SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
960
961 mutex_enter(proc_lock);
962 LIST_INSERT_HEAD(&alllwp, l2, l_list);
963 mutex_exit(proc_lock);
964
965 SYSCALL_TIME_LWP_INIT(l2);
966
967 if (p2->p_emul->e_lwp_fork)
968 (*p2->p_emul->e_lwp_fork)(l1, l2);
969
970 /* If the process is traced, report lwp creation to a debugger */
971 if ((p2->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE|PSL_SYSCALL)) ==
972 (PSL_TRACED|PSL_TRACELWP_CREATE)) {
973 ksiginfo_t ksi;
974
975 /* Tracing */
976 KASSERT((l2->l_flag & LW_SYSTEM) == 0);
977
978 p2->p_lwp_created = l2->l_lid;
979
980 KSI_INIT_EMPTY(&ksi);
981 ksi.ksi_signo = SIGTRAP;
982 ksi.ksi_code = TRAP_LWP;
983 mutex_enter(proc_lock);
984 kpsignal(p2, &ksi, NULL);
985 mutex_exit(proc_lock);
986 }
987
988 return (0);
989 }
990
991 /*
992 * Called by MD code when a new LWP begins execution. Must be called
993 * with the previous LWP locked (so at splsched), or if there is no
994 * previous LWP, at splsched.
995 */
996 void
997 lwp_startup(struct lwp *prev, struct lwp *new_lwp)
998 {
999 KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
1000
1001 SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
1002
1003 KASSERT(kpreempt_disabled());
1004 if (prev != NULL) {
1005 /*
1006 * Normalize the count of the spin-mutexes, it was
1007 * increased in mi_switch(). Unmark the state of
1008 * context switch - it is finished for previous LWP.
1009 */
1010 curcpu()->ci_mtx_count++;
1011 membar_exit();
1012 prev->l_ctxswtch = 0;
1013 }
1014 KPREEMPT_DISABLE(new_lwp);
1015 if (__predict_true(new_lwp->l_proc->p_vmspace))
1016 pmap_activate(new_lwp);
1017 spl0();
1018
1019 /* Note trip through cpu_switchto(). */
1020 pserialize_switchpoint();
1021
1022 LOCKDEBUG_BARRIER(NULL, 0);
1023 KPREEMPT_ENABLE(new_lwp);
1024 if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
1025 KERNEL_LOCK(1, new_lwp);
1026 }
1027 }
1028
1029 /*
1030 * Exit an LWP.
1031 */
1032 void
1033 lwp_exit(struct lwp *l)
1034 {
1035 struct proc *p = l->l_proc;
1036 struct lwp *l2;
1037 bool current;
1038
1039 current = (l == curlwp);
1040
1041 KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
1042 KASSERT(p == curproc);
1043
1044 SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
1045
1046 /*
1047 * Verify that we hold no locks other than the kernel lock.
1048 */
1049 LOCKDEBUG_BARRIER(&kernel_lock, 0);
1050
1051 /* If the process is traced, report lwp termination to a debugger */
1052 if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_EXIT|PSL_SYSCALL)) ==
1053 (PSL_TRACED|PSL_TRACELWP_EXIT)) {
1054 ksiginfo_t ksi;
1055
1056 /* Tracing */
1057 KASSERT((l->l_flag & LW_SYSTEM) == 0);
1058
1059 p->p_lwp_exited = l->l_lid;
1060
1061 KSI_INIT_EMPTY(&ksi);
1062 ksi.ksi_signo = SIGTRAP;
1063 ksi.ksi_code = TRAP_LWP;
1064 mutex_enter(proc_lock);
1065 kpsignal(p, &ksi, NULL);
1066 mutex_exit(proc_lock);
1067 }
1068
1069 /*
1070 * If we are the last live LWP in a process, we need to exit the
1071 * entire process. We do so with an exit status of zero, because
1072 * it's a "controlled" exit, and because that's what Solaris does.
1073 *
1074 * We are not quite a zombie yet, but for accounting purposes we
1075 * must increment the count of zombies here.
1076 *
1077 * Note: the last LWP's specificdata will be deleted here.
1078 */
1079 mutex_enter(p->p_lock);
1080 if (p->p_nlwps - p->p_nzlwps == 1) {
1081 KASSERT(current == true);
1082 KASSERT(p != &proc0);
1083 /* XXXSMP kernel_lock not held */
1084 exit1(l, 0, 0);
1085 /* NOTREACHED */
1086 }
1087 p->p_nzlwps++;
1088 mutex_exit(p->p_lock);
1089
1090 if (p->p_emul->e_lwp_exit)
1091 (*p->p_emul->e_lwp_exit)(l);
1092
1093 /* Drop filedesc reference. */
1094 fd_free();
1095
1096 /* Delete the specificdata while it's still safe to sleep. */
1097 lwp_finispecific(l);
1098
1099 /*
1100 * Release our cached credentials.
1101 */
1102 kauth_cred_free(l->l_cred);
1103 callout_destroy(&l->l_timeout_ch);
1104
1105 /*
1106 * Remove the LWP from the global list.
1107 * Free its LID from the PID namespace if needed.
1108 */
1109 mutex_enter(proc_lock);
1110 LIST_REMOVE(l, l_list);
1111 if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
1112 proc_free_pid(l->l_lid);
1113 }
1114 mutex_exit(proc_lock);
1115
1116 /*
1117 * Get rid of all references to the LWP that others (e.g. procfs)
1118 * may have, and mark the LWP as a zombie. If the LWP is detached,
1119 * mark it waiting for collection in the proc structure. Note that
1120 * before we can do that, we need to free any other dead, detached
1121 * LWP waiting to meet its maker.
1122 */
1123 mutex_enter(p->p_lock);
1124 lwp_drainrefs(l);
1125
1126 if ((l->l_prflag & LPR_DETACHED) != 0) {
1127 while ((l2 = p->p_zomblwp) != NULL) {
1128 p->p_zomblwp = NULL;
1129 lwp_free(l2, false, false);/* releases proc mutex */
1130 mutex_enter(p->p_lock);
1131 l->l_refcnt++;
1132 lwp_drainrefs(l);
1133 }
1134 p->p_zomblwp = l;
1135 }
1136
1137 /*
1138 * If we find a pending signal for the process and we have been
1139 * asked to check for signals, then we lose: arrange to have
1140 * all other LWPs in the process check for signals.
1141 */
1142 if ((l->l_flag & LW_PENDSIG) != 0 &&
1143 firstsig(&p->p_sigpend.sp_set) != 0) {
1144 LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1145 lwp_lock(l2);
1146 l2->l_flag |= LW_PENDSIG;
1147 lwp_unlock(l2);
1148 }
1149 }
1150
1151 /*
1152 * Release any PCU resources before becoming a zombie.
1153 */
1154 pcu_discard_all(l);
1155
1156 lwp_lock(l);
1157 l->l_stat = LSZOMB;
1158 if (l->l_name != NULL) {
1159 strcpy(l->l_name, "(zombie)");
1160 }
1161 lwp_unlock(l);
1162 p->p_nrlwps--;
1163 cv_broadcast(&p->p_lwpcv);
1164 if (l->l_lwpctl != NULL)
1165 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
1166 mutex_exit(p->p_lock);
1167
1168 /*
1169 * We can no longer block. At this point, lwp_free() may already
1170 * be gunning for us. On a multi-CPU system, we may be off p_lwps.
1171 *
1172 * Free MD LWP resources.
1173 */
1174 cpu_lwp_free(l, 0);
1175
1176 if (current) {
1177 pmap_deactivate(l);
1178
1179 /*
1180 * Release the kernel lock, and switch away into
1181 * oblivion.
1182 */
1183 #ifdef notyet
1184 /* XXXSMP hold in lwp_userret() */
1185 KERNEL_UNLOCK_LAST(l);
1186 #else
1187 KERNEL_UNLOCK_ALL(l, NULL);
1188 #endif
1189 lwp_exit_switchaway(l);
1190 }
1191 }
1192
1193 /*
1194 * Free a dead LWP's remaining resources.
1195 *
1196 * XXXLWP limits.
1197 */
1198 void
1199 lwp_free(struct lwp *l, bool recycle, bool last)
1200 {
1201 struct proc *p = l->l_proc;
1202 struct rusage *ru;
1203 ksiginfoq_t kq;
1204
1205 KASSERT(l != curlwp);
1206 KASSERT(last || mutex_owned(p->p_lock));
1207
1208 /*
1209 * We use the process credentials instead of the lwp credentials here
1210 * because the lwp credentials may be cached (just after a setuid call)
1211 * and we don't want to pay for syncing, since the lwp is going away
1212 * anyway.
1213 */
1214 if (p != &proc0 && p->p_nlwps != 1)
1215 (void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
1216 /*
1217 * If this was not the last LWP in the process, then adjust
1218 * counters and unlock.
1219 */
1220 if (!last) {
1221 /*
1222 * Add the LWP's run time to the process' base value.
1223 * This needs to coincide with coming off p_lwps.
1224 */
1225 bintime_add(&p->p_rtime, &l->l_rtime);
1226 p->p_pctcpu += l->l_pctcpu;
1227 ru = &p->p_stats->p_ru;
1228 ruadd(ru, &l->l_ru);
1229 ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
1230 ru->ru_nivcsw += l->l_nivcsw;
1231 LIST_REMOVE(l, l_sibling);
1232 p->p_nlwps--;
1233 p->p_nzlwps--;
1234 if ((l->l_prflag & LPR_DETACHED) != 0)
1235 p->p_ndlwps--;
1236
1237 /*
1238 * Have any LWPs sleeping in lwp_wait() recheck for
1239 * deadlock.
1240 */
1241 cv_broadcast(&p->p_lwpcv);
1242 mutex_exit(p->p_lock);
1243 }
1244
1245 #ifdef MULTIPROCESSOR
1246 /*
1247 * In the unlikely event that the LWP is still on the CPU,
1248 * then spin until it has switched away. We need to release
1249 * all locks to avoid deadlock against interrupt handlers on
1250 * the target CPU.
1251 */
1252 if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
1253 int count;
1254 (void)count; /* XXXgcc */
1255 KERNEL_UNLOCK_ALL(curlwp, &count);
1256 while ((l->l_pflag & LP_RUNNING) != 0 ||
1257 l->l_cpu->ci_curlwp == l)
1258 SPINLOCK_BACKOFF_HOOK;
1259 KERNEL_LOCK(count, curlwp);
1260 }
1261 #endif
1262
1263 /*
1264 * Destroy the LWP's remaining signal information.
1265 */
1266 ksiginfo_queue_init(&kq);
1267 sigclear(&l->l_sigpend, NULL, &kq);
1268 ksiginfo_queue_drain(&kq);
1269 cv_destroy(&l->l_sigcv);
1270 cv_destroy(&l->l_waitcv);
1271
1272 /*
1273 * Free lwpctl structure and affinity.
1274 */
1275 if (l->l_lwpctl) {
1276 lwp_ctl_free(l);
1277 }
1278 if (l->l_affinity) {
1279 kcpuset_unuse(l->l_affinity, NULL);
1280 l->l_affinity = NULL;
1281 }
1282
1283 /*
1284 * Free the LWP's turnstile and the LWP structure itself unless the
1285 * caller wants to recycle them. Also, free the scheduler specific
1286 * data.
1287 *
1288 * We can't return turnstile0 to the pool (it didn't come from it),
1289 * so if it comes up just drop it quietly and move on.
1290 *
1291 * We don't recycle the VM resources at this time.
1292 */
1293
1294 if (!recycle && l->l_ts != &turnstile0)
1295 pool_cache_put(turnstile_cache, l->l_ts);
1296 if (l->l_name != NULL)
1297 kmem_free(l->l_name, MAXCOMLEN);
1298
1299 cpu_lwp_free2(l);
1300 uvm_lwp_exit(l);
1301
1302 KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1303 KASSERT(l->l_inheritedprio == -1);
1304 KASSERT(l->l_blcnt == 0);
1305 kdtrace_thread_dtor(NULL, l);
1306 if (!recycle)
1307 pool_cache_put(lwp_cache, l);
1308 }
1309
1310 /*
1311 * Migrate the LWP to another CPU. Unlocks the LWP.
1312 */
1313 void
1314 lwp_migrate(lwp_t *l, struct cpu_info *tci)
1315 {
1316 struct schedstate_percpu *tspc;
1317 int lstat = l->l_stat;
1318
1319 KASSERT(lwp_locked(l, NULL));
1320 KASSERT(tci != NULL);
1321
1322 /* If LWP is still on the CPU, it must be handled like LSONPROC */
1323 if ((l->l_pflag & LP_RUNNING) != 0) {
1324 lstat = LSONPROC;
1325 }
1326
1327 /*
1328 * The destination CPU may have changed while a previous migration
1329 * was still pending; if so, just update the target.
1330 */
1331 if (l->l_target_cpu != NULL) {
1332 l->l_target_cpu = tci;
1333 lwp_unlock(l);
1334 return;
1335 }
1336
1337 /* Nothing to do if trying to migrate to the same CPU */
1338 if (l->l_cpu == tci) {
1339 lwp_unlock(l);
1340 return;
1341 }
1342
1343 KASSERT(l->l_target_cpu == NULL);
1344 tspc = &tci->ci_schedstate;
1345 switch (lstat) {
1346 case LSRUN:
1347 l->l_target_cpu = tci;
1348 break;
1349 case LSIDL:
1350 l->l_cpu = tci;
1351 lwp_unlock_to(l, tspc->spc_mutex);
1352 return;
1353 case LSSLEEP:
1354 l->l_cpu = tci;
1355 break;
1356 case LSSTOP:
1357 case LSSUSPENDED:
1358 l->l_cpu = tci;
1359 if (l->l_wchan == NULL) {
1360 lwp_unlock_to(l, tspc->spc_lwplock);
1361 return;
1362 }
1363 break;
1364 case LSONPROC:
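		/*
		 * Currently running: record the target CPU and ask the
		 * LWP's CPU to preempt it, so that the migration can be
		 * completed when the LWP next switches out.
		 */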
1365 l->l_target_cpu = tci;
1366 spc_lock(l->l_cpu);
1367 cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
1368 spc_unlock(l->l_cpu);
1369 break;
1370 }
1371 lwp_unlock(l);
1372 }
1373
1374 /*
1375 * Find the LWP in the process. Arguments may be zero, in which case
1376 * the calling process and the first LWP in the list will be used.
1377 * On success - returns proc locked.
1378 */
1379 struct lwp *
1380 lwp_find2(pid_t pid, lwpid_t lid)
1381 {
1382 proc_t *p;
1383 lwp_t *l;
1384
1385 /* Find the process. */
1386 if (pid != 0) {
1387 mutex_enter(proc_lock);
1388 p = proc_find(pid);
1389 if (p == NULL) {
1390 mutex_exit(proc_lock);
1391 return NULL;
1392 }
1393 mutex_enter(p->p_lock);
1394 mutex_exit(proc_lock);
1395 } else {
1396 p = curlwp->l_proc;
1397 mutex_enter(p->p_lock);
1398 }
1399 /* Find the thread. */
1400 if (lid != 0) {
1401 l = lwp_find(p, lid);
1402 } else {
1403 l = LIST_FIRST(&p->p_lwps);
1404 }
1405 if (l == NULL) {
1406 mutex_exit(p->p_lock);
1407 }
1408 return l;
1409 }
1410
1411 /*
1412 * Look up a live LWP within the specified process.
1413 *
1414 * Must be called with p->p_lock held.
1415 */
1416 struct lwp *
1417 lwp_find(struct proc *p, lwpid_t id)
1418 {
1419 struct lwp *l;
1420
1421 KASSERT(mutex_owned(p->p_lock));
1422
1423 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1424 if (l->l_lid == id)
1425 break;
1426 }
1427
1428 /*
1429 * No need to lock - all of these conditions will
1430 * be visible with the process level mutex held.
1431 */
1432 if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1433 l = NULL;
1434
1435 return l;
1436 }
1437
1438 /*
1439 * Update an LWP's cached credentials to mirror the process' master copy.
1440 *
1441 * This happens early in the syscall path, on user trap, and on LWP
1442 * creation. A long-running LWP can also voluntarily choose to update
1443 * its credentials by calling this routine. This may be called from
1444 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1445 */
1446 void
1447 lwp_update_creds(struct lwp *l)
1448 {
1449 kauth_cred_t oc;
1450 struct proc *p;
1451
1452 p = l->l_proc;
1453 oc = l->l_cred;
1454
1455 mutex_enter(p->p_lock);
1456 kauth_cred_hold(p->p_cred);
1457 l->l_cred = p->p_cred;
1458 l->l_prflag &= ~LPR_CRMOD;
1459 mutex_exit(p->p_lock);
1460 if (oc != NULL)
1461 kauth_cred_free(oc);
1462 }
1463
1464 /*
1465 * Verify that an LWP is locked, and optionally verify that the lock matches
1466 * one we specify.
1467 */
1468 int
1469 lwp_locked(struct lwp *l, kmutex_t *mtx)
1470 {
1471 kmutex_t *cur = l->l_mutex;
1472
1473 return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1474 }
1475
1476 /*
1477 * Lend a new mutex to an LWP. The old mutex must be held.
1478 */
1479 void
1480 lwp_setlock(struct lwp *l, kmutex_t *mtx)
1481 {
1482
1483 KASSERT(mutex_owned(l->l_mutex));
1484
1485 membar_exit();
1486 l->l_mutex = mtx;
1487 }
1488
1489 /*
1490 * Lend a new mutex to an LWP, and release the old mutex. The old mutex
1491 * must be held.
1492 */
1493 void
1494 lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
1495 {
1496 kmutex_t *old;
1497
1498 KASSERT(lwp_locked(l, NULL));
1499
1500 old = l->l_mutex;
1501 membar_exit();
1502 l->l_mutex = mtx;
1503 mutex_spin_exit(old);
1504 }
1505
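/*
 * Try to lock an LWP.  The LWP's lock pointer (l_mutex) can change while
 * we are acquiring it, so re-check it after the acquire and retry with
 * the new lock if it has moved.  Returns non-zero on success.
 */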
1506 int
1507 lwp_trylock(struct lwp *l)
1508 {
1509 kmutex_t *old;
1510
1511 for (;;) {
1512 if (!mutex_tryenter(old = l->l_mutex))
1513 return 0;
1514 if (__predict_true(l->l_mutex == old))
1515 return 1;
1516 mutex_spin_exit(old);
1517 }
1518 }
1519
1520 void
1521 lwp_unsleep(lwp_t *l, bool cleanup)
1522 {
1523
1524 KASSERT(mutex_owned(l->l_mutex));
1525 (*l->l_syncobj->sobj_unsleep)(l, cleanup);
1526 }
1527
1528 /*
1529 * Handle exceptions for mi_userret(). Called if one of the flags in
1530 * LW_USERRET is set.
1531 */
1532 void
1533 lwp_userret(struct lwp *l)
1534 {
1535 struct proc *p;
1536 int sig;
1537
1538 KASSERT(l == curlwp);
1539 KASSERT(l->l_stat == LSONPROC);
1540 p = l->l_proc;
1541
1542 #ifndef __HAVE_FAST_SOFTINTS
1543 /* Run pending soft interrupts. */
1544 if (l->l_cpu->ci_data.cpu_softints != 0)
1545 softint_overlay();
1546 #endif
1547
1548 /*
1549 * It is safe to do this read unlocked on an MP system.
1550 */
1551 while ((l->l_flag & LW_USERRET) != 0) {
1552 /*
1553 * Process pending signals first, unless the process
1554 * is dumping core or exiting, where we will instead
1555 * enter the LW_WSUSPEND case below.
1556 */
1557 if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1558 LW_PENDSIG) {
1559 mutex_enter(p->p_lock);
1560 while ((sig = issignal(l)) != 0)
1561 postsig(sig);
1562 mutex_exit(p->p_lock);
1563 }
1564
1565 /*
1566 * Core-dump or suspend pending.
1567 *
1568 * In case of core dump, suspend ourselves, so that the kernel
1569 * stack and therefore the userland registers saved in the
1570 * trapframe are around for coredump() to write them out.
1571 * We also need to save any PCU resources that we have so that
1572 * they are accessible to coredump(). We issue a wakeup on
1573 * p->p_lwpcv so that sigexit() will write the core file out
1574 * once all other LWPs are suspended.
1575 */
1576 if ((l->l_flag & LW_WSUSPEND) != 0) {
1577 pcu_save_all(l);
1578 mutex_enter(p->p_lock);
1579 p->p_nrlwps--;
1580 cv_broadcast(&p->p_lwpcv);
1581 lwp_lock(l);
1582 l->l_stat = LSSUSPENDED;
1583 lwp_unlock(l);
1584 mutex_exit(p->p_lock);
1585 lwp_lock(l);
1586 mi_switch(l);
1587 }
1588
1589 /* Process is exiting. */
1590 if ((l->l_flag & LW_WEXIT) != 0) {
1591 lwp_exit(l);
1592 KASSERT(0);
1593 /* NOTREACHED */
1594 }
1595
1596 /* update lwpctl processor (for vfork child_return) */
1597 if (l->l_flag & LW_LWPCTL) {
1598 lwp_lock(l);
1599 KASSERT(kpreempt_disabled());
1600 l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
1601 l->l_lwpctl->lc_pctr++;
1602 l->l_flag &= ~LW_LWPCTL;
1603 lwp_unlock(l);
1604 }
1605 }
1606 }
1607
1608 /*
1609 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1610 */
1611 void
1612 lwp_need_userret(struct lwp *l)
1613 {
1614 KASSERT(lwp_locked(l, NULL));
1615
1616 /*
1617 * Since the tests in lwp_userret() are done unlocked, make sure
1618 * that the condition will be seen before forcing the LWP to enter
1619 * kernel mode.
1620 */
1621 membar_producer();
1622 cpu_signotify(l);
1623 }
1624
1625 /*
1626 * Add one reference to an LWP. This will prevent the LWP from
1627 * exiting, thus keeping the lwp structure and PCB around to inspect.
1628 */
1629 void
1630 lwp_addref(struct lwp *l)
1631 {
1632
1633 KASSERT(mutex_owned(l->l_proc->p_lock));
1634 KASSERT(l->l_stat != LSZOMB);
1635 KASSERT(l->l_refcnt != 0);
1636
1637 l->l_refcnt++;
1638 }
1639
1640 /*
1641 * Remove one reference to an LWP. If this is the last reference,
1642 * then we must finalize the LWP's death.
1643 */
1644 void
1645 lwp_delref(struct lwp *l)
1646 {
1647 struct proc *p = l->l_proc;
1648
1649 mutex_enter(p->p_lock);
1650 lwp_delref2(l);
1651 mutex_exit(p->p_lock);
1652 }
1653
1654 /*
1655 * Remove one reference to an LWP. If this is the last reference,
1656 * then we must finalize the LWP's death. The proc mutex is held
1657 * on entry.
1658 */
1659 void
1660 lwp_delref2(struct lwp *l)
1661 {
1662 struct proc *p = l->l_proc;
1663
1664 KASSERT(mutex_owned(p->p_lock));
1665 KASSERT(l->l_stat != LSZOMB);
1666 KASSERT(l->l_refcnt > 0);
1667 if (--l->l_refcnt == 0)
1668 cv_broadcast(&p->p_lwpcv);
1669 }
1670
1671 /*
1672 * Drain all references to the current LWP.
1673 */
1674 void
1675 lwp_drainrefs(struct lwp *l)
1676 {
1677 struct proc *p = l->l_proc;
1678
1679 KASSERT(mutex_owned(p->p_lock));
1680 KASSERT(l->l_refcnt != 0);
1681
1682 l->l_refcnt--;
1683 while (l->l_refcnt != 0)
1684 cv_wait(&p->p_lwpcv, p->p_lock);
1685 }
1686
1687 /*
1688 * Return true if the specified LWP is 'alive'. Only p->p_lock need
1689 * be held.
1690 */
1691 bool
1692 lwp_alive(lwp_t *l)
1693 {
1694
1695 KASSERT(mutex_owned(l->l_proc->p_lock));
1696
1697 switch (l->l_stat) {
1698 case LSSLEEP:
1699 case LSRUN:
1700 case LSONPROC:
1701 case LSSTOP:
1702 case LSSUSPENDED:
1703 return true;
1704 default:
1705 return false;
1706 }
1707 }
1708
1709 /*
1710 * Return first live LWP in the process.
1711 */
1712 lwp_t *
1713 lwp_find_first(proc_t *p)
1714 {
1715 lwp_t *l;
1716
1717 KASSERT(mutex_owned(p->p_lock));
1718
1719 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1720 if (lwp_alive(l)) {
1721 return l;
1722 }
1723 }
1724
1725 return NULL;
1726 }
1727
1728 /*
1729 * Allocate a new lwpctl structure for a user LWP.
1730 */
1731 int
1732 lwp_ctl_alloc(vaddr_t *uaddr)
1733 {
1734 lcproc_t *lp;
1735 u_int bit, i, offset;
1736 struct uvm_object *uao;
1737 int error;
1738 lcpage_t *lcp;
1739 proc_t *p;
1740 lwp_t *l;
1741
1742 l = curlwp;
1743 p = l->l_proc;
1744
1745 /* don't allow a vforked process to create lwp ctls */
1746 if (p->p_lflag & PL_PPWAIT)
1747 return EBUSY;
1748
1749 if (l->l_lcpage != NULL) {
1750 lcp = l->l_lcpage;
1751 *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1752 return 0;
1753 }
1754
1755 /* First time around, allocate header structure for the process. */
1756 if ((lp = p->p_lwpctl) == NULL) {
1757 lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1758 mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1759 lp->lp_uao = NULL;
1760 TAILQ_INIT(&lp->lp_pages);
1761 mutex_enter(p->p_lock);
1762 if (p->p_lwpctl == NULL) {
1763 p->p_lwpctl = lp;
1764 mutex_exit(p->p_lock);
1765 } else {
1766 mutex_exit(p->p_lock);
1767 mutex_destroy(&lp->lp_lock);
1768 kmem_free(lp, sizeof(*lp));
1769 lp = p->p_lwpctl;
1770 }
1771 }
1772
1773 /*
1774 * Set up an anonymous memory region to hold the shared pages.
1775 * Map them into the process' address space. The user vmspace
1776 * gets the first reference on the UAO.
1777 */
1778 mutex_enter(&lp->lp_lock);
1779 if (lp->lp_uao == NULL) {
1780 lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1781 lp->lp_cur = 0;
1782 lp->lp_max = LWPCTL_UAREA_SZ;
1783 lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1784 (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ,
1785 p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
1786 error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1787 LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1788 UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1789 if (error != 0) {
1790 uao_detach(lp->lp_uao);
1791 lp->lp_uao = NULL;
1792 mutex_exit(&lp->lp_lock);
1793 return error;
1794 }
1795 }
1796
1797 /* Get a free block and allocate for this LWP. */
1798 TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1799 if (lcp->lcp_nfree != 0)
1800 break;
1801 }
1802 if (lcp == NULL) {
1803 /* Nothing available - try to set up a free page. */
1804 if (lp->lp_cur == lp->lp_max) {
1805 mutex_exit(&lp->lp_lock);
1806 return ENOMEM;
1807 }
1808 lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1809
1810 /*
1811 * Wire the next page down in kernel space. Since this
1812 * is a new mapping, we must add a reference.
1813 */
1814 uao = lp->lp_uao;
1815 (*uao->pgops->pgo_reference)(uao);
1816 lcp->lcp_kaddr = vm_map_min(kernel_map);
1817 error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1818 uao, lp->lp_cur, PAGE_SIZE,
1819 UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1820 UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1821 if (error != 0) {
1822 mutex_exit(&lp->lp_lock);
1823 kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1824 (*uao->pgops->pgo_detach)(uao);
1825 return error;
1826 }
1827 error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1828 lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1829 if (error != 0) {
1830 mutex_exit(&lp->lp_lock);
1831 uvm_unmap(kernel_map, lcp->lcp_kaddr,
1832 lcp->lcp_kaddr + PAGE_SIZE);
1833 kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1834 return error;
1835 }
1836 /* Prepare the page descriptor and link into the list. */
1837 lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1838 lp->lp_cur += PAGE_SIZE;
1839 lcp->lcp_nfree = LWPCTL_PER_PAGE;
1840 lcp->lcp_rotor = 0;
1841 memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1842 TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1843 }
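	/*
	 * Scan the bitmap for a word with a free slot, starting at the
	 * rotor.  A set bit means the slot is free; each word covers 32
	 * slots, so the slot index within the page is (word << 5) + bit.
	 */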
1844 for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1845 if (++i >= LWPCTL_BITMAP_ENTRIES)
1846 i = 0;
1847 }
1848 bit = ffs(lcp->lcp_bitmap[i]) - 1;
1849 lcp->lcp_bitmap[i] ^= (1U << bit);
1850 lcp->lcp_rotor = i;
1851 lcp->lcp_nfree--;
1852 l->l_lcpage = lcp;
1853 offset = (i << 5) + bit;
1854 l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1855 *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1856 mutex_exit(&lp->lp_lock);
1857
1858 KPREEMPT_DISABLE(l);
1859 l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1860 KPREEMPT_ENABLE(l);
1861
1862 return 0;
1863 }
1864
1865 /*
1866 * Free an lwpctl structure back to the per-process list.
1867 */
1868 void
1869 lwp_ctl_free(lwp_t *l)
1870 {
1871 struct proc *p = l->l_proc;
1872 lcproc_t *lp;
1873 lcpage_t *lcp;
1874 u_int map, offset;
1875
1876 /* don't free a lwp context we borrowed for vfork */
1877 if (p->p_lflag & PL_PPWAIT) {
1878 l->l_lwpctl = NULL;
1879 return;
1880 }
1881
1882 lp = p->p_lwpctl;
1883 KASSERT(lp != NULL);
1884
1885 lcp = l->l_lcpage;
1886 offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1887 KASSERT(offset < LWPCTL_PER_PAGE);
1888
1889 mutex_enter(&lp->lp_lock);
1890 lcp->lcp_nfree++;
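	/* Mark the slot free again: word index = offset / 32, bit = offset % 32. */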
1891 map = offset >> 5;
1892 lcp->lcp_bitmap[map] |= (1U << (offset & 31));
1893 if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1894 lcp->lcp_rotor = map;
1895 if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1896 TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1897 TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1898 }
1899 mutex_exit(&lp->lp_lock);
1900 }
1901
1902 /*
1903 * Process is exiting; tear down lwpctl state. This can only be safely
1904 * called by the last LWP in the process.
1905 */
1906 void
1907 lwp_ctl_exit(void)
1908 {
1909 lcpage_t *lcp, *next;
1910 lcproc_t *lp;
1911 proc_t *p;
1912 lwp_t *l;
1913
1914 l = curlwp;
1915 l->l_lwpctl = NULL;
1916 l->l_lcpage = NULL;
1917 p = l->l_proc;
1918 lp = p->p_lwpctl;
1919
1920 KASSERT(lp != NULL);
1921 KASSERT(p->p_nlwps == 1);
1922
1923 for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
1924 next = TAILQ_NEXT(lcp, lcp_chain);
1925 uvm_unmap(kernel_map, lcp->lcp_kaddr,
1926 lcp->lcp_kaddr + PAGE_SIZE);
1927 kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1928 }
1929
1930 if (lp->lp_uao != NULL) {
1931 uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
1932 lp->lp_uva + LWPCTL_UAREA_SZ);
1933 }
1934
1935 mutex_destroy(&lp->lp_lock);
1936 kmem_free(lp, sizeof(*lp));
1937 p->p_lwpctl = NULL;
1938 }
1939
1940 /*
1941 * Return the current LWP's "preemption counter". Used to detect
1942 * preemption across operations that can tolerate preemption without
1943 * crashing, but which may generate incorrect results if preempted.
1944 */
1945 uint64_t
1946 lwp_pctr(void)
1947 {
1948
1949 return curlwp->l_ncsw;
1950 }
1951
1952 /*
1953 * Set an LWP's private data pointer.
1954 */
1955 int
1956 lwp_setprivate(struct lwp *l, void *ptr)
1957 {
1958 int error = 0;
1959
1960 l->l_private = ptr;
1961 #ifdef __HAVE_CPU_LWP_SETPRIVATE
1962 error = cpu_lwp_setprivate(l, ptr);
1963 #endif
1964 return error;
1965 }
1966
1967 #if defined(DDB)
1968 #include <machine/pcb.h>
1969
1970 void
1971 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1972 {
1973 lwp_t *l;
1974
1975 LIST_FOREACH(l, &alllwp, l_list) {
1976 uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
1977
1978 if (addr < stack || stack + KSTACK_SIZE <= addr) {
1979 continue;
1980 }
1981 (*pr)("%p is %p+%zu, LWP %p's stack\n",
1982 (void *)addr, (void *)stack,
1983 (size_t)(addr - stack), l);
1984 }
1985 }
1986 #endif /* defined(DDB) */
1987