kern_lwp.c revision 1.40.2.3
1 1.40.2.3 ad /* $NetBSD: kern_lwp.c,v 1.40.2.3 2006/10/24 21:10:21 ad Exp $ */
2 1.2 thorpej
3 1.2 thorpej /*-
4 1.40.2.2 ad * Copyright (c) 2001, 2006 The NetBSD Foundation, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.40.2.2 ad * by Nathan J. Williams, and Andrew Doran.
9 1.2 thorpej *
10 1.2 thorpej * Redistribution and use in source and binary forms, with or without
11 1.2 thorpej * modification, are permitted provided that the following conditions
12 1.2 thorpej * are met:
13 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
14 1.2 thorpej * notice, this list of conditions and the following disclaimer.
15 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
17 1.2 thorpej * documentation and/or other materials provided with the distribution.
18 1.2 thorpej * 3. All advertising materials mentioning features or use of this software
19 1.2 thorpej * must display the following acknowledgement:
20 1.2 thorpej * This product includes software developed by the NetBSD
21 1.2 thorpej * Foundation, Inc. and its contributors.
22 1.2 thorpej * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.2 thorpej * contributors may be used to endorse or promote products derived
24 1.2 thorpej * from this software without specific prior written permission.
25 1.2 thorpej *
26 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.2 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
37 1.2 thorpej */
38 1.9 lukem
39 1.9 lukem #include <sys/cdefs.h>
40 1.40.2.3 ad __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.40.2.3 2006/10/24 21:10:21 ad Exp $");
41 1.8 martin
42 1.8 martin #include "opt_multiprocessor.h"
43 1.2 thorpej
44 1.2 thorpej #include <sys/param.h>
45 1.2 thorpej #include <sys/systm.h>
46 1.2 thorpej #include <sys/pool.h>
47 1.2 thorpej #include <sys/proc.h>
48 1.2 thorpej #include <sys/syscallargs.h>
49 1.37 ad #include <sys/kauth.h>
50 1.40.2.2 ad #include <sys/sleepq.h>
51 1.40.2.2 ad #include <sys/lockdebug.h>
52 1.2 thorpej
53 1.2 thorpej #include <uvm/uvm_extern.h>
54 1.2 thorpej
55 1.40.2.1 ad struct lwplist alllwp;
56 1.40.2.1 ad kmutex_t alllwp_mutex;
57 1.40.2.2 ad kmutex_t lwp_mutex;
58 1.2 thorpej
59 1.2 thorpej #define LWP_DEBUG
60 1.2 thorpej
61 1.2 thorpej #ifdef LWP_DEBUG
62 1.2 thorpej int lwp_debug = 0;
63 1.2 thorpej #define DPRINTF(x) if (lwp_debug) printf x
64 1.2 thorpej #else
65 1.2 thorpej #define DPRINTF(x)
66 1.2 thorpej #endif
67 1.2 thorpej
68 1.40.2.2 ad /*
69 1.40.2.2 ad * Halt or suspend an LWP, moving it into the given halted or suspended state.
70 1.40.2.2 ad *
71 1.40.2.2 ad * Must be called with p_smutex held, and the LWP locked. Will unlock the
72 1.40.2.2 ad * LWP before return.
73 1.40.2.2 ad */
74 1.2 thorpej int
75 1.40.2.2 ad lwp_halt(struct lwp *curl, struct lwp *t, int state)
76 1.2 thorpej {
77 1.40.2.2 ad struct proc *p = t->l_proc;
78 1.40.2.2 ad int error;
79 1.2 thorpej
80 1.40.2.2 ad LOCK_ASSERT(mutex_owned(&p->p_smutex));
81 1.40.2.2 ad LOCK_ASSERT(lwp_locked(t, NULL));
82 1.2 thorpej
83 1.40.2.2 ad KASSERT(curl != t || curl->l_stat == LSONPROC);
84 1.17 manu
85 1.40.2.2 ad /*
86 1.40.2.2 ad * If the current LWP has been told to exit, we must not suspend anyone
87 1.40.2.2 ad * else or deadlock could occur. We won't return to userspace.
88 1.40.2.2 ad */
89 1.40.2.2 ad if ((curl->l_flag & (L_WEXIT | L_WCORE)) != 0)
90 1.40.2.2 ad return (EDEADLK);
91 1.2 thorpej
92 1.40.2.2 ad error = 0;
93 1.17 manu
94 1.40.2.2 ad switch (t->l_stat) {
95 1.40.2.2 ad case LSRUN:
96 1.35 yamt p->p_nrlwps--;
97 1.40.2.2 ad t->l_stat = state;
98 1.40.2.2 ad remrunqueue(t);
99 1.40.2.2 ad break;
100 1.40.2.2 ad case LSONPROC:
101 1.40.2.2 ad p->p_nrlwps--;
102 1.40.2.2 ad t->l_stat = state;
103 1.40.2.2 ad if (t != curl) {
104 1.40.2.2 ad #ifdef MULTIPROCESSOR
105 1.40.2.2 ad cpu_need_resched(t->l_cpu);
106 1.40.2.2 ad #elif defined(DIAGNOSTIC)
107 1.40.2.2 ad panic("lwp_halt: onproc but not self");
108 1.40.2.2 ad #endif
109 1.2 thorpej }
110 1.40.2.2 ad break;
111 1.40.2.2 ad case LSSLEEP:
112 1.40.2.2 ad p->p_nrlwps--;
113 1.40.2.2 ad /* FALLTHROUGH */
114 1.40.2.2 ad case LSSUSPENDED:
115 1.40.2.2 ad case LSSTOP:
116 1.40.2.2 ad /* XXXAD What about restarting stopped -> suspended?? */
117 1.40.2.2 ad t->l_stat = state;
118 1.40.2.2 ad break;
119 1.40.2.2 ad case LSIDL:
120 1.40.2.2 ad case LSZOMB:
121 1.40.2.2 ad error = EINTR; /* It's what Solaris does..... */
122 1.40.2.2 ad break;
123 1.2 thorpej }
124 1.2 thorpej
125 1.40.2.3 ad lwp_setlock_unlock(t, &lwp_mutex);
126 1.2 thorpej
127 1.40.2.2 ad return (error);
128 1.2 thorpej }
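/*
 * Illustrative usage sketch (not part of this file, compiled out): how a
 * suspend-style caller might drive lwp_halt().  The target is found and
 * locked with lwp_byid() under p_smutex; lwp_halt() consumes the LWP lock.
 * The wrapper name "suspend_one_lwp" is hypothetical.
 */
#if 0
static int
suspend_one_lwp(struct lwp *curl, struct proc *p, lwpid_t lid)
{
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_byid(p, lid)) == NULL) {
		/* No such live LWP in this process. */
		mutex_exit(&p->p_smutex);
		return (ESRCH);
	}
	/* lwp_halt() unlocks the target LWP before returning. */
	error = lwp_halt(curl, t, LSSUSPENDED);
	mutex_exit(&p->p_smutex);
	return (error);
}
#endif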
129 1.2 thorpej
130 1.40.2.2 ad /*
131 1.40.2.2 ad * Restart a suspended LWP.
132 1.40.2.2 ad *
133 1.40.2.2 ad * Must be called with p_smutex held, and the LWP locked. Will unlock the
134 1.40.2.2 ad * LWP before return.
135 1.40.2.2 ad */
136 1.2 thorpej void
137 1.2 thorpej lwp_continue(struct lwp *l)
138 1.2 thorpej {
139 1.2 thorpej
140 1.40.2.2 ad LOCK_ASSERT(mutex_owned(&l->l_proc->p_smutex));
141 1.40.2.2 ad LOCK_ASSERT(mutex_owned(l->l_mutex));
142 1.40.2.2 ad
143 1.2 thorpej DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
144 1.2 thorpej l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
145 1.2 thorpej l->l_wchan));
146 1.2 thorpej
147 1.40.2.2 ad if (l->l_stat != LSSUSPENDED) {
148 1.40.2.2 ad lwp_unlock(l);
149 1.2 thorpej return;
150 1.40.2.2 ad }
151 1.2 thorpej
152 1.40.2.2 ad if (l->l_wchan == NULL) {
153 1.40.2.2 ad /*
154 1.40.2.2 ad * LWP was runnable before being suspended. setrunnable()
155 1.40.2.2 ad * will release the lock.
156 1.40.2.2 ad */
157 1.2 thorpej setrunnable(l);
158 1.2 thorpej } else {
159 1.2 thorpej /* LWP was sleeping before being suspended. */
160 1.40.2.2 ad l->l_proc->p_nrlwps++;
161 1.2 thorpej l->l_stat = LSSLEEP;
162 1.40.2.2 ad lwp_unlock(l);
163 1.2 thorpej }
164 1.2 thorpej }
165 1.2 thorpej
166 1.40.2.2 ad /*
167 1.40.2.2 ad * Wait for an LWP within the current process to exit. If 'lid' is
168 1.40.2.2 ad * non-zero, we are waiting for a specific LWP.
169 1.40.2.2 ad *
170 1.40.2.2 ad * Must be called with p->p_smutex held.
171 1.40.2.2 ad */
172 1.2 thorpej int
173 1.2 thorpej lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
174 1.2 thorpej {
175 1.2 thorpej struct proc *p = l->l_proc;
176 1.40.2.2 ad struct lwp *l2;
177 1.19 jdolecek int nfound, error, wpri;
178 1.18 jdolecek static const char waitstr1[] = "lwpwait";
179 1.18 jdolecek static const char waitstr2[] = "lwpwait2";
180 1.2 thorpej
181 1.2 thorpej DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
182 1.2 thorpej p->p_pid, l->l_lid, lid));
183 1.2 thorpej
184 1.40.2.2 ad LOCK_ASSERT(mutex_owned(&p->p_smutex));
185 1.2 thorpej
186 1.40.2.2 ad /*
187 1.40.2.2 ad * Check for deadlock:
188 1.40.2.2 ad *
189 1.40.2.2 ad * 1) All other LWPs are waiting for exits or are suspended.
190 1.40.2.2 ad * 2) We are trying to wait on ourselves.
191 1.40.2.2 ad *
192 1.40.2.2 ad * XXX we'd like to check for a cycle of waiting LWPs (specific LID
193 1.40.2.2 ad * waits, not any-LWP waits) and detect that sort of deadlock, but
194 1.40.2.2 ad * we don't have a good place to store the lwp that is being waited
195 1.40.2.2 ad * for. wchan is already filled with &p->p_nlwps, and putting the
196 1.40.2.2 ad * lwp address in there for deadlock tracing would require exiting
197 1.40.2.2 ad * LWPs to call wakeup on both their own address and &p->p_nlwps, to
198 1.40.2.2 ad * get threads sleeping on any LWP exiting.
199 1.40.2.2 ad */
200 1.40.2.2 ad if (lwp_lastlive(p->p_nlwpwait) || lid == l->l_lid)
201 1.40.2.2 ad return (EDEADLK);
202 1.40.2.2 ad
203 1.40.2.2 ad p->p_nlwpwait++;
204 1.40.2.2 ad wpri = PWAIT;
205 1.40.2.2 ad if ((flags & LWPWAIT_EXITCONTROL) == 0)
206 1.40.2.2 ad wpri |= PCATCH;
207 1.2 thorpej loop:
208 1.2 thorpej nfound = 0;
209 1.2 thorpej LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
210 1.2 thorpej if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
211 1.2 thorpej ((lid != 0) && (lid != l2->l_lid)))
212 1.2 thorpej continue;
213 1.2 thorpej nfound++;
214 1.40.2.2 ad if (l2->l_stat != LSZOMB)
215 1.40.2.2 ad continue;
216 1.2 thorpej
217 1.40.2.2 ad if (departed)
218 1.40.2.2 ad *departed = l2->l_lid;
219 1.40.2.2 ad
220 1.40.2.2 ad LIST_REMOVE(l2, l_sibling);
221 1.40.2.2 ad p->p_nlwps--;
222 1.40.2.2 ad p->p_nzlwps--;
223 1.40.2.2 ad p->p_nlwpwait--;
224 1.40.2.2 ad /* XXX decrement limits */
225 1.40.2.2 ad pool_put(&lwp_pool, l2);
226 1.40.2.2 ad return (0);
227 1.2 thorpej }
228 1.2 thorpej
229 1.40.2.2 ad if (nfound == 0) {
230 1.40.2.2 ad p->p_nlwpwait--;
231 1.2 thorpej return (ESRCH);
232 1.40.2.2 ad }
233 1.2 thorpej
234 1.40.2.2 ad if ((error = mtsleep(&p->p_nlwps, wpri,
235 1.40.2.2 ad (lid != 0) ? waitstr1 : waitstr2, 0, &p->p_smutex)) != 0)
236 1.2 thorpej return (error);
237 1.2 thorpej
238 1.2 thorpej goto loop;
239 1.2 thorpej }
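/*
 * Illustrative usage sketch (not part of this file, compiled out): a minimal
 * _lwp_wait()-style caller.  lwp_wait1() expects p_smutex to be held on
 * entry and returns with it still held; it drops and retakes it internally
 * across the sleep via mtsleep().  The wrapper name is hypothetical.
 */
#if 0
static int
wait_for_lwp(struct lwp *l, lwpid_t lid, lwpid_t *departedp)
{
	struct proc *p = l->l_proc;
	int error;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, lid, departedp, 0);
	mutex_exit(&p->p_smutex);
	return (error);
}
#endif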
240 1.2 thorpej
241 1.40.2.2 ad /*
242 1.40.2.2 ad * Create a new LWP within process 'p2', using LWP 'l1' as a template.
243 1.40.2.2 ad * The new LWP is created in state LSIDL and must be set running,
244 1.40.2.2 ad * suspended, or stopped by the caller.
245 1.40.2.2 ad */
246 1.2 thorpej int
247 1.2 thorpej newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
248 1.2 thorpej int flags, void *stack, size_t stacksize,
249 1.2 thorpej void (*func)(void *), void *arg, struct lwp **rnewlwpp)
250 1.2 thorpej {
251 1.2 thorpej struct lwp *l2;
252 1.2 thorpej
253 1.2 thorpej l2 = pool_get(&lwp_pool, PR_WAITOK);
254 1.2 thorpej
255 1.2 thorpej l2->l_stat = LSIDL;
256 1.2 thorpej l2->l_forw = l2->l_back = NULL;
257 1.2 thorpej l2->l_proc = p2;
258 1.2 thorpej
259 1.2 thorpej memset(&l2->l_startzero, 0,
260 1.2 thorpej (unsigned) ((caddr_t)&l2->l_endzero -
261 1.2 thorpej (caddr_t)&l2->l_startzero));
262 1.2 thorpej memcpy(&l2->l_startcopy, &l1->l_startcopy,
263 1.2 thorpej (unsigned) ((caddr_t)&l2->l_endcopy -
264 1.2 thorpej (caddr_t)&l2->l_startcopy));
265 1.2 thorpej
266 1.2 thorpej #if !defined(MULTIPROCESSOR)
267 1.2 thorpej /*
268 1.2 thorpej * In the single-processor case, all processes will always run
269 1.2 thorpej * on the same CPU. So, initialize the child's CPU to the parent's
270 1.2 thorpej * now. In the multiprocessor case, the child's CPU will be
271 1.2 thorpej * initialized in the low-level context switch code when the
272 1.2 thorpej * process runs.
273 1.2 thorpej */
274 1.5 matt KASSERT(l1->l_cpu != NULL);
275 1.2 thorpej l2->l_cpu = l1->l_cpu;
276 1.40.2.3 ad l2->l_mutex = &sched_mutex;
277 1.2 thorpej #else
278 1.2 thorpej /*
279 1.24 wiz * Zero the child's CPU pointer so we don't get trash.
280 1.2 thorpej */
281 1.2 thorpej l2->l_cpu = NULL;
282 1.40.2.3 ad l2->l_mutex = &lwp_mutex;
283 1.2 thorpej #endif /* ! MULTIPROCESSOR */
284 1.2 thorpej
285 1.2 thorpej l2->l_flag = inmem ? L_INMEM : 0;
286 1.2 thorpej l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;
287 1.2 thorpej
288 1.40.2.2 ad if (p2->p_flag & P_SYSTEM) {
289 1.40.2.2 ad /*
290 1.40.2.2 ad * Mark it as a system process and not a candidate for
291 1.40.2.2 ad * swapping.
292 1.40.2.2 ad */
293 1.40.2.2 ad l2->l_flag |= L_SYSTEM | L_INMEM;
294 1.40.2.2 ad }
295 1.40.2.2 ad
296 1.37 ad lwp_update_creds(l2);
297 1.2 thorpej callout_init(&l2->l_tsleep_ch);
298 1.40.2.1 ad l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
299 1.40.2.3 ad l2->l_omutex = NULL;
300 1.2 thorpej
301 1.2 thorpej if (rnewlwpp != NULL)
302 1.2 thorpej *rnewlwpp = l2;
303 1.2 thorpej
304 1.36 yamt l2->l_addr = UAREA_TO_USER(uaddr);
305 1.2 thorpej uvm_lwp_fork(l1, l2, stack, stacksize, func,
306 1.2 thorpej (arg != NULL) ? arg : l2);
307 1.2 thorpej
308 1.40.2.2 ad mutex_enter(&p2->p_smutex);
309 1.40.2.3 ad
310 1.40.2.3 ad if ((p2->p_flag & P_SA) == 0) {
311 1.40.2.3 ad l2->l_sigpend = &l2->l_sigstore.ss_pend;
312 1.40.2.3 ad l2->l_sigmask = &l2->l_sigstore.ss_mask;
313 1.40.2.3 ad l2->l_sigstk = &l2->l_sigstore.ss_stk;
314 1.40.2.3 ad *l2->l_sigmask = *l1->l_sigmask;	/* inherit the mask by value */
315 1.40.2.3 ad CIRCLEQ_INIT(&l2->l_sigpend->sp_info);
316 1.40.2.3 ad sigemptyset(&l2->l_sigpend->sp_set);
317 1.40.2.3 ad } else {
318 1.40.2.3 ad l2->l_sigpend = &p2->p_sigstore.ss_pend;
319 1.40.2.3 ad l2->l_sigmask = &p2->p_sigstore.ss_mask;
320 1.40.2.3 ad l2->l_sigstk = &p2->p_sigstore.ss_stk;
321 1.40.2.3 ad }
322 1.40.2.3 ad
323 1.2 thorpej l2->l_lid = ++p2->p_nlwpid;
324 1.2 thorpej LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
325 1.2 thorpej p2->p_nlwps++;
326 1.40.2.3 ad
327 1.40.2.2 ad mutex_exit(&p2->p_smutex);
328 1.2 thorpej
329 1.40.2.1 ad mutex_enter(&alllwp_mutex);
330 1.2 thorpej LIST_INSERT_HEAD(&alllwp, l2, l_list);
331 1.40.2.1 ad mutex_exit(&alllwp_mutex);
332 1.2 thorpej
333 1.40.2.3 ad /* XXXAD verify */
334 1.16 manu if (p2->p_emul->e_lwp_fork)
335 1.16 manu (*p2->p_emul->e_lwp_fork)(l1, l2);
336 1.16 manu
337 1.2 thorpej return (0);
338 1.2 thorpej }
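/*
 * Illustrative usage sketch (not part of this file, compiled out): creating
 * an LWP with newlwp().  The u-area allocation call (uvm_uarea_alloc) and
 * its return convention are assumptions based on the UVM interfaces of this
 * era; newlwp() itself takes p_smutex, so the caller must not hold it.  The
 * wrapper name is hypothetical and error handling for the u-area is omitted.
 */
#if 0
static int
create_lwp(struct lwp *l1, struct proc *p, void (*func)(void *), void *arg,
    struct lwp **newp)
{
	vaddr_t uaddr;
	boolean_t inmem;
	int error;

	inmem = uvm_uarea_alloc(&uaddr);	/* assumed interface */
	if (uaddr == 0)
		return (ENOMEM);

	/* No alternate user stack supplied. */
	error = newlwp(l1, p, uaddr, inmem, 0, NULL, 0, func, arg, newp);
	if (error != 0)
		return (error);

	/*
	 * The new LWP is left in LSIDL; making it LSRUN, LSSUSPENDED or
	 * LSSTOP is the caller's job, per the comment above newlwp().
	 */
	return (0);
}
#endif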
339 1.2 thorpej
340 1.2 thorpej /*
341 1.40.2.2 ad * Exit an LWP. This will call cpu_exit, which will call cpu_switch,
342 1.40.2.2 ad * so it can only be used meaningfully if you're willing to switch away.
343 1.2 thorpej * Calling with l!=curlwp would be weird.
344 1.2 thorpej */
345 1.2 thorpej void
346 1.2 thorpej lwp_exit(struct lwp *l)
347 1.2 thorpej {
348 1.2 thorpej struct proc *p = l->l_proc;
349 1.2 thorpej
350 1.2 thorpej DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
351 1.40.2.2 ad DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
352 1.2 thorpej
353 1.16 manu if (p->p_emul->e_lwp_exit)
354 1.16 manu (*p->p_emul->e_lwp_exit)(l);
355 1.16 manu
356 1.2 thorpej /*
357 1.40.2.2 ad * If we are the last live LWP in a process, we need to exit the
358 1.40.2.2 ad * entire process. We do so with an exit status of zero, because
359 1.40.2.2 ad * it's a "controlled" exit, and because that's what Solaris does.
360 1.40.2.2 ad *
361 1.40.2.2 ad * We are not quite a zombie yet, but for accounting purposes we
362 1.40.2.2 ad * must increment the count of zombies here.
363 1.2 thorpej */
364 1.40.2.2 ad mutex_enter(&p->p_smutex);
365 1.40.2.2 ad p->p_nzlwps++;
366 1.40.2.2 ad if ((p->p_nlwps - p->p_nzlwps) == (p->p_stat == LSONPROC)) {
367 1.2 thorpej DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
368 1.2 thorpej p->p_pid, l->l_lid));
369 1.2 thorpej exit1(l, 0);
370 1.19 jdolecek /* NOTREACHED */
371 1.2 thorpej }
372 1.40.2.2 ad mutex_exit(&p->p_smutex);
373 1.2 thorpej
374 1.40.2.2 ad /*
375 1.40.2.2 ad * Remove the LWP from the global list, from the parent process and
376 1.40.2.2 ad * then mark it as dead. Nothing should be able to find or update
377 1.40.2.2 ad * it past this point.
378 1.40.2.2 ad */
379 1.40.2.1 ad mutex_enter(&alllwp_mutex);
380 1.2 thorpej LIST_REMOVE(l, l_list);
381 1.40.2.1 ad mutex_exit(&alllwp_mutex);
382 1.2 thorpej
383 1.40.2.2 ad /*
384 1.40.2.2 ad * Mark us as dead (almost a zombie) and bin any pending signals
385 1.40.2.2 ad * that remain undelivered.
386 1.40.2.2 ad *
387 1.40.2.2 ad * XXX We should put whole-process signals back onto the process's
388 1.40.2.2 ad * pending set and find someone else to deliver them.
389 1.40.2.2 ad */
390 1.40.2.2 ad mutex_enter(&p->p_smutex);
391 1.40.2.2 ad lwp_lock(l);
392 1.40.2.2 ad if ((l->l_flag & L_DETACHED) != 0) {
393 1.40.2.2 ad LIST_REMOVE(l, l_sibling);
394 1.40.2.2 ad p->p_nlwps--;
395 1.40.2.2 ad curlwp = NULL;
396 1.40.2.2 ad l->l_proc = NULL;
397 1.40.2.2 ad }
398 1.40.2.2 ad l->l_stat = LSDEAD;
399 1.40.2.3 ad lwp_setlock_unlock(l, &lwp_mutex);
400 1.40.2.3 ad if ((p->p_flag & P_SA) == 0)
401 1.40.2.3 ad sigclear(l->l_sigpend, NULL);
402 1.40.2.2 ad mutex_exit(&p->p_smutex);
403 1.40.2.1 ad
404 1.40.2.2 ad /*
405 1.40.2.2 ad * Release our cached credentials and collate accounting flags.
406 1.40.2.2 ad */
407 1.37 ad kauth_cred_free(l->l_cred);
408 1.40.2.2 ad mutex_enter(&p->p_crmutex);
409 1.37 ad p->p_acflag |= l->l_acflag;
410 1.40.2.2 ad mutex_exit(&p->p_crmutex);
411 1.40.2.2 ad
412 1.40.2.2 ad /*
413 1.40.2.2 ad * Verify that we hold no locks other than the kernel mutex, and
414 1.40.2.2 ad * release our turnstile. We can no longer acquire sleep locks
415 1.40.2.2 ad * past this point.
416 1.40.2.2 ad */
417 1.40.2.2 ad LOCKDEBUG_BARRIER(&kernel_mutex, 0);
418 1.40.2.2 ad pool_cache_put(&turnstile_cache, l->l_ts);
419 1.37 ad
420 1.40.2.2 ad /*
421 1.40.2.2 ad * Free MD LWP resources
422 1.40.2.2 ad */
423 1.19 jdolecek #ifndef __NO_CPU_LWP_FREE
424 1.19 jdolecek cpu_lwp_free(l, 0);
425 1.19 jdolecek #endif
426 1.31 yamt pmap_deactivate(l);
427 1.31 yamt
428 1.40.2.2 ad /*
429 1.40.2.2 ad * Release the kernel lock, and switch away into oblivion.
430 1.40.2.2 ad */
431 1.2 thorpej KERNEL_PROC_UNLOCK(l);
432 1.19 jdolecek cpu_exit(l);
433 1.2 thorpej }
434 1.2 thorpej
435 1.19 jdolecek /*
436 1.19 jdolecek * We are called from cpu_exit() once it is safe to schedule the
437 1.19 jdolecek * dead process's resources to be freed (i.e., once we've switched to
438 1.19 jdolecek * the idle PCB for the current CPU).
439 1.19 jdolecek *
440 1.19 jdolecek * NOTE: One must be careful with locking in this routine. It's
441 1.19 jdolecek * called from a critical section in machine-dependent code, so
442 1.19 jdolecek * we should refrain from changing any interrupt state.
443 1.19 jdolecek */
444 1.2 thorpej void
445 1.2 thorpej lwp_exit2(struct lwp *l)
446 1.2 thorpej {
447 1.2 thorpej
448 1.22 yamt KERNEL_LOCK(LK_EXCLUSIVE);
449 1.40.2.2 ad
450 1.19 jdolecek /*
451 1.19 jdolecek * Free the VM resources we're still holding on to.
452 1.19 jdolecek */
453 1.19 jdolecek uvm_lwp_exit(l);
454 1.19 jdolecek
455 1.19 jdolecek if (l->l_flag & L_DETACHED) {
456 1.19 jdolecek /* Nobody waits for detached LWPs. */
457 1.19 jdolecek pool_put(&lwp_pool, l);
458 1.22 yamt KERNEL_UNLOCK();
459 1.19 jdolecek } else {
460 1.22 yamt KERNEL_UNLOCK();
461 1.40.2.2 ad l->l_stat = LSZOMB;
462 1.40.2.3 ad mb_write();
463 1.40.2.2 ad wakeup(&l->l_proc->p_nlwps);
464 1.19 jdolecek }
465 1.2 thorpej }
466 1.2 thorpej
467 1.2 thorpej /*
468 1.2 thorpej * Pick a LWP to represent the process for those operations which
469 1.2 thorpej * want information about a "process" that is actually associated
470 1.2 thorpej * with a LWP.
471 1.40.2.2 ad *
472 1.40.2.2 ad * Must be called with p->p_smutex held, and will return the LWP locked.
473 1.40.2.2 ad * If 'locking' is false, no locking or lock checks are performed. This
474 1.40.2.2 ad * is intended for use by DDB.
475 1.2 thorpej */
476 1.2 thorpej struct lwp *
477 1.40.2.2 ad proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
478 1.2 thorpej {
479 1.2 thorpej struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
480 1.27 matt struct lwp *signalled;
481 1.40.2.2 ad int cnt;
482 1.40.2.2 ad
483 1.40.2.2 ad if (locking)
484 1.40.2.2 ad LOCK_ASSERT(mutex_owned(&p->p_smutex));
485 1.2 thorpej
486 1.2 thorpej /* Trivial case: only one LWP */
487 1.40.2.2 ad if (p->p_nlwps == 1) {
488 1.40.2.2 ad l = LIST_FIRST(&p->p_lwps);
489 1.40.2.2 ad if (nrlwps)
490 1.40.2.2 ad *nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
491 1.40.2.3 ad if (locking)
492 1.40.2.3 ad lwp_lock(l);
493 1.40.2.2 ad return l;
494 1.40.2.2 ad }
495 1.2 thorpej
496 1.40.2.2 ad cnt = 0;
497 1.2 thorpej switch (p->p_stat) {
498 1.2 thorpej case SSTOP:
499 1.2 thorpej case SACTIVE:
500 1.2 thorpej /* Pick the most live LWP */
501 1.2 thorpej onproc = running = sleeping = stopped = suspended = NULL;
502 1.27 matt signalled = NULL;
503 1.2 thorpej LIST_FOREACH(l, &p->p_lwps, l_sibling) {
504 1.40.2.2 ad if (locking)
505 1.40.2.2 ad lwp_lock(l);
506 1.27 matt if (l->l_lid == p->p_sigctx.ps_lwp)
507 1.27 matt signalled = l;
508 1.2 thorpej switch (l->l_stat) {
509 1.2 thorpej case LSONPROC:
510 1.2 thorpej onproc = l;
511 1.40.2.2 ad cnt++;
512 1.2 thorpej break;
513 1.2 thorpej case LSRUN:
514 1.2 thorpej running = l;
515 1.40.2.2 ad cnt++;
516 1.2 thorpej break;
517 1.2 thorpej case LSSLEEP:
518 1.2 thorpej sleeping = l;
519 1.2 thorpej break;
520 1.2 thorpej case LSSTOP:
521 1.2 thorpej stopped = l;
522 1.2 thorpej break;
523 1.2 thorpej case LSSUSPENDED:
524 1.2 thorpej suspended = l;
525 1.2 thorpej break;
526 1.2 thorpej }
527 1.40.2.2 ad if (locking)
528 1.40.2.2 ad lwp_unlock(l);
529 1.2 thorpej }
530 1.40.2.2 ad if (nrlwps)
531 1.40.2.2 ad *nrlwps = cnt;
532 1.27 matt if (signalled)
533 1.40.2.2 ad l = signalled;
534 1.40.2.2 ad else if (onproc)
535 1.40.2.2 ad l = onproc;
536 1.40.2.2 ad else if (running)
537 1.40.2.2 ad l = running;
538 1.40.2.2 ad else if (sleeping)
539 1.40.2.2 ad l = sleeping;
540 1.40.2.2 ad else if (stopped)
541 1.40.2.2 ad l = stopped;
542 1.40.2.2 ad else if (suspended)
543 1.40.2.2 ad l = suspended;
544 1.40.2.2 ad else
545 1.40.2.2 ad break;
546 1.40.2.2 ad if (locking)
547 1.40.2.2 ad lwp_lock(l);
548 1.40.2.2 ad return l;
549 1.2 thorpej case SZOMB:
550 1.2 thorpej /* Doesn't really matter... */
551 1.40.2.2 ad if (nrlwps)
552 1.40.2.2 ad *nrlwps = 0;
553 1.40.2.2 ad l = LIST_FIRST(&p->p_lwps);
554 1.40.2.2 ad if (locking)
555 1.40.2.2 ad lwp_lock(l);
556 1.40.2.2 ad return l;
557 1.2 thorpej #ifdef DIAGNOSTIC
558 1.2 thorpej case SIDL:
559 1.40.2.2 ad if (locking)
560 1.40.2.2 ad mutex_exit(&p->p_smutex);
561 1.2 thorpej /* We have more than one LWP and we're in SIDL?
562 1.2 thorpej * How'd that happen?
563 1.2 thorpej */
564 1.40.2.2 ad panic("Too many LWPs in SIDL process %d (%s)",
565 1.40.2.2 ad p->p_pid, p->p_comm);
566 1.2 thorpej default:
567 1.40.2.2 ad if (locking)
568 1.40.2.2 ad mutex_exit(&p->p_smutex);
569 1.2 thorpej panic("Process %d (%s) in unknown state %d",
570 1.2 thorpej p->p_pid, p->p_comm, p->p_stat);
571 1.2 thorpej #endif
572 1.2 thorpej }
573 1.2 thorpej
574 1.40.2.2 ad if (locking)
575 1.40.2.2 ad mutex_exit(&p->p_smutex);
576 1.2 thorpej panic("proc_representative_lwp: couldn't find a lwp for process"
577 1.2 thorpej " %d (%s)", p->p_pid, p->p_comm);
578 1.2 thorpej /* NOTREACHED */
579 1.2 thorpej return NULL;
580 1.2 thorpej }
581 1.37 ad
582 1.37 ad /*
583 1.40.2.2 ad * Look up a live LWP within the specified process, and return it locked.
584 1.40.2.2 ad *
585 1.40.2.2 ad * Must be called with p->p_smutex held.
586 1.40.2.2 ad */
587 1.40.2.2 ad struct lwp *
588 1.40.2.2 ad lwp_byid(struct proc *p, int id)
589 1.40.2.2 ad {
590 1.40.2.2 ad struct lwp *l;
591 1.40.2.2 ad
592 1.40.2.2 ad LOCK_ASSERT(mutex_owned(&p->p_smutex));
593 1.40.2.2 ad
594 1.40.2.2 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
595 1.40.2.2 ad if (l->l_lid == id)
596 1.40.2.2 ad break;
597 1.40.2.2 ad }
598 1.40.2.2 ad
599 1.40.2.2 ad if (l != NULL) {
600 1.40.2.2 ad lwp_lock(l);
601 1.40.2.2 ad if (l->l_stat == LSIDL || l->l_stat == LSZOMB ||
602 1.40.2.2 ad l->l_stat == LSDEAD) {
603 1.40.2.2 ad lwp_unlock(l);
604 1.40.2.2 ad l = NULL;
605 1.40.2.2 ad }
606 1.40.2.2 ad }
607 1.40.2.2 ad
608 1.40.2.2 ad return l;
609 1.40.2.2 ad }
610 1.40.2.2 ad
611 1.40.2.2 ad /*
612 1.37 ad * Update an LWP's cached credentials to mirror the process' master copy.
613 1.37 ad *
614 1.37 ad * This happens early in the syscall path, on user trap, and on LWP
615 1.37 ad * creation. A long-running LWP can also voluntarily choose to update
616 1.37 ad * its credentials by calling this routine. This may be called from
617 1.37 ad * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
618 1.37 ad */
619 1.37 ad void
620 1.37 ad lwp_update_creds(struct lwp *l)
621 1.37 ad {
622 1.37 ad kauth_cred_t oc;
623 1.37 ad struct proc *p;
624 1.37 ad
625 1.37 ad p = l->l_proc;
626 1.37 ad oc = l->l_cred;
627 1.37 ad
628 1.40.2.1 ad mutex_enter(&p->p_crmutex);
629 1.37 ad kauth_cred_hold(p->p_cred);
630 1.37 ad l->l_cred = p->p_cred;
631 1.40.2.1 ad mutex_exit(&p->p_crmutex);
632 1.37 ad if (oc != NULL)
633 1.37 ad kauth_cred_free(oc);
634 1.37 ad }
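/*
 * Illustrative sketch (not part of this file, compiled out): the cheap check
 * the comment above attributes to LWP_CACHE_CREDS().  The common case avoids
 * taking p_crmutex entirely; this is the idea, not the macro's actual text.
 */
#if 0
	if (__predict_false(l->l_cred != p->p_cred))
		lwp_update_creds(l);
#endif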
635 1.40.2.2 ad
636 1.40.2.2 ad /*
637 1.40.2.2 ad * Verify that an LWP is locked, and optionally verify that the lock matches
638 1.40.2.2 ad * one we specify.
639 1.40.2.2 ad */
640 1.40.2.2 ad int
641 1.40.2.2 ad lwp_locked(struct lwp *l, kmutex_t *mtx)
642 1.40.2.2 ad {
643 1.40.2.3 ad #ifdef MULTIPROCESSOR
644 1.40.2.3 ad kmutex_t *cur = l->l_mutex;
645 1.40.2.2 ad
646 1.40.2.3 ad return mutex_owned(cur) && (mtx == cur || mtx == NULL);
647 1.40.2.3 ad #else
648 1.40.2.3 ad return mutex_owned(l->l_mutex);
649 1.40.2.3 ad #endif
650 1.40.2.2 ad }
651 1.40.2.2 ad
652 1.40.2.2 ad /*
653 1.40.2.3 ad * Lock an LWP.
654 1.40.2.2 ad */
655 1.40.2.2 ad void
656 1.40.2.3 ad lwp_lock(struct lwp *l)
657 1.40.2.2 ad {
658 1.40.2.3 ad #ifdef MULTIPROCESSOR
659 1.40.2.3 ad kmutex_t *old;
660 1.40.2.3 ad
661 1.40.2.3 ad for (;;) {
662 1.40.2.3 ad mutex_enter(old = l->l_mutex);
663 1.40.2.3 ad
664 1.40.2.3 ad /*
665 1.40.2.3 ad * mutex_enter() will have posted a read barrier. Re-test
666 1.40.2.3 ad * l->l_mutex. If it has changed, we need to try again.
667 1.40.2.3 ad */
668 1.40.2.3 ad if (__predict_true(l->l_mutex == old)) {
669 1.40.2.3 ad LOCK_ASSERT(l->l_omutex == NULL);
670 1.40.2.3 ad return;
671 1.40.2.3 ad }
672 1.40.2.2 ad
673 1.40.2.3 ad mutex_exit(old);
674 1.40.2.3 ad }
675 1.40.2.3 ad #else
676 1.40.2.3 ad mutex_enter(l->l_mutex);
677 1.40.2.3 ad #endif
678 1.40.2.2 ad }
679 1.40.2.2 ad
680 1.40.2.2 ad /*
681 1.40.2.3 ad * Unlock an LWP. If the LWP has been relocked, release the new mutex
682 1.40.2.3 ad * first, then the old mutex.
683 1.40.2.2 ad */
684 1.40.2.2 ad void
685 1.40.2.3 ad lwp_unlock(struct lwp *l)
686 1.40.2.2 ad {
687 1.40.2.3 ad #ifdef MULTIPROCESSOR
688 1.40.2.3 ad kmutex_t *old;
689 1.40.2.3 ad
690 1.40.2.3 ad LOCK_ASSERT(mutex_owned(l->l_mutex));
691 1.40.2.2 ad
692 1.40.2.3 ad if (__predict_true((old = l->l_omutex) == NULL)) {
693 1.40.2.3 ad mutex_exit(l->l_mutex);
694 1.40.2.3 ad return;
695 1.40.2.3 ad }
696 1.40.2.3 ad
697 1.40.2.3 ad l->l_omutex = NULL;
698 1.40.2.3 ad mutex_exit(l->l_mutex);
699 1.40.2.3 ad mutex_exit(old);
700 1.40.2.3 ad #else
701 1.40.2.3 ad LOCK_ASSERT(mutex_owned(l->l_mutex));
702 1.40.2.3 ad
703 1.40.2.3 ad mutex_exit(l->l_mutex);
704 1.40.2.3 ad #endif
705 1.40.2.3 ad }
706 1.40.2.3 ad
707 1.40.2.3 ad /*
708 1.40.2.3 ad * Lend a new mutex to an LWP. Both the old and new mutexes must be held.
709 1.40.2.3 ad */
710 1.40.2.3 ad void
711 1.40.2.3 ad lwp_setlock(struct lwp *l, kmutex_t *new)
712 1.40.2.3 ad {
713 1.40.2.2 ad LOCK_ASSERT(mutex_owned(l->l_mutex));
714 1.40.2.2 ad LOCK_ASSERT(mutex_owned(new));
715 1.40.2.3 ad LOCK_ASSERT(l->l_omutex == NULL);
716 1.40.2.2 ad
717 1.40.2.3 ad #ifdef MULTIPROCESSOR
718 1.40.2.3 ad mb_write();
719 1.40.2.3 ad l->l_mutex = new;
720 1.40.2.3 ad #endif
721 1.40.2.2 ad }
722 1.40.2.2 ad
723 1.40.2.2 ad /*
724 1.40.2.3 ad * Lend a new mutex to an LWP, and release the old mutex. The old mutex
725 1.40.2.3 ad * must be held.
726 1.40.2.3 ad */
727 1.40.2.3 ad void
728 1.40.2.3 ad lwp_setlock_unlock(struct lwp *l, kmutex_t *new)
729 1.40.2.3 ad {
730 1.40.2.3 ad kmutex_t *old;
731 1.40.2.3 ad
732 1.40.2.3 ad LOCK_ASSERT(mutex_owned(l->l_mutex));
733 1.40.2.3 ad LOCK_ASSERT(l->l_omutex == NULL);
734 1.40.2.3 ad
735 1.40.2.3 ad old = l->l_mutex;
736 1.40.2.3 ad #ifdef MULTIPROCESSOR
737 1.40.2.3 ad mb_write();
738 1.40.2.3 ad l->l_mutex = new;
739 1.40.2.3 ad #endif
740 1.40.2.3 ad mutex_exit(old);
741 1.40.2.3 ad }
742 1.40.2.3 ad
743 1.40.2.3 ad /*
744 1.40.2.3 ad * Acquire a new mutex, and donate it to an LWP. The LWP must already be
745 1.40.2.3 ad * locked.
746 1.40.2.2 ad */
747 1.40.2.2 ad void
748 1.40.2.3 ad lwp_relock(struct lwp *l, kmutex_t *new)
749 1.40.2.2 ad {
750 1.40.2.2 ad
751 1.40.2.2 ad LOCK_ASSERT(mutex_owned(l->l_mutex));
752 1.40.2.3 ad LOCK_ASSERT(l->l_omutex == NULL);
753 1.40.2.2 ad
754 1.40.2.3 ad #ifdef MULTIPROCESSOR
755 1.40.2.3 ad mutex_enter(new);
756 1.40.2.3 ad l->l_omutex = l->l_mutex;
757 1.40.2.3 ad mb_write();
758 1.40.2.3 ad l->l_mutex = new;
759 1.40.2.3 ad #endif
760 1.40.2.2 ad }
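/*
 * Illustrative sketch (not part of this file, compiled out): the
 * lock-lending pattern implemented by lwp_setlock(), lwp_setlock_unlock()
 * and lwp_relock().  "sq_mutex" stands in for a hypothetical sleep queue
 * mutex that the caller already holds.
 */
#if 0
	/*
	 * Sleep path: with the LWP locked by its current mutex and sq_mutex
	 * held, hand the LWP over to sq_mutex and drop the old lock in one
	 * step.
	 */
	lwp_setlock_unlock(l, &sq_mutex);

	/*
	 * Wakeup path: with sq_mutex (now the LWP's lock) held, switch the
	 * LWP back to the scheduler lock; lwp_unlock() then releases
	 * sched_mutex followed by sq_mutex.
	 */
	lwp_relock(l, &sched_mutex);
	lwp_unlock(l);
#endif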
761 1.40.2.2 ad
762 1.40.2.2 ad /*
763 1.40.2.2 ad * Handle exceptions for mi_userret(). Called if L_USERRET is set.
764 1.40.2.2 ad *
765 1.40.2.2 ad * Must be called with the LWP locked.
766 1.40.2.2 ad */
767 1.40.2.2 ad void
768 1.40.2.2 ad lwp_userret(struct lwp *l)
769 1.40.2.2 ad {
770 1.40.2.2 ad struct proc *p;
771 1.40.2.2 ad int sig, flag;
772 1.40.2.2 ad
773 1.40.2.2 ad p = l->l_proc;
774 1.40.2.2 ad flag = l->l_flag;
775 1.40.2.2 ad
776 1.40.2.2 ad #ifdef MULTIPROCESSOR
777 1.40.2.2 ad LOCK_ASSERT(lwp_locked(l, NULL));
778 1.40.2.2 ad lwp_unlock(l);
779 1.40.2.2 ad #endif
780 1.40.2.2 ad
781 1.40.2.2 ad /* Signals must be processed first. */
782 1.40.2.2 ad if ((flag & L_PENDSIG) != 0) {
783 1.40.2.2 ad mutex_enter(&p->p_smutex);
784 1.40.2.2 ad while ((sig = issignal(l)) != 0)
785 1.40.2.2 ad postsig(sig);
786 1.40.2.2 ad mutex_exit(&p->p_smutex);
787 1.40.2.2 ad }
788 1.40.2.2 ad
789 1.40.2.2 ad if ((flag & L_WCORE) != 0) {
790 1.40.2.2 ad /*
791 1.40.2.2 ad * Suspend ourselves, so that the kernel stack and therefore
792 1.40.2.2 ad * the userland registers saved in the trapframe are around
793 1.40.2.2 ad * for coredump() to write them out. We issue a wakeup() on
794 1.40.2.2 ad * p->p_nrlwps so that sigexit() will write the core file out
795 1.40.2.2 ad * once all other LWPs are suspended.
796 1.40.2.2 ad */
797 1.40.2.2 ad KERNEL_PROC_LOCK(l);
798 1.40.2.2 ad mutex_enter(&p->p_smutex);
799 1.40.2.2 ad p->p_nrlwps--;
800 1.40.2.2 ad wakeup(&p->p_nrlwps);
801 1.40.2.2 ad lwp_lock(l);
802 1.40.2.2 ad l->l_flag &= ~L_DETACHED;
803 1.40.2.2 ad l->l_stat = LSSUSPENDED;
804 1.40.2.2 ad mutex_exit_linked(&p->p_smutex, l->l_mutex);
805 1.40.2.2 ad mi_switch(l, NULL);
806 1.40.2.2 ad lwp_exit(l);
807 1.40.2.2 ad /* NOTREACHED */
808 1.40.2.2 ad }
809 1.40.2.2 ad
810 1.40.2.2 ad if ((flag & L_WEXIT) != 0) {
811 1.40.2.2 ad KERNEL_PROC_LOCK(l);
812 1.40.2.2 ad lwp_exit(l);
813 1.40.2.2 ad /* NOTREACHED */
814 1.40.2.2 ad }
815 1.40.2.2 ad
816 1.40.2.2 ad #ifdef MULTIPROCESSOR
817 1.40.2.2 ad lwp_lock(l);
818 1.40.2.2 ad #endif
819 1.40.2.2 ad }
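/*
 * Illustrative sketch (not part of this file, compiled out): the shape of
 * the check an mi_userret()-style caller performs.  The L_USERRET mask is
 * taken from the comment above lwp_userret(); the surrounding code is a
 * guess at the calling convention, not the real mi_userret().
 */
#if 0
	lwp_lock(l);
	if (__predict_false((l->l_flag & L_USERRET) != 0))
		lwp_userret(l);	/* may not return; otherwise LWP still locked */
	lwp_unlock(l);
#endif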
820 1.40.2.2 ad
821 1.40.2.2 ad /*
822 1.40.2.2 ad * Return non-zero if this is the last live LWP in the process. Called when
823 1.40.2.2 ad * exiting, dumping core, waiting for other LWPs to exit, etc. Accepts a
824 1.40.2.2 ad * 'bias' value for deadlock detection.
825 1.40.2.2 ad *
826 1.40.2.2 ad * Must be called with p->p_smutex held.
827 1.40.2.2 ad */
828 1.40.2.2 ad int
829 1.40.2.2 ad lwp_lastlive(int bias)
830 1.40.2.2 ad {
831 1.40.2.2 ad struct lwp *l = curlwp;
832 1.40.2.2 ad struct proc *p = l->l_proc;
833 1.40.2.2 ad
834 1.40.2.2 ad LOCK_ASSERT(mutex_owned(&p->p_smutex));
835 1.40.2.2 ad KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSTOP);
836 1.40.2.2 ad
837 1.40.2.2 ad return p->p_nrlwps - bias - (l->l_stat == LSONPROC) == 0;
838 1.40.2.2 ad }