/*	$NetBSD: kern_lwp.c,v 1.1.2.15 2002/08/29 17:46:01 nathanw Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;
struct lwplist deadlwp;
struct lwplist zomblwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif
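
/*
 * Create a new LWP in the calling process, with its initial user
 * context taken from "ucp".  Unless LWP_SUSPENDED is set in "flags",
 * the new LWP is made runnable immediately.  The new LWP's ID is
 * copied out through "new_lwp".
 */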
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	ucontext_t *newuc;
	int s, error;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
	if (error) {
		/* Don't leak the context buffer on the error path. */
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	uaddr = uvm_km_valloc(kernel_map, USPACE);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, NULL, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		SCHED_UNLOCK(s);
		simple_lock(&p->p_lwplock);
		p->p_nrlwps++;
		simple_unlock(&p->p_lwplock);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error)
		return (error);

	return (0);
}


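/*
 * Terminate the calling LWP.  Does not return.
 */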
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


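/*
 * Return the LWP ID of the calling LWP.
 */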
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


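/*
 * Suspend an LWP in the calling process.  Suspending the last LWP that
 * is not already suspended would deadlock the whole process, so that
 * case fails with EDEADLK.
 */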
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t, *t2;
	int s;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);

		SCHED_LOCK(s);
		l->l_stat = LSSUSPENDED;
		/* XXX NJWLWP check if this makes sense here: */
		l->l_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);	/* restore the SPL raised by SCHED_LOCK() */
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			SCHED_UNLOCK(s);
			simple_lock(&p->p_lwplock);
			p->p_nrlwps--;
			simple_unlock(&p->p_lwplock);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSDEAD:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does..... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			panic("XXX multiprocessor LWPs? Implement me!");
			break;
		}
	}

	return (0);
}


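/*
 * Resume a suspended LWP in the calling process.
 */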
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	lwp_continue(t);

	return (0);
}

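/*
 * Undo a suspension: an LWP that was runnable before it was suspended
 * is made runnable again, while an LWP that was sleeping goes back to
 * LSSLEEP.  Continuing an LWP that is not suspended is a no-op.
 */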
void
lwp_continue(struct lwp *l)
{
	int s;

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == 0) {
		/* LWP was runnable before being suspended. */
		SCHED_LOCK(s);
		setrunnable(l);
		SCHED_UNLOCK(s);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}

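/*
 * Wake up a sleeping LWP in the calling process.  Fails with ENODEV if
 * the target is not sleeping, or with EBUSY if its sleep is not
 * interruptible.
 */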
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	/* The checks apply to the target LWP, not the caller. */
	if (t->l_stat != LSSLEEP)
		return (ENODEV);

	if ((t->l_flag & L_SINTR) == 0)
		return (EBUSY);

	SCHED_LOCK(s);
	setrunnable(t);
	SCHED_UNLOCK(s);

	return 0;
}

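/*
 * Wait for an LWP in the calling process to exit.  A "wait_for" of
 * zero waits for any non-detached LWP; the ID of the departed LWP is
 * optionally copied out through "departed".
 */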
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}


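/*
 * Wait for a non-detached LWP in the calling LWP's process to become a
 * zombie and reap it.  If "lid" is zero, any non-detached LWP will do.
 * LWPWAIT_EXITCONTROL in "flags" selects PNOEXITERR instead of PCATCH
 * for the sleep priority.
 */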
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, s, wpri;
	static char waitstr1[] = "lwpwait";
	static char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			s = proclist_lock_write();
			LIST_REMOVE(l2, l_zlist); /* off zomblwp */
			proclist_unlock_write(s);

			simple_lock(&p->p_lwplock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lwplock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		    l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock check: if all other LWPs are waiting
			 * for exits or are suspended, we would deadlock.
			 */
			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
				      l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the LWP that is
			 * being waited for.  wchan is already filled with
			 * &p->p_nlwps, and putting the LWP address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later.  Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


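/*
 * Create a new LWP in process "p2", modeled on "l1" and using the
 * u-area at "uaddr".  The new LWP is left in state LSIDL; making it
 * runnable is the caller's responsibility.
 */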
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	    (unsigned) ((caddr_t)&l2->l_endzero -
	    (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	    (unsigned) ((caddr_t)&l2->l_endcopy -
	    (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all LWPs will always run
	 * on the same CPU.  So, initialize the child's CPU to the
	 * parent's now.  In the multiprocessor case, the child's CPU
	 * will be initialized in the low-level context switch code
	 * when the LWP actually runs.
	 */
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = L_INMEM;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = (struct user *)uaddr;
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lwplock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lwplock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	return (0);
}


/*
 * Exit an LWP.  If it is the last live LWP in its process, exit the
 * entire process.  This calls cpu_exit(), which calls cpu_switch(), so
 * it can only be used meaningfully if you're willing to switch away;
 * calling it with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on).  We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	if ((l->l_flag & L_DETACHED) == 0) {
		DPRINTF(("lwp_exit: %d.%d going on zombie list\n", p->p_pid,
		    l->l_lid));
		LIST_INSERT_HEAD(&zomblwp, l, l_zlist);
	}
	proclist_unlock_write(s);

	simple_lock(&p->p_lwplock);
	p->p_nrlwps--;
	simple_unlock(&p->p_lwplock);

	l->l_stat = LSDEAD;

	/* cpu_exit() will not return */
	cpu_exit(l, 0);
}


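/*
 * Hand a dead LWP to the reaper: put it on the deadlwp list and wake
 * up anything sleeping on &deadproc.
 */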
void
lwp_exit2(struct lwp *l)
{

	simple_lock(&deadproc_slock);
	LIST_INSERT_HEAD(&deadlwp, l, l_list);
	simple_unlock(&deadproc_slock);

	wakeup(&deadproc);
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l = NULL;

	/* Trivial case: only one LWP */
	if (p->p_nrlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
		/* Pick the first stopped LWP */
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_stat == LSSTOP)
				return (l);
		}
		/* NOTREACHED */
		break;
	case SACTIVE:
		/* Pick the first live LWP */
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_stat == LSRUN ||
			    l->l_stat == LSSLEEP ||
			    l->l_stat == LSONPROC ||
			    l->l_stat == LSSUSPENDED)
				return (l);
		}
		break;
	case SDEAD:
	case SZOMB:
		/* Doesn't really matter... */
		l = LIST_FIRST(&p->p_lwps);
		break;
#ifdef DIAGNOSTIC
	case SIDL:
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nrlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}