/*	$NetBSD: kern_lwp.c,v 1.1.2.21 2002/12/15 22:18:55 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;
struct lwplist deadlwp;
struct lwplist zomblwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif
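
/*
 * Note that DPRINTF's argument is doubly parenthesized, e.g.
 * DPRINTF(("lwp %d\n", lid)); the extra parentheses let a variadic
 * printf format pass through the single macro parameter.
 */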

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, NULL, startlwp, newuc, &l2);

	/*
	 * newlwp() marks the LWP in-memory unconditionally; correct
	 * the flag if the uarea is not actually resident.
	 */
	if (!inmem)
		l2->l_flag &= ~L_INMEM;

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		SCHED_UNLOCK(s);
		simple_lock(&p->p_lwplock);
		p->p_nrlwps++;
		simple_unlock(&p->p_lwplock);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error)
		return (error);

	return (0);
}
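
/*
 * Example (userland sketch, not kernel code): how a program might use
 * _lwp_create(2).  This assumes the companion _lwp_makecontext(3)
 * routine for setting up the ucontext; names are sketched from the
 * nathanw_sa interfaces, so treat the details as illustrative.
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *	void *stack = malloc(STACKSIZE);
 *
 *	_lwp_makecontext(&uc, start_routine, arg, NULL, stack, STACKSIZE);
 *	if (_lwp_create(&uc, 0, &lid) == -1)
 *		err(1, "_lwp_create");
 */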


int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t, *t2;
	int s;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL)	/* All other LWPs are suspended */
			return (EDEADLK);

		SCHED_LOCK(s);
		l->l_stat = LSSUSPENDED;
		/* XXX NJWLWP check if this makes sense here: */
		l->l_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			SCHED_UNLOCK(s);
			simple_lock(&p->p_lwplock);
			p->p_nrlwps--;
			simple_unlock(&p->p_lwplock);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSDEAD:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does. */
		case LSSTOP:
			panic("_lwp_suspend: stopped LWP in running process!");
			break;
		case LSONPROC:
			panic("XXX multiprocessor LWPs? Implement me!");
			break;
		}
	}

	return (0);
}


int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	lwp_continue(t);

	return (0);
}
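
/*
 * Example (userland sketch, illustrative only): pausing and resuming
 * a sibling LWP.  _lwp_suspend(2) on an already-suspended LWP is a
 * no-op, so the pair is safe to call without tracking state.
 *
 *	if (_lwp_suspend(lid) == -1)
 *		err(1, "_lwp_suspend");
 *	... inspect or modify shared state while lid is stopped ...
 *	if (_lwp_continue(lid) == -1)
 *		err(1, "_lwp_continue");
 */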

void
lwp_continue(struct lwp *l)
{
	int s;

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == NULL) {
		/* LWP was runnable before being suspended. */
		SCHED_LOCK(s);
		setrunnable(l);
		SCHED_UNLOCK(s);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}

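/*
 * Make a sleeping LWP runnable again.  Only LWPs in an interruptible
 * sleep (L_SINTR) may be awakened this way; the target's tsleep()
 * returns early and the sleeper re-checks its wakeup condition.
 */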
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	SCHED_LOCK(s);

	if (t->l_stat != LSSLEEP) {
		SCHED_UNLOCK(s);
		return (ENODEV);
	}

	/* Check the target's flags, not the caller's. */
	if ((t->l_flag & L_SINTR) == 0) {
		SCHED_UNLOCK(s);
		return (EBUSY);
	}

	setrunnable(t);

	SCHED_UNLOCK(s);

	return (0);
}

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}
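
/*
 * Example (userland sketch, illustrative only): reaping LWPs.  A
 * wait_for of 0 waits for any undetached LWP in the process, much
 * like wait(2) with a pid of -1.
 *
 *	lwpid_t departed;
 *
 *	if (_lwp_wait(0, &departed) == -1)
 *		err(1, "_lwp_wait");
 *	printf("lwp %d exited\n", (int)departed);
 */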


int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, s, wpri;
	static char waitstr1[] = "lwpwait";
	static char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			s = proclist_lock_write();
			LIST_REMOVE(l2, l_zlist); /* off zomblwp */
			proclist_unlock_write(s);

			simple_lock(&p->p_lwplock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lwplock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		    l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock check: if all other LWPs are waiting
			 * for exits or suspended, we would deadlock.
			 */
			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
				      l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX we'd also like to check for a cycle of
			 * waiting LWPs (specific-LID waits, not any-LWP
			 * waits) and detect that sort of deadlock, but
			 * we don't have a good place to store the LWP
			 * that is being waited for.  wchan is already
			 * filled with &p->p_nlwps, and putting the LWP
			 * address in there for deadlock tracing would
			 * require exiting LWPs to call wakeup on both
			 * their own address and &p->p_nlwps, to get
			 * threads sleeping on any LWP exiting.
			 *
			 * Revisit later.  Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


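/*
 * Create a new LWP in process p2, using the given uarea.  The new LWP
 * starts life in LSIDL; it is the caller's responsibility to mark it
 * runnable (or suspended) once it has been fully set up.
 */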
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	    (unsigned) ((caddr_t)&l2->l_endzero -
	    (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	    (unsigned) ((caddr_t)&l2->l_endcopy -
	    (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all LWPs will always run
	 * on the same CPU.  So, initialize the child's CPU to the
	 * parent's now.  In the multiprocessor case, the child's
	 * CPU will be initialized in the low-level context switch
	 * code when the LWP first runs.
	 */
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = L_INMEM;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = (struct user *)uaddr;
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lwplock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lwplock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	return (0);
}


/*
 * Exit an LWP.  If this is the last live LWP in the process, exit the
 * whole process.  This will call cpu_exit(), which will call
 * cpu_switch(), so it can only be used meaningfully if you're willing
 * to switch away.  Calling it with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on).  We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	if ((l->l_flag & L_DETACHED) == 0) {
		DPRINTF(("lwp_exit: %d.%d going on zombie list\n", p->p_pid,
		    l->l_lid));
		LIST_INSERT_HEAD(&zomblwp, l, l_zlist);
	}
	proclist_unlock_write(s);

	simple_lock(&p->p_lwplock);
	p->p_nrlwps--;
	simple_unlock(&p->p_lwplock);

	l->l_stat = LSDEAD;

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l, 0);
}


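/*
 * Hand a dead LWP over to the reaper: put it on the deadlwp list and
 * wake the reaper, which frees resources (such as the uarea) that the
 * LWP could not release while it was still running on them.  Called
 * from machine-dependent code once the dying LWP has switched away.
 */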
void
lwp_exit2(struct lwp *l)
{

	simple_lock(&deadproc_slock);
	LIST_INSERT_HEAD(&deadlwp, l, l_list);
	simple_unlock(&deadproc_slock);

	wakeup(&deadproc);
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		/*
		 * Only choose after scanning every LWP, so that the
		 * most live state wins even if it belongs to a later
		 * sibling.
		 */
		if (onproc)
			return (onproc);
		if (running)
			return (running);
		if (sleeping)
			return (sleeping);
		if (stopped)
			return (stopped);
		if (suspended)
			return (suspended);
		break;
	case SDEAD:
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find an LWP for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return (NULL);
}