/*	$NetBSD: kern_lwp.c,v 1.14 2003/10/30 23:31:21 cl Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.14 2003/10/30 23:31:21 cl Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;
struct lwplist deadlwp;
struct lwplist zomblwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
	if (error) {
		/* Don't leak the ucontext on the error path. */
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		/* Don't leak the ucontext on the error path. */
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		SCHED_UNLOCK(s);
		simple_lock(&p->p_lwplock);
		p->p_nrlwps++;
		simple_unlock(&p->p_lwplock);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error)
		return (error);

	return (0);
}
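
/*
 * Example: what driving this syscall from userland might look like.
 * A hedged, illustrative sketch only (see _lwp_create(2) and
 * _lwp_makecontext(3) for the authoritative interface); the stack
 * size and the start routine are made up for illustration.
 *
 *	#include <err.h>
 *	#include <lwp.h>
 *	#include <stdlib.h>
 *	#include <ucontext.h>
 *
 *	#define STACKSIZE	(64 * 1024)
 *
 *	static void
 *	start(void *arg)
 *	{
 *		_lwp_exit();
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		ucontext_t uc;
 *		lwpid_t lid;
 *		void *stack;
 *
 *		if ((stack = malloc(STACKSIZE)) == NULL)
 *			err(1, "malloc");
 *		_lwp_makecontext(&uc, start, NULL, NULL, stack, STACKSIZE);
 *		if (_lwp_create(&uc, 0, &lid) != 0)
 *			err(1, "_lwp_create");
 *		if (_lwp_wait(lid, NULL) != 0)
 *			err(1, "_lwp_wait");
 *		return (0);
 *	}
 */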

int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t) l->l_private;

	return (0);
}


int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);

	return (0);
}
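
/*
 * Example: how a thread library might combine the identity and
 * private-pointer syscalls above. A hedged sketch; "struct tcb",
 * bind_self() and curtcb() are hypothetical names, but _lwp_self(2),
 * _lwp_setprivate(2) and _lwp_getprivate(2) are the real calls.
 *
 *	#include <lwp.h>
 *
 *	struct tcb {
 *		lwpid_t	t_lid;
 *		void	*t_stack;
 *	};
 *
 *	static void
 *	bind_self(struct tcb *self)
 *	{
 *		self->t_lid = _lwp_self();
 *		_lwp_setprivate(self);
 *	}
 *
 *	static struct tcb *
 *	curtcb(void)
 *	{
 *		return (_lwp_getprivate());
 *	}
 */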

int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t, *t2;
	int s;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);

		SCHED_LOCK(s);
		l->l_stat = LSSUSPENDED;
		/* XXX NJWLWP check if this makes sense here: */
		l->l_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			SCHED_UNLOCK(s);
			simple_lock(&p->p_lwplock);
			p->p_nrlwps--;
			simple_unlock(&p->p_lwplock);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSDEAD:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does... */
		case LSSTOP:
			panic("_lwp_suspend: stopped LWP in running process!");
			break;
		case LSONPROC:
			panic("XXX multiprocessor LWPs? Implement me!");
			break;
		}
	}

	return (0);
}


int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int s, target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	SCHED_LOCK(s);
	lwp_continue(t);
	SCHED_UNLOCK(s);

	return (0);
}

void
lwp_continue(struct lwp *l)
{

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == NULL) {
		/* LWP was runnable before being suspended. */
		setrunnable(l);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}
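
/*
 * Example: the suspend/continue pair from a controlling LWP's point of
 * view. A hedged sketch of a debugger-style caller; freeze_and_thaw()
 * is a hypothetical name. Note that _lwp_suspend(2) of an
 * already-suspended target succeeds (idempotent, as implemented above).
 *
 *	#include <err.h>
 *	#include <lwp.h>
 *
 *	static void
 *	freeze_and_thaw(lwpid_t target)
 *	{
 *		if (_lwp_suspend(target) != 0)
 *			err(1, "_lwp_suspend");
 *		if (_lwp_continue(target) != 0)
 *			err(1, "_lwp_continue");
 *	}
 */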

int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int error;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	SCHED_LOCK(s);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL) {
		error = ESRCH;
		goto exit;
	}

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto exit;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto exit;
	}
	/*
	 * Tell ltsleep to wake up.
	 */
	t->l_flag |= L_CANCELLED;

	setrunnable(t);
	error = 0;
exit:
	SCHED_UNLOCK(s);

	return (error);
}
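
/*
 * Example: _lwp_wakeup(2) as an interruption primitive. A hedged
 * sketch: the target must be in an interruptible sleep (L_SINTR), and
 * the L_CANCELLED flag set above is assumed to make the sleep return
 * EINTR, so the sleeper sees its blocking call fail with EINTR.
 * handle_early_wakeup() is a hypothetical handler.
 *
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <lwp.h>
 *	#include <time.h>
 *
 *	In the sleeping LWP:
 *
 *	struct timespec ts = { 3600, 0 };
 *	if (nanosleep(&ts, NULL) == -1 && errno == EINTR)
 *		handle_early_wakeup();
 *
 *	In the waking LWP, given the sleeper's LWP id:
 *
 *	if (_lwp_wakeup(sleeper_lid) != 0)
 *		warn("_lwp_wakeup");
 */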

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}
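
/*
 * Example: reaping any non-detached LWP and learning which one exited.
 * A hedged sketch; passing 0 as wait_for means "any LWP", matching the
 * (lid != 0) checks in lwp_wait1() below.
 *
 *	#include <err.h>
 *	#include <lwp.h>
 *	#include <stdio.h>
 *
 *	lwpid_t departed;
 *
 *	if (_lwp_wait(0, &departed) != 0)
 *		err(1, "_lwp_wait");
 *	printf("LWP %d exited\n", departed);
 */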

int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, s, wpri;
	static char waitstr1[] = "lwpwait";
	static char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			s = proclist_lock_write();
			LIST_REMOVE(l2, l_zlist); /* off zomblwp */
			proclist_unlock_write(s);

			simple_lock(&p->p_lwplock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lwplock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		           l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock check: if all other LWPs are waiting
			 * for exits or suspended, we would deadlock.
			 */
			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
					l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the LWP that is
			 * being waited for. wchan is already filled with
			 * &p->p_nlwps, and putting the LWP address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later. Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	       (unsigned) ((caddr_t)&l2->l_endzero -
			   (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	       (unsigned) ((caddr_t)&l2->l_endcopy -
			   (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = (struct user *)uaddr;
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lwplock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lwplock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	return (0);
}


/*
 * Exit the calling LWP. This will call cpu_exit, which will call
 * cpu_switch, so this can only be used meaningfully if you're willing
 * to switch away. Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps: %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on). We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	if ((l->l_flag & L_DETACHED) == 0) {
		DPRINTF(("lwp_exit: %d.%d going on zombie list\n", p->p_pid,
		    l->l_lid));
		LIST_INSERT_HEAD(&zomblwp, l, l_zlist);
	}
	proclist_unlock_write(s);

	simple_lock(&p->p_lwplock);
	p->p_nrlwps--;
	simple_unlock(&p->p_lwplock);

	l->l_stat = LSDEAD;

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l, 0);
}


/*
 * Hand an exited LWP to the process reaper: put it on the deadlwp
 * list and wake the reaper, which sleeps on &deadprocs.
 */
void
lwp_exit2(struct lwp *l)
{

	simple_lock(&deadproc_slock);
	LIST_INSERT_HEAD(&deadlwp, l, l_list);
	simple_unlock(&deadproc_slock);

	wakeup(&deadprocs);
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SDEAD:
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find an LWP for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}
    656