/*	$NetBSD: kern_lwp.c,v 1.1.2.23 2003/01/17 02:59:29 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;
struct lwplist deadlwp;
struct lwplist zomblwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif
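
/*
 * Create a new LWP in the current process from the supplied ucontext,
 * and either make it runnable or leave it suspended, depending on the
 * flags passed in.
 */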
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		SCHED_UNLOCK(s);
		simple_lock(&p->p_lwplock);
		p->p_nrlwps++;
		simple_unlock(&p->p_lwplock);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error)
		return (error);

	return (0);
}


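/*
 * Exit the calling LWP.  Does not return.
 */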
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


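/*
 * Return the LWP ID of the calling LWP.
 */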
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


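/*
 * Suspend execution of the target LWP.  Suspending ourselves when every
 * other LWP in the process is already suspended fails with EDEADLK.
 */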
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t, *t2;
	int s;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);

		SCHED_LOCK(s);
		l->l_stat = LSSUSPENDED;
		/* XXX NJWLWP check if this makes sense here: */
		l->l_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			SCHED_UNLOCK(s);
			simple_lock(&p->p_lwplock);
			p->p_nrlwps--;
			simple_unlock(&p->p_lwplock);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSDEAD:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does..... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			panic("XXX multiprocessor LWPs? Implement me!");
			break;
		}
	}

	return (0);
}


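/*
 * Resume execution of a suspended LWP in the current process.
 */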
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	lwp_continue(t);

	return (0);
}

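/*
 * Make a suspended LWP runnable again, or put it back to sleep if it
 * was sleeping when it was suspended.  A no-op for LWPs that are not
 * suspended.
 */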
void
lwp_continue(struct lwp *l)
{
	int s;

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == 0) {
		/* LWP was runnable before being suspended. */
		SCHED_LOCK(s);
		setrunnable(l);
		SCHED_UNLOCK(s);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}

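/*
 * Wake the target LWP from an interruptible sleep.
 */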
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t->l_stat != LSSLEEP)
		return (ENODEV);

	if ((t->l_flag & L_SINTR) == 0)
		return (EBUSY);

	SCHED_LOCK(s);
	setrunnable(t);
	SCHED_UNLOCK(s);

	return 0;
}

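/*
 * Wait for an LWP within the current process to exit.  If 'wait_for' is
 * nonzero, wait for that specific LWP; otherwise wait for any undetached
 * LWP.
 */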
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}


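/*
 * Wait for an LWP in the current process to become a zombie, reap it,
 * and report its LWP ID via 'departed'.  Callers on the process-exit
 * path pass LWPWAIT_EXITCONTROL, which selects PNOEXITERR rather than
 * PCATCH for the sleep.
 */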
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{

	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, s, wpri;
	static char waitstr1[] = "lwpwait";
	static char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			s = proclist_lock_write();
			LIST_REMOVE(l2, l_zlist); /* off zomblwp */
			proclist_unlock_write(s);

			simple_lock(&p->p_lwplock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lwplock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		           l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock check: if all other LWPs are waiting
			 * for exits or suspended, we would deadlock.
			 */

			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
					l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the lwp that is
			 * being waited for. wchan is already filled with
			 * &p->p_nlwps, and putting the lwp address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later. Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


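/*
 * Create a new LWP in process p2, using LWP l1 as a template.  The new
 * LWP is created in the LSIDL state and is not placed on any run queue;
 * the caller is responsible for scheduling it.
 */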
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	       (unsigned) ((caddr_t)&l2->l_endzero -
			   (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	       (unsigned) ((caddr_t)&l2->l_endcopy -
			   (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = (struct user *)uaddr;
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lwplock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lwplock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	return (0);
}


/*
 * Exit an LWP.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch
 * away.  Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on). We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	if ((l->l_flag & L_DETACHED) == 0) {
		DPRINTF(("lwp_exit: %d.%d going on zombie list\n", p->p_pid,
		    l->l_lid));
		LIST_INSERT_HEAD(&zomblwp, l, l_zlist);
	}
	proclist_unlock_write(s);

	simple_lock(&p->p_lwplock);
	p->p_nrlwps--;
	simple_unlock(&p->p_lwplock);

	l->l_stat = LSDEAD;

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l, 0);
}


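/*
 * Hand a dead LWP off for reaping: put it on the deadlwp list and wake
 * whoever is sleeping on &deadproc to free its remaining resources.
 */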
void
lwp_exit2(struct lwp *l)
{

	simple_lock(&deadproc_slock);
	LIST_INSERT_HEAD(&deadlwp, l, l_list);
	simple_unlock(&deadproc_slock);

	wakeup(&deadproc);
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SDEAD:
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nrlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find a lwp for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}