/*	$NetBSD: kern_lwp.c,v 1.1.2.20 2002/11/25 21:44:00 nathanw Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

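/*
 * Global LWP lists.  alllwp links every LWP in the system.  deadlwp
 * collects exited LWPs whose resources are reclaimed on their behalf
 * (see lwp_exit2() below).  zomblwp holds undetached LWPs that have
 * exited and are waiting to be collected by lwp_wait1().
 */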
struct lwplist alllwp;
struct lwplist deadlwp;
struct lwplist zomblwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) do { if (lwp_debug) printf x; } while (0)
#else
#define DPRINTF(x) do { } while (0)
#endif

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	ucontext_t *newuc;
	int s, error;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
	if (error) {
		pool_put(&lwp_uc_pool, newuc);	/* don't leak the ucontext */
		return (error);
	}

	/* XXX check against resource limits */

	uaddr = uvm_km_valloc(kernel_map, USPACE);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);	/* don't leak the ucontext */
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		SCHED_UNLOCK(s);
		simple_lock(&p->p_lwplock);
		p->p_nrlwps++;
		simple_unlock(&p->p_lwplock);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error)
		return (error);

	return (0);
}
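
/*
 * A minimal sketch of the userland side of this call, assuming the
 * libc syscall stubs; the context setup shown here is illustrative,
 * not something this file defines:
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	getcontext(&uc);
 *	... aim uc at a fresh stack and an entry function ...
 *	if (_lwp_create(&uc, 0, &lid) == 0)
 *		... the new LWP is runnable and its ID is in lid ...
 */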


int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t, *t2;
	int s;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);

		/* The running-LWP count drops while we are suspended. */
		simple_lock(&p->p_lwplock);
		p->p_nrlwps--;
		simple_unlock(&p->p_lwplock);

		SCHED_LOCK(s);
		l->l_stat = LSSUSPENDED;
		/* XXX NJWLWP check if this makes sense here: */
		l->l_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			SCHED_UNLOCK(s);
			simple_lock(&p->p_lwplock);
			p->p_nrlwps--;
			simple_unlock(&p->p_lwplock);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSDEAD:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does..... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			panic("XXX multiprocessor LWPs? Implement me!");
			break;
		}
	}

	return (0);
}
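
/*
 * Sketch of the intended userland pairing, assuming the libc stubs
 * (illustrative only):
 *
 *	_lwp_suspend(lid);	... target parks in LSSUSPENDED ...
 *	_lwp_continue(lid);	... target is made runnable again ...
 *
 * Suspending yourself only returns after some other LWP calls
 * _lwp_continue() on your LID, which is why the deadlock check above
 * refuses to suspend the last non-suspended LWP in the process.
 */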


int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	lwp_continue(t);

	return (0);
}

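/*
 * Make a suspended LWP runnable again.  The l_wchan test below tells
 * apart an LWP that was runnable when it was suspended (put it back
 * on the run queue) from one that was sleeping (return it to LSSLEEP
 * so the wakeup it was waiting on can still take effect).
 */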
void
lwp_continue(struct lwp *l)
{
	int s;

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == NULL) {
		/* LWP was runnable before being suspended. */
		SCHED_LOCK(s);
		setrunnable(l);
		SCHED_UNLOCK(s);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}

int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t->l_stat != LSSLEEP)
		return (ENODEV);

	if ((t->l_flag & L_SINTR) == 0)
		return (EBUSY);

	/* Wake the target LWP, not the caller. */
	SCHED_LOCK(s);
	setrunnable(t);
	SCHED_UNLOCK(s);

	return (0);
}
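
/*
 * Illustrative userland use, assuming the standard libc stub
 * conventions (return -1 and set errno on failure): knock a target
 * LWP out of an interruptable sleep.
 *
 *	if (_lwp_wakeup(lid) == -1 && errno == ENODEV)
 *		... target was not asleep; nothing to do ...
 */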

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}
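
/*
 * Userland sketch, assuming the libc stub: reap one specific LWP, or
 * pass 0 as the first argument to wait for any undetached LWP:
 *
 *	lwpid_t who;
 *
 *	if (_lwp_wait(0, &who) == 0)
 *		... LWP 'who' has exited and been reclaimed ...
 */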


int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, s, wpri;
	static char waitstr1[] = "lwpwait";
	static char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			s = proclist_lock_write();
			LIST_REMOVE(l2, l_zlist); /* off zomblwp */
			proclist_unlock_write(s);

			simple_lock(&p->p_lwplock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lwplock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		           l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock check: if all other LWPs are
			 * waiting for exits or suspended, we would
			 * deadlock.
			 */
			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
					l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the LWP that is
			 * being waited for. wchan is already filled with
			 * &p->p_nlwps, and putting the LWP address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later. Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	       (unsigned) ((caddr_t)&l2->l_endzero -
			   (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	       (unsigned) ((caddr_t)&l2->l_endcopy -
			   (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = L_INMEM;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = (struct user *)uaddr;
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lwplock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lwplock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	return (0);
}
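
/*
 * In-kernel usage sketch: a caller hands newlwp() a freshly allocated
 * USPACE area and an entry point; sys__lwp_create() above, for example,
 * passes startlwp() with the copied-in ucontext as its argument.  The
 * new LWP is left in LSIDL; making it runnable (or LSSUSPENDED) is the
 * caller's job.
 */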


/*
 * Exit an LWP. This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch away.
 * Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on). We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	if ((l->l_flag & L_DETACHED) == 0) {
		DPRINTF(("lwp_exit: %d.%d going on zombie list\n", p->p_pid,
		    l->l_lid));
		LIST_INSERT_HEAD(&zomblwp, l, l_zlist);
	}
	proclist_unlock_write(s);

	simple_lock(&p->p_lwplock);
	p->p_nrlwps--;
	simple_unlock(&p->p_lwplock);

	l->l_stat = LSDEAD;

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l, 0);
}


void
lwp_exit2(struct lwp *l)
{

	simple_lock(&deadproc_slock);
	LIST_INSERT_HEAD(&deadlwp, l, l_list);
	simple_unlock(&deadproc_slock);

	wakeup(&deadproc);
}
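
/*
 * lwp_exit2() is the tail end of LWP death: by the time it runs, the
 * dying LWP can no longer free its own resources, so it is queued on
 * deadlwp and the process reaper (sleeping on deadproc) is woken to
 * release them on its behalf.
 */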

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		/*
		 * Decide only after scanning the whole list, so that a
		 * livelier LWP later in the list wins over a less live
		 * one seen earlier.
		 */
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SDEAD:
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find an LWP for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}
    608