/*	$NetBSD: sys_sig.c,v 1.43.2.1 2015/09/22 12:06:07 skrll Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_sig.c,v 1.43.2.1 2015/09/22 12:06:07 skrll Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/wait.h>
#include <sys/kmem.h>
#include <sys/module.h>

int
sys___sigaction_sigtramp(struct lwp *l,
    const struct sys___sigaction_sigtramp_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
		syscallarg(void *)			tramp;
		syscallarg(int)				vers;
	} */
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(l, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return 0;
}

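/*
 * Illustrative userland sketch: programs normally reach the entry point
 * above through the standard sigaction(2) wrapper in libc, which supplies
 * the signal trampoline and ABI version itself.  The handler and signal
 * below are example choices only.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void
 *	on_usr1(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// async-signal-safe work only
 *	}
 *
 *	static int
 *	install_handler(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = on_usr1;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */
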
/*
 * Manipulate the signal mask.  The new and old masks are passed by
 * pointer; the new set is copied in, sigprocmask1() does the work, and
 * the old set is copied back out if requested.
 */
int
sys___sigprocmask14(struct lwp *l, const struct sys___sigprocmask14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			how;
		syscallarg(const sigset_t *)	set;
		syscallarg(sigset_t *)		oset;
	} */
	struct proc	*p = l->l_proc;
	sigset_t	nss, oss;
	int		error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return error;
	}
	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	mutex_exit(p->p_lock);
	if (error)
		return error;
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return error;
	}
	return 0;
}

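/*
 * Illustrative userland sketch: the sigprocmask(2) wrapper is the usual
 * consumer of this syscall.  The example blocks SIGINT around a critical
 * region and then restores the previous mask; the function name is an
 * example only.
 *
 *	#include <signal.h>
 *
 *	static void
 *	critical_region(void)
 *	{
 *		sigset_t block, saved;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &saved);
 *
 *		// ... work that must not be interrupted by SIGINT ...
 *
 *		sigprocmask(SIG_SETMASK, &saved, NULL);
 *	}
 */
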
int
sys___sigpending14(struct lwp *l, const struct sys___sigpending14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(sigset_t *)	set;
	} */
	sigset_t ss;

	sigpending1(l, &ss);
	return copyout(&ss, SCARG(uap, set), sizeof(ss));
}

/*
 * Suspend the process until a signal is delivered, installing the given
 * mask for the duration.  The mask is passed by pointer and copied in
 * here; the previous mask is restored once the caught signal has been
 * handled.
 */
int
sys___sigsuspend14(struct lwp *l, const struct sys___sigsuspend14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const sigset_t *)	set;
	} */
	sigset_t	ss;
	int		error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return error;
	}
	return sigsuspend1(l, SCARG(uap, set) ? &ss : 0);
}

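/*
 * Illustrative userland sketch: sigsuspend(2) exists to close the race
 * between testing a flag and sleeping.  The classic pattern blocks the
 * signal, tests the flag, and atomically swaps the old mask back in while
 * sleeping.  The flag and signal below are example choices only; the flag
 * is assumed to be set by a SIGUSR1 handler installed elsewhere.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void
 *	wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// returns -1 with EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */
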
int
sys___sigaltstack14(struct lwp *l, const struct sys___sigaltstack14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct sigaltstack *)	nss;
		syscallarg(struct sigaltstack *)	oss;
	} */
	struct sigaltstack	nss, oss;
	int			error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return error;
	}
	error = sigaltstack1(l,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return error;
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return error;
	}
	return 0;
}

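/*
 * Illustrative userland sketch: a program that wants to catch SIGSEGV
 * caused by stack overflow installs an alternate stack first and then
 * registers its handler with SA_ONSTACK.  The size and names below are
 * example choices only.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static int
 *	setup_altstack(void (*handler)(int))
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		if (ss.ss_sp == NULL)
 *			return -1;
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		if (sigaltstack(&ss, NULL) == -1)
 *			return -1;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_ONSTACK;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */
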
int
kill1(struct lwp *l, pid_t pid, ksiginfo_t *ksi, register_t *retval)
{
	int error;
	struct proc *p;

	if ((u_int)ksi->ksi_signo >= NSIG)
		return EINVAL;

	if (pid != l->l_proc->p_pid) {
		if (ksi->ksi_pid != l->l_proc->p_pid)
			return EPERM;

		if (ksi->ksi_uid != kauth_cred_geteuid(l->l_cred))
			return EPERM;

		switch (ksi->ksi_code) {
		case SI_USER:
		case SI_QUEUE:
			break;
		default:
			return EPERM;
		}
	}

	if (pid > 0) {
		/* kill single process */
		mutex_enter(proc_lock);
		p = proc_find_raw(pid);
		if (p == NULL || (p->p_stat != SACTIVE && p->p_stat != SSTOP)) {
			mutex_exit(proc_lock);
			/* IEEE Std 1003.1-2001: return success for zombies */
			return p ? 0 : ESRCH;
		}
		mutex_enter(p->p_lock);
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(ksi->ksi_signo),
		    NULL, NULL);
		if (!error && ksi->ksi_signo) {
			kpsignal2(p, ksi);
		}
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		return error;
	}

	switch (pid) {
	case -1:		/* broadcast signal */
		return killpg1(l, ksi, 0, 1);
	case 0:			/* signal own process group */
		return killpg1(l, ksi, 0, 0);
	default:		/* negative explicit process group */
		return killpg1(l, ksi, -pid, 0);
	}
	/* NOTREACHED */
}

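/*
 * Illustrative userland sketch of the pid encoding handled above, as seen
 * through kill(2); the pid values and function name are example choices
 * only.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	static void
 *	examples(pid_t pid, pid_t pgrp)
 *	{
 *		kill(pid, SIGTERM);	// pid > 0: that single process
 *		kill(0, SIGTERM);	// pid == 0: caller's process group
 *		kill(-1, SIGTERM);	// pid == -1: broadcast
 *		kill(-pgrp, SIGTERM);	// pid < -1: process group pgrp
 *		kill(pid, 0);		// signal 0: permission probe only
 *	}
 */
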
int
sys_sigqueueinfo(struct lwp *l, const struct sys_sigqueueinfo_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(pid_t)		pid;
		syscallarg(const siginfo_t *)	info;
	} */
	ksiginfo_t	ksi;
	int error;

	KSI_INIT(&ksi);

	if ((error = copyin(&SCARG(uap, info)->_info, &ksi.ksi_info,
	    sizeof(ksi.ksi_info))) != 0)
		return error;

	return kill1(l, SCARG(uap, pid), &ksi, retval);
}

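/*
 * Illustrative userland sketch: the sigqueue() interface builds a siginfo
 * carrying a user-supplied value and typically lands in the syscall above
 * via libc.  The pid, signal and value below are example choices only.
 *
 *	#include <signal.h>
 *
 *	static int
 *	notify(pid_t pid)
 *	{
 *		union sigval sv;
 *
 *		sv.sival_int = 42;
 *		return sigqueue(pid, SIGUSR1, sv);
 *	}
 */
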
int
sys_kill(struct lwp *l, const struct sys_kill_args *uap, register_t *retval)
{
	/* {
		syscallarg(pid_t)	pid;
		syscallarg(int)	signum;
	} */
	ksiginfo_t	ksi;

	KSI_INIT(&ksi);

	ksi.ksi_signo = SCARG(uap, signum);
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = l->l_proc->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);

	return kill1(l, SCARG(uap, pid), &ksi, retval);
}

int
sys_getcontext(struct lwp *l, const struct sys_getcontext_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct __ucontext *) ucp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t uc;

	memset(&uc, 0, sizeof(uc));

	mutex_enter(p->p_lock);
	getucontext(l, &uc);
	mutex_exit(p->p_lock);

	return copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp)));
}

int
sys_setcontext(struct lwp *l, const struct sys_setcontext_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t uc;
	int error;

	error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
	if (error)
		return error;
	if ((uc.uc_flags & _UC_CPU) == 0)
		return EINVAL;
	mutex_enter(p->p_lock);
	error = setucontext(l, &uc);
	mutex_exit(p->p_lock);
	if (error)
		return error;

	return EJUSTRETURN;
}

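/*
 * Illustrative userland sketch: getcontext() and setcontext() let a
 * program capture a ucontext_t and later resume from it; EJUSTRETURN
 * above tells the syscall return path not to clobber the register values
 * just restored.  The pass count is an example choice only.
 *
 *	#include <stdio.h>
 *	#include <ucontext.h>
 *
 *	int
 *	main(void)
 *	{
 *		ucontext_t ctx;
 *		volatile int passes = 0;
 *
 *		getcontext(&ctx);	// execution resumes here
 *		passes++;
 *		if (passes < 3)
 *			setcontext(&ctx);
 *		printf("resumed %d times\n", passes - 1);
 *		return 0;
 *	}
 */
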
/*
 * sigtimedwait(2) system call, also used to implement sigwaitinfo() and
 * sigwait().
 *
 * This only handles a single LWP waiting for signals.  libpthread
 * provides its own sigtimedwait() wrapper to do the right thing for
 * individual threads.
 */
int
sys_____sigtimedwait50(struct lwp *l,
    const struct sys_____sigtimedwait50_args *uap, register_t *retval)
{

	return sigtimedwait1(l, uap, retval, copyin, copyout, copyin, copyout);
}

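/*
 * Illustrative userland sketch: sigtimedwait(2) is called with the signals
 * of interest already blocked, so they stay pending until fetched here.
 * The signal and the five second timeout are example choices only.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	static int
 *	wait_for_usr1(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { 5, 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		if (sigtimedwait(&set, &info, &ts) == -1)
 *			return -1;	// errno is EAGAIN on timeout
 *		return info.si_signo;
 *	}
 */
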
int
sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t tset;
	int prop, error;
	ksiginfoq_t kq;
	static bool v0v1valid;

	if (signum <= 0 || signum >= NSIG)
		return EINVAL;

	p = l->l_proc;
	error = 0;
	ksiginfo_queue_init(&kq);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline.  Conversely, if we are using
	 * a non-zero ABI version, we must have a trampoline.  Only validate
	 * vers if a new sigaction was supplied and it specifies an actual
	 * handler (not SIG_IGN or SIG_DFL), since those don't require a
	 * trampoline.  Emulations use legacy kernel trampolines with
	 * version 0, so check for that case as well.
	 *
	 * If version < 2, try to autoload the compat module.  Note that we
	 * interlock with the unload check in compat_modcmd() using
	 * kernconfig_lock.  If the autoload fails, we don't try it again
	 * for this process.
	 */
	if (nsa != NULL && nsa->sa_handler != SIG_IGN
	    && nsa->sa_handler != SIG_DFL) {
		if (__predict_false(vers < 2)) {
			if (p->p_flag & PK_32)
				v0v1valid = true;
			else if ((p->p_lflag & PL_SIGCOMPAT) == 0) {
				kernconfig_lock();
				if (sendsig_sigcontext_vec == NULL) {
					(void)module_autoload("compat",
					    MODULE_CLASS_ANY);
				}
				if (sendsig_sigcontext_vec != NULL) {
					/*
					 * Remember whether the sigcontext
					 * method may be usable, because
					 * libc may use it even if siginfo
					 * is available.
					 */
					v0v1valid = true;
				}
				mutex_enter(proc_lock);
				/*
				 * Prevent unload of compat module while
				 * this process remains.
				 */
				p->p_lflag |= PL_SIGCOMPAT;
				mutex_exit(proc_lock);
				kernconfig_unlock();
			}
		}

		switch (vers) {
		case 0:
			/* sigcontext, kernel supplied trampoline. */
			if (tramp != NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
		case 1:
			/* sigcontext, user supplied trampoline. */
			if (tramp == NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
		case 2:
		case 3:
			/* siginfo, user supplied trampoline. */
			if (tramp == NULL) {
				return EINVAL;
			}
			break;
		default:
			return EINVAL;
		}
	}

	mutex_enter(p->p_lock);

	ps = p->p_sigacts;
	if (osa)
		*osa = SIGACTION_PS(ps, signum);
	if (!nsa)
		goto out;

	prop = sigprop[signum];
	if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
		error = EINVAL;
		goto out;
	}

	SIGACTION_PS(ps, signum) = *nsa;
	ps->sa_sigdesc[signum].sd_tramp = tramp;
	ps->sa_sigdesc[signum].sd_vers = vers;
	sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);

	if ((prop & SA_NORESET) != 0)
		SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;

	if (signum == SIGCHLD) {
		if (nsa->sa_flags & SA_NOCLDSTOP)
			p->p_sflag |= PS_NOCLDSTOP;
		else
			p->p_sflag &= ~PS_NOCLDSTOP;
		if (nsa->sa_flags & SA_NOCLDWAIT) {
			/*
			 * Paranoia: since SA_NOCLDWAIT is implemented by
			 * reparenting the dying child to PID 1 (and
			 * trusting it to reap the zombie), PID 1 itself is
			 * forbidden to set SA_NOCLDWAIT.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_NOCLDWAIT;
			else
				p->p_flag |= PK_NOCLDWAIT;
		} else
			p->p_flag &= ~PK_NOCLDWAIT;

		if (nsa->sa_handler == SIG_IGN) {
			/*
			 * Paranoia: same as above.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_CLDSIGIGN;
			else
				p->p_flag |= PK_CLDSIGIGN;
		} else
			p->p_flag &= ~PK_CLDSIGIGN;
	}

	if ((nsa->sa_flags & SA_NODEFER) == 0)
		sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
	else
		sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);

	/*
	 * Set bit in p_sigctx.ps_sigignore for signals that are set to
	 * SIG_IGN, and for signals set to SIG_DFL where the default is to
	 * ignore. However, don't put SIGCONT in p_sigctx.ps_sigignore, as
	 * we have to restart the process.
	 */
	if (nsa->sa_handler == SIG_IGN ||
	    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
		/* Never to be seen again. */
		sigemptyset(&tset);
		sigaddset(&tset, signum);
		sigclearall(p, &tset, &kq);
		if (signum != SIGCONT) {
			/* Easier in psignal */
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		}
		sigdelset(&p->p_sigctx.ps_sigcatch, signum);
	} else {
		sigdelset(&p->p_sigctx.ps_sigignore, signum);
		if (nsa->sa_handler == SIG_DFL)
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		else
			sigaddset(&p->p_sigctx.ps_sigcatch, signum);
	}

	/*
	 * Previously held signals may now have become visible.  Ensure that
	 * we check for them before returning to userspace.
	 */
	if (sigispending(l, 0)) {
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_unlock(l);
	}
out:
	mutex_exit(p->p_lock);
	ksiginfo_queue_drain(&kq);

	return error;
}

int
sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
{
	sigset_t *mask = &l->l_sigmask;
	bool more;

	KASSERT(mutex_owned(l->l_proc->p_lock));

	if (oss) {
		*oss = *mask;
	}

	if (nss == NULL) {
		return 0;
	}

	switch (how) {
	case SIG_BLOCK:
		sigplusset(nss, mask);
		more = false;
		break;
	case SIG_UNBLOCK:
		sigminusset(nss, mask);
		more = true;
		break;
	case SIG_SETMASK:
		*mask = *nss;
		more = true;
		break;
	default:
		return EINVAL;
	}
	sigminusset(&sigcantmask, mask);
	if (more && sigispending(l, 0)) {
		/*
		 * Check for pending signals on return to user.
		 */
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_unlock(l);
	}
	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{
	struct proc *p = l->l_proc;

	mutex_enter(p->p_lock);
	*ss = l->l_sigpend.sp_set;
	sigplusset(&p->p_sigpend.sp_set, ss);
	mutex_exit(p->p_lock);
}

void
sigsuspendsetup(struct lwp *l, const sigset_t *ss)
{
	struct proc *p = l->l_proc;

	/*
	 * When returning from sigsuspend/pselect/pollts, we want the old
	 * mask to be restored after the signal handler has finished.  Thus,
	 * we save it here and mark the sigctx structure to indicate this.
	 */
	mutex_enter(p->p_lock);
	l->l_sigrestore = 1;
	l->l_sigoldmask = l->l_sigmask;
	l->l_sigmask = *ss;
	sigminusset(&sigcantmask, &l->l_sigmask);

	/* Check for pending signals when sleeping. */
	if (sigispending(l, 0)) {
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_unlock(l);
	}
	mutex_exit(p->p_lock);
}

void
sigsuspendteardown(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(p->p_lock);
	/* Check for pending signals when sleeping. */
	if (l->l_sigrestore) {
		if (sigispending(l, 0)) {
			lwp_lock(l);
			l->l_flag |= LW_PENDSIG;
			lwp_unlock(l);
		} else {
			l->l_sigrestore = 0;
			l->l_sigmask = l->l_sigoldmask;
		}
	}
	mutex_exit(p->p_lock);
}

int
sigsuspend1(struct lwp *l, const sigset_t *ss)
{

	if (ss)
		sigsuspendsetup(l, ss);

	while (kpause("pause", true, 0, NULL) == 0)
		;

	/* always return EINTR rather than ERESTART... */
	return EINTR;
}

int
sigaltstack1(struct lwp *l, const struct sigaltstack *nss,
    struct sigaltstack *oss)
{
	struct proc *p = l->l_proc;
	int error = 0;

	mutex_enter(p->p_lock);

	if (oss)
		*oss = l->l_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			error = EINVAL;
		else if (nss->ss_flags & SS_DISABLE) {
			if (l->l_sigstk.ss_flags & SS_ONSTACK)
				error = EINVAL;
		} else if (nss->ss_size < MINSIGSTKSZ)
			error = ENOMEM;

		if (!error)
			l->l_sigstk = *nss;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sigtimedwait1(struct lwp *l, const struct sys_____sigtimedwait50_args *uap,
    register_t *retval, copyin_t fetchss, copyout_t storeinf, copyin_t fetchts,
    copyout_t storets)
{
	/* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */
	struct proc *p = l->l_proc;
	int error, signum, timo;
	struct timespec ts, tsstart, tsnow;
	ksiginfo_t ksi;

	/*
	 * Calculate timeout, if it was specified.
	 *
	 * NULL pointer means an infinite timeout.
	 * {.tv_sec = 0, .tv_nsec = 0} means do not block.
	 */
	if (SCARG(uap, timeout)) {
		error = (*fetchts)(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			return error;

		if ((error = itimespecfix(&ts)) != 0)
			return error;

		timo = tstohz(&ts);
		if (timo == 0) {
			if (ts.tv_sec == 0 && ts.tv_nsec == 0)
				timo = -1; /* do not block */
			else
				timo = 1; /* the shortest possible timeout */
		}

		/*
		 * Remember the current uptime; it will be used in the
		 * ECANCELED/ERESTART case.
		 */
		getnanouptime(&tsstart);
	} else {
		memset(&tsstart, 0, sizeof(tsstart)); /* XXXgcc */
		timo = 0; /* infinite timeout */
	}

	error = (*fetchss)(SCARG(uap, set), &l->l_sigwaitset,
	    sizeof(l->l_sigwaitset));
	if (error)
		return error;

	/*
	 * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
	 * SA_CANTMASK signals in the wait set anyway; we remove them here
	 * only for the siglist check below.
	 */
	sigminusset(&sigcantmask, &l->l_sigwaitset);

	mutex_enter(p->p_lock);

	/* Check for pending signals in the process first; if none, then in the LWP. */
	if ((signum = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset)) == 0)
		signum = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);

	if (signum != 0) {
		/* If we found a pending signal, just copy it out to the user. */
		mutex_exit(p->p_lock);
		goto out;
	}

	if (timo < 0) {
		/* If not allowed to block, return an error */
		mutex_exit(p->p_lock);
		return EAGAIN;
	}

	/*
	 * Set up the sigwait list and wait for signal to arrive.
	 * We can either be woken up or time out.
	 */
	l->l_sigwaited = &ksi;
	LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);
	error = cv_timedwait_sig(&l->l_sigcv, p->p_lock, timo);

	/*
	 * Need to find out if we woke as a result of _lwp_wakeup() or a
	 * signal outside our wait set.
	 */
	if (l->l_sigwaited != NULL) {
		if (error == EINTR) {
			/* Wakeup via _lwp_wakeup(). */
			error = ECANCELED;
		} else if (!error) {
			/* Spurious wakeup - arrange for syscall restart. */
			error = ERESTART;
		}
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
	}
	mutex_exit(p->p_lock);

	/*
	 * If the sleep was interrupted (either by a signal or a wakeup),
	 * update the timeout and copy the new value back out.  It will be
	 * used if the syscall is restarted or called again.
	 */
	if (timo && (error == ERESTART || error == ECANCELED)) {
		getnanouptime(&tsnow);

		/* Compute how much time has passed since start. */
		timespecsub(&tsnow, &tsstart, &tsnow);

		/* Subtract the elapsed time from the timeout. */
		timespecsub(&ts, &tsnow, &ts);

		if (ts.tv_sec < 0)
			error = EAGAIN;
		else {
			/* Copy updated timeout to userland. */
			error = (*storets)(&ts, SCARG(uap, timeout),
			    sizeof(ts));
		}
	}
out:
	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo; the padding is left
	 * unchanged (userland is not supposed to touch it anyway).
	 */
	if (error == 0 && SCARG(uap, info)) {
		error = (*storeinf)(&ksi.ksi_info, SCARG(uap, info),
		    sizeof(ksi.ksi_info));
	}
	if (error == 0)
		*retval = ksi.ksi_info._signo;
	return error;
}