      1 /*	$NetBSD: sys_sig.c,v 1.46 2016/08/04 06:43:43 christos Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1982, 1986, 1989, 1991, 1993
     34  *	The Regents of the University of California.  All rights reserved.
     35  * (c) UNIX System Laboratories, Inc.
     36  * All or some portions of this file are derived from material licensed
     37  * to the University of California by American Telephone and Telegraph
     38  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     39  * the permission of UNIX System Laboratories, Inc.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 __KERNEL_RCSID(0, "$NetBSD: sys_sig.c,v 1.46 2016/08/04 06:43:43 christos Exp $");
     70 
     71 #include "opt_dtrace.h"
     72 
     73 #include <sys/param.h>
     74 #include <sys/kernel.h>
     75 #include <sys/signalvar.h>
     76 #include <sys/proc.h>
     77 #include <sys/pool.h>
     78 #include <sys/syscallargs.h>
     79 #include <sys/kauth.h>
     80 #include <sys/wait.h>
     81 #include <sys/kmem.h>
     82 #include <sys/module.h>
     83 #include <sys/sdt.h>
     84 
     85 SDT_PROVIDER_DECLARE(proc);
     86 SDT_PROBE_DEFINE2(proc, kernel, , signal__clear,
     87     "int", 		/* signal */
     88     "ksiginfo_t *");	/* signal-info */
     89 
     90 int
     91 sys___sigaction_sigtramp(struct lwp *l,
     92     const struct sys___sigaction_sigtramp_args *uap, register_t *retval)
     93 {
     94 	/* {
     95 		syscallarg(int)				signum;
     96 		syscallarg(const struct sigaction *)	nsa;
     97 		syscallarg(struct sigaction *)		osa;
     98 		syscallarg(void *)			tramp;
     99 		syscallarg(int)				vers;
    100 	} */
    101 	struct sigaction nsa, osa;
    102 	int error;
    103 
    104 	if (SCARG(uap, nsa)) {
    105 		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
    106 		if (error)
    107 			return (error);
    108 	}
    109 	error = sigaction1(l, SCARG(uap, signum),
    110 	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
    111 	    SCARG(uap, tramp), SCARG(uap, vers));
    112 	if (error)
    113 		return (error);
    114 	if (SCARG(uap, osa)) {
    115 		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
    116 		if (error)
    117 			return (error);
    118 	}
    119 	return 0;
    120 }
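
/*
 * Illustrative userland view (a sketch; my_handler is a hypothetical
 * SA_SIGINFO-style handler, and the exact stub behaviour is a libc detail):
 * applications do not pass the trampoline themselves; the sigaction()
 * wrapper normally supplies a suitable trampoline and ABI version.  A
 * typical caller might look like
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = my_handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigemptyset(&sa.sa_mask);
 *	if (sigaction(SIGSEGV, &sa, NULL) == -1)
 *		...handle error...
 *
 * and the validation of the trampoline/version pair happens in
 * sigaction1() below.
 */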
    121 
    122 /*
     123  * Manipulate the signal mask.  The new mask is copied in from user space
     124  * and the previous mask is copied back out if requested.
    125  */
    126 int
    127 sys___sigprocmask14(struct lwp *l, const struct sys___sigprocmask14_args *uap,
    128     register_t *retval)
    129 {
    130 	/* {
    131 		syscallarg(int)			how;
    132 		syscallarg(const sigset_t *)	set;
    133 		syscallarg(sigset_t *)		oset;
    134 	} */
    135 	struct proc	*p = l->l_proc;
    136 	sigset_t	nss, oss;
    137 	int		error;
    138 
    139 	if (SCARG(uap, set)) {
    140 		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
    141 		if (error)
    142 			return error;
    143 	}
    144 	mutex_enter(p->p_lock);
    145 	error = sigprocmask1(l, SCARG(uap, how),
    146 	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
    147 	mutex_exit(p->p_lock);
    148 	if (error)
    149 		return error;
    150 	if (SCARG(uap, oset)) {
    151 		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
    152 		if (error)
    153 			return error;
    154 	}
    155 	return 0;
    156 }
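
/*
 * Illustrative userland usage (a sketch, assuming the standard sigprocmask()
 * wrapper): blocking SIGINT around a critical region and then restoring the
 * previous mask might look like
 *
 *	sigset_t nset, oset;
 *	sigemptyset(&nset);
 *	sigaddset(&nset, SIGINT);
 *	if (sigprocmask(SIG_BLOCK, &nset, &oset) == -1)
 *		...handle error...
 *	...critical region...
 *	(void)sigprocmask(SIG_SETMASK, &oset, NULL);
 *
 * The mask arithmetic itself (SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK) is done in
 * sigprocmask1() below.
 */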
    157 
    158 int
    159 sys___sigpending14(struct lwp *l, const struct sys___sigpending14_args *uap,
    160     register_t *retval)
    161 {
    162 	/* {
    163 		syscallarg(sigset_t *)	set;
    164 	} */
    165 	sigset_t ss;
    166 
    167 	sigpending1(l, &ss);
    168 	return copyout(&ss, SCARG(uap, set), sizeof(ss));
    169 }
    170 
    171 /*
     172  * Suspend the process until a signal arrives, with the given mask
     173  * installed in the meantime.  The previous mask is restored once the
     174  * signal has been taken (see sigsuspendsetup()/sigsuspendteardown()).
    175  */
    176 int
    177 sys___sigsuspend14(struct lwp *l, const struct sys___sigsuspend14_args *uap,
    178     register_t *retval)
    179 {
    180 	/* {
    181 		syscallarg(const sigset_t *)	set;
    182 	} */
    183 	sigset_t	ss;
    184 	int		error;
    185 
    186 	if (SCARG(uap, set)) {
    187 		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
    188 		if (error)
    189 			return error;
    190 	}
    191 	return sigsuspend1(l, SCARG(uap, set) ? &ss : 0);
    192 }
    193 
    194 int
    195 sys___sigaltstack14(struct lwp *l, const struct sys___sigaltstack14_args *uap,
    196     register_t *retval)
    197 {
    198 	/* {
    199 		syscallarg(const struct sigaltstack *)	nss;
    200 		syscallarg(struct sigaltstack *)	oss;
    201 	} */
    202 	struct sigaltstack	nss, oss;
    203 	int			error;
    204 
    205 	if (SCARG(uap, nss)) {
    206 		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
    207 		if (error)
    208 			return error;
    209 	}
    210 	error = sigaltstack1(l,
    211 	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
    212 	if (error)
    213 		return error;
    214 	if (SCARG(uap, oss)) {
    215 		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
    216 		if (error)
    217 			return error;
    218 	}
    219 	return 0;
    220 }
    221 
    222 int
    223 kill1(struct lwp *l, pid_t pid, ksiginfo_t *ksi, register_t *retval)
    224 {
    225 	int error;
    226 	struct proc *p;
    227 
    228 	if ((u_int)ksi->ksi_signo >= NSIG)
    229 		return EINVAL;
    230 
    231 	if (pid != l->l_proc->p_pid) {
    232 		if (ksi->ksi_pid != l->l_proc->p_pid)
    233 			return EPERM;
    234 
    235 		if (ksi->ksi_uid != kauth_cred_geteuid(l->l_cred))
    236 			return EPERM;
    237 
    238 		switch (ksi->ksi_code) {
    239 		case SI_USER:
    240 		case SI_QUEUE:
    241 			break;
    242 		default:
    243 			return EPERM;
    244 		}
    245 	}
    246 
    247 	if (pid > 0) {
    248 		/* kill single process */
    249 		mutex_enter(proc_lock);
    250 		p = proc_find_raw(pid);
    251 		if (p == NULL || (p->p_stat != SACTIVE && p->p_stat != SSTOP)) {
    252 			mutex_exit(proc_lock);
    253 			/* IEEE Std 1003.1-2001: return success for zombies */
    254 			return p ? 0 : ESRCH;
    255 		}
    256 		mutex_enter(p->p_lock);
    257 		error = kauth_authorize_process(l->l_cred,
    258 		    KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(ksi->ksi_signo),
    259 		    NULL, NULL);
    260 		if (!error && ksi->ksi_signo) {
    261 			error = kpsignal2(p, ksi);
    262 		}
    263 		mutex_exit(p->p_lock);
    264 		mutex_exit(proc_lock);
    265 		return error;
    266 	}
    267 
    268 	switch (pid) {
    269 	case -1:		/* broadcast signal */
    270 		return killpg1(l, ksi, 0, 1);
    271 	case 0:			/* signal own process group */
    272 		return killpg1(l, ksi, 0, 0);
    273 	default:		/* negative explicit process group */
    274 		return killpg1(l, ksi, -pid, 0);
    275 	}
    276 	/* NOTREACHED */
    277 }
    278 
    279 int
    280 sys_sigqueueinfo(struct lwp *l, const struct sys_sigqueueinfo_args *uap,
    281     register_t *retval)
    282 {
    283 	/* {
     284 		syscallarg(pid_t)	pid;
    285 		syscallarg(const siginfo_t *)	info;
    286 	} */
    287 	ksiginfo_t	ksi;
    288 	int error;
    289 
    290 	KSI_INIT(&ksi);
    291 
    292 	if ((error = copyin(&SCARG(uap, info)->_info, &ksi.ksi_info,
    293 	    sizeof(ksi.ksi_info))) != 0)
    294 		return error;
    295 
    296 	return kill1(l, SCARG(uap, pid), &ksi, retval);
    297 }
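
/*
 * Illustrative userland usage (a sketch; pid is the target process, and on
 * NetBSD the POSIX sigqueue(3) wrapper typically builds the siginfo_t and
 * ends up here): queueing a signal with a small payload might look like
 *
 *	union sigval sv;
 *	sv.sival_int = 42;
 *	if (sigqueue(pid, SIGUSR1, sv) == -1)
 *		...handle error...
 *
 * kill1() then insists that, for a target other than the caller itself,
 * ksi_pid/ksi_uid match the sender and the code is SI_USER or SI_QUEUE.
 */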
    298 
    299 int
    300 sys_kill(struct lwp *l, const struct sys_kill_args *uap, register_t *retval)
    301 {
    302 	/* {
    303 		syscallarg(pid_t)	pid;
    304 		syscallarg(int)	signum;
    305 	} */
    306 	ksiginfo_t	ksi;
    307 
    308 	KSI_INIT(&ksi);
    309 
    310 	ksi.ksi_signo = SCARG(uap, signum);
    311 	ksi.ksi_code = SI_USER;
    312 	ksi.ksi_pid = l->l_proc->p_pid;
    313 	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
    314 
    315 	return kill1(l, SCARG(uap, pid), &ksi, retval);
    316 }
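
/*
 * Illustrative userland usage (a sketch; pid names the target): the same
 * path is normally reached through kill(2), e.g.
 *
 *	if (kill(pid, SIGTERM) == -1)
 *		...handle error...
 *
 * where pid > 0 names a single process, pid == 0 the caller's own process
 * group, pid == -1 broadcasts, and pid < -1 names process group -pid; the
 * dispatch between those cases is done in kill1() above.
 */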
    317 
    318 int
    319 sys_getcontext(struct lwp *l, const struct sys_getcontext_args *uap,
    320     register_t *retval)
    321 {
    322 	/* {
    323 		syscallarg(struct __ucontext *) ucp;
    324 	} */
    325 	struct proc *p = l->l_proc;
    326 	ucontext_t uc;
    327 
    328 	memset(&uc, 0, sizeof(uc));
    329 
    330 	mutex_enter(p->p_lock);
    331 	getucontext(l, &uc);
    332 	mutex_exit(p->p_lock);
    333 
    334 	return copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp)));
    335 }
    336 
    337 int
    338 sys_setcontext(struct lwp *l, const struct sys_setcontext_args *uap,
    339     register_t *retval)
    340 {
    341 	/* {
    342 		syscallarg(const ucontext_t *) ucp;
    343 	} */
    344 	struct proc *p = l->l_proc;
    345 	ucontext_t uc;
    346 	int error;
    347 
    348 	error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
    349 	if (error)
    350 		return error;
    351 	if ((uc.uc_flags & _UC_CPU) == 0)
    352 		return EINVAL;
    353 	mutex_enter(p->p_lock);
    354 	error = setucontext(l, &uc);
    355 	mutex_exit(p->p_lock);
    356 	if (error)
    357  		return error;
    358 
    359 	return EJUSTRETURN;
    360 }
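
/*
 * Illustrative userland usage (a sketch): getcontext() and setcontext() are
 * normally used as a pair, e.g.
 *
 *	ucontext_t uc;
 *	volatile int resumed = 0;
 *
 *	getcontext(&uc);
 *	if (!resumed) {
 *		resumed = 1;
 *		setcontext(&uc);	...resumes just after getcontext()...
 *	}
 *
 * EJUSTRETURN tells the syscall return path not to overwrite the register
 * state that setucontext() has just installed.
 */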
    361 
    362 /*
     363  * sigtimedwait(2) system call, also used to implement
     364  * sigwaitinfo() and sigwait().
     365  *
     366  * This only handles a single LWP in signal wait; libpthread provides its
     367  * own sigtimedwait() wrapper to do the right thing for individual threads.
    368  */
    369 int
    370 sys_____sigtimedwait50(struct lwp *l,
    371     const struct sys_____sigtimedwait50_args *uap, register_t *retval)
    372 {
    373 
    374 	return sigtimedwait1(l, uap, retval, copyin, copyout, copyin, copyout);
    375 }
    376 
    377 int
    378 sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
    379 	struct sigaction *osa, const void *tramp, int vers)
    380 {
    381 	struct proc *p;
    382 	struct sigacts *ps;
    383 	sigset_t tset;
    384 	int prop, error;
    385 	ksiginfoq_t kq;
    386 	static bool v0v1valid;
    387 
    388 	if (signum <= 0 || signum >= NSIG)
    389 		return EINVAL;
    390 
    391 	p = l->l_proc;
    392 	error = 0;
    393 	ksiginfo_queue_init(&kq);
    394 
    395 	/*
    396 	 * Trampoline ABI version 0 is reserved for the legacy kernel
    397 	 * provided on-stack trampoline.  Conversely, if we are using a
     398 	 * non-0 ABI version, we must have a trampoline.  Only validate vers
     399 	 * if a new sigaction was supplied with an actual handler specified
     400 	 * (i.e. not SIG_IGN or SIG_DFL, which need no trampoline).  Emulations
     401 	 * use legacy kernel trampolines with version 0, so that case is
     402 	 * accepted as well.
    403 	 *
    404 	 * If version < 2, we try to autoload the compat module.  Note
    405 	 * that we interlock with the unload check in compat_modcmd()
    406 	 * using kernconfig_lock.  If the autoload fails, we don't try it
    407 	 * again for this process.
    408 	 */
    409 	if (nsa != NULL && nsa->sa_handler != SIG_IGN
    410 	    && nsa->sa_handler != SIG_DFL) {
    411 		if (__predict_false(vers < 2)) {
    412 			if (p->p_flag & PK_32)
    413 				v0v1valid = true;
    414 			else if ((p->p_lflag & PL_SIGCOMPAT) == 0) {
    415 				kernconfig_lock();
    416 				if (sendsig_sigcontext_vec == NULL) {
    417 					(void)module_autoload("compat",
    418 					    MODULE_CLASS_ANY);
    419 				}
    420 				if (sendsig_sigcontext_vec != NULL) {
    421 					/*
    422 					 * We need to remember if the
     423 					 * sigcontext method may be usable,
    424 					 * because libc may use it even
    425 					 * if siginfo is available.
    426 					 */
    427 					v0v1valid = true;
    428 				}
    429 				mutex_enter(proc_lock);
    430 				/*
    431 				 * Prevent unload of compat module while
    432 				 * this process remains.
    433 				 */
    434 				p->p_lflag |= PL_SIGCOMPAT;
    435 				mutex_exit(proc_lock);
    436 				kernconfig_unlock();
    437 			}
    438 		}
    439 
    440 		switch (vers) {
    441 		case 0:
    442 			/* sigcontext, kernel supplied trampoline. */
    443 			if (tramp != NULL || !v0v1valid) {
    444 				return EINVAL;
    445 			}
    446 			break;
    447 		case 1:
    448 			/* sigcontext, user supplied trampoline. */
    449 			if (tramp == NULL || !v0v1valid) {
    450 				return EINVAL;
    451 			}
    452 			break;
    453 		case 2:
    454 		case 3:
    455 			/* siginfo, user supplied trampoline. */
    456 			if (tramp == NULL) {
    457 				return EINVAL;
    458 			}
    459 			break;
    460 		default:
    461 			return EINVAL;
    462 		}
    463 	}
    464 
    465 	mutex_enter(p->p_lock);
    466 
    467 	ps = p->p_sigacts;
    468 	if (osa)
    469 		*osa = SIGACTION_PS(ps, signum);
    470 	if (!nsa)
    471 		goto out;
    472 
    473 	prop = sigprop[signum];
    474 	if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
    475 		error = EINVAL;
    476 		goto out;
    477 	}
    478 
    479 	SIGACTION_PS(ps, signum) = *nsa;
    480 	ps->sa_sigdesc[signum].sd_tramp = tramp;
    481 	ps->sa_sigdesc[signum].sd_vers = vers;
    482 	sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
    483 
    484 	if ((prop & SA_NORESET) != 0)
    485 		SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
    486 
    487 	if (signum == SIGCHLD) {
    488 		if (nsa->sa_flags & SA_NOCLDSTOP)
    489 			p->p_sflag |= PS_NOCLDSTOP;
    490 		else
    491 			p->p_sflag &= ~PS_NOCLDSTOP;
    492 		if (nsa->sa_flags & SA_NOCLDWAIT) {
    493 			/*
    494 			 * Paranoia: since SA_NOCLDWAIT is implemented by
     495 			 * reparenting the dying child to PID 1 (and trusting
    496 			 * it to reap the zombie), PID 1 itself is forbidden
    497 			 * to set SA_NOCLDWAIT.
    498 			 */
    499 			if (p->p_pid == 1)
    500 				p->p_flag &= ~PK_NOCLDWAIT;
    501 			else
    502 				p->p_flag |= PK_NOCLDWAIT;
    503 		} else
    504 			p->p_flag &= ~PK_NOCLDWAIT;
    505 
    506 		if (nsa->sa_handler == SIG_IGN) {
    507 			/*
    508 			 * Paranoia: same as above.
    509 			 */
    510 			if (p->p_pid == 1)
    511 				p->p_flag &= ~PK_CLDSIGIGN;
    512 			else
    513 				p->p_flag |= PK_CLDSIGIGN;
    514 		} else
    515 			p->p_flag &= ~PK_CLDSIGIGN;
    516 	}
    517 
    518 	if ((nsa->sa_flags & SA_NODEFER) == 0)
    519 		sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
    520 	else
    521 		sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
    522 
    523 	/*
    524 	 * Set bit in p_sigctx.ps_sigignore for signals that are set to
    525 	 * SIG_IGN, and for signals set to SIG_DFL where the default is to
    526 	 * ignore. However, don't put SIGCONT in p_sigctx.ps_sigignore, as
     527 	 * we still have to continue (restart) a stopped process.
    528 	 */
    529 	if (nsa->sa_handler == SIG_IGN ||
    530 	    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
    531 		/* Never to be seen again. */
    532 		sigemptyset(&tset);
    533 		sigaddset(&tset, signum);
    534 		sigclearall(p, &tset, &kq);
    535 		if (signum != SIGCONT) {
    536 			/* Easier in psignal */
    537 			sigaddset(&p->p_sigctx.ps_sigignore, signum);
    538 		}
    539 		sigdelset(&p->p_sigctx.ps_sigcatch, signum);
    540 	} else {
    541 		sigdelset(&p->p_sigctx.ps_sigignore, signum);
    542 		if (nsa->sa_handler == SIG_DFL)
    543 			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
    544 		else
    545 			sigaddset(&p->p_sigctx.ps_sigcatch, signum);
    546 	}
    547 
    548 	/*
    549 	 * Previously held signals may now have become visible.  Ensure that
    550 	 * we check for them before returning to userspace.
    551 	 */
    552 	if (sigispending(l, 0)) {
    553 		lwp_lock(l);
    554 		l->l_flag |= LW_PENDSIG;
    555 		lwp_unlock(l);
    556 	}
    557 out:
    558 	mutex_exit(p->p_lock);
    559 	ksiginfo_queue_drain(&kq);
    560 
    561 	return error;
    562 }
    563 
    564 int
    565 sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
    566 {
    567 	sigset_t *mask = &l->l_sigmask;
    568 	bool more;
    569 
    570 	KASSERT(mutex_owned(l->l_proc->p_lock));
    571 
    572 	if (oss) {
    573 		*oss = *mask;
    574 	}
    575 
    576 	if (nss == NULL) {
    577 		return 0;
    578 	}
    579 
    580 	switch (how) {
    581 	case SIG_BLOCK:
    582 		sigplusset(nss, mask);
    583 		more = false;
    584 		break;
    585 	case SIG_UNBLOCK:
    586 		sigminusset(nss, mask);
    587 		more = true;
    588 		break;
    589 	case SIG_SETMASK:
    590 		*mask = *nss;
    591 		more = true;
    592 		break;
    593 	default:
    594 		return EINVAL;
    595 	}
    596 	sigminusset(&sigcantmask, mask);
    597 	if (more && sigispending(l, 0)) {
    598 		/*
    599 		 * Check for pending signals on return to user.
    600 		 */
    601 		lwp_lock(l);
    602 		l->l_flag |= LW_PENDSIG;
    603 		lwp_unlock(l);
    604 	}
    605 	return 0;
    606 }
    607 
    608 void
    609 sigpending1(struct lwp *l, sigset_t *ss)
    610 {
    611 	struct proc *p = l->l_proc;
    612 
    613 	mutex_enter(p->p_lock);
    614 	*ss = l->l_sigpend.sp_set;
    615 	sigplusset(&p->p_sigpend.sp_set, ss);
    616 	mutex_exit(p->p_lock);
    617 }
    618 
    619 void
    620 sigsuspendsetup(struct lwp *l, const sigset_t *ss)
    621 {
    622 	struct proc *p = l->l_proc;
    623 
    624 	/*
    625 	 * When returning from sigsuspend/pselect/pollts, we want
    626 	 * the old mask to be restored after the
    627 	 * signal handler has finished.  Thus, we
    628 	 * save it here and mark the sigctx structure
    629 	 * to indicate this.
    630 	 */
    631 	mutex_enter(p->p_lock);
    632 	l->l_sigrestore = 1;
    633 	l->l_sigoldmask = l->l_sigmask;
    634 	l->l_sigmask = *ss;
    635 	sigminusset(&sigcantmask, &l->l_sigmask);
    636 
    637 	/* Check for pending signals when sleeping. */
    638 	if (sigispending(l, 0)) {
    639 		lwp_lock(l);
    640 		l->l_flag |= LW_PENDSIG;
    641 		lwp_unlock(l);
    642 	}
    643 	mutex_exit(p->p_lock);
    644 }
    645 
    646 void
    647 sigsuspendteardown(struct lwp *l)
    648 {
    649 	struct proc *p = l->l_proc;
    650 
    651 	mutex_enter(p->p_lock);
     652 	/* Defer restoring the old mask while a signal is still pending. */
    653 	if (l->l_sigrestore) {
    654 		if (sigispending(l, 0)) {
    655 			lwp_lock(l);
    656 			l->l_flag |= LW_PENDSIG;
    657 			lwp_unlock(l);
    658 		} else {
    659 			l->l_sigrestore = 0;
    660 			l->l_sigmask = l->l_sigoldmask;
    661 		}
    662 	}
    663 	mutex_exit(p->p_lock);
    664 }
    665 
    666 int
    667 sigsuspend1(struct lwp *l, const sigset_t *ss)
    668 {
    669 
    670 	if (ss)
    671 		sigsuspendsetup(l, ss);
    672 
    673 	while (kpause("pause", true, 0, NULL) == 0)
    674 		;
    675 
    676 	/* always return EINTR rather than ERESTART... */
    677 	return EINTR;
    678 }
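
/*
 * Illustrative userland usage (a sketch; got_usr1 is a hypothetical
 * volatile sig_atomic_t flag set by the handler): the classic race-free
 * wait pattern that relies on the atomic mask swap done here is
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	(void)sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		(void)sigsuspend(&old);		...always returns -1/EINTR...
 *
 * sigsuspendsetup() installs the temporary mask and arranges for the old
 * mask to be restored once the signal has been delivered (see
 * sigsuspendteardown()).
 */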
    679 
    680 int
    681 sigaltstack1(struct lwp *l, const struct sigaltstack *nss,
    682     struct sigaltstack *oss)
    683 {
    684 	struct proc *p = l->l_proc;
    685 	int error = 0;
    686 
    687 	mutex_enter(p->p_lock);
    688 
    689 	if (oss)
    690 		*oss = l->l_sigstk;
    691 
    692 	if (nss) {
    693 		if (nss->ss_flags & ~SS_ALLBITS)
    694 			error = EINVAL;
    695 		else if (nss->ss_flags & SS_DISABLE) {
    696 			if (l->l_sigstk.ss_flags & SS_ONSTACK)
    697 				error = EINVAL;
    698 		} else if (nss->ss_size < MINSIGSTKSZ)
    699 			error = ENOMEM;
    700 
    701 		if (!error)
    702 			l->l_sigstk = *nss;
    703 	}
    704 
    705 	mutex_exit(p->p_lock);
    706 
    707 	return error;
    708 }
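
/*
 * Illustrative userland usage (a sketch): an alternate stack is normally
 * paired with an SA_ONSTACK handler so that e.g. a stack-overflow SIGSEGV
 * can still be caught, for instance
 *
 *	stack_t ss;
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
 *		...handle error...
 *
 * The checks above reject stacks smaller than MINSIGSTKSZ (ENOMEM) and
 * refuse to disable a stack that is currently in use (SS_ONSTACK).
 */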
    709 
    710 int
    711 sigtimedwait1(struct lwp *l, const struct sys_____sigtimedwait50_args *uap,
    712     register_t *retval, copyin_t fetchss, copyout_t storeinf, copyin_t fetchts,
    713     copyout_t storets)
    714 {
    715 	/* {
    716 		syscallarg(const sigset_t *) set;
    717 		syscallarg(siginfo_t *) info;
    718 		syscallarg(struct timespec *) timeout;
    719 	} */
    720 	struct proc *p = l->l_proc;
    721 	int error, signum, timo;
    722 	struct timespec ts, tsstart, tsnow;
    723 	ksiginfo_t ksi;
    724 
    725 	/*
    726 	 * Calculate timeout, if it was specified.
    727 	 *
    728 	 * NULL pointer means an infinite timeout.
    729 	 * {.tv_sec = 0, .tv_nsec = 0} means do not block.
    730 	 */
    731 	if (SCARG(uap, timeout)) {
    732 		error = (*fetchts)(SCARG(uap, timeout), &ts, sizeof(ts));
    733 		if (error)
    734 			return error;
    735 
    736 		if ((error = itimespecfix(&ts)) != 0)
    737 			return error;
    738 
    739 		timo = tstohz(&ts);
    740 		if (timo == 0) {
    741 			if (ts.tv_sec == 0 && ts.tv_nsec == 0)
    742 				timo = -1; /* do not block */
    743 			else
    744 				timo = 1; /* the shortest possible timeout */
    745 		}
    746 
    747 		/*
     748 		 * Remember the current uptime; it will be used in the
     749 		 * ECANCELED/ERESTART case.
    750 		 */
    751 		getnanouptime(&tsstart);
    752 	} else {
    753 		memset(&tsstart, 0, sizeof(tsstart)); /* XXXgcc */
    754 		timo = 0; /* infinite timeout */
    755 	}
    756 
    757 	error = (*fetchss)(SCARG(uap, set), &l->l_sigwaitset,
    758 	    sizeof(l->l_sigwaitset));
    759 	if (error)
    760 		return error;
    761 
    762 	/*
     763 	 * Silently ignore SA_CANTMASK signals: psignal1() would ignore
     764 	 * SA_CANTMASK signals in the wait set anyway; we strip them here
     765 	 * only for the benefit of the siglist check below.
    766 	 */
    767 	sigminusset(&sigcantmask, &l->l_sigwaitset);
    768 
    769 	mutex_enter(p->p_lock);
    770 
     771 	/* Check for pending signals in the process; if none, then in the LWP. */
    772 	if ((signum = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset)) == 0)
    773 		signum = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);
    774 
    775 	if (signum != 0) {
     776 		/* Found a pending signal; just copy it out to the user. */
    777 		mutex_exit(p->p_lock);
    778 		goto out;
    779 	}
    780 
    781 	if (timo < 0) {
    782 		/* If not allowed to block, return an error */
    783 		mutex_exit(p->p_lock);
    784 		return EAGAIN;
    785 	}
    786 
    787 	/*
    788 	 * Set up the sigwait list and wait for signal to arrive.
    789 	 * We can either be woken up or time out.
    790 	 */
    791 	l->l_sigwaited = &ksi;
    792 	LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);
    793 	error = cv_timedwait_sig(&l->l_sigcv, p->p_lock, timo);
    794 
    795 	/*
    796 	 * Need to find out if we woke as a result of _lwp_wakeup() or a
    797 	 * signal outside our wait set.
    798 	 */
    799 	if (l->l_sigwaited != NULL) {
    800 		if (error == EINTR) {
    801 			/* Wakeup via _lwp_wakeup(). */
    802 			error = ECANCELED;
    803 		} else if (!error) {
    804 			/* Spurious wakeup - arrange for syscall restart. */
    805 			error = ERESTART;
    806 		}
    807 		l->l_sigwaited = NULL;
    808 		LIST_REMOVE(l, l_sigwaiter);
    809 	}
    810 	mutex_exit(p->p_lock);
    811 
    812 	/*
    813 	 * If the sleep was interrupted (either by signal or wakeup), update
     814 	 * the timeout and copy the new value back out.  It will be used when
     815 	 * the syscall is restarted or called again.
    816 	 */
    817 	if (timo && (error == ERESTART || error == ECANCELED)) {
    818 		getnanouptime(&tsnow);
    819 
    820 		/* Compute how much time has passed since start. */
    821 		timespecsub(&tsnow, &tsstart, &tsnow);
    822 
     823 		/* Subtract the elapsed time from the timeout. */
    824 		timespecsub(&ts, &tsnow, &ts);
    825 
    826 		if (ts.tv_sec < 0)
    827 			error = EAGAIN;
    828 		else {
    829 			/* Copy updated timeout to userland. */
    830 			error = (*storets)(&ts, SCARG(uap, timeout),
    831 			    sizeof(ts));
    832 		}
    833 	}
    834 out:
    835 	/*
    836 	 * If a signal from the wait set arrived, copy it to userland.
     837 	 * Copy only the used part of siginfo; the padding part is
    838 	 * left unchanged (userland is not supposed to touch it anyway).
    839 	 */
    840 	if (error == 0 && SCARG(uap, info)) {
    841 		error = (*storeinf)(&ksi.ksi_info, SCARG(uap, info),
    842 		    sizeof(ksi.ksi_info));
    843 	}
    844 	if (error == 0) {
    845 		*retval = ksi.ksi_info._signo;
    846 		SDT_PROBE(proc, kernel, , signal__clear, *retval,
    847 		    &ksi, 0, 0, 0);
    848 	}
    849 	return error;
    850 }
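
/*
 * Illustrative userland usage (a sketch): a caller typically blocks the
 * signals it wants to collect and then waits for them synchronously, e.g.
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 5, 0 };		...five-second timeout...
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	(void)sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == -1)
 *		...EAGAIN here means the timeout expired...
 *
 * A NULL timeout sleeps until a signal from the set arrives; an all-zero
 * timespec just polls (the timo < 0 case above returns EAGAIN at once).
 */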
    851