/*	$NetBSD: sys_sig.c,v 1.62 2026/02/01 19:41:46 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_sig.c,v 1.62 2026/02/01 19:41:46 christos Exp $");

#include "opt_dtrace.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/wait.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/compat_stub.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE2(proc, kernel, , signal__clear,
    "int",		/* signal */
    "ksiginfo_t *");	/* signal-info */
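
/*
 * The native sigaction(2) entry point.  In addition to the usual
 * new/old sigaction pair, the caller (normally the libc sigaction()
 * stub) supplies the address and ABI version of the signal trampoline
 * to use.  The real work is done by sigaction1().
 */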
int
sys___sigaction_sigtramp(struct lwp *l,
    const struct sys___sigaction_sigtramp_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
		syscallarg(void *)			tramp;
		syscallarg(int)				vers;
	} */
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(l, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return 0;
}

/*
 * Manipulate signal mask.  Note that we receive new mask, not pointer, and
 * return old mask as return value; the library stub does the rest.
 */
int
sys___sigprocmask14(struct lwp *l, const struct sys___sigprocmask14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			how;
		syscallarg(const sigset_t *)	set;
		syscallarg(sigset_t *)		oset;
	} */
	struct proc *p = l->l_proc;
	sigset_t nss, oss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return error;
	}
	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	mutex_exit(p->p_lock);
	if (error)
		return error;
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return error;
	}
	return 0;
}

int
sys___sigpending14(struct lwp *l, const struct sys___sigpending14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(sigset_t *)	set;
	} */
	sigset_t ss;

	sigpending1(l, &ss);
	return copyout(&ss, SCARG(uap, set), sizeof(ss));
}

/*
 * Suspend process until signal, providing mask to be set in the meantime.
 * Note nonstandard calling convention: libc stub passes mask, not pointer,
 * to save a copyin.
 */
int
sys___sigsuspend14(struct lwp *l, const struct sys___sigsuspend14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const sigset_t *)	set;
	} */
	sigset_t ss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return error;
	}
	return sigsuspend1(l, SCARG(uap, set) ? &ss : 0);
}

int
sys___sigaltstack14(struct lwp *l, const struct sys___sigaltstack14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct sigaltstack *)	nss;
		syscallarg(struct sigaltstack *)	oss;
	} */
	stack_t nss, oss;
	int error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return error;
	}
	error = sigaltstack1(l,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return error;
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return error;
	}
	return 0;
}
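
/*
 * kill1: common code for kill(2) and sigqueueinfo(2).  Validates the
 * signal number and, for signals aimed at other processes, the
 * originating pid/uid and si_code supplied by userland.  Depending on
 * the sign of 'pid', the signal is then delivered to a single process,
 * the caller's own process group, an explicit process group, or
 * broadcast via killpg1().
 */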
int
kill1(struct lwp *l, pid_t pid, ksiginfo_t *ksi, register_t *retval)
{
	int error;
	struct proc *p;

	if ((u_int)ksi->ksi_signo >= NSIG)
		return EINVAL;

	if (pid != l->l_proc->p_pid) {
		if (ksi->ksi_pid != l->l_proc->p_pid)
			return EPERM;

		if (ksi->ksi_uid != kauth_cred_geteuid(l->l_cred))
			return EPERM;

		switch (ksi->ksi_code) {
		case SI_USER:
		case SI_QUEUE:
			break;
		default:
			return EPERM;
		}
	}

	if (pid > 0) {
		/* kill single process */
		mutex_enter(&proc_lock);
		p = proc_find_raw(pid);
		if (p == NULL || (p->p_stat != SACTIVE && p->p_stat != SSTOP)) {
			mutex_exit(&proc_lock);
			/* IEEE Std 1003.1-2001: return success for zombies */
			return p ? 0 : ESRCH;
		}
		mutex_enter(p->p_lock);
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(ksi->ksi_signo),
		    NULL, NULL);
		if (!error && ksi->ksi_signo) {
			error = kpsignal2(p, ksi);
		}
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		return error;
	}

	switch (pid) {
	case -1:		/* broadcast signal */
		return killpg1(l, ksi, 0, 1);
	case 0:			/* signal own process group */
		return killpg1(l, ksi, 0, 0);
	default:		/* negative explicit process group */
		if (pid <= INT_MIN)
			return ESRCH;
		return killpg1(l, ksi, -pid, 0);
	}
	/* NOTREACHED */
}

int
sys_sigqueueinfo(struct lwp *l, const struct sys_sigqueueinfo_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(pid_t)		pid;
		syscallarg(const siginfo_t *)	info;
	} */
	ksiginfo_t ksi;
	int error;

	KSI_INIT(&ksi);

	if ((error = copyin(&SCARG(uap, info)->_info, &ksi.ksi_info,
	    sizeof(ksi.ksi_info))) != 0)
		return error;

	return kill1(l, SCARG(uap, pid), &ksi, retval);
}

int
sys_kill(struct lwp *l, const struct sys_kill_args *uap, register_t *retval)
{
	/* {
		syscallarg(pid_t)	pid;
		syscallarg(int)		signum;
	} */
	ksiginfo_t ksi;

	KSI_INIT(&ksi);

	ksi.ksi_signo = SCARG(uap, signum);
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = l->l_proc->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);

	return kill1(l, SCARG(uap, pid), &ksi, retval);
}
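
/*
 * getcontext(2) and setcontext(2): save or restore the calling LWP's
 * user context (machine registers, signal mask and signal stack
 * information).  sys_setcontext() returns EJUSTRETURN because the
 * register set has already been replaced.
 */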
int
sys_getcontext(struct lwp *l, const struct sys_getcontext_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct __ucontext *)	ucp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t uc;

	memset(&uc, 0, sizeof(uc));

	mutex_enter(p->p_lock);
	getucontext(l, &uc);
	mutex_exit(p->p_lock);

	return copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp)));
}

int
sys_setcontext(struct lwp *l, const struct sys_setcontext_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *)	ucp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t uc;
	int error;

	error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
	if (error)
		return error;
	if ((uc.uc_flags & _UC_CPU) == 0)
		return EINVAL;
	mutex_enter(p->p_lock);
	error = setucontext(l, &uc);
	mutex_exit(p->p_lock);
	if (error)
		return error;

	return EJUSTRETURN;
}

/*
 * sigtimedwait(2) system call, used also for implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles single LWP in signal wait. libpthread provides
 * its own sigtimedwait() wrapper to DTRT WRT individual threads.
 */
int
sys_____sigtimedwait50(struct lwp *l,
    const struct sys_____sigtimedwait50_args *uap, register_t *retval)
{

	return sigtimedwait1(l, uap, retval, copyin, copyout, copyin, copyout);
}
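
/*
 * sigaction1: the guts of sigaction(2).  Validates the trampoline
 * version against the handler being installed, installs the new
 * signal disposition, updates the process-wide signal state (the
 * ignored/caught sets and the SIGCHLD handling flags) and discards
 * any pending instances of signals that have become ignored.
 */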
int
sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
    struct sigaction *osa, const void *tramp, int vers)
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t tset;
	int prop, error;
	ksiginfoq_t kq;
	static bool v0v1valid;

	if (signum <= 0 || signum >= NSIG)
		return EINVAL;

	p = l->l_proc;
	error = 0;
	ksiginfo_queue_init(&kq);

	/*
	 * Trampoline ABI version __SIGTRAMP_SIGCODE_VERSION (0) is reserved
	 * for the legacy kernel provided on-stack trampoline.  Conversely,
	 * if we are using a non-0 ABI version, we must have a trampoline.
	 * Only validate the vers if a new sigaction was supplied and there
	 * was an actual handler specified (not SIG_IGN or SIG_DFL), which
	 * don't require a trampoline.  Emulations use legacy kernel
	 * trampolines with version 0, so check for that case too.
	 *
	 * If version < __SIGTRAMP_SIGINFO_VERSION_MIN (usually 2), we try
	 * to autoload the compat module.  Note that we interlock with the
	 * unload check in compat_modcmd() using kernconfig_lock.  If the
	 * autoload fails, we don't try it again for this process.
	 */
	if (nsa != NULL && nsa->sa_handler != SIG_IGN
	    && nsa->sa_handler != SIG_DFL) {
		if (__predict_false(vers < __SIGTRAMP_SIGINFO_VERSION_MIN)) {
			if (vers == __SIGTRAMP_SIGCODE_VERSION &&
			    p->p_sigctx.ps_sigcode != NULL) {
				/*
				 * If sigcode is used for this emulation,
				 * version 0 is allowed.
				 */
			}
#ifdef __HAVE_STRUCT_SIGCONTEXT
			else if (p->p_flag & PK_32) {
				/*
				 * The 32-bit compat module will have
				 * pre-validated this for us.
				 */
				v0v1valid = true;
			} else if ((p->p_lflag & PL_SIGCOMPAT) == 0) {
				kernconfig_lock();
				(void)module_autoload("compat_16",
				    MODULE_CLASS_ANY);
				if (sendsig_sigcontext_16_hook.hooked) {
					/*
					 * We need to remember if the
					 * sigcontext method may be usable,
					 * because libc may use it even
					 * if siginfo is available.
					 */
					v0v1valid = true;
				}
				mutex_enter(&proc_lock);
				/*
				 * Prevent unload of compat module while
				 * this process remains.
				 */
				p->p_lflag |= PL_SIGCOMPAT;
				mutex_exit(&proc_lock);
				kernconfig_unlock();
			}
#endif /* __HAVE_STRUCT_SIGCONTEXT */
		}

		switch (vers) {
		case __SIGTRAMP_SIGCODE_VERSION:
			/* kernel supplied trampoline. */
			if (tramp != NULL ||
			    (p->p_sigctx.ps_sigcode == NULL && !v0v1valid)) {
				return EINVAL;
			}
			break;
#ifdef __HAVE_STRUCT_SIGCONTEXT
		case __SIGTRAMP_SIGCONTEXT_VERSION_MIN ...
		     __SIGTRAMP_SIGCONTEXT_VERSION_MAX:
			/* sigcontext, user supplied trampoline. */
			if (tramp == NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
#endif /* __HAVE_STRUCT_SIGCONTEXT */
		case __SIGTRAMP_SIGINFO_VERSION_MIN ...
		     __SIGTRAMP_SIGINFO_VERSION_MAX:
			/* siginfo, user supplied trampoline. */
			if (tramp == NULL) {
				return EINVAL;
			}
			break;
		default:
			/* Invalid trampoline version. */
			return EINVAL;
		}
	}

	mutex_enter(p->p_lock);

	ps = p->p_sigacts;
	if (osa)
		sigaction_copy(osa, &SIGACTION_PS(ps, signum));
	if (!nsa)
		goto out;

	prop = sigprop[signum];
	if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
		error = EINVAL;
		goto out;
	}

	sigaction_copy(&SIGACTION_PS(ps, signum), nsa);
	ps->sa_sigdesc[signum].sd_tramp = tramp;
	ps->sa_sigdesc[signum].sd_vers = vers;
	sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);

	if ((prop & SA_NORESET) != 0)
		SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;

	if (signum == SIGCHLD) {
		if (nsa->sa_flags & SA_NOCLDSTOP)
			p->p_sflag |= PS_NOCLDSTOP;
		else
			p->p_sflag &= ~PS_NOCLDSTOP;
		if (nsa->sa_flags & SA_NOCLDWAIT) {
			/*
			 * Paranoia: since SA_NOCLDWAIT is implemented by
			 * reparenting the dying child to PID 1 (and trust
			 * it to reap the zombie), PID 1 itself is forbidden
			 * to set SA_NOCLDWAIT.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_NOCLDWAIT;
			else
				p->p_flag |= PK_NOCLDWAIT;
		} else
			p->p_flag &= ~PK_NOCLDWAIT;

		if (nsa->sa_handler == SIG_IGN) {
			/*
			 * Paranoia: same as above.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_CLDSIGIGN;
			else
				p->p_flag |= PK_CLDSIGIGN;
		} else
			p->p_flag &= ~PK_CLDSIGIGN;
	}

	if ((nsa->sa_flags & SA_NODEFER) == 0)
		sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
	else
		sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);

	/*
	 * Set bit in p_sigctx.ps_sigignore for signals that are set to
	 * SIG_IGN, and for signals set to SIG_DFL where the default is to
	 * ignore.  However, don't put SIGCONT in p_sigctx.ps_sigignore, as
	 * we have to restart the process.
	 */
	if (nsa->sa_handler == SIG_IGN ||
	    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
		/* Never to be seen again. */
		sigemptyset(&tset);
		sigaddset(&tset, signum);
		sigclearall(p, &tset, &kq);
		if (signum != SIGCONT) {
			/* Easier in psignal */
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		}
		sigdelset(&p->p_sigctx.ps_sigcatch, signum);
	} else {
		sigdelset(&p->p_sigctx.ps_sigignore, signum);
		if (nsa->sa_handler == SIG_DFL)
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		else
			sigaddset(&p->p_sigctx.ps_sigcatch, signum);
	}

	/*
	 * Previously held signals may now have become visible.  Ensure that
	 * we check for them before returning to userspace.
	 */
	if (sigispending(l, 0)) {
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_need_userret(l);
		lwp_unlock(l);
	}
out:
	mutex_exit(p->p_lock);
	ksiginfo_queue_drain(&kq);

	return error;
}
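
/*
 * sigprocmask1: the guts of sigprocmask(2).  Must be called with the
 * process locked; updates the LWP's signal mask and flags the LWP to
 * check for newly unblocked pending signals on return to userspace.
 */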
int
sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
{
	sigset_t omask, *mask = &l->l_sigmask;
	bool more;

	KASSERT(mutex_owned(l->l_proc->p_lock));

	if (!oss)
		oss = &omask;
	*oss = *mask;

	if (nss == NULL) {
		return 0;
	}

	switch (how) {
	case SIG_BLOCK:
		sigplusset(nss, mask);
		more = false;
		break;
	case SIG_UNBLOCK:
		sigminusset(nss, mask);
		more = true;
		break;
	case SIG_SETMASK:
		*mask = *nss;
		more = true;
		break;
	default:
		return EINVAL;
	}
	sigminusset(&sigcantmask, mask);
	ktrsigmask(how, nss, oss, mask);
	if (more && sigispending(l, 0)) {
		/*
		 * Check for pending signals on return to user.
		 */
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_need_userret(l);
		lwp_unlock(l);
	}
	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{
	struct proc *p = l->l_proc;

	mutex_enter(p->p_lock);
	*ss = l->l_sigpend.sp_set;
	sigplusset(&p->p_sigpend.sp_set, ss);
	mutex_exit(p->p_lock);
}

void
sigsuspendsetup(struct lwp *l, const sigset_t *ss)
{
	struct proc *p = l->l_proc;

	/*
	 * When returning from sigsuspend/pselect/pollts, we want
	 * the old mask to be restored after the signal handler has
	 * finished.  Thus, we save it here and mark the sigctx
	 * structure to indicate this.
	 */
	mutex_enter(p->p_lock);
	l->l_sigrestore = 1;
	l->l_sigoldmask = l->l_sigmask;
	l->l_sigmask = *ss;
	sigminusset(&sigcantmask, &l->l_sigmask);

	/* Check for pending signals when sleeping. */
	if (sigispending(l, 0)) {
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_need_userret(l);
		lwp_unlock(l);
	}
	mutex_exit(p->p_lock);
}

void
sigsuspendteardown(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(p->p_lock);
	/* Check for pending signals when sleeping. */
	if (l->l_sigrestore) {
		if (sigispending(l, 0)) {
			lwp_lock(l);
			l->l_flag |= LW_PENDSIG;
			lwp_need_userret(l);
			lwp_unlock(l);
		} else {
			l->l_sigrestore = 0;
			l->l_sigmask = l->l_sigoldmask;
		}
	}
	mutex_exit(p->p_lock);
}
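
/*
 * sigsuspend1: the guts of sigsuspend(2).  Installs the temporary
 * signal mask via sigsuspendsetup() and sleeps until a signal wakes
 * the LWP; always returns EINTR so the caller does not restart.
 */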
int
sigsuspend1(struct lwp *l, const sigset_t *ss)
{

	if (ss)
		sigsuspendsetup(l, ss);

	while (kpause("pause", true, 0, NULL) == 0)
		;

	/* always return EINTR rather than ERESTART... */
	return EINTR;
}

int
sigaltstack1(struct lwp *l, const stack_t *nss, stack_t *oss)
{
	struct proc *p = l->l_proc;
	int error = 0;

	mutex_enter(p->p_lock);

	if (oss)
		*oss = l->l_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			error = EINVAL;
		else if (nss->ss_flags & SS_DISABLE) {
			if (l->l_sigstk.ss_flags & SS_ONSTACK)
				error = EINVAL;
		} else if (nss->ss_size < MINSIGSTKSZ)
			error = ENOMEM;

		if (!error)
			l->l_sigstk = *nss;
	}

	mutex_exit(p->p_lock);

	return error;
}
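
/*
 * sigtimedwait1: the guts of sigtimedwait(2).  The fetch/store
 * functions are passed in so that callers (e.g. compat wrappers) can
 * substitute their own copyin/copyout routines for the sigset,
 * siginfo and timespec arguments.
 */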
int
sigtimedwait1(struct lwp *l, const struct sys_____sigtimedwait50_args *uap,
    register_t *retval, copyin_t fetchss, copyout_t storeinf, copyin_t fetchts,
    copyout_t storets)
{
	/* {
		syscallarg(const sigset_t *)	set;
		syscallarg(siginfo_t *)		info;
		syscallarg(struct timespec *)	timeout;
	} */
	struct proc *p = l->l_proc;
	int error, signum, timo;
	struct timespec ts, tsstart, tsnow;
	ksiginfo_t ksi;

	/*
	 * Calculate timeout, if it was specified.
	 *
	 * NULL pointer means an infinite timeout.
	 * {.tv_sec = 0, .tv_nsec = 0} means do not block.
	 */
	if (SCARG(uap, timeout)) {
		error = (*fetchts)(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			return error;

		if ((error = itimespecfix(&ts)) != 0)
			return error;

		timo = tstohz(&ts);
		if (timo == 0) {
			if (ts.tv_sec == 0 && ts.tv_nsec == 0)
				timo = -1; /* do not block */
			else
				timo = 1; /* the shortest possible timeout */
		}

		/*
		 * Remember the current uptime; it is used in the
		 * EINTR/ERESTART case.
		 */
		getnanouptime(&tsstart);
	} else {
		memset(&tsstart, 0, sizeof(tsstart));	/* XXXgcc */
		timo = 0;	/* infinite timeout */
	}

	error = (*fetchss)(SCARG(uap, set), &l->l_sigwaitset,
	    sizeof(l->l_sigwaitset));
	if (error)
		return error;

	/*
	 * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
	 * SA_CANTMASK signals in the wait set anyway; we do this only
	 * for the siglist check below.
	 */
	sigminusset(&sigcantmask, &l->l_sigwaitset);

	memset(&ksi.ksi_info, 0, sizeof(ksi.ksi_info));

	mutex_enter(p->p_lock);

	/* Check for pending signals in the process; if none, then in the LWP. */
	if ((signum = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset)) == 0)
		signum = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);

	if (signum != 0) {
		/* Found a pending signal; just copy it out to the user. */
		mutex_exit(p->p_lock);
		goto out;
	}

	if (timo < 0) {
		/* If not allowed to block, return an error */
		mutex_exit(p->p_lock);
		return EAGAIN;
	}

	/*
	 * Set up the sigwait list and wait for signal to arrive.
	 * Four possible outcomes:
	 * 1. woken by one of the requested signals (l_sigwaited is NULL)
	 * 2. interrupted by some other signal (error=EINTR/ERESTART)
	 * 3. timed out (error=EAGAIN, a.k.a. EWOULDBLOCK)
	 * 4. spurious wakeup
	 */
	l->l_sigwaited = &ksi;
	LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);
	error = cv_timedwait_sig(&l->l_sigcv, p->p_lock, timo);

	/*
	 * Delivery of any of the requested signals will remove us from
	 * the list of signal waiters and null out l_sigwaited.  So if
	 * l_sigwaited is nonnull, then we weren't woken by one of the
	 * requested signals, and we must remove ourselves from the
	 * list.
	 *
	 * XXX What happens if delivery of a requested signal races
	 * with timeout?  Should signal delivery win, or should timeout
	 * win?  Currently timeout wins: if error is EAGAIN, we just
	 * return that _even if_ a signal was delivered to us.  Seems
	 * wrong!
	 */
	if (l->l_sigwaited != NULL) {
		if (!error) {
			/* Spurious wakeup - arrange for syscall restart. */
			error = ERESTART;
		}
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
	}
	mutex_exit(p->p_lock);

	/*
	 * If the sleep was interrupted (either by signal or wakeup), update
	 * the timeout and copy the new value back out.  It is used when the
	 * syscall is restarted or called again.
	 *
	 * This copyout is not used by POSIX sigtimedwait, but it is
	 * used by the NetBSD __sigtimedwait syscall so if interrupted
	 * by a signal handler with SA_RESTART, it can wait for only
	 * the remaining time.
	 *
	 * XXX This introduces some unnecessary timing drift, because
	 * we don't convert the timeout to an absolute deadline, so,
	 * e.g., the time spent in the signal handler in userland
	 * doesn't count toward the timeout.  Should replace this
	 * syscall by one that either gives up and maps ERESTART to
	 * EINTR, or converts the timeout to an absolute deadline in
	 * place so it maintains the deadline on restart.
	 */
	if (timo && (error == ERESTART || error == EINTR)) {
		getnanouptime(&tsnow);

		/* Compute how much time has passed since start. */
		timespecsub(&tsnow, &tsstart, &tsnow);

		/* Subtract passed time from timeout. */
		timespecsub(&ts, &tsnow, &ts);

		if (ts.tv_sec < 0)
			error = EAGAIN;
		else {
			/* Copy updated timeout to userland. */
			int error1 = (*storets)(&ts, SCARG(uap, timeout),
			    sizeof(ts));

			/*
			 * Only override error (ERESTART/EINTR) if the
			 * copyout failed (EFAULT).  Don't override it
			 * if the copyout succeeded; we should not
			 * return 0 in this branch.
			 */
			if (error1)
				error = error1;
		}
	}
out:
	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo, the padding part is
	 * left unchanged (userland is not supposed to touch it anyway).
	 */
	if (error == 0 && SCARG(uap, info)) {
		error = (*storeinf)(&ksi.ksi_info, SCARG(uap, info),
		    sizeof(ksi.ksi_info));
	}
	if (error == 0) {
		*retval = ksi.ksi_info._signo;
		SDT_PROBE(proc, kernel, , signal__clear, *retval,
		    &ksi, 0, 0, 0);
	}
	return error;
}