/* $NetBSD: sys_sig.c,v 1.1.2.3 2006/11/17 16:34:38 ad Exp $ */

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *      (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_sig.c  8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_sig.c,v 1.1.2.3 2006/11/17 16:34:38 ad Exp $");

#include "opt_ptrace.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/wait.h>

#ifdef COMPAT_16
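/*
 * Old-style sigaction(2) (COMPAT_16): no userland trampoline is supplied,
 * so sigaction1() is called with a NULL trampoline and version 0, selecting
 * the legacy kernel-provided on-stack trampoline.
 */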
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
        struct compat_16_sys___sigaction14_args /* {
                syscallarg(int) signum;
                syscallarg(const struct sigaction *) nsa;
                syscallarg(struct sigaction *) osa;
        } */ *uap = v;
        struct sigaction nsa, osa;
        int error;

        if (SCARG(uap, nsa)) {
                error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
                if (error)
                        return (error);
        }
        error = sigaction1(l, SCARG(uap, signum),
            SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
            NULL, 0);
        if (error)
                return (error);
        if (SCARG(uap, osa)) {
                error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
                if (error)
                        return (error);
        }
        return (0);
}
#endif

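/*
 * sigaction(2) with a caller-supplied signal trampoline and trampoline
 * ABI version; both are validated and recorded by sigaction1().
 */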
/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigaction_sigtramp_args /* {
                syscallarg(int) signum;
                syscallarg(const struct sigaction *) nsa;
                syscallarg(struct sigaction *) osa;
                syscallarg(void *) tramp;
                syscallarg(int) vers;
        } */ *uap = v;
        struct sigaction nsa, osa;
        int error;

        if (SCARG(uap, nsa)) {
                error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
                if (error)
                        return (error);
        }
        error = sigaction1(l, SCARG(uap, signum),
            SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
            SCARG(uap, tramp), SCARG(uap, vers));
        if (error)
                return (error);
        if (SCARG(uap, osa)) {
                error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
                if (error)
                        return (error);
        }
        return (0);
}

/*
 * Manipulate the calling LWP's signal mask.  The new and old masks are
 * passed through userland pointers; the real work is done by
 * sigprocmask1().
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigprocmask14_args /* {
                syscallarg(int) how;
                syscallarg(const sigset_t *) set;
                syscallarg(sigset_t *) oset;
        } */ *uap = v;
        sigset_t nss, oss;
        int error;

        if (SCARG(uap, set)) {
                error = copyin(SCARG(uap, set), &nss, sizeof(nss));
                if (error)
                        return (error);
        }
        error = sigprocmask1(l, SCARG(uap, how),
            SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
        if (error)
                return (error);
        if (SCARG(uap, oset)) {
                error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
                if (error)
                        return (error);
        }
        return (0);
}

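/*
 * sigpending(2): copy the set of signals pending on the calling LWP,
 * as computed by sigpending1(), out to userland.
 */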
/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigpending14_args /* {
                syscallarg(sigset_t *) set;
        } */ *uap = v;
        sigset_t ss;

        sigpending1(l, &ss);
        return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}

/*
 * Suspend the process until a signal is delivered, installing the given
 * signal mask in the meantime.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigsuspend14_args /* {
                syscallarg(const sigset_t *) set;
        } */ *uap = v;
        sigset_t ss;
        int error;

        if (SCARG(uap, set)) {
                error = copyin(SCARG(uap, set), &ss, sizeof(ss));
                if (error)
                        return (error);
        }

        return (sigsuspend1(l, SCARG(uap, set) ? &ss : 0));
}

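/*
 * sigaltstack(2): get and/or set the calling LWP's signal stack; the
 * real work is done by sigaltstack1().
 */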
/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigaltstack14_args /* {
                syscallarg(const struct sigaltstack *) nss;
                syscallarg(struct sigaltstack *) oss;
        } */ *uap = v;
        struct sigaltstack nss, oss;
        int error;

        if (SCARG(uap, nss)) {
                error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
                if (error)
                        return (error);
        }
        error = sigaltstack1(l,
            SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
        if (error)
                return (error);
        if (SCARG(uap, oss)) {
                error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
                if (error)
                        return (error);
        }
        return (0);
}

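/*
 * kill(2): send a signal to a single process (pid > 0), or hand the
 * request to killpg1() for process groups and broadcast (pid <= 0).
 */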
/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
        struct sys_kill_args /* {
                syscallarg(int) pid;
                syscallarg(int) signum;
        } */ *uap = v;
        struct proc *p;
        ksiginfo_t ksi;
        int signum = SCARG(uap, signum);
        int error;

        if ((u_int)signum >= NSIG)
                return (EINVAL);
        KSI_INIT(&ksi);
        ksi.ksi_signo = signum;
        ksi.ksi_code = SI_USER;
        ksi.ksi_pid = l->l_proc->p_pid;
        ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
        if (SCARG(uap, pid) > 0) {
                /* kill single process */
                if ((p = p_find(SCARG(uap, pid), PFIND_UNLOCK_FAIL)) == NULL)
                        return (ESRCH);
                mutex_enter(&p->p_mutex);
                error = kauth_authorize_process(l->l_cred,
                    KAUTH_PROCESS_CANSIGNAL, p, (void *)(uintptr_t)signum,
                    NULL, NULL);
                if (!error && signum) {
                        mutex_enter(&p->p_smutex);
                        kpsignal2(p, &ksi);
                        mutex_exit(&p->p_smutex);
                }
                mutex_exit(&p->p_mutex);
                rw_exit(&proclist_lock);
                return (error);
        }
        switch (SCARG(uap, pid)) {
        case -1:                /* broadcast signal */
                return (killpg1(l, &ksi, 0, 1));
        case 0:                 /* signal own process group */
                return (killpg1(l, &ksi, 0, 0));
        default:                /* negative explicit process group */
                return (killpg1(l, &ksi, -SCARG(uap, pid), 0));
        }
        /* NOTREACHED */
}

/*
 * Nonexistent system call: signal the process (which may want to handle
 * it), and flag an error in case the process won't see the signal
 * immediately (blocked or ignored).
 *
 * XXX This should not be here.
 */
#ifndef PTRACE
__weak_alias(sys_ptrace, sys_nosys);
#endif

/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{

        psignal(l->l_proc, SIGSYS);
        return (ENOSYS);
}

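/*
 * getcontext(2): copy the calling LWP's user context out to userland.
 */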
/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
        struct sys_getcontext_args /* {
                syscallarg(struct __ucontext *) ucp;
        } */ *uap = v;
        ucontext_t uc;

        getucontext(l, &uc);

        return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

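/*
 * setcontext(2): install a user context.  A NULL ucp marks the end of a
 * uc_link chain and causes the process to exit.
 */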
/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
        struct sys_setcontext_args /* {
                syscallarg(const ucontext_t *) ucp;
        } */ *uap = v;
        ucontext_t uc;
        int error;

        if (SCARG(uap, ucp) == NULL) {  /* i.e. end of uc_link chain */
                /* Acquire the sched state mutex.  exit1() will release it. */
                mutex_enter(&l->l_proc->p_smutex);
                exit1(l, W_EXITCODE(0, 0));
        } else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
            (error = setucontext(l, &uc)) != 0)
                return (error);

        return (EJUSTRETURN);
}

/*
 * sigtimedwait(2) system call, also used to implement sigwaitinfo()
 * and sigwait().
 *
 * This only handles a single LWP in the signal wait.  libpthread
 * provides its own sigtimedwait() wrapper to do the right thing for
 * individual threads.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{

        return __sigtimedwait1(l, v, retval, copyout, copyin, copyout);
}

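/*
 * Common backend for the sigaction(2) family: validate the trampoline
 * arguments, install the new handler, and update the process's signal
 * disposition state (ps_sigignore/ps_sigcatch and the SIGCHLD flags).
 */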
int
sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
    struct sigaction *osa, const void *tramp, int vers)
{
        struct proc *p;
        struct sigacts *ps;
        sigset_t tset;
        int prop, error;

        if (signum <= 0 || signum >= NSIG)
                return (EINVAL);

        p = l->l_proc;
        error = 0;

        /*
         * Trampoline ABI version 0 is reserved for the legacy kernel
         * provided on-stack trampoline.  Conversely, if we are using a
         * non-0 ABI version, we must have a trampoline.  Only validate the
         * vers if a new sigaction was supplied.  Emulations use the legacy
         * kernel trampoline with version 0, so check for that case as well.
         */
        if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
            (nsa != NULL &&
            ((vers == 0) ?
            (p->p_emul->e_sigcode == NULL) :
            !SIGTRAMP_VALID(vers))) ||
#endif
            (vers == 0 && tramp != NULL)) {
                return (EINVAL);
        }

        mutex_enter(&p->p_mutex);       /* p_flag */
        mutex_enter(&p->p_smutex);

        ps = p->p_sigacts;
        if (osa)
                *osa = SIGACTION_PS(ps, signum);
        if (!nsa)
                goto out;

        prop = sigprop[signum];
        if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
                error = EINVAL;
                goto out;
        }

        SIGACTION_PS(ps, signum) = *nsa;
        ps->sa_sigdesc[signum].sd_tramp = tramp;
        ps->sa_sigdesc[signum].sd_vers = vers;
        sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);

        if ((prop & SA_NORESET) != 0)
                SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;

        if (signum == SIGCHLD) {
                if (nsa->sa_flags & SA_NOCLDSTOP)
                        p->p_sflag |= PS_NOCLDSTOP;
                else
                        p->p_sflag &= ~PS_NOCLDSTOP;
                if (nsa->sa_flags & SA_NOCLDWAIT) {
                        /*
                         * Paranoia: since SA_NOCLDWAIT is implemented by
                         * reparenting the dying child to PID 1 (and trusting
                         * it to reap the zombie), PID 1 itself is forbidden
                         * to set SA_NOCLDWAIT.
                         */
                        if (p->p_pid == 1)
                                p->p_flag &= ~P_NOCLDWAIT;
                        else
                                p->p_flag |= P_NOCLDWAIT;
                } else
                        p->p_flag &= ~P_NOCLDWAIT;

                if (nsa->sa_handler == SIG_IGN) {
                        /*
                         * Paranoia: same as above.
                         */
                        if (p->p_pid == 1)
                                p->p_flag &= ~P_CLDSIGIGN;
                        else
                                p->p_flag |= P_CLDSIGIGN;
                } else
                        p->p_flag &= ~P_CLDSIGIGN;
        }

        if ((nsa->sa_flags & SA_NODEFER) == 0)
                sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
        else
                sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);

        /*
         * Set bit in p_sigctx.ps_sigignore for signals that are set to
         * SIG_IGN, and for signals set to SIG_DFL where the default is to
         * ignore.  However, don't put SIGCONT in p_sigctx.ps_sigignore, as
         * we have to restart the process.
         */
        if (nsa->sa_handler == SIG_IGN ||
            (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
                /* Never to be seen again. */
                sigemptyset(&tset);
                sigaddset(&tset, signum);
                sigclearall(p, &tset);
                if (signum != SIGCONT) {
                        /* Easier in psignal */
                        sigaddset(&p->p_sigctx.ps_sigignore, signum);
                }
                sigdelset(&p->p_sigctx.ps_sigcatch, signum);
        } else {
                sigdelset(&p->p_sigctx.ps_sigignore, signum);
                if (nsa->sa_handler == SIG_DFL)
                        sigdelset(&p->p_sigctx.ps_sigcatch, signum);
                else
                        sigaddset(&p->p_sigctx.ps_sigcatch, signum);
        }

        /*
         * Previously held signals may now have become visible.  Ensure that
         * we check for them before returning to userspace.
         */
        lwp_lock(l);
        signotify(l);
        lwp_unlock(l);
 out:
        mutex_exit(&p->p_smutex);
        mutex_exit(&p->p_mutex);

        return (error);
}

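/*
 * Common backend for sigprocmask(2): modify the calling LWP's signal mask,
 * optionally returning the old mask, and arrange for any newly unblocked
 * pending signals to be noticed on return to userspace.
 */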
int
sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
{
        struct proc *p = l->l_proc;
        int more;

        mutex_enter(&p->p_smutex);

        /*
         * If we've got pending signals that we haven't processed yet,
         * make sure that we take them before changing the mask.
         */
        if (sigispending(l)) {
                mutex_exit(&p->p_smutex);
                return ERESTART;
        }

        if (oss)
                *oss = *l->l_sigmask;
        if (nss) {
                switch (how) {
                case SIG_BLOCK:
                        sigplusset(nss, l->l_sigmask);
                        more = 0;
                        break;
                case SIG_UNBLOCK:
                        sigminusset(nss, l->l_sigmask);
                        more = 1;
                        break;
                case SIG_SETMASK:
                        *l->l_sigmask = *nss;
                        more = 1;
                        break;
                default:
                        mutex_exit(&p->p_smutex);
                        return (EINVAL);
                }
                sigminusset(&sigcantmask, l->l_sigmask);
                if (more) {
                        /*
                         * Grab signals from the per-process pending
                         * list that are now of interest to us.
                         */
                        if ((p->p_flag & P_SA) == 0)
                                sigpinch(&p->p_sigpend, l->l_sigpend,
                                    l->l_sigmask);

                        /*
                         * Check for pending signals on return to user.
                         */
                        lwp_lock(l);
                        signotify(l);
                        lwp_unlock(l);
                }
        }

        mutex_exit(&p->p_smutex);

        return (0);
}

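/*
 * Compute the set of signals reported by sigpending(2) for the calling LWP.
 */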
void
sigpending1(struct lwp *l, sigset_t *ss)
{
        struct proc *p = l->l_proc;

        mutex_enter(&p->p_smutex);
        *ss = l->l_sigpend->sp_set;
        sigminusset(l->l_sigmask, ss);
        mutex_exit(&p->p_smutex);
}

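/*
 * Common backend for sigsuspend(2): temporarily install the given mask,
 * remembering the old one so that it can be restored when the handler
 * returns, then sleep until a signal is delivered.
 */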
int
sigsuspend1(struct lwp *l, const sigset_t *ss)
{
        struct proc *p;
        struct sigacts *ps;

        p = l->l_proc;
        ps = p->p_sigacts;

        mutex_enter(&p->p_smutex);

        /*
         * If we've got pending signals that we haven't processed yet,
         * make sure that we take them before changing the mask.
         */
        if (sigispending(l)) {
                mutex_exit(&p->p_smutex);
                return ERESTART;
        }

        if (ss) {
                /*
                 * When returning from sigpause, we want the old mask to
                 * be restored after the signal handler has finished.
                 * Thus, we save it here and mark the LWP so that the
                 * mask is restored when the handler returns.
                 */
                l->l_sigoldmask = *l->l_sigmask;
                l->l_sigrestore = 1;
                *l->l_sigmask = *ss;
                sigminusset(&sigcantmask, l->l_sigmask);

                /*
                 * Pinch any signals from the per-process pending
                 * list that are now of interest to us.
                 */
                if ((p->p_flag & P_SA) == 0)
                        sigpinch(&p->p_sigpend, l->l_sigpend, l->l_sigmask);

                lwp_lock(l);
                signotify(l);
                lwp_unlock(l);
        }

        while (mtsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0,
            &p->p_smutex) == 0)
                /* void */;

        mutex_exit(&p->p_smutex);

        /* always return EINTR rather than ERESTART... */
        return (EINTR);
}

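/*
 * Common backend for sigaltstack(2): validate and install the new signal
 * stack description, optionally returning the old one.
 */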
int
sigaltstack1(struct lwp *l, const struct sigaltstack *nss,
    struct sigaltstack *oss)
{
        struct proc *p = l->l_proc;
        int error = 0;

        mutex_enter(&p->p_smutex);

        if (oss)
                *oss = *l->l_sigstk;

        if (nss) {
                if (nss->ss_flags & ~SS_ALLBITS)
                        error = EINVAL;
                else if (nss->ss_flags & SS_DISABLE) {
                        if (l->l_sigstk->ss_flags & SS_ONSTACK)
                                error = EINVAL;
                } else if (nss->ss_size < MINSIGSTKSZ)
                        error = ENOMEM;

                if (!error)
                        *l->l_sigstk = *nss;
        }

        mutex_exit(&p->p_smutex);

        return (error);
}

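/*
 * Common backend for sigtimedwait(2) and its compat variants.  The siginfo
 * and timeout structures are copied in and out through the caller-supplied
 * functions; the native system call passes plain copyin/copyout.
 */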
int
__sigtimedwait1(struct lwp *l, void *v, register_t *retval,
    copyout_t put_info, copyin_t fetch_timeout, copyout_t put_timeout)
{
        struct sys___sigtimedwait_args /* {
                syscallarg(const sigset_t *) set;
                syscallarg(siginfo_t *) info;
                syscallarg(struct timespec *) timeout;
        } */ *uap = v;
        sigset_t *waitset;
        struct proc *p = l->l_proc;
        int error, signum;
        int timo = 0;
        struct timespec ts, tsstart, tsnow;
        ksiginfo_t *ksi;

        memset(&tsstart, 0, sizeof tsstart);    /* XXX gcc */

        /*
         * Calculate timeout, if it was specified.
         */
        if (SCARG(uap, timeout)) {
                uint64_t ms;

                if ((error = (*fetch_timeout)(SCARG(uap, timeout), &ts,
                    sizeof(ts))))
                        return (error);

                ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
                timo = mstohz(ms);
                if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
                        timo = 1;
                if (timo <= 0)
                        return (EAGAIN);

                /*
                 * Remember the current uptime; it is used to recompute the
                 * remaining timeout in the ECANCELED/ERESTART case.
                 */
                getnanouptime(&tsstart);
        }

        MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK);
        if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t)))) {
                FREE(waitset, M_TEMP);
                return (error);
        }

        /*
         * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
         * SA_CANTMASK signals in waitset anyway; we strip them here only
         * for the pending-signal checks below.
         */
        sigminusset(&sigcantmask, waitset);

        /*
         * Allocate a ksi up front.  We can't sleep with the mutex held.
         */
        if ((ksi = ksiginfo_alloc(p, NULL, PR_WAITOK)) == NULL) {
                FREE(waitset, M_TEMP);
                return (ENOMEM);
        }

        mutex_enter(&p->p_smutex);

        /*
         * If we've got pending signals that we haven't processed yet,
         * make sure that we take them before waiting for new ones.
         */
        if ((error = sigispending(l)) != 0) {
                mutex_exit(&p->p_smutex);
                goto out;
        }

        /*
         * SA processes can have no more than 1 sigwaiter.
         */
        if ((p->p_flag & P_SA) != 0 && !LIST_EMPTY(&p->p_sigwaiters)) {
                mutex_exit(&p->p_smutex);
                error = EINVAL;
                goto out;
        }

        if ((signum = sigget(&p->p_sigpend, ksi, 0, waitset)) == 0)
                if ((p->p_flag & P_SA) == 0)
                        signum = sigget(l->l_sigpend, ksi, 0, waitset);

        if (signum != 0) {
                /*
                 * We found a pending signal - copy it out to the user.
                 */
                mutex_exit(&p->p_smutex);
                goto out;
        }

        /*
         * Set up the sigwait list.  Pass a pointer to the malloced memory
         * here; it's not possible to pass a pointer to a structure on the
         * current process's stack, because the current LWP might be swapped
         * out by the time the signal is delivered.
         */
        l->l_sigwaited = ksi;
        l->l_sigwait = waitset;
        LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);

        /*
         * Wait for signal to arrive.  We can either be woken up or time out.
         */
        error = mtsleep(&l->l_sigwait, PPAUSE|PCATCH, "sigwait", timo,
            &p->p_smutex);

        /*
         * Need to find out if we woke as a result of lwp_wakeup() or a
         * signal outside our wait set.
         */
        if (l->l_sigwaited != NULL) {
                if (error == EINTR) {
                        /* wakeup via _lwp_wakeup() */
                        error = ECANCELED;
                } else if (!error) {
                        /* spurious wakeup - arrange for syscall restart */
                        error = ERESTART;
                }
        }

        /*
         * Clear the sigwait indication and unlock.
         */
        l->l_sigwait = NULL;
        l->l_sigwaited = NULL;
        LIST_REMOVE(l, l_sigwaiter);
        mutex_exit(&p->p_smutex);

        /*
         * If the sleep was interrupted (either by signal or wakeup), update
         * the timeout and copy the new value back out.  It will be used if
         * the syscall is restarted or called again.
         */
        if (timo && (error == ERESTART || error == ECANCELED)) {
                getnanouptime(&tsnow);

                /* compute how much time has passed since start */
                timespecsub(&tsnow, &tsstart, &tsnow);
                /* subtract the elapsed time from the timeout */
                timespecsub(&ts, &tsnow, &ts);

                if (ts.tv_sec < 0)
                        error = EAGAIN;
                else {
                        /* copy updated timeout to userland */
                        error = (*put_timeout)(&ts, SCARG(uap, timeout),
                            sizeof(ts));
                }
        }

 out:
        FREE(waitset, M_TEMP);

        /*
         * If a signal from the wait set arrived, copy it to userland.
         * Copy only the used part of siginfo, the padding part is
         * left unchanged (userland is not supposed to touch it anyway).
         * The ksi must not be freed until after the copy.
         */
        if (error == 0)
                error = (*put_info)(&ksi->ksi_info, SCARG(uap, info),
                    sizeof(ksi->ksi_info));

        ksiginfo_free(ksi);

        return error;
}