/* $NetBSD: sys_sig.c,v 1.1.2.4 2006/11/18 21:39:23 ad Exp $ */

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_sig.c  8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_sig.c,v 1.1.2.4 2006/11/18 21:39:23 ad Exp $");

#include "opt_ptrace.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/wait.h>

#ifdef COMPAT_16
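/*
 * Old sigaction(2) entry point, kept for COMPAT_16 binaries: copy
 * struct sigaction in/out and call sigaction1() with the legacy
 * (version 0) kernel-provided signal trampoline.
 */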
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
        struct compat_16_sys___sigaction14_args /* {
                syscallarg(int) signum;
                syscallarg(const struct sigaction *) nsa;
                syscallarg(struct sigaction *) osa;
        } */ *uap = v;
        struct sigaction nsa, osa;
        int error;

        if (SCARG(uap, nsa)) {
                error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
                if (error)
                        return (error);
        }
        error = sigaction1(l, SCARG(uap, signum),
            SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
            NULL, 0);
        if (error)
                return (error);
        if (SCARG(uap, osa)) {
                error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
                if (error)
                        return (error);
        }
        return (0);
}
#endif

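/*
 * sigaction(2) entry point that also takes a user-supplied signal
 * trampoline and ABI version: copy struct sigaction in/out and defer
 * to sigaction1() for validation and installation.
 */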
/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigaction_sigtramp_args /* {
                syscallarg(int) signum;
                syscallarg(const struct sigaction *) nsa;
                syscallarg(struct sigaction *) osa;
                syscallarg(void *) tramp;
                syscallarg(int) vers;
        } */ *uap = v;
        struct sigaction nsa, osa;
        int error;

        if (SCARG(uap, nsa)) {
                error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
                if (error)
                        return (error);
        }
        error = sigaction1(l, SCARG(uap, signum),
            SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
            SCARG(uap, tramp), SCARG(uap, vers));
        if (error)
                return (error);
        if (SCARG(uap, osa)) {
                error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
                if (error)
                        return (error);
        }
        return (0);
}

/*
 * Manipulate the signal mask (sigprocmask(2)).  Copy in the new mask
 * if one was supplied, apply it via sigprocmask1(), and copy the old
 * mask back out if requested.
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigprocmask14_args /* {
                syscallarg(int) how;
                syscallarg(const sigset_t *) set;
                syscallarg(sigset_t *) oset;
        } */ *uap = v;
        sigset_t nss, oss;
        int error;

        if (SCARG(uap, set)) {
                error = copyin(SCARG(uap, set), &nss, sizeof(nss));
                if (error)
                        return (error);
        }
        error = sigprocmask1(l, SCARG(uap, how),
            SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
        if (error)
                return (error);
        if (SCARG(uap, oset)) {
                error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
                if (error)
                        return (error);
        }
        return (0);
}

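/*
 * Report the set of signals pending delivery to the calling LWP.
 */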
/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigpending14_args /* {
                syscallarg(sigset_t *) set;
        } */ *uap = v;
        sigset_t ss;

        sigpending1(l, &ss);
        return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}

/*
 * Suspend the process until a signal is delivered, installing the
 * given signal mask in the meantime.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigsuspend14_args /* {
                syscallarg(const sigset_t *) set;
        } */ *uap = v;
        sigset_t ss;
        int error;

        if (SCARG(uap, set)) {
                error = copyin(SCARG(uap, set), &ss, sizeof(ss));
                if (error)
                        return (error);
        }

        return (sigsuspend1(l, SCARG(uap, set) ? &ss : 0));
}

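/*
 * sigaltstack(2): get and/or set the signal stack, via sigaltstack1().
 */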
/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
        struct sys___sigaltstack14_args /* {
                syscallarg(const struct sigaltstack *) nss;
                syscallarg(struct sigaltstack *) oss;
        } */ *uap = v;
        struct sigaltstack nss, oss;
        int error;

        if (SCARG(uap, nss)) {
                error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
                if (error)
                        return (error);
        }
        error = sigaltstack1(l,
            SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
        if (error)
                return (error);
        if (SCARG(uap, oss)) {
                error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
                if (error)
                        return (error);
        }
        return (0);
}

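/*
 * kill(2): send a signal to a process or process group.  A positive
 * pid names a single process (subject to a kauth(9) CANSIGNAL check);
 * pid 0, -1 and negative values are handed to killpg1().
 */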
/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
        struct sys_kill_args /* {
                syscallarg(int) pid;
                syscallarg(int) signum;
        } */ *uap = v;
        struct proc *p;
        ksiginfo_t ksi;
        int signum = SCARG(uap, signum);
        int error;

        if ((u_int)signum >= NSIG)
                return (EINVAL);
        KSI_INIT(&ksi);
        ksi.ksi_signo = signum;
        ksi.ksi_code = SI_USER;
        ksi.ksi_pid = l->l_proc->p_pid;
        ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
        if (SCARG(uap, pid) > 0) {
                /* kill single process */
                if ((p = p_find(SCARG(uap, pid), PFIND_UNLOCK_FAIL)) == NULL)
                        return (ESRCH);
                mutex_enter(&p->p_mutex);
                error = kauth_authorize_process(l->l_cred,
                    KAUTH_PROCESS_CANSIGNAL, p, (void *)(uintptr_t)signum,
                    NULL, NULL);
                if (!error && signum) {
                        mutex_enter(&p->p_smutex);
                        kpsignal2(p, &ksi);
                        mutex_exit(&p->p_smutex);
                }
                mutex_exit(&p->p_mutex);
                rw_exit(&proclist_lock);
                /* Report a permission error from the kauth check, if any. */
                return (error);
        }
        switch (SCARG(uap, pid)) {
        case -1:                /* broadcast signal */
                return (killpg1(l, &ksi, 0, 1));
        case 0:                 /* signal own process group */
                return (killpg1(l, &ksi, 0, 0));
        default:                /* negative explicit process group */
                return (killpg1(l, &ksi, -SCARG(uap, pid), 0));
        }
        /* NOTREACHED */
}

/*
 * Nonexistent system call -- signal the process (may want to handle it).
 * Flag an error in case the process won't see the signal immediately
 * (blocked or ignored).
 *
 * XXX This should not be here.
 */
#ifndef PTRACE
__weak_alias(sys_ptrace, sys_nosys);
#endif

/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{

        psignal(l->l_proc, SIGSYS);
        return (ENOSYS);
}

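/*
 * getcontext(2): copy out the calling LWP's current user context.
 */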
/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
        struct sys_getcontext_args /* {
                syscallarg(struct __ucontext *) ucp;
        } */ *uap = v;
        ucontext_t uc;

        getucontext(l, &uc);

        return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

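/*
 * setcontext(2): copy in a user context and make it the calling LWP's
 * current context.  The context must carry CPU state (_UC_CPU); on
 * success EJUSTRETURN is returned because the register state has been
 * replaced.
 */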
/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
        struct sys_setcontext_args /* {
                syscallarg(const ucontext_t *) ucp;
        } */ *uap = v;
        ucontext_t uc;
        int error;

        error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
        if (error)
                return (error);
        if (!(uc.uc_flags & _UC_CPU))
                return (EINVAL);
        error = setucontext(l, &uc);
        if (error)
                return (error);

        return (EJUSTRETURN);
}

/*
 * sigtimedwait(2) system call, also used to implement sigwaitinfo()
 * and sigwait().
 *
 * This only handles a single LWP in signal wait.  libpthread provides
 * its own sigtimedwait() wrapper to do the right thing for individual
 * threads.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{

        return __sigtimedwait1(l, v, retval, copyout, copyin, copyout);
}

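/*
 * sigaction1: common code for setting a signal action.  Validate the
 * trampoline arguments, install the new action, and update the
 * process's ignore/catch sets and SIGCHLD-related flags.
 */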
int
sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
    struct sigaction *osa, const void *tramp, int vers)
{
        struct proc *p;
        struct sigacts *ps;
        sigset_t tset;
        int prop, error;

        if (signum <= 0 || signum >= NSIG)
                return (EINVAL);

        p = l->l_proc;
        error = 0;

        /*
         * Trampoline ABI version 0 is reserved for the legacy kernel
         * provided on-stack trampoline.  Conversely, if we are using a
         * non-0 ABI version, we must have a trampoline.  Only validate
         * the vers if a new sigaction was supplied.  Emulations use
         * legacy kernel trampolines with version 0; in that case, check
         * that the emulation actually provides one.
         */
        if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
            (nsa != NULL &&
            ((vers == 0) ?
            (p->p_emul->e_sigcode == NULL) :
            !SIGTRAMP_VALID(vers))) ||
#endif
            (vers == 0 && tramp != NULL)) {
                return (EINVAL);
        }


        mutex_enter(&p->p_mutex);       /* p_flag */
        mutex_enter(&p->p_smutex);

        ps = p->p_sigacts;
        if (osa)
                *osa = SIGACTION_PS(ps, signum);
        if (!nsa)
                goto out;

        prop = sigprop[signum];
        if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
                error = EINVAL;
                goto out;
        }

        SIGACTION_PS(ps, signum) = *nsa;
        ps->sa_sigdesc[signum].sd_tramp = tramp;
        ps->sa_sigdesc[signum].sd_vers = vers;
        sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);

        if ((prop & SA_NORESET) != 0)
                SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;

        if (signum == SIGCHLD) {
                if (nsa->sa_flags & SA_NOCLDSTOP)
                        p->p_sflag |= PS_NOCLDSTOP;
                else
                        p->p_sflag &= ~PS_NOCLDSTOP;
                if (nsa->sa_flags & SA_NOCLDWAIT) {
                        /*
                         * Paranoia: since SA_NOCLDWAIT is implemented by
                         * reparenting the dying child to PID 1 (and trusting
                         * it to reap the zombie), PID 1 itself is forbidden
                         * to set SA_NOCLDWAIT.
                         */
                        if (p->p_pid == 1)
                                p->p_flag &= ~P_NOCLDWAIT;
                        else
                                p->p_flag |= P_NOCLDWAIT;
                } else
                        p->p_flag &= ~P_NOCLDWAIT;

                if (nsa->sa_handler == SIG_IGN) {
                        /*
                         * Paranoia: same as above.
                         */
                        if (p->p_pid == 1)
                                p->p_flag &= ~P_CLDSIGIGN;
                        else
                                p->p_flag |= P_CLDSIGIGN;
                } else
                        p->p_flag &= ~P_CLDSIGIGN;
        }

        if ((nsa->sa_flags & SA_NODEFER) == 0)
                sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
        else
                sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);

        /*
         * Set bit in p_sigctx.ps_sigignore for signals that are set to
         * SIG_IGN, and for signals set to SIG_DFL where the default is to
         * ignore.  However, don't put SIGCONT in p_sigctx.ps_sigignore, as
         * we have to restart the process.
         */
        if (nsa->sa_handler == SIG_IGN ||
            (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
                /* Never to be seen again. */
                sigemptyset(&tset);
                sigaddset(&tset, signum);
                sigclearall(p, &tset);
                if (signum != SIGCONT) {
                        /* Easier in psignal */
                        sigaddset(&p->p_sigctx.ps_sigignore, signum);
                }
                sigdelset(&p->p_sigctx.ps_sigcatch, signum);
        } else {
                sigdelset(&p->p_sigctx.ps_sigignore, signum);
                if (nsa->sa_handler == SIG_DFL)
                        sigdelset(&p->p_sigctx.ps_sigcatch, signum);
                else
                        sigaddset(&p->p_sigctx.ps_sigcatch, signum);
        }

        /*
         * Previously held signals may now have become visible.  Ensure that
         * we check for them before returning to userspace.
         */
        lwp_lock(l);
        signotify(l);
        lwp_unlock(l);
 out:
        mutex_exit(&p->p_smutex);
        mutex_exit(&p->p_mutex);

        return (error);
}

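/*
 * sigprocmask1: common code for sigprocmask(2).  Modify the calling
 * LWP's signal mask and pull in any newly unblocked signals that are
 * pending against the process.
 */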
int
sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
{
        struct proc *p = l->l_proc;
        int more;

        mutex_enter(&p->p_smutex);

        /*
         * If we've got pending signals that we haven't processed yet,
         * make sure that we take them before changing the mask.
         */
        if (sigispending(l)) {
                mutex_exit(&p->p_smutex);
                return ERESTART;
        }

        if (oss)
                *oss = *l->l_sigmask;
        if (nss) {
                switch (how) {
                case SIG_BLOCK:
                        sigplusset(nss, l->l_sigmask);
                        more = 0;
                        break;
                case SIG_UNBLOCK:
                        sigminusset(nss, l->l_sigmask);
                        more = 1;
                        break;
                case SIG_SETMASK:
                        *l->l_sigmask = *nss;
                        more = 1;
                        break;
                default:
                        mutex_exit(&p->p_smutex);
                        return (EINVAL);
                }
                sigminusset(&sigcantmask, l->l_sigmask);
                if (more) {
                        /*
                         * Grab signals from the per-process pending
                         * list that are now of interest to us.
                         */
                        if ((p->p_flag & P_SA) == 0)
                                sigpinch(&p->p_sigpend, l->l_sigpend,
                                    l->l_sigmask);

                        /*
                         * Check for pending signals on return to user.
                         */
                        lwp_lock(l);
                        signotify(l);
                        lwp_unlock(l);
                }
        }

        mutex_exit(&p->p_smutex);

        return (0);
}

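/*
 * sigpending1: return the set of signals pending against the calling
 * LWP, less those that are currently blocked.
 */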
void
sigpending1(struct lwp *l, sigset_t *ss)
{
        struct proc *p = l->l_proc;

        mutex_enter(&p->p_smutex);
        *ss = l->l_sigpend->sp_set;
        sigminusset(l->l_sigmask, ss);
        mutex_exit(&p->p_smutex);
}

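/*
 * sigsuspend1: common code for sigsuspend(2).  Install the given
 * signal mask, arrange for the previous mask to be restored when the
 * signal handler returns, and sleep until a signal is delivered.
 * Always returns EINTR.
 */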
int
sigsuspend1(struct lwp *l, const sigset_t *ss)
{
        struct proc *p;
        struct sigacts *ps;

        p = l->l_proc;
        ps = p->p_sigacts;

        mutex_enter(&p->p_smutex);

        /*
         * If we've got pending signals that we haven't processed yet,
         * make sure that we take them before changing the mask.
         */
        if (sigispending(l)) {
                mutex_exit(&p->p_smutex);
                return ERESTART;
        }

        if (ss) {
                /*
                 * When returning from sigpause, we want the old mask to
                 * be restored after the signal handler has finished.
                 * Thus, we save it here and mark the LWP to indicate
                 * this.
                 */
                l->l_sigoldmask = *l->l_sigmask;
                l->l_sigrestore = 1;
                *l->l_sigmask = *ss;
                sigminusset(&sigcantmask, l->l_sigmask);

                /*
                 * Pinch any signals from the per-process pending
                 * list that are now of interest to us.
                 */
                if ((p->p_flag & P_SA) == 0)
                        sigpinch(&p->p_sigpend, l->l_sigpend, l->l_sigmask);

                lwp_lock(l);
                signotify(l);
                lwp_unlock(l);
        }

        while (mtsleep((caddr_t)ps, PPAUSE|PCATCH, "pause", 0,
            &p->p_smutex) == 0)
                /* void */;

        mutex_exit(&p->p_smutex);

        /* always return EINTR rather than ERESTART... */
        return (EINTR);
}

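/*
 * sigaltstack1: common code for sigaltstack(2).  Validate and install
 * the new signal stack and/or report the current one.
 */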
int
sigaltstack1(struct lwp *l, const struct sigaltstack *nss,
    struct sigaltstack *oss)
{
        struct proc *p = l->l_proc;
        int error = 0;

        mutex_enter(&p->p_smutex);

        if (oss)
                *oss = *l->l_sigstk;

        if (nss) {
                if (nss->ss_flags & ~SS_ALLBITS)
                        error = EINVAL;
                else if (nss->ss_flags & SS_DISABLE) {
                        if (l->l_sigstk->ss_flags & SS_ONSTACK)
                                error = EINVAL;
                } else if (nss->ss_size < MINSIGSTKSZ)
                        error = ENOMEM;

                if (!error)
                        *l->l_sigstk = *nss;
        }

        mutex_exit(&p->p_smutex);

        return (error);
}

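/*
 * __sigtimedwait1: common code for sigtimedwait(2) and its
 * compatibility variants.  Wait for one of the signals in the given
 * set to become pending, or until the optional timeout expires, and
 * copy the corresponding siginfo out to userland.
 */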
int
__sigtimedwait1(struct lwp *l, void *v, register_t *retval,
    copyout_t put_info, copyin_t fetch_timeout, copyout_t put_timeout)
{
        struct sys___sigtimedwait_args /* {
                syscallarg(const sigset_t *) set;
                syscallarg(siginfo_t *) info;
                syscallarg(struct timespec *) timeout;
        } */ *uap = v;
        sigset_t *waitset;
        struct proc *p = l->l_proc;
        int error, signum;
        int timo = 0;
        struct timespec ts, tsstart, tsnow;
        ksiginfo_t *ksi;

        memset(&tsstart, 0, sizeof tsstart);    /* XXX gcc */

        /*
         * Calculate timeout, if it was specified.
         */
        if (SCARG(uap, timeout)) {
                uint64_t ms;

                if ((error = (*fetch_timeout)(SCARG(uap, timeout), &ts,
                    sizeof(ts))))
                        return (error);

                ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
                timo = mstohz(ms);
                if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
                        timo = 1;
                if (timo <= 0)
                        return (EAGAIN);

                /*
                 * Remember the current uptime; it will be used in the
                 * ECANCELED/ERESTART case.
                 */
                getnanouptime(&tsstart);
        }

        MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK);
        if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t)))) {
                FREE(waitset, M_TEMP);
                return (error);
        }

        /*
         * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
         * SA_CANTMASK signals in waitset anyway; we remove them here only
         * for the pending-signal check below.
         */
        sigminusset(&sigcantmask, waitset);

        /*
         * Allocate a ksi up front.  We can't sleep with the mutex held.
         */
        if ((ksi = ksiginfo_alloc(p, NULL, PR_WAITOK)) == NULL) {
                FREE(waitset, M_TEMP);
                return (ENOMEM);
        }

        mutex_enter(&p->p_smutex);

        /*
         * If we've got pending signals that we haven't processed yet,
         * make sure that we take them before changing the mask.
         */
        if ((error = sigispending(l)) != 0) {
                mutex_exit(&p->p_smutex);
                goto out;
        }

        /*
         * SA processes can have no more than 1 sigwaiter.
         */
        if ((p->p_flag & P_SA) != 0 && !LIST_EMPTY(&p->p_sigwaiters)) {
                mutex_exit(&p->p_smutex);
                error = EINVAL;
                goto out;
        }

        if ((signum = sigget(&p->p_sigpend, ksi, 0, waitset)) == 0)
                if ((p->p_flag & P_SA) == 0)
                        signum = sigget(l->l_sigpend, ksi, 0, waitset);

        if (signum != 0) {
                /*
                 * We found a pending signal - copy it out to the user.
                 */
                mutex_exit(&p->p_smutex);
                goto out;
        }

        /*
         * Set up the sigwait list.  Pass a pointer to the malloced memory
         * here; it is not possible to pass a pointer to a structure on the
         * current process's stack, because the current LWP might be swapped
         * out when the signal is delivered.
         */
        l->l_sigwaited = ksi;
        l->l_sigwait = waitset;
        LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);

        /*
         * Wait for signal to arrive.  We can either be woken up or time out.
         */
        error = mtsleep(&l->l_sigwait, PPAUSE|PCATCH, "sigwait", timo,
            &p->p_smutex);

        /*
         * Need to find out if we woke as a result of lwp_wakeup() or a
         * signal outside our wait set.
         */
        if (l->l_sigwaited != NULL) {
                if (error == EINTR) {
                        /* wakeup via _lwp_wakeup() */
                        error = ECANCELED;
                } else if (!error) {
                        /* spurious wakeup - arrange for syscall restart */
                        error = ERESTART;
                }
        }

        /*
         * Clear the sigwait indication and unlock.
         */
        l->l_sigwait = NULL;
        l->l_sigwaited = NULL;
        LIST_REMOVE(l, l_sigwaiter);
        mutex_exit(&p->p_smutex);

        /*
         * If the sleep was interrupted (either by a signal or a wakeup),
         * update the timeout and copy the new value back out.  It will be
         * used if the syscall is restarted or called again.
         */
        if (timo && (error == ERESTART || error == ECANCELED)) {
                getnanouptime(&tsnow);

                /* compute how much time has passed since start */
                timespecsub(&tsnow, &tsstart, &tsnow);
                /* subtract the elapsed time from the timeout */
                timespecsub(&ts, &tsnow, &ts);

                if (ts.tv_sec < 0)
                        error = EAGAIN;
                else {
                        /* copy the updated timeout to userland */
                        error = (*put_timeout)(&ts, SCARG(uap, timeout),
                            sizeof(ts));
                }
        }

        /*
         * If a signal from the wait set arrived, copy it to userland.
         * Copy only the used part of siginfo; the padding part is
         * left unchanged (userland is not supposed to touch it anyway).
         */
 out:
        if (error == 0)
                error = (*put_info)(&ksi->ksi_info, SCARG(uap, info),
                    sizeof(ksi->ksi_info));

        /* Free the wait set and the ksiginfo only after the copyout. */
        FREE(waitset, M_TEMP);
        ksiginfo_free(ksi);

        return error;
}