/*	$NetBSD: kern_sig.c,v 1.148 2003/09/06 22:03:09 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.148 2003/09/06 22:03:09 christos Exp $");

#include "opt_ktrace.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd32.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/exec.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <sys/user.h>		/* for coredump */

#include <uvm/uvm_extern.h>

static void	proc_stop(struct proc *p);
static int	build_corename(struct proc *, char [MAXPATHLEN]);
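/*
 * Signal classification masks, filled in by siginit() from the
 * sigprop[] table: the signals that continue a stopped process, the
 * signals that stop a process, and the signals that can never be
 * masked (SIGKILL and SIGSTOP), respectively.
 */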
sigset_t contsigmask, stopsigmask, sigcantmask;

struct pool sigacts_pool;	/* memory pool for sigacts structures */
struct pool siginfo_pool;	/* memory pool for siginfo structures */

/*
 * Can process p, with pcred pc, send the signal signum to process q?
 */
#define	CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
	pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
	    &pool_allocator_nointr);
}
/*
 * Create an initial sigacts structure for process np, using the same
 * signal state as process pp.  If 'share' is set, share pp's sigacts;
 * otherwise copy it (or zero it if there is no parent).
 */
void
sigactsinit(struct proc *np, struct proc *pp, int share)
{
	struct sigacts *ps;

	if (share) {
		np->p_sigacts = pp->p_sigacts;
		pp->p_sigacts->sa_refcnt++;
	} else {
		ps = pool_get(&sigacts_pool, PR_WAITOK);
		if (pp)
			memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
		else
			memset(ps, '\0', sizeof(struct sigacts));
		ps->sa_refcnt = 1;
		np->p_sigacts = ps;
	}
}

/*
 * Make this process stop sharing its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	sigactsinit(p, NULL, 0);

	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}

/*
 * Release a reference to a sigacts structure, freeing it when the
 * last reference goes away.
 */
void
sigactsfree(struct proc *p)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (--ps->sa_refcnt > 0)
		return;

	pool_put(&sigacts_pool, ps);
}

int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline.  Conversely, if
	 * we are using a non-0 ABI version, we must have a
	 * trampoline.
	 */
	if ((vers != 0 && tramp == NULL) ||
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}

/* ARGSUSED */
int
sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction14_args /* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
	} */ *uap = v;
	struct proc *p;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    NULL, 0);
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction_sigtramp_args /* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
		syscallarg(void *)			tramp;
		syscallarg(int)				vers;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default and disable the signal
 * stack.
 */
void
siginit(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signum = 1; signum < NSIG; signum++) {
		prop = sigprop[signum];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signum);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signum);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signum);
		if (prop & SA_IGNORE && signum != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = 0;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	sigactsunshare(p);

	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigctx.ps_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	for (signum = 1; signum < NSIG; signum++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			prop = sigprop[signum];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signum);
				sigdelset(&p->p_sigctx.ps_siglist, signum);
			}
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = 0;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;
}
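/*
 * Common code to manipulate the process signal mask: block, unblock,
 * or replace it wholesale according to 'how', optionally returning the
 * previous mask through 'oss'.  SIGKILL and SIGSTOP (sigcantmask) can
 * never be blocked and are always stripped from the resulting mask.
 */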
int
sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigmask;

	if (nss) {
		(void)splsched();	/* XXXSMP */
		switch (how) {
		case SIG_BLOCK:
			sigplusset(nss, &p->p_sigctx.ps_sigmask);
			break;
		case SIG_UNBLOCK:
			sigminusset(nss, &p->p_sigctx.ps_sigmask);
			CHECKSIGS(p);
			break;
		case SIG_SETMASK:
			p->p_sigctx.ps_sigmask = *nss;
			CHECKSIGS(p);
			break;
		default:
			(void)spl0();	/* XXXSMP */
			return (EINVAL);
		}
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void)spl0();	/* XXXSMP */
	}

	return (0);
}

/*
 * Manipulate the signal mask.  The new mask is copied in from
 * userland and the old mask is copied back out; sigprocmask1()
 * does the actual work.
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigprocmask14_args /* {
		syscallarg(int)			how;
		syscallarg(const sigset_t *)	set;
		syscallarg(sigset_t *)		oset;
	} */ *uap = v;
	struct proc	*p;
	sigset_t	nss, oss;
	int		error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigprocmask1(p, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

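/*
 * Compute the set of signals that are pending delivery but currently
 * blocked: the pending set minus the signal mask.
 */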
void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}

/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigpending14_args /* {
		syscallarg(sigset_t *)	set;
	} */ *uap = v;
	struct proc	*p;
	sigset_t	ss;

	p = l->l_proc;
	sigpending1(p, &ss);
	return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}

int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished.  Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

/*
 * Suspend the process until a signal arrives, installing the given
 * signal mask in the meantime.  The mask is copied in from userland
 * and the previous mask is restored when signal processing completes.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigsuspend14_args /* {
		syscallarg(const sigset_t *)	set;
	} */ *uap = v;
	struct proc	*p;
	sigset_t	ss;
	int		error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return (error);
	}

	p = l->l_proc;
	return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
}

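/*
 * Install and/or fetch the alternate signal stack.  The stack cannot
 * be disabled while the process is running on it, and a new stack must
 * be at least MINSIGSTKSZ bytes.
 */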
int
sigaltstack1(struct proc *p, const struct sigaltstack *nss,
	struct sigaltstack *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			return (EINVAL);

		if (nss->ss_flags & SS_DISABLE) {
			if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
				return (EINVAL);
		} else {
			if (nss->ss_size < MINSIGSTKSZ)
				return (ENOMEM);
		}
		p->p_sigctx.ps_sigstk = *nss;
	}

	return (0);
}

/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaltstack14_args /* {
		syscallarg(const struct sigaltstack *)	nss;
		syscallarg(struct sigaltstack *)	oss;
	} */ *uap = v;
	struct proc		*p;
	struct sigaltstack	nss, oss;
	int			error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaltstack1(p,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int)	pid;
		syscallarg(int)	signum;
	} */ *uap = v;
	struct proc	*cp, *p;
	struct pcred	*pc;
	ksiginfo_t	ksi;

	cp = l->l_proc;
	pc = cp->p_cred;
	if ((u_int)SCARG(uap, signum) >= NSIG)
		return (EINVAL);
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = SCARG(uap, signum);
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = cp->p_pid;
	ksi.ksi_uid = cp->p_ucred->cr_uid;
	if (SCARG(uap, pid) > 0) {
		/* kill single process */
		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
			return (EPERM);
		if (SCARG(uap, signum))
			kpsignal(p, &ksi, NULL);
		return (0);
	}
	switch (SCARG(uap, pid)) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, &ksi, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, &ksi, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
	}
	/* NOTREACHED */
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is the calling process.
 */
int
killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc	*p;
	struct pcred	*pc;
	struct pgrp	*pgrp;
	int		nfound;
	int		signum = ksi->ksi_signo;

	pc = cp->p_cred;
	nfound = 0;
	if (all) {
		/*
		 * broadcast
		 */
		proclist_lock_read();
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == cp || !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum)
				kpsignal(p, ksi, NULL);
		}
		proclist_unlock_read();
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				kpsignal(p, ksi, NULL);
		}
	}
	return (nfound ? 0 : ESRCH);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	ksiginfo_t ksi;
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	kgsignal(pgid, &ksi, NULL);
}

void
kgsignal(int pgid, ksiginfo_t *ksi, void *data)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		kpgsignal(pgrp, ksi, data, 0);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
#ifndef __HAVE_SIGINFO
void _trapsignal(struct lwp *, ksiginfo_t *);
void
trapsignal(struct lwp *l, int signum, u_long code)
{
#define trapsignal _trapsignal
	ksiginfo_t ksi;
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	ksi.ksi_trap = (int)code;
	trapsignal(l, &ksi);
}
#endif

void
trapsignal(struct lwp *l, ksiginfo_t *ksi)
{
	struct proc	*p;
	struct sigacts	*ps;
	int signum = ksi->ksi_signo;

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
#ifdef notyet
		if (KTRPOINT(p, KTR_PSIGINFO))
			ktrpsiginfo(p, ksi, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask);
#else
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, 0);
#endif
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_siginfo = *ksi;
		p->p_sigctx.ps_lwp = l->l_lid;
		kpsignal(p, ksi, NULL);
	}
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * XXXSMP: Invoked as psignal() or sched_psignal().
 */
void
psignal1(struct proc *p, int signum, int dolock)
{
	ksiginfo_t ksi;
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	kpsignal1(p, &ksi, NULL, dolock);
}

void
kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data,
	int dolock)		/* XXXSMP: works, but icky */
{
	struct lwp *l, *suspended;
	int	s = 0, prop, allsusp;
	sig_t	action;
	int	signum = ksi->ksi_signo;

#ifdef DIAGNOSTIC
	if (signum <= 0 || signum >= NSIG)
		panic("psignal signal number %d", signum);

	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_ASSERT_UNLOCKED();
	else
		SCHED_ASSERT_LOCKED();
#endif

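	/*
	 * If a file object was supplied, scan the descriptor table to
	 * translate it into a descriptor number so that the siginfo can
	 * report which descriptor generated the signal.
	 */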
	if (data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;
		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];
			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}

	/*
	 * Notify any interested parties of the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED)
		action = SIG_DFL;
	else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum))
			return;
		if (sigismember(&p->p_sigctx.ps_sigmask, signum))
			action = SIG_HOLD;
		else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	if (prop & SA_CONT)
		sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);

	if (prop & SA_STOP)
		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);

	sigaddset(&p->p_sigctx.ps_siglist, signum);

	/* CHECKSIGS() is "inlined" here. */
	p->p_sigctx.ps_sigcheck = 1;

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if anything waits on it. If yes, clear the
	 * pending signal from siglist set, save it to ps_sigwaited,
	 * clear sigwait list, and wakeup any sigwaiters.
	 * The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0
	    && p->p_sigctx.ps_sigwaited < 0
	    && sigismember(&p->p_sigctx.ps_sigwait, signum)
	    && p->p_stat != SSTOP) {
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		p->p_sigctx.ps_sigwaited = signum;
		sigemptyset(&p->p_sigctx.ps_sigwait);

		if (dolock)
			wakeup_one(&p->p_sigctx.ps_sigwait);
		else
			sched_wakeup(&p->p_sigctx.ps_sigwait);
		return;
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_LOCK(s);

	/* XXXUPSXXX LWPs might go to sleep without passing signal handling */
	if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)
	    && !((p->p_flag & P_SA) && (p->p_sa->sa_idle != NULL))) {
		/*
		 * At least one LWP is running or on a run queue.
		 * The signal will be noticed when one of them returns
		 * to userspace.
		 */
		signotify(p);
		/*
		 * The signal will be noticed very soon.
		 */
		goto out;
	} else {
		/* Process is sleeping or stopped */
		if (p->p_flag & P_SA) {
			struct lwp *l2 = p->p_sa->sa_vp;
			l = NULL;
			allsusp = 1;

			if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
				l = l2;
			else if (l2->l_stat == LSSUSPENDED)
				suspended = l2;
			else if ((l2->l_stat != LSZOMB) &&
			    (l2->l_stat != LSDEAD))
				allsusp = 0;
		} else {
			/*
			 * Find out if any of the sleeps are interruptible,
			 * and if all the live LWPs remaining are suspended.
			 */
			allsusp = 1;
			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if (l->l_stat == LSSLEEP &&
				    l->l_flag & L_SINTR)
					break;
				if (l->l_stat == LSSUSPENDED)
					suspended = l;
				else if ((l->l_stat != LSZOMB) &&
				    (l->l_stat != LSDEAD))
					allsusp = 0;
			}
		}
		if (p->p_stat == SACTIVE) {

			if (l != NULL && (p->p_flag & P_TRACED))
				goto run;

			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				goto out;
			}

			/*
			 * When a sleeping process receives a stop
			 * signal, process immediately if possible.
			 */
			if ((prop & SA_STOP) && action == SIG_DFL) {
				/*
				 * A vforked child holding its parent
				 * blocked would deadlock if stopped,
				 * so don't stop it.
				 */
				if (p->p_flag & P_PPWAIT)
					goto out;
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
					/*
					 * XXXSMP: recursive call; don't lock
					 * the second time around.
					 */
					sched_psignal(p->p_pptr, SIGCHLD);
				}
				proc_stop(p);	/* XXXSMP: recurse? */
				goto out;
			}

			if (l == NULL) {
				/*
				 * Special case: SIGKILL of a process
				 * which is entirely composed of
				 * suspended LWPs should succeed. We
				 * make this happen by unsuspending one of
				 * them.
				 */
				if (allsusp && (signum == SIGKILL))
					lwp_continue(suspended);
				goto out;
			}
			/*
			 * All other (caught or default) signals
			 * cause the process to run.
			 */
			goto runfast;
			/*NOTREACHED*/
		} else if (p->p_stat == SSTOP) {
			/* Process is stopped */
			/*
			 * If traced process is already stopped,
			 * then no further action is necessary.
			 */
			if (p->p_flag & P_TRACED)
				goto out;

			/*
			 * Kill signal always sets processes running,
			 * if possible.
			 */
			if (signum == SIGKILL) {
				l = proc_unstop(p);
				if (l)
					goto runfast;
				goto out;
			}

			if (prop & SA_CONT) {
				/*
				 * If SIGCONT is default (or ignored),
				 * we continue the process but don't
				 * leave the signal in ps_siglist, as
				 * it has no further action.  If
				 * SIGCONT is held, we continue the
				 * process and leave the signal in
				 * ps_siglist.  If the process catches
				 * SIGCONT, let it handle the signal
				 * itself.  If it isn't waiting on an
				 * event, then it goes back to run
				 * state.  Otherwise, process goes
				 * back to sleep state.
				 */
				if (action == SIG_DFL)
					sigdelset(&p->p_sigctx.ps_siglist,
					    signum);
				l = proc_unstop(p);
				if (l && (action == SIG_CATCH))
					goto runfast;
				goto out;
			}

			if (prop & SA_STOP) {
				/*
				 * Already stopped, don't need to stop again.
				 * (If we did the shell could get confused.)
				 */
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				goto out;
			}

			/*
			 * If a lwp is sleeping interruptibly, then
			 * wake it up; it will run until the kernel
			 * boundary, where it will stop in issignal(),
			 * since p->p_stat is still SSTOP. When the
			 * process is continued, it will be made
			 * runnable and can look at the signal.
			 */
			if (l)
				goto run;
			goto out;
		} else {
			/* Else what? */
			panic("psignal: Invalid process state %d.",
			    p->p_stat);
		}
	}
	/*NOTREACHED*/

 runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (l->l_priority > PUSER)
		l->l_priority = PUSER;
 run:
	setrunnable(l);		/* XXXSMP: recurse? */
 out:
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_UNLOCK(s);
}

void
kpsendsig(struct lwp *l, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		int s = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = *ksi;
		le = li = NULL;
		if (ksi->ksi_trap)
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= s;
		return;
	}

#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, ksi->ksi_trap);
#endif
}

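/*
 * Return the lowest-numbered signal in the given set, or 0 if the set
 * is empty.  The set is scanned one 32-bit word at a time with ffs().
 */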
static __inline int firstsig(const sigset_t *);

static __inline int
firstsig(const sigset_t *ss)
{
	int sig;

	sig = ffs(ss->__bits[0]);
	if (sig != 0)
		return (sig);
#if NSIG > 33
	sig = ffs(ss->__bits[1]);
	if (sig != 0)
		return (sig + 32);
#endif
#if NSIG > 65
	sig = ffs(ss->__bits[2]);
	if (sig != 0)
		return (sig + 64);
#endif
#if NSIG > 97
	sig = ffs(ss->__bits[3]);
	if (sig != 0)
		return (sig + 96);
#endif
	return (0);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curlwp))
 *		postsig(signum);
 */
int
issignal(struct lwp *l)
{
	struct proc	*p = l->l_proc;
	int		s = 0, signum, prop;
	int		dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t	ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			if ((p->p_flag & P_FSTRACE) == 0)
				psignal1(p->p_pptr, SIGCHLD, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					psignal1(p->p_pptr, SIGCHLD, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
			sigswitch:
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	p->p_flag &= ~P_WAITED;

	/*
	 * Put as many LWPs as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		else if ((l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
			setrunnable(l);
		}

		/* !!!UPS!!! FIX ME */
#if 0
		else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			    "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
#endif
	}
	/* XXX unlock process LWP state */

	sched_wakeup((caddr_t)p->p_pptr);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 *
 * If no LWPs ended up runnable (and therefore able to take a signal),
 * return an LWP that is sleeping interruptibly.  The caller can wake
 * that LWP up to take a signal.
 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptible LWPs left after we have run, so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptible LWPs if this is the case.
	 */

	p->p_stat = SACTIVE;
	if (p->p_flag & P_SA) {
		/*
		 * Preferentially select the idle LWP as the interruptible
		 * LWP to return if it exists.
		 */
		lr = p->p_sa->sa_idle;
		if (lr != NULL)
			cantake = 1;
	}
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}

	return lr;
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc	*p;
	struct sigacts	*ps;
	sig_t		action;
	sigset_t	*returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG))
		ktrpsig(p,
		    signum, action, p->p_sigctx.ps_flags & SAS_OLDMASK ?
		    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask, 0);
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		if (p->p_sigctx.ps_siginfo.ksi_signo != signum) {
			memset(&ksi, 0, sizeof(ksi));
			ksi.ksi_signo = signum;
		} else {
			ksi = p->p_sigctx.ps_siginfo;
			memset(&p->p_sigctx.ps_siginfo, 0,
			    sizeof(p->p_sigctx.ps_siginfo));
			p->p_sigctx.ps_lwp = 0;
		}
		kpsendsig(l, &ksi, returnmask);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(struct proc *p, const char *why)
{
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */

#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

/* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	lwp_exit(l);
}

void
sigexit(struct lwp *l, int signum)
{
	struct proc	*p;
#if 0
	struct lwp	*l2;
#endif
	int		error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_siginfo.ksi_signo = signum;
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
			    (int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}
	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

/*
 * Dump core, into a file named according to the process's core name
 * template (see build_corename()), unless the process was set-id.
 */
int
coredump(struct lwp *l)
{
	struct vnode		*vp;
	struct proc		*p;
	struct vmspace		*vm;
	struct ucred		*cred;
	struct nameidata	nd;
	struct vattr		vattr;
	int			error, error1;
	char			name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process is not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call: signal the process (it may want to handle it).
 * Flag the error in case the process won't see the signal immediately
 * (blocked or ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p;

	p = l->l_proc;
	psignal(p, SIGSYS);
	return (ENOSYS);
}

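/*
 * Expand the per-process core name template (pl_corename) into 'dst'.
 * The following format directives are recognized:
 *	%n	process name (p_comm)
 *	%p	process ID
 *	%u	login name of the session
 *	%t	process start time, in seconds
 * Any other character, including an unrecognized directive, is copied
 * through verbatim.  Returns ENAMETOOLONG if the result would not fit.
 */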
static int
build_corename(struct proc *p, char dst[MAXPATHLEN])
{
	const char	*s;
	char		*d, *end;
	int		i;

	for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
	    *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				goto copy;
			}
			d += i;
			s++;
		} else {
copy:			*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc	*p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}

/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getcontext_args /* {
		syscallarg(struct __ucontext *) ucp;
	} */ *uap = v;
	ucontext_t uc;

	getucontext(l, &uc);

	return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc	*p;
	int		error;

	p = l->l_proc;
	if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
		return (error);
	l->l_ctxlink = ucp->uc_link;
	/*
	 * We might want to take care of the stack portion here but currently
	 * don't; see the comment in getucontext().
	 */
	if ((ucp->uc_flags & _UC_SIGMASK) != 0)
		sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);

	return 0;
}

/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setcontext_args /* {
		syscallarg(const ucontext_t *) ucp;
	} */ *uap = v;
	ucontext_t uc;
	int error;

	if (SCARG(uap, ucp) == NULL)	/* i.e. end of uc_link chain */
		exit1(l, W_EXITCODE(0, 0));
	else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
	    (error = setucontext(l, &uc)) != 0)
		return (error);

	return (EJUSTRETURN);
}

/*
 * sigtimedwait(2) system call, used also for implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles a single LWP in signal wait.  libpthread provides
 * its own sigtimedwait() wrapper to DTRT WRT individual threads.
 *
 * XXX no support for queued signals, si_code is always SI_USER.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t	waitset, twaitset;
	struct proc	*p = l->l_proc;
	int		error, signum, s;
	int		timo = 0;
	struct timeval	tvstart;
	struct timespec	ts;

	if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
		return (error);

	/*
	 * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
	 * SA_CANTMASK signals in waitset anyway; we strip them here only
	 * for the siglist check below.
	 */
	sigminusset(&sigcantmask, &waitset);

	/*
	 * First scan siglist and check if there is a signal from
	 * our waitset already pending.
	 */
	twaitset = waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
			return (error);

		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return (EAGAIN);

		/*
		 * Remember the current mono_time; it will be used in
		 * the ECANCELED/ERESTART case.
		 */
		s = splclock();
		tvstart = mono_time;
		splx(s);
	}

	/*
	 * Setup ps_sigwait list.
	 */
	p->p_sigctx.ps_sigwaited = -1;
	p->p_sigctx.ps_sigwait = waitset;

	/*
	 * Wait for a signal to arrive. We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Check if a signal from our wait set has arrived, or if it
	 * was a mere wakeup.
	 */
	if (!error) {
		if ((signum = p->p_sigctx.ps_sigwaited) <= 0) {
			/* wakeup via _lwp_wakeup() */
			error = ECANCELED;
		}
	}

	/*
	 * On error, clear the sigwait indication.  psignal1() sets it
	 * in the !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = 0;

		/*
		 * If the sleep was interrupted (either by signal or
		 * wakeup), update the timeout and copy the new value
		 * back out.  It will be used if the syscall is restarted
		 * or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timeval tvnow, tvtimo;
			int err;

			s = splclock();
			tvnow = mono_time;
			splx(s);

			TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);

			/* compute how much time has passed since start */
			timersub(&tvnow, &tvstart, &tvnow);
			/* subtract passed time from timeout */
			timersub(&tvtimo, &tvnow, &tvtimo);

			if (tvtimo.tv_sec < 0)
				return (EAGAIN);

			TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);

			/* copy updated timeout to userland */
			if ((err = copyout(&ts, SCARG(uap, timeout),
			    sizeof(ts))))
				return (err);
		}

		return (error);
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * XXX no queued signals for now
	 */
	if (signum > 0) {
		siginfo_t si;

 sig:
		memset(&si, 0, sizeof(si));
		si.si_signo = signum;
		si.si_code = SI_USER;

		error = copyout(&si, SCARG(uap, info), sizeof(si));
		if (error)
			return (error);
	}

	return (0);
}

/*
 * Returns true if the signal is ignored or masked for the given process.
 */
int
sigismasked(struct proc *p, int sig)
{

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&p->p_sigctx.ps_sigmask, sig));
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};