/*	$NetBSD: kern_sig.c,v 1.150 2003/09/11 01:32:09 cl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.150 2003/09/11 01:32:09 cl Exp $");

#include "opt_ktrace.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd32.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/exec.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <sys/user.h>		/* for coredump */

#include <uvm/uvm_extern.h>

static void	proc_stop(struct proc *p);
static int	build_corename(struct proc *, char [MAXPATHLEN]);
sigset_t	contsigmask, stopsigmask, sigcantmask;

struct pool	sigacts_pool;	/* memory pool for sigacts structures */
struct pool	siginfo_pool;	/* memory pool for siginfo structures */

/*
 * Can process p, with pcred pc, send the signal signum to process q?
 */
#define	CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
	pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
	    &pool_allocator_nointr);
}

/*
 * Create an initial sigctx structure, using the same signal state
 * as p.  If 'share' is set, share the sigctx_proc part, otherwise just
 * copy it from parent.
 */
void
sigactsinit(struct proc *np, struct proc *pp, int share)
{
	struct sigacts *ps;

	if (share) {
		np->p_sigacts = pp->p_sigacts;
		pp->p_sigacts->sa_refcnt++;
	} else {
		ps = pool_get(&sigacts_pool, PR_WAITOK);
		if (pp)
			memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
		else
			memset(ps, '\0', sizeof(struct sigacts));
		ps->sa_refcnt = 1;
		np->p_sigacts = ps;
	}
}

/*
 * Make this process not share its sigctx, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	sigactsinit(p, NULL, 0);

	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}

/*
 * Release a sigctx structure.
 */
void
sigactsfree(struct proc *p)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (--ps->sa_refcnt > 0)
		return;

	pool_put(&sigacts_pool, ps);
}

int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline.  Conversely, if
	 * we are using a non-0 ABI version, we must have a
	 * trampoline.
	 */
	if ((vers != 0 && tramp == NULL) ||
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

#ifndef __HAVE_SIGINFO
		if (nsa->sa_flags & SA_SIGINFO)
			return (EINVAL);
#endif

		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore.  However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}

/* ARGSUSED */
int
sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction14_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct proc *p;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    NULL, 0);
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}
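
/*
 * Illustrative userland sketch (kept under "#if 0", not part of this
 * kernel source): how the sigaction1()/sys___sigaction14() path above
 * is reached through the standard sigaction(2) stub.  The handler name
 * and the choice of SIGINT are arbitrary example values.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got;

static void
handler(int signo)
{
	got = signo;			/* async-signal-safe: set a flag */
}

int
main(void)
{
	struct sigaction sa, osa;

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);	/* extra signals to block in handler */
	sa.sa_flags = SA_RESTART;	/* validated against SA_ALLBITS above */
	if (sigaction(SIGINT, &sa, &osa) == -1)	/* -> sys___sigaction14() */
		return (1);
	/* osa now holds the old disposition, copied out above. */
	pause();			/* wait for SIGINT (^C) */
	printf("caught signal %d\n", (int)got);
	return (0);
}
#endif /* 0 */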

/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction_sigtramp_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
		syscallarg(void *) tramp;
		syscallarg(int) vers;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default and disable the signal
 * stack.
 */
void
siginit(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signum = 1; signum < NSIG; signum++) {
		prop = sigprop[signum];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signum);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signum);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signum);
		if (prop & SA_IGNORE && signum != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = 0;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	sigactsunshare(p);

	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigctx.ps_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	for (signum = 1; signum < NSIG; signum++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			prop = sigprop[signum];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signum);
				sigdelset(&p->p_sigctx.ps_siglist, signum);
			}
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = 0;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;
}
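
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * execsigs() semantics above: across exec, caught signals revert to
 * SIG_DFL while SIG_IGN dispositions survive.  /bin/sleep is just an
 * example payload.
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void
handler(int signo)
{
	(void)signo;
}

int
main(void)
{
	signal(SIGUSR1, handler);	/* caught: exec resets to SIG_DFL */
	signal(SIGUSR2, SIG_IGN);	/* ignored: still SIG_IGN after exec */
	execl("/bin/sleep", "sleep", "60", (char *)NULL);
	return (1);			/* reached only if exec failed */
}
#endif /* 0 */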

int
sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigmask;

	if (nss) {
		(void)splsched();	/* XXXSMP */
		switch (how) {
		case SIG_BLOCK:
			sigplusset(nss, &p->p_sigctx.ps_sigmask);
			break;
		case SIG_UNBLOCK:
			sigminusset(nss, &p->p_sigctx.ps_sigmask);
			CHECKSIGS(p);
			break;
		case SIG_SETMASK:
			p->p_sigctx.ps_sigmask = *nss;
			CHECKSIGS(p);
			break;
		default:
			(void)spl0();	/* XXXSMP */
			return (EINVAL);
		}
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void)spl0();	/* XXXSMP */
	}

	return (0);
}

/*
 * Manipulate the signal mask.  The new and old masks are passed in and
 * out by pointer; the copyin/copyout happens here and the library stub
 * only wraps the system call.
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigprocmask14_args /* {
		syscallarg(int) how;
		syscallarg(const sigset_t *) set;
		syscallarg(sigset_t *) oset;
	} */ *uap = v;
	struct proc *p;
	sigset_t nss, oss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigprocmask1(p, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}
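
/*
 * Illustrative userland sketch (under "#if 0", not built here):
 * blocking SIGTERM around a critical section via sigprocmask(2),
 * which lands in sigprocmask1() above.
 */
#if 0
#include <signal.h>

int
main(void)
{
	sigset_t nss, oss;

	sigemptyset(&nss);
	sigaddset(&nss, SIGTERM);
	sigprocmask(SIG_BLOCK, &nss, &oss);	/* SIG_BLOCK -> sigplusset() */
	/* ... critical section: SIGTERM is left pending, not delivered ... */
	sigprocmask(SIG_SETMASK, &oss, NULL);	/* restore; CHECKSIGS() runs */
	return (0);
}
#endif /* 0 */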

void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}

/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigpending14_args /* {
		syscallarg(sigset_t *) set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;

	p = l->l_proc;
	sigpending1(p, &ss);
	return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}
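
/*
 * Illustrative userland sketch (under "#if 0", not built here):
 * sigpending(2) reports exactly the set computed by sigpending1()
 * above -- pending signals minus the current mask.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	kill(getpid(), SIGUSR1);	/* posted, but blocked: stays pending */
	sigpending(&pend);
	printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1));
	return (0);
}
#endif /* 0 */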

int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished.  Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  The new mask arrives by pointer and is
 * copied in below before sigsuspend1() installs it.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigsuspend14_args /* {
		syscallarg(const sigset_t *) set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return (error);
	}

	p = l->l_proc;
	return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
}
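
/*
 * Illustrative userland sketch (under "#if 0", not built here): the
 * classic race-free wait built on sigsuspend(2)/sigsuspend1() -- the
 * old mask is saved via SAS_OLDMASK above, restored after the handler
 * runs, and the call always fails with EINTR.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t done;

static void
handler(int signo)
{
	(void)signo;
	done = 1;
}

int
main(void)
{
	struct sigaction sa;
	sigset_t block, omask;

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &omask);
	while (!done)
		sigsuspend(&omask);	/* atomically unblock and sleep */
	sigprocmask(SIG_SETMASK, &omask, NULL);
	return (0);
}
#endif /* 0 */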

int
sigaltstack1(struct proc *p, const struct sigaltstack *nss,
	struct sigaltstack *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			return (EINVAL);

		if (nss->ss_flags & SS_DISABLE) {
			if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
				return (EINVAL);
		} else {
			if (nss->ss_size < MINSIGSTKSZ)
				return (ENOMEM);
		}
		p->p_sigctx.ps_sigstk = *nss;
	}

	return (0);
}

/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaltstack14_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct proc *p;
	struct sigaltstack nss, oss;
	int error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaltstack1(p,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}
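
/*
 * Illustrative userland sketch (under "#if 0", not built here):
 * installing an alternate stack through sigaltstack1() above, then
 * requesting SIGSEGV delivery on it with SA_ONSTACK -- the usual way
 * to survive a stack overflow long enough to report it.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static void
handler(int signo)
{
	(void)signo;
	_Exit(0);		/* handled on the alternate stack */
}

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;	/* below MINSIGSTKSZ would fail: ENOMEM */
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
		return (1);

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_ONSTACK;	/* deliver on the alternate stack */
	sigaction(SIGSEGV, &sa, NULL);
	raise(SIGSEGV);
	return (1);
}
#endif /* 0 */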

/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *cp, *p;
	struct pcred *pc;
	ksiginfo_t ksi;

	cp = l->l_proc;
	pc = cp->p_cred;
	if ((u_int)SCARG(uap, signum) >= NSIG)
		return (EINVAL);
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = SCARG(uap, signum);
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = cp->p_pid;
	ksi.ksi_uid = cp->p_ucred->cr_uid;
	if (SCARG(uap, pid) > 0) {
		/* kill single process */
		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
			return (EPERM);
		if (SCARG(uap, signum))
			kpsignal(p, &ksi, NULL);
		return (0);
	}
	switch (SCARG(uap, pid)) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, &ksi, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, &ksi, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
	}
	/* NOTREACHED */
}
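
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * pid argument conventions handled by sys_kill()/killpg1() above.
 */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

int
main(void)
{
	pid_t child;

	if ((child = fork()) == 0) {
		pause();
		_exit(0);
	}
	kill(child, 0);		/* signum 0: permission check only */
	kill(0, SIGCONT);	/* pid == 0: own process group (killpg1) */
	kill(child, SIGTERM);	/* pid > 0: one process, via pfind() */
	waitpid(child, NULL, 0);
	return (0);
}
#endif /* 0 */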

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p;
	struct pcred *pc;
	struct pgrp *pgrp;
	int nfound;
	int signum = ksi->ksi_signo;

	pc = cp->p_cred;
	nfound = 0;
	if (all) {
		/*
		 * broadcast
		 */
		proclist_lock_read();
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == cp || !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum)
				kpsignal(p, ksi, NULL);
		}
		proclist_unlock_read();
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				kpsignal(p, ksi, NULL);
		}
	}
	return (nfound ? 0 : ESRCH);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	ksiginfo_t ksi;

	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	kgsignal(pgid, &ksi, NULL);
}

void
kgsignal(int pgid, ksiginfo_t *ksi, void *data)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		kpgsignal(pgrp, ksi, data, 0);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;

	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
#ifndef __HAVE_SIGINFO
void _trapsignal(struct lwp *, ksiginfo_t *);
void
trapsignal(struct lwp *l, int signum, u_long code)
{
#define trapsignal _trapsignal
	ksiginfo_t ksi;

	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	ksi.ksi_trap = (int)code;
	trapsignal(l, &ksi);
}
#endif

void
trapsignal(struct lwp *l, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
#ifdef notyet
		if (KTRPOINT(p, KTR_PSIGINFO))
			ktrpsiginfo(p, ksi,
			    SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask);
#else
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum,
			    SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, 0);
#endif
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_siginfo = *ksi;
		p->p_sigctx.ps_lwp = l->l_lid;
		kpsignal(p, ksi, NULL);
	}
}
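
/*
 * Illustrative userland sketch (under "#if 0", not built here): a
 * caught trap signal as posted by trapsignal() above.  With SA_SIGINFO
 * (rejected earlier unless __HAVE_SIGINFO is defined) the handler sees
 * the machine-dependent detail, e.g. the faulting address.  printf()
 * is not async-signal-safe; it is tolerable only in a crash demo that
 * exits immediately.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
handler(int signo, siginfo_t *si, void *ctx)
{
	(void)ctx;
	printf("signal %d, fault address %p\n", signo, si->si_addr);
	_Exit(0);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);
	*(volatile int *)8 = 42;	/* deliberate fault -> trapsignal() */
	return (1);
}
#endif /* 0 */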

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * XXXSMP: Invoked as psignal() or sched_psignal().
 */
void
psignal1(struct proc *p, int signum, int dolock)
{
	ksiginfo_t ksi;

	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	kpsignal1(p, &ksi, NULL, dolock);
}

void
kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data,
	int dolock)	/* XXXSMP: works, but icky */
{
	struct lwp *l, *suspended;
	int s = 0, prop, allsusp;
	sig_t action;
	int signum = ksi->ksi_signo;

#ifdef DIAGNOSTIC
	if (signum <= 0 || signum >= NSIG)
		panic("psignal signal number %d", signum);

	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_ASSERT_UNLOCKED();
	else
		SCHED_ASSERT_LOCKED();
#endif

	if (data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;

		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];

			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}

	/*
	 * Notify any interested parties in the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED)
		action = SIG_DFL;
	else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum))
			return;
		if (sigismember(&p->p_sigctx.ps_sigmask, signum))
			action = SIG_HOLD;
		else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	if (prop & SA_CONT)
		sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);

	if (prop & SA_STOP)
		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);

	sigaddset(&p->p_sigctx.ps_siglist, signum);

	/* CHECKSIGS() is "inlined" here. */
	p->p_sigctx.ps_sigcheck = 1;

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if anything waits on it.  If yes, clear the
	 * pending signal from siglist set, save it to ps_sigwaited,
	 * clear sigwait list, and wakeup any sigwaiters.
	 * The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0
	    && p->p_sigctx.ps_sigwaited < 0
	    && sigismember(&p->p_sigctx.ps_sigwait, signum)
	    && p->p_stat != SSTOP) {
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		p->p_sigctx.ps_sigwaited = signum;
		sigemptyset(&p->p_sigctx.ps_sigwait);

		if (dolock)
			wakeup_one(&p->p_sigctx.ps_sigwait);
		else
			sched_wakeup(&p->p_sigctx.ps_sigwait);
		return;
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_LOCK(s);

	/* XXXUPSXXX LWPs might go to sleep without passing signal handling */
	if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)
	    && !((p->p_flag & P_SA) && (p->p_sa->sa_idle != NULL))) {
		/*
		 * At least one LWP is running or on a run queue.
		 * The signal will be noticed when one of them returns
		 * to userspace.
		 */
		signotify(p);
		/*
		 * The signal will be noticed very soon.
		 */
		goto out;
	} else {
		/* Process is sleeping or stopped */
		if (p->p_flag & P_SA) {
			struct lwp *l2 = p->p_sa->sa_vp;

			l = NULL;
			allsusp = 1;

			if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
				l = l2;
			else if (l2->l_stat == LSSUSPENDED)
				suspended = l2;
			else if ((l2->l_stat != LSZOMB) &&
			    (l2->l_stat != LSDEAD))
				allsusp = 0;
		} else {
			/*
			 * Find out if any of the sleeps are interruptible,
			 * and if all the live LWPs remaining are suspended.
			 */
			allsusp = 1;
			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if (l->l_stat == LSSLEEP &&
				    l->l_flag & L_SINTR)
					break;
				if (l->l_stat == LSSUSPENDED)
					suspended = l;
				else if ((l->l_stat != LSZOMB) &&
				    (l->l_stat != LSDEAD))
					allsusp = 0;
			}
		}
		if (p->p_stat == SACTIVE) {

			if (l != NULL && (p->p_flag & P_TRACED))
				goto run;

			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				goto out;
			}

			/*
			 * When a sleeping process receives a stop
			 * signal, process immediately if possible.
			 */
			if ((prop & SA_STOP) && action == SIG_DFL) {
				/*
				 * If a child holding parent blocked,
				 * stopping could cause deadlock.
				 */
				if (p->p_flag & P_PPWAIT)
					goto out;
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
					/*
					 * XXXSMP: recursive call; don't lock
					 * the second time around.
					 */
					sched_psignal(p->p_pptr, SIGCHLD);
				}
				proc_stop(p);	/* XXXSMP: recurse? */
				goto out;
			}

			if (l == NULL) {
				/*
				 * Special case: SIGKILL of a process
				 * which is entirely composed of
				 * suspended LWPs should succeed.  We
				 * make this happen by unsuspending one of
				 * them.
				 */
				if (allsusp && (signum == SIGKILL))
					lwp_continue(suspended);
				goto out;
			}
			/*
			 * All other (caught or default) signals
			 * cause the process to run.
			 */
			goto runfast;
			/*NOTREACHED*/
		} else if (p->p_stat == SSTOP) {
			/* Process is stopped */
			/*
			 * If traced process is already stopped,
			 * then no further action is necessary.
			 */
			if (p->p_flag & P_TRACED)
				goto out;

			/*
			 * Kill signal always sets processes running,
			 * if possible.
			 */
			if (signum == SIGKILL) {
				l = proc_unstop(p);
				if (l)
					goto runfast;
				goto out;
			}

			if (prop & SA_CONT) {
				/*
				 * If SIGCONT is default (or ignored),
				 * we continue the process but don't
				 * leave the signal in ps_siglist, as
				 * it has no further action.  If
				 * SIGCONT is held, we continue the
				 * process and leave the signal in
				 * ps_siglist.  If the process catches
				 * SIGCONT, let it handle the signal
				 * itself.  If it isn't waiting on an
				 * event, then it goes back to run
				 * state.  Otherwise, process goes
				 * back to sleep state.
				 */
				if (action == SIG_DFL)
					sigdelset(&p->p_sigctx.ps_siglist,
					    signum);
				l = proc_unstop(p);
				if (l && (action == SIG_CATCH))
					goto runfast;
				goto out;
			}

			if (prop & SA_STOP) {
				/*
				 * Already stopped, don't need to stop again.
				 * (If we did the shell could get confused.)
				 */
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				goto out;
			}

			/*
			 * If a lwp is sleeping interruptibly, then
			 * wake it up; it will run until the kernel
			 * boundary, where it will stop in issignal(),
			 * since p->p_stat is still SSTOP.  When the
			 * process is continued, it will be made
			 * runnable and can look at the signal.
			 */
			if (l)
				goto run;
			goto out;
		} else {
			/* Else what? */
			panic("psignal: Invalid process state %d.",
			    p->p_stat);
		}
	}
	/*NOTREACHED*/

 runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (l->l_priority > PUSER)
		l->l_priority = PUSER;
 run:

	setrunnable(l);		/* XXXSMP: recurse? */
 out:
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_UNLOCK(s);
}

void
kpsendsig(struct lwp *l, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = *ksi;
		le = li = NULL;
		if (ksi->ksi_trap)
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= f;
		return;
	}

#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, ksi->ksi_trap);
#endif
}

static __inline int firstsig(const sigset_t *);

static __inline int
firstsig(const sigset_t *ss)
{
	int sig;

	sig = ffs(ss->__bits[0]);
	if (sig != 0)
		return (sig);
#if NSIG > 33
	sig = ffs(ss->__bits[1]);
	if (sig != 0)
		return (sig + 32);
#endif
#if NSIG > 65
	sig = ffs(ss->__bits[2]);
	if (sig != 0)
		return (sig + 64);
#endif
#if NSIG > 97
	sig = ffs(ss->__bits[3]);
	if (sig != 0)
		return (sig + 96);
#endif
	return (0);
}
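
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * firstsig() scan above: ffs(3) on each 32-bit word, with a +32 bias
 * per word.  The four-word array mirrors the kernel sigset_t layout.
 */
#if 0
#include <strings.h>

static int
first_set_signal(const unsigned int bits[4])
{
	int w, sig;

	for (w = 0; w < 4; w++) {
		sig = ffs((int)bits[w]);
		if (sig != 0)
			return (sig + 32 * w);	/* 1-based signal number */
	}
	return (0);				/* empty set */
}
#endif /* 0 */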

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curlwp))
 *		postsig(signum);
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping.  Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			if ((p->p_flag & P_FSTRACE) == 0)
				psignal1(p->p_pptr, SIGCHLD, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					psignal1(p->p_pptr, SIGCHLD, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
		sigswitch:
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	p->p_flag &= ~P_WAITED;

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
			/*
			 * XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
			setrunnable(l);
		}

		/* !!!UPS!!! FIX ME */
#if 0
		else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			    "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
#endif
	}
	/* XXX unlock process LWP state */

	sched_wakeup((caddr_t)p->p_pptr);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 *
 * If no LWPs ended up runnable (and therefore able to take a signal),
 * return a LWP that is sleeping interruptibly.  The caller can wake
 * that LWP up to take a signal.
 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptible LWPs left after we have run so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptible LWPs if this is the case.
	 */

	p->p_stat = SACTIVE;
	if (p->p_flag & P_SA) {
		/*
		 * Preferentially select the idle LWP as the interruptible
		 * LWP to return if it exists.
		 */
		lr = p->p_sa->sa_idle;
		if (lr != NULL)
			cantake = 1;
	}
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}

	return lr;
}
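
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * proc_stop()/proc_unstop() transitions above as seen from a parent:
 * SIGSTOP stops the child and wakes the parent (the sched_wakeup() on
 * p_pptr), SIGCONT makes it runnable again.
 */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t child;

	if ((child = fork()) == 0)
		for (;;)
			pause();
	kill(child, SIGSTOP);			/* kpsignal1 -> proc_stop */
	waitpid(child, &status, WUNTRACED);	/* WIFSTOPPED(status) */
	kill(child, SIGCONT);			/* kpsignal1 -> proc_unstop */
	kill(child, SIGKILL);
	waitpid(child, &status, 0);
	return (0);
}
#endif /* 0 */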

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG))
		ktrpsig(p,
		    signum, action, p->p_sigctx.ps_flags & SAS_OLDMASK ?
		    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask, 0);
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t ksi;

		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		if (p->p_sigctx.ps_siginfo.ksi_signo != signum) {
			memset(&ksi, 0, sizeof(ksi));
			ksi.ksi_signo = signum;
		} else {
			ksi = p->p_sigctx.ps_siginfo;
			memset(&p->p_sigctx.ps_siginfo, 0,
			    sizeof(p->p_sigctx.ps_siginfo));
			p->p_sigctx.ps_lwp = 0;
		}
		kpsendsig(l, &ksi, returnmask);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}
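
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * SA_RESETHAND branch in postsig() above: the handler runs once, the
 * disposition reverts to SIG_DFL, and the next occurrence takes the
 * default action.
 */
#if 0
#include <signal.h>

static void
handler(int signo)
{
	(void)signo;
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESETHAND;	/* postsig() restores SIG_DFL */
	sigaction(SIGTERM, &sa, NULL);
	raise(SIGTERM);			/* first: handler runs */
	raise(SIGTERM);			/* second: default action, exit */
	return (0);			/* not reached */
}
#endif /* 0 */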

/*
 * Kill the current process for stated reason.
 */
void
killproc(struct proc *p, const char *why)
{

	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */

#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

static const char logcoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static const char lognocoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

/* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	lwp_exit(l);
}

void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_siginfo.ksi_signo = signum;
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
			    (int)p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}
	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

/*
 * Dump core, into a file named according to the process's corename
 * template (see build_corename() below), unless the process was
 * setuid/setgid.
 */
int
coredump(struct lwp *l)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1;
	char name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p;

	p = l->l_proc;
	psignal(p, SIGSYS);
	return (ENOSYS);
}

static int
build_corename(struct proc *p, char dst[MAXPATHLEN])
{
	const char *s;
	char *d, *end;
	int i;

	for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
	    *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				goto copy;
			}
			d += i;
			s++;
		} else {
 copy:			*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}
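
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * %-expansion performed by build_corename() above on templates such as
 * "%n.core".  The sample values passed in are made up; the kernel
 * takes them from struct proc, and the helper name here is
 * hypothetical.
 */
#if 0
#include <stdio.h>

static int
expand_corename(const char *tmpl, char *dst, size_t len,
    const char *comm, int pid, const char *login, long start)
{
	char *d = dst, *end = dst + len;
	int i;

	for (; *tmpl != '\0'; tmpl++) {
		if (*tmpl == '%') {
			switch (*(tmpl + 1)) {
			case 'n':	/* process name */
				i = snprintf(d, end - d, "%s", comm);
				break;
			case 'p':	/* process id */
				i = snprintf(d, end - d, "%d", pid);
				break;
			case 'u':	/* session login name */
				i = snprintf(d, end - d, "%s", login);
				break;
			case 't':	/* process start time */
				i = snprintf(d, end - d, "%ld", start);
				break;
			default:
				goto copy;
			}
			d += i;
			tmpl++;
		} else {
 copy:			*d++ = *tmpl;
		}
		if (d >= end)
			return (-1);	/* ENAMETOOLONG in the kernel */
	}
	*d = '\0';
	return (0);
}

/*
 * expand_corename("%n.core", buf, sizeof(buf), "sh", 123, "root", 0L)
 * yields "sh.core".
 */
#endif /* 0 */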

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}

/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getcontext_args /* {
		syscallarg(struct __ucontext *) ucp;
	} */ *uap = v;
	ucontext_t uc;

	getucontext(l, &uc);

	return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p;
	int error;

	p = l->l_proc;
	if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
		return (error);
	l->l_ctxlink = ucp->uc_link;
	/*
	 * We might want to take care of the stack portion here but currently
	 * don't; see the comment in getucontext().
	 */
	if ((ucp->uc_flags & _UC_SIGMASK) != 0)
		sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);

	return 0;
}

/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setcontext_args /* {
		syscallarg(const ucontext_t *) ucp;
	} */ *uap = v;
	ucontext_t uc;
	int error;

	if (SCARG(uap, ucp) == NULL)	/* i.e. end of uc_link chain */
		exit1(l, W_EXITCODE(0, 0));
	else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
	    (error = setucontext(l, &uc)) != 0)
		return (error);

	return (EJUSTRETURN);
}
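
/*
 * Illustrative userland sketch (under "#if 0", not built here):
 * getcontext(2)/setcontext(2) as implemented by sys_getcontext() and
 * sys_setcontext() above.  Re-entering the saved context resumes right
 * after getcontext(), so "pass" counts up; EJUSTRETURN means the
 * restored registers are left untouched on the way back out.
 */
#if 0
#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
	ucontext_t uc;
	volatile int pass = 0;

	getcontext(&uc);	/* cpu_getmcontext() fills uc_mcontext */
	pass++;
	printf("pass %d\n", pass);
	if (pass < 3)
		setcontext(&uc);	/* jump back; does not return */
	return (0);
}
#endif /* 0 */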

/*
 * sigtimedwait(2) system call, used also for implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles a single LWP in signal wait.  libpthread provides
 * its own sigtimedwait() wrapper to DTRT WRT individual threads.
 *
 * XXX no support for queued signals, si_code is always SI_USER.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t waitset, twaitset;
	struct proc *p = l->l_proc;
	int error, signum, s;
	int timo = 0;
	struct timeval tvstart;
	struct timespec ts;

	if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
		return (error);

	/*
	 * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
	 * SA_CANTMASK signals in waitset anyway; we strip them here only
	 * for the siglist check below.
	 */
	sigminusset(&sigcantmask, &waitset);

	/*
	 * First scan siglist and check if there is signal from
	 * our waitset already pending.
	 */
	twaitset = waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
			return (error);

		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return (EAGAIN);

		/*
		 * Remember current mono_time, it would be used in
		 * ECANCELED/ERESTART case.
		 */
		s = splclock();
		tvstart = mono_time;
		splx(s);
	}

	/*
	 * Setup ps_sigwait list.
	 */
	p->p_sigctx.ps_sigwaited = -1;
	p->p_sigctx.ps_sigwait = waitset;

	/*
	 * Wait for signal to arrive.  We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Check if a signal from our wait set has arrived, or if it
	 * was mere wakeup.
	 */
	if (!error) {
		if ((signum = p->p_sigctx.ps_sigwaited) <= 0) {
			/* wakeup via _lwp_wakeup() */
			error = ECANCELED;
		}
	}

	/*
	 * On error, clear sigwait indication.  psignal1() sets it
	 * in !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = 0;

		/*
		 * If the sleep was interrupted (either by signal or wakeup),
		 * update the timeout and copyout new value back.
		 * It would be used when the syscall would be restarted
		 * or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timeval tvnow, tvtimo;
			int err;

			s = splclock();
			tvnow = mono_time;
			splx(s);

			TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);

			/* compute how much time has passed since start */
			timersub(&tvnow, &tvstart, &tvnow);
			/* subtract passed time from timeout */
			timersub(&tvtimo, &tvnow, &tvtimo);

			if (tvtimo.tv_sec < 0)
				return (EAGAIN);

			TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);

			/* copy updated timeout to userland */
			if ((err = copyout(&ts, SCARG(uap, timeout),
			    sizeof(ts))))
				return (err);
		}

		return (error);
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * XXX no queued signals for now
	 */
	if (signum > 0) {
		siginfo_t si;

 sig:
		memset(&si, 0, sizeof(si));
		si.si_signo = signum;
		si.si_code = SI_USER;

		error = copyout(&si, SCARG(uap, info), sizeof(si));
		if (error)
			return (error);
	}

	return (0);
}
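
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * sys___sigtimedwait() path above.  The signal must be blocked first
 * so it stays on siglist (or wakes the ps_sigwait sleeper) instead of
 * being delivered; on timeout the call fails with EAGAIN.  As noted
 * above, this implementation always reports si_code == SI_USER.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 2, 0 };	/* converted via mstohz() above */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* required before waiting */
	if (sigtimedwait(&set, &info, &ts) == -1)
		perror("sigtimedwait");	/* EAGAIN after ~2 seconds */
	else
		printf("got signal %d, si_code %d\n", info.si_signo,
		    info.si_code);
	return (0);
}
#endif /* 0 */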

/*
 * Returns true if signal is ignored or masked for the given process.
 */
int
sigismasked(struct proc *p, int sig)
{

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&p->p_sigctx.ps_sigmask, sig));
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
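
/*
 * Illustrative userland sketch (under "#if 0", not built here) of the
 * EVFILT_SIGNAL filter implemented by sig_filtops above.  Because the
 * KNOTE() in kpsignal1() fires before the ignore check, the event is
 * reported even when the signal's disposition is SIG_IGN; kn_data
 * counts deliveries since the last retrieval (EV_CLEAR).
 */
#if 0
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		return (1);
	signal(SIGUSR1, SIG_IGN);	/* knote still fires when ignored */
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, 0);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return (1);
	kill(getpid(), SIGUSR1);
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("signal %d delivered %d time(s)\n",
		    (int)kev.ident, (int)kev.data);
	return (0);
}
#endif /* 0 */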