/*	$NetBSD: kern_sig.c,v 1.146 2003/08/07 16:31:48 agc Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.146 2003/08/07 16:31:48 agc Exp $");

#include "opt_ktrace.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd32.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/exec.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <sys/user.h>		/* for coredump */

#include <uvm/uvm_extern.h>

static void	proc_stop(struct proc *p);
static int	build_corename(struct proc *, char [MAXPATHLEN]);
sigset_t	contsigmask, stopsigmask, sigcantmask;

struct pool	sigacts_pool;	/* memory pool for sigacts structures */
struct pool	siginfo_pool;	/* memory pool for siginfo structures */

/*
 * Can process p, with pcred pc, send the signal signum to process q?
 */
#define	CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
	pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
	    &pool_allocator_nointr);
}

/*
 * Create an initial sigctx structure, using the same signal state
 * as p.  If 'share' is set, share the sigctx_proc part, otherwise just
 * copy it from parent.
 */
void
sigactsinit(struct proc *np, struct proc *pp, int share)
{
	struct sigacts *ps;

	if (share) {
		np->p_sigacts = pp->p_sigacts;
		pp->p_sigacts->sa_refcnt++;
	} else {
		ps = pool_get(&sigacts_pool, PR_WAITOK);
		if (pp)
			memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
		else
			memset(ps, '\0', sizeof(struct sigacts));
		ps->sa_refcnt = 1;
		np->p_sigacts = ps;
	}
}

/*
 * Make this process not share its sigctx, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	sigactsinit(p, NULL, 0);

	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}

/*
 * Release a sigctx structure.
 */
void
sigactsfree(struct proc *p)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (--ps->sa_refcnt > 0)
		return;

	pool_put(&sigacts_pool, ps);
}

int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline.  Conversely, if
	 * we are using a non-0 ABI version, we must have a
	 * trampoline.
	 */
	if ((vers != 0 && tramp == NULL) ||
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
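
/*
 * Illustrative sketch, excluded from compilation: how userland reaches
 * sigaction1() above through the sigaction(2) libc stub and
 * sys___sigaction14() below.  Plain POSIX; the handler name and message
 * are invented for the example.
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void
handler(int sig)
{
	/* Only async-signal-safe work belongs in a handler. */
	(void)write(STDERR_FILENO, "caught SIGINT\n", 14);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);	/* extra signals blocked in handler */
	sa.sa_flags = SA_RESTART;	/* validated against SA_ALLBITS above */
	if (sigaction(SIGINT, &sa, NULL) == -1)
		return 1;
	for (;;)
		pause();		/* wait for delivery */
}
#endif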

/* ARGSUSED */
int
sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction14_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct proc *p;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    NULL, 0);
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction_sigtramp_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
		syscallarg(void *) tramp;
		syscallarg(int) vers;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default and disable the signal
 * stack.
 */
void
siginit(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signum = 1; signum < NSIG; signum++) {
		prop = sigprop[signum];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signum);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signum);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signum);
		if (prop & SA_IGNORE && signum != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = 0;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	sigactsunshare(p);

	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigctx.ps_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	for (signum = 1; signum < NSIG; signum++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			prop = sigprop[signum];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signum);
				sigdelset(&p->p_sigctx.ps_siglist, signum);
			}
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = 0;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;
}

int
sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigmask;

	if (nss) {
		(void)splsched();	/* XXXSMP */
		switch (how) {
		case SIG_BLOCK:
			sigplusset(nss, &p->p_sigctx.ps_sigmask);
			break;
		case SIG_UNBLOCK:
			sigminusset(nss, &p->p_sigctx.ps_sigmask);
			CHECKSIGS(p);
			break;
		case SIG_SETMASK:
			p->p_sigctx.ps_sigmask = *nss;
			CHECKSIGS(p);
			break;
		default:
			(void)spl0();	/* XXXSMP */
			return (EINVAL);
		}
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void)spl0();		/* XXXSMP */
	}

	return (0);
}
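
/*
 * Illustrative userland sketch, excluded from compilation: the `how'
 * operations dispatched by sigprocmask1() above, via sigprocmask(2).
 * SIG_BLOCK unions the new set in (sigplusset), SIG_UNBLOCK removes it
 * (sigminusset), SIG_SETMASK replaces the mask outright.
 */
#if 0
#include <signal.h>

int
main(void)
{
	sigset_t nss, oss;

	sigemptyset(&nss);
	sigaddset(&nss, SIGTERM);

	(void)sigprocmask(SIG_BLOCK, &nss, &oss);	/* block SIGTERM */
	/* ... critical section: a SIGTERM sent now stays pending ... */
	(void)sigprocmask(SIG_SETMASK, &oss, NULL);	/* restore old mask */
	return 0;
}
#endif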

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigprocmask14_args /* {
		syscallarg(int) how;
		syscallarg(const sigset_t *) set;
		syscallarg(sigset_t *) oset;
	} */ *uap = v;
	struct proc *p;
	sigset_t nss, oss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigprocmask1(p, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}

/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigpending14_args /* {
		syscallarg(sigset_t *) set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;

	p = l->l_proc;
	sigpending1(p, &ss);
	return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}

int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished.  Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();		/* XXXSMP */
	}

	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
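
/*
 * Illustrative userland sketch, excluded from compilation: the classic
 * block/test/sigsuspend pattern that sigsuspend1() above makes race-free
 * by restoring ps_oldmask only after the handler has run.  The flag name
 * `got_usr1' is invented for the example.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int sig)
{
	got_usr1 = 1;
}

int
main(void)
{
	struct sigaction sa;
	sigset_t block, waitmask;

	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	(void)sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	(void)sigprocmask(SIG_BLOCK, &block, &waitmask);
	sigdelset(&waitmask, SIGUSR1);	/* unblocked only while suspended */

	while (!got_usr1)
		(void)sigsuspend(&waitmask);	/* always returns -1/EINTR */
	return 0;
}
#endif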

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigsuspend14_args /* {
		syscallarg(const sigset_t *) set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return (error);
	}

	p = l->l_proc;
	return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
}

int
sigaltstack1(struct proc *p, const struct sigaltstack *nss,
	struct sigaltstack *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			return (EINVAL);

		if (nss->ss_flags & SS_DISABLE) {
			if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
				return (EINVAL);
		} else {
			if (nss->ss_size < MINSIGSTKSZ)
				return (ENOMEM);
		}
		p->p_sigctx.ps_sigstk = *nss;
	}

	return (0);
}
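
/*
 * Illustrative userland sketch, excluded from compilation: installing an
 * alternate signal stack so a SIGSEGV handler can still run after stack
 * overflow.  sigaltstack1() above enforces the MINSIGSTKSZ minimum and
 * rejects SS_DISABLE while SS_ONSTACK is active.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void
on_segv(int sig)
{
	/* Returning from a genuine SIGSEGV is unsafe; just bail. */
	_exit(1);
}

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;		/* >= MINSIGSTKSZ, see above */
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
		return 1;

	sa.sa_handler = on_segv;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_ONSTACK;	/* deliver on the alternate stack */
	(void)sigaction(SIGSEGV, &sa, NULL);
	/* ... */
	return 0;
}
#endif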

/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaltstack14_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct proc *p;
	struct sigaltstack nss, oss;
	int error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaltstack1(p,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *cp, *p;
	struct pcred *pc;

	cp = l->l_proc;
	pc = cp->p_cred;
	if ((u_int)SCARG(uap, signum) >= NSIG)
		return (EINVAL);
	if (SCARG(uap, pid) > 0) {
		/* kill single process */
		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
			return (EPERM);
		if (SCARG(uap, signum))
			psignal(p, SCARG(uap, signum));
		return (0);
	}
	switch (SCARG(uap, pid)) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, SCARG(uap, signum), 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, SCARG(uap, signum), 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, SCARG(uap, signum), -SCARG(uap, pid), 0));
	}
	/* NOTREACHED */
}
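
/*
 * Illustrative userland sketch, excluded from compilation: the four pid
 * encodings decoded by sys_kill() above.  `pid' and `pgid' are
 * placeholders supplied by the caller.
 */
#if 0
#include <sys/types.h>
#include <signal.h>

void
kill_examples(pid_t pid, pid_t pgid)
{
	(void)kill(pid, SIGTERM);	/* pid > 0: that single process */
	(void)kill(0, SIGTERM);		/* 0: caller's own process group */
	(void)kill(-1, SIGTERM);	/* -1: broadcast (killpg1, all=1) */
	(void)kill(-pgid, SIGTERM);	/* < -1: process group |pid| */
}
#endif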

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct proc *p;
	struct pcred *pc;
	struct pgrp *pgrp;
	int nfound;

	pc = cp->p_cred;
	nfound = 0;
	if (all) {
		/*
		 * broadcast
		 */
		proclist_lock_read();
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == cp || !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum)
				psignal(p, signum);
		}
		proclist_unlock_read();
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				psignal(p, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, signum, 0);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct proc *p;

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				psignal(p, signum);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct lwp *l, int signum, u_long code)
{
	struct proc *p;
	struct sigacts *ps;

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum,
			    SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, code);
#endif
		psendsig(l, signum, &p->p_sigctx.ps_sigmask, code);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	} else {
		p->p_sigctx.ps_code = code;	/* XXX for core dump/debugger */
		p->p_sigctx.ps_sig = signum;	/* XXX to verify code */
		p->p_sigctx.ps_lwp = l->l_lid;
		psignal(p, signum);
	}
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * XXXSMP: Invoked as psignal() or sched_psignal().
 */
void
psignal1(struct proc *p, int signum,
	int dolock)		/* XXXSMP: works, but icky */
{
	struct lwp *l, *suspended;
	int s = 0, prop, allsusp;
	sig_t action;

#ifdef DIAGNOSTIC
	if (signum <= 0 || signum >= NSIG)
		panic("psignal signal number");

	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_ASSERT_UNLOCKED();
	else
		SCHED_ASSERT_LOCKED();
#endif
	/*
	 * Notify any interested parties in the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED)
		action = SIG_DFL;
	else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum))
			return;
		if (sigismember(&p->p_sigctx.ps_sigmask, signum))
			action = SIG_HOLD;
		else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	if (prop & SA_CONT)
		sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);

	if (prop & SA_STOP)
		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);

	sigaddset(&p->p_sigctx.ps_siglist, signum);

	/* CHECKSIGS() is "inlined" here. */
	p->p_sigctx.ps_sigcheck = 1;

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if anything waits on it. If yes, clear the
	 * pending signal from siglist set, save it to ps_sigwaited,
	 * clear sigwait list, and wakeup any sigwaiters.
	 * The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0
	    && p->p_sigctx.ps_sigwaited < 0
	    && sigismember(&p->p_sigctx.ps_sigwait, signum)
	    && p->p_stat != SSTOP) {
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		p->p_sigctx.ps_sigwaited = signum;
		sigemptyset(&p->p_sigctx.ps_sigwait);

		if (dolock)
			wakeup_one(&p->p_sigctx.ps_sigwait);
		else
			sched_wakeup(&p->p_sigctx.ps_sigwait);
		return;
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_LOCK(s);

	/* XXXUPSXXX LWPs might go to sleep without passing signal handling */
	if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
		/*
		 * At least one LWP is running or on a run queue.
		 * The signal will be noticed when one of them returns
		 * to userspace.
		 */
		signotify(p);
		/*
		 * The signal will be noticed very soon.
		 */
		goto out;
	} else {
		/* Process is sleeping or stopped */
		if (p->p_flag & P_SA) {
			struct lwp *l2 = p->p_sa->sa_vp;
			l = NULL;
			allsusp = 1;

			if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
				l = l2;
			else if (l2->l_stat == LSSUSPENDED)
				suspended = l2;
			else if ((l2->l_stat != LSZOMB) &&
			    (l2->l_stat != LSDEAD))
				allsusp = 0;
		} else {
			/*
			 * Find out if any of the sleeps are interruptible,
			 * and if all the live LWPs remaining are suspended.
			 */
			allsusp = 1;
			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if (l->l_stat == LSSLEEP &&
				    l->l_flag & L_SINTR)
					break;
				if (l->l_stat == LSSUSPENDED)
					suspended = l;
				else if ((l->l_stat != LSZOMB) &&
				    (l->l_stat != LSDEAD))
					allsusp = 0;
			}
		}
		if (p->p_stat == SACTIVE) {
			/* All LWPs must be sleeping */
			KDASSERT(((p->p_flag & P_SA) == 0) || (l != NULL));

			if (l != NULL && (p->p_flag & P_TRACED))
				goto run;

			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				goto out;
			}

			/*
			 * When a sleeping process receives a stop
			 * signal, process immediately if possible.
			 */
			if ((prop & SA_STOP) && action == SIG_DFL) {
				/*
				 * If a child holding parent blocked,
				 * stopping could cause deadlock.
				 */
				if (p->p_flag & P_PPWAIT)
					goto out;
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
					/*
					 * XXXSMP: recursive call; don't lock
					 * the second time around.
					 */
					sched_psignal(p->p_pptr, SIGCHLD);
				}
				proc_stop(p);	/* XXXSMP: recurse? */
				goto out;
			}

			if (l == NULL) {
				/*
				 * Special case: SIGKILL of a process
				 * which is entirely composed of
				 * suspended LWPs should succeed. We
				 * make this happen by unsuspending one of
				 * them.
				 */
				if (allsusp && (signum == SIGKILL))
					lwp_continue(suspended);
				goto out;
			}
			/*
			 * All other (caught or default) signals
			 * cause the process to run.
			 */
			goto runfast;
			/*NOTREACHED*/
		} else if (p->p_stat == SSTOP) {
			/* Process is stopped */
			/*
			 * If traced process is already stopped,
			 * then no further action is necessary.
			 */
			if (p->p_flag & P_TRACED)
				goto out;

			/*
			 * Kill signal always sets processes running,
			 * if possible.
			 */
			if (signum == SIGKILL) {
				l = proc_unstop(p);
				if (l)
					goto runfast;
				goto out;
			}

			if (prop & SA_CONT) {
				/*
				 * If SIGCONT is default (or ignored),
				 * we continue the process but don't
				 * leave the signal in ps_siglist, as
				 * it has no further action.  If
				 * SIGCONT is held, we continue the
				 * process and leave the signal in
				 * ps_siglist.  If the process catches
				 * SIGCONT, let it handle the signal
				 * itself.  If it isn't waiting on an
				 * event, then it goes back to run
				 * state.  Otherwise, process goes
				 * back to sleep state.
				 */
				if (action == SIG_DFL)
					sigdelset(&p->p_sigctx.ps_siglist,
					    signum);
				l = proc_unstop(p);
				if (l && (action == SIG_CATCH))
					goto runfast;
				goto out;
			}

			if (prop & SA_STOP) {
				/*
				 * Already stopped, don't need to stop again.
				 * (If we did the shell could get confused.)
				 */
				sigdelset(&p->p_sigctx.ps_siglist, signum);
				goto out;
			}

			/*
			 * If a lwp is sleeping interruptibly, then
			 * wake it up; it will run until the kernel
			 * boundary, where it will stop in issignal(),
			 * since p->p_stat is still SSTOP. When the
			 * process is continued, it will be made
			 * runnable and can look at the signal.
			 */
			if (l)
				goto run;
			goto out;
		} else {
			/* Else what? */
			panic("psignal: Invalid process state %d.",
			    p->p_stat);
		}
	}
	/*NOTREACHED*/

 runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (l->l_priority > PUSER)
		l->l_priority = PUSER;
 run:

	setrunnable(l);		/* XXXSMP: recurse? */
 out:
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_UNLOCK(s);
}

void
psendsig(struct lwp *l, int sig, sigset_t *mask, u_long code)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		int s = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->si_signo = sig;
		si->si_errno = 0;
		si->si_code = code;
		le = li = NULL;
		if (code)
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= s;
		return;
	}

	(*p->p_emul->e_sendsig)(sig, mask, code);
}

static __inline int firstsig(const sigset_t *);

static __inline int
firstsig(const sigset_t *ss)
{
	int sig;

	sig = ffs(ss->__bits[0]);
	if (sig != 0)
		return (sig);
#if NSIG > 33
	sig = ffs(ss->__bits[1]);
	if (sig != 0)
		return (sig + 32);
#endif
#if NSIG > 65
	sig = ffs(ss->__bits[2]);
	if (sig != 0)
		return (sig + 64);
#endif
#if NSIG > 97
	sig = ffs(ss->__bits[3]);
	if (sig != 0)
		return (sig + 96);
#endif
	return (0);
}
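
/*
 * Example: with only SIGHUP (1) and SIGINT (2) pending, ss->__bits[0]
 * is 0x3 and ffs() returns 1, so firstsig() reports SIGHUP; the
 * lowest-numbered pending signal always wins.  A bit set in __bits[1]
 * stands for signals 33-64, hence the `sig + 32' adjustment.
 */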

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.) The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curlwp))
 *		postsig(signum);
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			if ((p->p_flag & P_FSTRACE) == 0)
				psignal1(p->p_pptr, SIGCHLD, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					psignal1(p->p_pptr, SIGCHLD, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
		sigswitch:
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	p->p_flag &= ~P_WAITED;

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		else if ((l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
			setrunnable(l);
		}

		/* !!!UPS!!! FIX ME */
#if 0
		else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			    "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
#endif
	}
	/* XXX unlock process LWP state */

	sched_wakeup((caddr_t)p->p_pptr);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 *
 * If no LWPs ended up runnable (and therefore able to take a signal),
 * return a LWP that is sleeping interruptibly.  The caller can wake
 * that LWP up to take a signal.
 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptible LWPs left after we have run so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptible LWPs if this is the case.
	 */

	p->p_stat = SACTIVE;
	if (p->p_flag & P_SA) {
		/*
		 * Preferentially select the idle LWP as the interruptible
		 * LWP to return if it exists.
		 */
		lr = p->p_sa->sa_idle;
		if (lr != NULL)
			cantake = 1;
	}
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}

	return lr;
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	u_long code;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG))
		ktrpsig(p,
		    signum, action, p->p_sigctx.ps_flags & SAS_OLDMASK ?
		    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask, 0);
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		if (p->p_sigctx.ps_sig != signum) {
			code = 0;
		} else {
			code = p->p_sigctx.ps_code;
			p->p_sigctx.ps_code = 0;
			p->p_sigctx.ps_lwp = 0;
			p->p_sigctx.ps_sig = 0;
		}
		psendsig(l, signum, returnmask, code);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(struct proc *p, const char *why)
{

	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */

#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

/* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	lwp_exit(l);
}

void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_sig = signum;
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
			    (int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

/*
 * Dump core, into a file named "progname.core" or "core" (depending on the
 * value of shortcorename), unless the process was setuid/setgid.
 */
int
coredump(struct lwp *l)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1;
	char name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p;

	p = l->l_proc;
	psignal(p, SIGSYS);
	return (ENOSYS);
}

static int
build_corename(struct proc *p, char dst[MAXPATHLEN])
{
	const char *s;
	char *d, *end;
	int i;

	for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
	    *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				goto copy;
			}
			d += i;
			s++;
		} else {
 copy:			*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}
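
/*
 * Example expansions of the pl_corename template handled above, for a
 * process named "sh" with pid 123 (login name illustrative):
 *
 *	"%n.core"		-> "sh.core"
 *	"core.%p"		-> "core.123"
 *	"/var/crash/%u/%n.%p"	-> "/var/crash/jsmith/sh.123"
 *
 * An unrecognized %-sequence is copied through verbatim via the `copy'
 * label, and ENAMETOOLONG is returned if the result would overflow
 * MAXPATHLEN.
 */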

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}

/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getcontext_args /* {
		syscallarg(struct __ucontext *) ucp;
	} */ *uap = v;
	ucontext_t uc;

	getucontext(l, &uc);

	return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p;
	int error;

	p = l->l_proc;
	if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
		return (error);
	l->l_ctxlink = ucp->uc_link;
	/*
	 * We might want to take care of the stack portion here but currently
	 * don't; see the comment in getucontext().
	 */
	if ((ucp->uc_flags & _UC_SIGMASK) != 0)
		sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);

	return 0;
}

/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setcontext_args /* {
		syscallarg(const ucontext_t *) ucp;
	} */ *uap = v;
	ucontext_t uc;
	int error;

	if (SCARG(uap, ucp) == NULL)	/* i.e. end of uc_link chain */
		exit1(l, W_EXITCODE(0, 0));
	else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
	    (error = setucontext(l, &uc)) != 0)
		return (error);

	return (EJUSTRETURN);
}
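
/*
 * Illustrative userland sketch, excluded from compilation: a
 * getcontext(2)/setcontext(2) round trip through sys_getcontext() and
 * sys_setcontext() above.  setcontext() does not return on success
 * (EJUSTRETURN); execution resumes at the getcontext() call site, so a
 * volatile flag (`resumed', invented here) is needed to break the loop.
 */
#if 0
#include <ucontext.h>

int
main(void)
{
	ucontext_t uc;
	volatile int resumed = 0;

	(void)getcontext(&uc);		/* resumes here after setcontext */
	if (!resumed) {
		resumed = 1;
		(void)setcontext(&uc);	/* jumps back; never returns */
	}
	return 0;
}
#endif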

/*
 * sigtimedwait(2) system call, used also for implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles a single LWP in signal wait. libpthread provides
 * its own sigtimedwait() wrapper to DTRT WRT individual threads.
 *
 * XXX no support for queued signals, si_code is always SI_USER.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t waitset, twaitset;
	struct proc *p = l->l_proc;
	int error, signum, s;
	int timo = 0;
	struct timeval tvstart;
	struct timespec ts;

	if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
		return (error);

	/*
	 * Silently ignore SA_CANTMASK signals. psignal1() would
	 * ignore SA_CANTMASK signals in waitset, we do this
	 * only for the below siglist check.
	 */
	sigminusset(&sigcantmask, &waitset);

	/*
	 * First scan siglist and check if there is signal from
	 * our waitset already pending.
	 */
	twaitset = waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
			return (error);

		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return (EAGAIN);

		/*
		 * Remember current mono_time, it would be used in
		 * ECANCELED/ERESTART case.
		 */
		s = splclock();
		tvstart = mono_time;
		splx(s);
	}

	/*
	 * Setup ps_sigwait list.
	 */
	p->p_sigctx.ps_sigwaited = -1;
	p->p_sigctx.ps_sigwait = waitset;

	/*
	 * Wait for signal to arrive. We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Check if a signal from our wait set has arrived, or if it
	 * was mere wakeup.
	 */
	if (!error) {
		if ((signum = p->p_sigctx.ps_sigwaited) <= 0) {
			/* wakeup via _lwp_wakeup() */
			error = ECANCELED;
		}
	}

	/*
	 * On error, clear sigwait indication. psignal1() sets it
	 * in !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = 0;

		/*
		 * If the sleep was interrupted (either by signal or wakeup),
		 * update the timeout and copyout new value back.
		 * It would be used when the syscall would be restarted
		 * or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timeval tvnow, tvtimo;
			int err;

			s = splclock();
			tvnow = mono_time;
			splx(s);

			TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);

			/* compute how much time has passed since start */
			timersub(&tvnow, &tvstart, &tvnow);
			/* subtract passed time from timeout */
			timersub(&tvtimo, &tvnow, &tvtimo);

			if (tvtimo.tv_sec < 0)
				return (EAGAIN);

			TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);

			/* copy updated timeout to userland */
			if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts))))
				return (err);
		}

		return (error);
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * XXX no queued signals for now
	 */
	if (signum > 0) {
		siginfo_t si;

 sig:
		memset(&si, 0, sizeof(si));
		si.si_signo = signum;
		si.si_code = SI_USER;

		error = copyout(&si, SCARG(uap, info), sizeof(si));
		if (error)
			return (error);
	}

	return (0);
}
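
/*
 * Illustrative userland sketch, excluded from compilation: waiting
 * synchronously for SIGUSR1 with a one-second timeout through
 * sigtimedwait(2).  The signal must be blocked first so it is left in
 * ps_siglist (or ps_sigwait) for the scan above rather than delivered.
 */
#if 0
#include <signal.h>
#include <errno.h>
#include <time.h>

int
wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	(void)sigprocmask(SIG_BLOCK, &set, NULL);

	ts.tv_sec = 1;
	ts.tv_nsec = 0;
	if (sigtimedwait(&set, &info, &ts) == -1)
		return errno;	/* EAGAIN on timeout, per the code above */
	return 0;		/* info.si_signo == SIGUSR1, si_code SI_USER */
}
#endif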

/*
 * Returns true if signal is ignored or masked for passed process.
 */
int
sigismasked(struct proc *p, int sig)
{

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&p->p_sigctx.ps_sigmask, sig));
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};