1 /* $NetBSD: kern_sig.c,v 1.110 2001/01/14 22:31:58 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
41 */
42
43 #include "opt_ktrace.h"
44 #include "opt_compat_sunos.h"
45 #include "opt_compat_netbsd32.h"
46
47 #define SIGPROP /* include signal properties table */
48 #include <sys/param.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/systm.h>
55 #include <sys/timeb.h>
56 #include <sys/times.h>
57 #include <sys/buf.h>
58 #include <sys/acct.h>
59 #include <sys/file.h>
60 #include <sys/kernel.h>
61 #include <sys/wait.h>
62 #include <sys/ktrace.h>
63 #include <sys/syslog.h>
64 #include <sys/stat.h>
65 #include <sys/core.h>
66 #include <sys/ptrace.h>
67 #include <sys/filedesc.h>
68 #include <sys/malloc.h>
69 #include <sys/pool.h>
70
71 #include <sys/mount.h>
72 #include <sys/syscallargs.h>
73
74 #include <machine/cpu.h>
75
76 #include <sys/user.h> /* for coredump */
77
78 #include <uvm/uvm_extern.h>
79
80 static void proc_stop __P((struct proc *p));
81 void killproc __P((struct proc *, char *));
82 static int build_corename __P((struct proc *, char [MAXPATHLEN]));
83 #if COMPAT_NETBSD32
84 static int coredump32 __P((struct proc *, struct vnode *));
85 #endif
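/*
 * Signal sets filled in by siginit(): signals whose default action
 * continues a stopped process, signals that stop a process, and
 * signals that can never be blocked.
 */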
86 sigset_t contsigmask, stopsigmask, sigcantmask;
87
88 struct pool sigacts_pool; /* memory pool for sigacts structures */
89
90 /*
91 * Can process p, with pcred pc, send the signal signum to process q?
92 */
93 #define CANSIGNAL(p, pc, q, signum) \
94 ((pc)->pc_ucred->cr_uid == 0 || \
95 (pc)->p_ruid == (q)->p_cred->p_ruid || \
96 (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
97 (pc)->p_ruid == (q)->p_ucred->cr_uid || \
98 (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
99 ((signum) == SIGCONT && (q)->p_session == (p)->p_session))
100
101 /*
102 * Initialize signal-related data structures.
103 */
104 void
105 signal_init()
106 {
107 pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
108 0, pool_page_alloc_nointr, pool_page_free_nointr, M_SUBPROC);
109 }
110
111 /*
112  * Create an initial sigacts structure for the new process np, using
113  * the same signal state as the parent pp. If 'share' is set, share
114  * the parent's sigacts (bumping its reference count); otherwise copy it.
115 */
116 void
117 sigactsinit(np, pp, share)
118 struct proc *np; /* new process */
119 struct proc *pp; /* parent process */
120 int share;
121 {
122 struct sigacts *ps;
123
124 if (share) {
125 np->p_sigacts = pp->p_sigacts;
126 pp->p_sigacts->sa_refcnt++;
127 } else {
128 ps = pool_get(&sigacts_pool, PR_WAITOK);
129 if (pp)
130 memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
131 else
132 memset(ps, '\0', sizeof(struct sigacts));
133 ps->sa_refcnt = 1;
134 np->p_sigacts = ps;
135 }
136 }
137
138 /*
139  * Make this process not share its sigacts with any other process,
140  * maintaining all current signal state.
141 */
142 void
143 sigactsunshare(p)
144 struct proc *p;
145 {
146 struct sigacts *oldps;
147
148 if (p->p_sigacts->sa_refcnt == 1)
149 return;
150
151 oldps = p->p_sigacts;
152 sigactsinit(p, NULL, 0);
153
154 if (--oldps->sa_refcnt == 0)
155 pool_put(&sigacts_pool, oldps);
156 }
157
158 /*
159  * Release a reference to a sigacts structure, freeing it on last release.
160 */
161 void
162 sigactsfree(p)
163 struct proc *p;
164 {
165 struct sigacts *ps = p->p_sigacts;
166
167 if (--ps->sa_refcnt > 0)
168 return;
169
170 pool_put(&sigacts_pool, ps);
171 }
172
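/*
 * Common code for sigaction(2): validate and install a new signal action
 * for signum, returning the old action through osa if requested, and keep
 * the process's ps_sigignore/ps_sigcatch sets and the P_NOCLDSTOP and
 * P_NOCLDWAIT flags consistent with the installed handler.
 */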
173 int
174 sigaction1(p, signum, nsa, osa)
175 struct proc *p;
176 int signum;
177 const struct sigaction *nsa;
178 struct sigaction *osa;
179 {
180 struct sigacts *ps = p->p_sigacts;
181 int prop;
182
183 if (signum <= 0 || signum >= NSIG)
184 return (EINVAL);
185
186 if (osa)
187 *osa = SIGACTION_PS(ps, signum);
188
189 if (nsa) {
190 if (nsa->sa_flags & ~SA_ALLBITS)
191 return (EINVAL);
192
193 prop = sigprop[signum];
194 if (prop & SA_CANTMASK)
195 return (EINVAL);
196
197 (void) splsched(); /* XXXSMP */
198 SIGACTION_PS(ps, signum) = *nsa;
199 sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
200 if ((prop & SA_NORESET) != 0)
201 SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
202 if (signum == SIGCHLD) {
203 if (nsa->sa_flags & SA_NOCLDSTOP)
204 p->p_flag |= P_NOCLDSTOP;
205 else
206 p->p_flag &= ~P_NOCLDSTOP;
207 if (nsa->sa_flags & SA_NOCLDWAIT) {
208 /*
209 * Paranoia: since SA_NOCLDWAIT is implemented
210 * by reparenting the dying child to PID 1 (and
211 	 * trusting it to reap the zombie), PID 1 itself is
212 * forbidden to set SA_NOCLDWAIT.
213 */
214 if (p->p_pid == 1)
215 p->p_flag &= ~P_NOCLDWAIT;
216 else
217 p->p_flag |= P_NOCLDWAIT;
218 } else
219 p->p_flag &= ~P_NOCLDWAIT;
220 }
221 if ((nsa->sa_flags & SA_NODEFER) == 0)
222 sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
223 else
224 sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
225 /*
226 * Set bit in p_sigctx.ps_sigignore for signals that are set to SIG_IGN,
227 * and for signals set to SIG_DFL where the default is to ignore.
228 * However, don't put SIGCONT in p_sigctx.ps_sigignore,
229 * as we have to restart the process.
230 */
231 if (nsa->sa_handler == SIG_IGN ||
232 (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
233 sigdelset(&p->p_sigctx.ps_siglist, signum); /* never to be seen again */
234 if (signum != SIGCONT)
235 sigaddset(&p->p_sigctx.ps_sigignore, signum); /* easier in psignal */
236 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
237 } else {
238 sigdelset(&p->p_sigctx.ps_sigignore, signum);
239 if (nsa->sa_handler == SIG_DFL)
240 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
241 else
242 sigaddset(&p->p_sigctx.ps_sigcatch, signum);
243 }
244 (void) spl0();
245 }
246
247 return (0);
248 }
249
250 /* ARGSUSED */
251 int
252 sys___sigaction14(p, v, retval)
253 struct proc *p;
254 void *v;
255 register_t *retval;
256 {
257 struct sys___sigaction14_args /* {
258 syscallarg(int) signum;
259 syscallarg(const struct sigaction *) nsa;
260 syscallarg(struct sigaction *) osa;
261 } */ *uap = v;
262 struct sigaction nsa, osa;
263 int error;
264
265 if (SCARG(uap, nsa)) {
266 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
267 if (error)
268 return (error);
269 }
270 error = sigaction1(p, SCARG(uap, signum),
271 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0);
272 if (error)
273 return (error);
274 if (SCARG(uap, osa)) {
275 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
276 if (error)
277 return (error);
278 }
279 return (0);
280 }
281
282 /*
283 * Initialize signal state for process 0;
284 * set to ignore signals that are ignored by default and disable the signal
285 * stack.
286 */
287 void
288 siginit(p)
289 struct proc *p;
290 {
291 struct sigacts *ps = p->p_sigacts;
292 int signum;
293 int prop;
294
295 sigemptyset(&contsigmask);
296 sigemptyset(&stopsigmask);
297 sigemptyset(&sigcantmask);
298 for (signum = 1; signum < NSIG; signum++) {
299 prop = sigprop[signum];
300 if (prop & SA_CONT)
301 sigaddset(&contsigmask, signum);
302 if (prop & SA_STOP)
303 sigaddset(&stopsigmask, signum);
304 if (prop & SA_CANTMASK)
305 sigaddset(&sigcantmask, signum);
306 if (prop & SA_IGNORE && signum != SIGCONT)
307 sigaddset(&p->p_sigctx.ps_sigignore, signum);
308 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
309 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
310 }
311 sigemptyset(&p->p_sigctx.ps_sigcatch);
312 p->p_flag &= ~P_NOCLDSTOP;
313
314 /*
315 * Reset stack state to the user stack.
316 */
317 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
318 p->p_sigctx.ps_sigstk.ss_size = 0;
319 p->p_sigctx.ps_sigstk.ss_sp = 0;
320
321 /* One reference. */
322 ps->sa_refcnt = 1;
323 }
324
325 /*
326 * Reset signals for an exec of the specified process.
327 */
328 void
329 execsigs(p)
330 struct proc *p;
331 {
332 struct sigacts *ps = p->p_sigacts;
333 int signum;
334 int prop;
335
336 /*
337 * Reset caught signals. Held signals remain held
338 * through p_sigctx.ps_sigmask (unless they were caught,
339 * and are now ignored by default).
340 */
341 for (signum = 1; signum < NSIG; signum++) {
342 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
343 prop = sigprop[signum];
344 if (prop & SA_IGNORE) {
345 if ((prop & SA_CONT) == 0)
346 sigaddset(&p->p_sigctx.ps_sigignore, signum);
347 sigdelset(&p->p_sigctx.ps_siglist, signum);
348 }
349 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
350 }
351 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
352 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
353 }
354 sigemptyset(&p->p_sigctx.ps_sigcatch);
355 p->p_flag &= ~P_NOCLDSTOP;
356
357 /*
358 * Reset stack state to the user stack.
359 */
360 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
361 p->p_sigctx.ps_sigstk.ss_size = 0;
362 p->p_sigctx.ps_sigstk.ss_sp = 0;
363 }
364
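/*
 * Common code for sigprocmask(2): optionally return the current mask
 * through oss, then adjust it according to how (SIG_BLOCK, SIG_UNBLOCK
 * or SIG_SETMASK), never allowing the unmaskable signals to be blocked.
 */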
365 int
366 sigprocmask1(p, how, nss, oss)
367 struct proc *p;
368 int how;
369 const sigset_t *nss;
370 sigset_t *oss;
371 {
372
373 if (oss)
374 *oss = p->p_sigctx.ps_sigmask;
375
376 if (nss) {
377 (void)splsched(); /* XXXSMP */
378 switch (how) {
379 case SIG_BLOCK:
380 sigplusset(nss, &p->p_sigctx.ps_sigmask);
381 break;
382 case SIG_UNBLOCK:
383 sigminusset(nss, &p->p_sigctx.ps_sigmask);
384 CHECKSIGS(p);
385 break;
386 case SIG_SETMASK:
387 p->p_sigctx.ps_sigmask = *nss;
388 CHECKSIGS(p);
389 break;
390 default:
391 (void)spl0(); /* XXXSMP */
392 return (EINVAL);
393 }
394 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
395 (void)spl0(); /* XXXSMP */
396 }
397
398 return (0);
399 }
400
401 /*
402 * Manipulate signal mask.
403  * The new and old masks are passed by reference; copy the new mask
404  * in from user space, apply it with sigprocmask1(), and copy the
405  * previous mask back out if the caller asked for it.
406 */
407 int
408 sys___sigprocmask14(p, v, retval)
409 struct proc *p;
410 void *v;
411 register_t *retval;
412 {
413 struct sys___sigprocmask14_args /* {
414 syscallarg(int) how;
415 syscallarg(const sigset_t *) set;
416 syscallarg(sigset_t *) oset;
417 } */ *uap = v;
418 sigset_t nss, oss;
419 int error;
420
421 if (SCARG(uap, set)) {
422 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
423 if (error)
424 return (error);
425 }
426 error = sigprocmask1(p, SCARG(uap, how),
427 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
428 if (error)
429 return (error);
430 if (SCARG(uap, oset)) {
431 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
432 if (error)
433 return (error);
434 }
435 return (0);
436 }
437
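/*
 * Compute the set of signals that are pending but not currently blocked.
 */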
438 void
439 sigpending1(p, ss)
440 struct proc *p;
441 sigset_t *ss;
442 {
443
444 *ss = p->p_sigctx.ps_siglist;
445 sigminusset(&p->p_sigctx.ps_sigmask, ss);
446 }
447
448 /* ARGSUSED */
449 int
450 sys___sigpending14(p, v, retval)
451 struct proc *p;
452 void *v;
453 register_t *retval;
454 {
455 struct sys___sigpending14_args /* {
456 syscallarg(sigset_t *) set;
457 } */ *uap = v;
458 sigset_t ss;
459
460 sigpending1(p, &ss);
461 return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
462 }
463
464 int
465 sigsuspend1(p, ss)
466 struct proc *p;
467 const sigset_t *ss;
468 {
469 struct sigacts *ps = p->p_sigacts;
470
471 if (ss) {
472 /*
473 * When returning from sigpause, we want
474 * the old mask to be restored after the
475 * signal handler has finished. Thus, we
476 * save it here and mark the sigctx structure
477 * to indicate this.
478 */
479 p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
480 p->p_sigctx.ps_flags |= SAS_OLDMASK;
481 (void) splsched(); /* XXXSMP */
482 p->p_sigctx.ps_sigmask = *ss;
483 CHECKSIGS(p);
484 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
485 (void) spl0(); /* XXXSMP */
486 }
487
488 while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
489 /* void */;
490 /* always return EINTR rather than ERESTART... */
491 return (EINTR);
492 }
493
494 /*
495 * Suspend process until signal, providing mask to be set
496  * in the meantime. The mask is supplied as a pointer and is
497  * copied in from user space before sigsuspend1() installs it.
498 */
499 /* ARGSUSED */
500 int
501 sys___sigsuspend14(p, v, retval)
502 struct proc *p;
503 void *v;
504 register_t *retval;
505 {
506 struct sys___sigsuspend14_args /* {
507 syscallarg(const sigset_t *) set;
508 } */ *uap = v;
509 sigset_t ss;
510 int error;
511
512 if (SCARG(uap, set)) {
513 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
514 if (error)
515 return (error);
516 }
517
518 return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
519 }
520
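/*
 * Common code for sigaltstack(2): get and/or set the alternate signal
 * stack, refusing to disable it while the process is running on it and
 * rejecting new stacks smaller than MINSIGSTKSZ.
 */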
521 int
522 sigaltstack1(p, nss, oss)
523 struct proc *p;
524 const struct sigaltstack *nss;
525 struct sigaltstack *oss;
526 {
527 if (oss)
528 *oss = p->p_sigctx.ps_sigstk;
529
530 if (nss) {
531 if (nss->ss_flags & ~SS_ALLBITS)
532 return (EINVAL);
533
534 if (nss->ss_flags & SS_DISABLE) {
535 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
536 return (EINVAL);
537 } else {
538 if (nss->ss_size < MINSIGSTKSZ)
539 return (ENOMEM);
540 }
541 p->p_sigctx.ps_sigstk = *nss;
542 }
543
544 return (0);
545 }
546
547 /* ARGSUSED */
548 int
549 sys___sigaltstack14(p, v, retval)
550 struct proc *p;
551 void *v;
552 register_t *retval;
553 {
554 struct sys___sigaltstack14_args /* {
555 syscallarg(const struct sigaltstack *) nss;
556 syscallarg(struct sigaltstack *) oss;
557 } */ *uap = v;
558 struct sigaltstack nss, oss;
559 int error;
560
561 if (SCARG(uap, nss)) {
562 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
563 if (error)
564 return (error);
565 }
566 error = sigaltstack1(p,
567 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
568 if (error)
569 return (error);
570 if (SCARG(uap, oss)) {
571 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
572 if (error)
573 return (error);
574 }
575 return (0);
576 }
577
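/*
 * kill(2): send a signal to a process or process group, subject to the
 * CANSIGNAL() permission check.  pid > 0 names a single process; a pid
 * of 0, -1 or a negative process group id is handled by killpg1().
 */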
578 /* ARGSUSED */
579 int
580 sys_kill(cp, v, retval)
581 struct proc *cp;
582 void *v;
583 register_t *retval;
584 {
585 struct sys_kill_args /* {
586 syscallarg(int) pid;
587 syscallarg(int) signum;
588 } */ *uap = v;
589 struct proc *p;
590 struct pcred *pc = cp->p_cred;
591
592 if ((u_int)SCARG(uap, signum) >= NSIG)
593 return (EINVAL);
594 if (SCARG(uap, pid) > 0) {
595 /* kill single process */
596 if ((p = pfind(SCARG(uap, pid))) == NULL)
597 return (ESRCH);
598 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
599 return (EPERM);
600 if (SCARG(uap, signum))
601 psignal(p, SCARG(uap, signum));
602 return (0);
603 }
604 switch (SCARG(uap, pid)) {
605 case -1: /* broadcast signal */
606 return (killpg1(cp, SCARG(uap, signum), 0, 1));
607 case 0: /* signal own process group */
608 return (killpg1(cp, SCARG(uap, signum), 0, 0));
609 default: /* negative explicit process group */
610 return (killpg1(cp, SCARG(uap, signum), -SCARG(uap, pid), 0));
611 }
612 /* NOTREACHED */
613 }
614
615 /*
616 * Common code for kill process group/broadcast kill.
617 * cp is calling process.
618 */
619 int
620 killpg1(cp, signum, pgid, all)
621 struct proc *cp;
622 int signum, pgid, all;
623 {
624 struct proc *p;
625 struct pcred *pc = cp->p_cred;
626 struct pgrp *pgrp;
627 int nfound = 0;
628
629 if (all) {
630 /*
631 * broadcast
632 */
633 proclist_lock_read();
634 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
635 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
636 p == cp || !CANSIGNAL(cp, pc, p, signum))
637 continue;
638 nfound++;
639 if (signum)
640 psignal(p, signum);
641 }
642 proclist_unlock_read();
643 } else {
644 if (pgid == 0)
645 /*
646 * zero pgid means send to my process group.
647 */
648 pgrp = cp->p_pgrp;
649 else {
650 pgrp = pgfind(pgid);
651 if (pgrp == NULL)
652 return (ESRCH);
653 }
654 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
655 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
656 !CANSIGNAL(cp, pc, p, signum))
657 continue;
658 nfound++;
659 if (signum && P_ZOMBIE(p) == 0)
660 psignal(p, signum);
661 }
662 }
663 return (nfound ? 0 : ESRCH);
664 }
665
666 /*
667 * Send a signal to a process group.
668 */
669 void
670 gsignal(pgid, signum)
671 int pgid, signum;
672 {
673 struct pgrp *pgrp;
674
675 if (pgid && (pgrp = pgfind(pgid)))
676 pgsignal(pgrp, signum, 0);
677 }
678
679 /*
680  * Send a signal to a process group. If checkctty is 1,
681 * limit to members which have a controlling terminal.
682 */
683 void
684 pgsignal(pgrp, signum, checkctty)
685 struct pgrp *pgrp;
686 int signum, checkctty;
687 {
688 struct proc *p;
689
690 if (pgrp)
691 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
692 if (checkctty == 0 || p->p_flag & P_CONTROLT)
693 psignal(p, signum);
694 }
695
696 /*
697 * Send a signal caused by a trap to the current process.
698 * If it will be caught immediately, deliver it with correct code.
699 * Otherwise, post it normally.
700 */
701 void
702 trapsignal(p, signum, code)
703 struct proc *p;
704 int signum;
705 u_long code;
706 {
707 struct sigacts *ps = p->p_sigacts;
708
709 if ((p->p_flag & P_TRACED) == 0 &&
710 sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
711 !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
712 p->p_stats->p_ru.ru_nsignals++;
713 #ifdef KTRACE
714 if (KTRPOINT(p, KTR_PSIG))
715 ktrpsig(p, signum,
716 SIGACTION_PS(ps, signum).sa_handler,
717 &p->p_sigctx.ps_sigmask, code);
718 #endif
719 (*p->p_emul->e_sendsig)(SIGACTION_PS(ps, signum).sa_handler,
720 signum, &p->p_sigctx.ps_sigmask, code);
721 (void) splsched(); /* XXXSMP */
722 sigplusset(&SIGACTION_PS(ps, signum).sa_mask, &p->p_sigctx.ps_sigmask);
723 if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
724 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
725 if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
726 sigaddset(&p->p_sigctx.ps_sigignore, signum);
727 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
728 }
729 (void) spl0(); /* XXXSMP */
730 } else {
731 p->p_sigctx.ps_code = code; /* XXX for core dump/debugger */
732 p->p_sigctx.ps_sig = signum; /* XXX to verify code */
733 psignal(p, signum);
734 }
735 }
736
737 /*
738 * Send the signal to the process. If the signal has an action, the action
739 * is usually performed by the target process rather than the caller; we add
740 * the signal to the set of pending signals for the process.
741 *
742 * Exceptions:
743 * o When a stop signal is sent to a sleeping process that takes the
744 * default action, the process is stopped without awakening it.
745 * o SIGCONT restarts stopped processes (or puts them back to sleep)
746 * regardless of the signal action (eg, blocked or ignored).
747 *
748 * Other ignored signals are discarded immediately.
749 *
750 * XXXSMP: Invoked as psignal() or sched_psignal().
751 */
752 void
753 psignal1(p, signum, dolock)
754 struct proc *p;
755 int signum;
756 int dolock; /* XXXSMP: works, but icky */
757 {
758 int s, prop;
759 sig_t action;
760
761 #ifdef DIAGNOSTIC
762 if (signum <= 0 || signum >= NSIG)
763 panic("psignal signal number");
764
765 /* XXXSMP: works, but icky */
766 if (dolock)
767 SCHED_ASSERT_UNLOCKED();
768 else
769 SCHED_ASSERT_LOCKED();
770 #endif
771 prop = sigprop[signum];
772
773 /*
774 * If proc is traced, always give parent a chance.
775 */
776 if (p->p_flag & P_TRACED)
777 action = SIG_DFL;
778 else {
779 /*
780 * If the signal is being ignored,
781 * then we forget about it immediately.
782 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
783 * and if it is set to SIG_IGN,
784 * action will be SIG_DFL here.)
785 */
786 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
787 return;
788 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
789 action = SIG_HOLD;
790 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
791 action = SIG_CATCH;
792 else {
793 action = SIG_DFL;
794
795 if (prop & SA_KILL && p->p_nice > NZERO)
796 p->p_nice = NZERO;
797
798 /*
799 * If sending a tty stop signal to a member of an
800 * orphaned process group, discard the signal here if
801 * the action is default; don't stop the process below
802 * if sleeping, and don't clear any pending SIGCONT.
803 */
804 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
805 return;
806 }
807 }
808
809 if (prop & SA_CONT)
810 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
811
812 if (prop & SA_STOP)
813 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
814
815 sigaddset(&p->p_sigctx.ps_siglist, signum);
816
817 /* CHECKSIGS() is "inlined" here. */
818 p->p_sigctx.ps_sigcheck = 1;
819
820 /*
821 * Defer further processing for signals which are held,
822 * except that stopped processes must be continued by SIGCONT.
823 */
824 if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
825 return;
826
827 /* XXXSMP: works, but icky */
828 if (dolock)
829 SCHED_LOCK(s);
830
831 switch (p->p_stat) {
832 case SSLEEP:
833 /*
834 * If process is sleeping uninterruptibly
835 * we can't interrupt the sleep... the signal will
836 * be noticed when the process returns through
837 * trap() or syscall().
838 */
839 if ((p->p_flag & P_SINTR) == 0)
840 goto out;
841 /*
842 * Process is sleeping and traced... make it runnable
843 * so it can discover the signal in issignal() and stop
844 * for the parent.
845 */
846 if (p->p_flag & P_TRACED)
847 goto run;
848 /*
849 * If SIGCONT is default (or ignored) and process is
850 * asleep, we are finished; the process should not
851 * be awakened.
852 */
853 if ((prop & SA_CONT) && action == SIG_DFL) {
854 sigdelset(&p->p_sigctx.ps_siglist, signum);
855 goto out;
856 }
857 /*
858 * When a sleeping process receives a stop
859 * signal, process immediately if possible.
860 */
861 if ((prop & SA_STOP) && action == SIG_DFL) {
862 /*
863 * If a child holding parent blocked,
864 * stopping could cause deadlock.
865 */
866 if (p->p_flag & P_PPWAIT)
867 goto out;
868 sigdelset(&p->p_sigctx.ps_siglist, signum);
869 p->p_xstat = signum;
870 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
871 /*
872 * XXXSMP: recursive call; don't lock
873 * the second time around.
874 */
875 sched_psignal(p->p_pptr, SIGCHLD);
876 }
877 proc_stop(p); /* XXXSMP: recurse? */
878 goto out;
879 }
880 /*
881 * All other (caught or default) signals
882 * cause the process to run.
883 */
884 goto runfast;
885 /*NOTREACHED*/
886
887 case SSTOP:
888 /*
889 * If traced process is already stopped,
890 * then no further action is necessary.
891 */
892 if (p->p_flag & P_TRACED)
893 goto out;
894
895 /*
896 * Kill signal always sets processes running.
897 */
898 if (signum == SIGKILL)
899 goto runfast;
900
901 if (prop & SA_CONT) {
902 /*
903 * If SIGCONT is default (or ignored), we continue the
904 * process but don't leave the signal in p_sigctx.ps_siglist, as
905 * it has no further action. If SIGCONT is held, we
906 * continue the process and leave the signal in
907 * p_sigctx.ps_siglist. If the process catches SIGCONT, let it
908 * handle the signal itself. If it isn't waiting on
909 * an event, then it goes back to run state.
910 * Otherwise, process goes back to sleep state.
911 */
912 if (action == SIG_DFL)
913 sigdelset(&p->p_sigctx.ps_siglist, signum);
914 if (action == SIG_CATCH)
915 goto runfast;
916 if (p->p_wchan == 0)
917 goto run;
918 p->p_stat = SSLEEP;
919 goto out;
920 }
921
922 if (prop & SA_STOP) {
923 /*
924 * Already stopped, don't need to stop again.
925 * (If we did the shell could get confused.)
926 */
927 sigdelset(&p->p_sigctx.ps_siglist, signum);
928 goto out;
929 }
930
931 /*
932 * If process is sleeping interruptibly, then simulate a
933 * wakeup so that when it is continued, it will be made
934 * runnable and can look at the signal. But don't make
935 * the process runnable, leave it stopped.
936 */
937 if (p->p_wchan && p->p_flag & P_SINTR)
938 unsleep(p);
939 goto out;
940 #ifdef __HAVE_AST_PERPROC
941 case SONPROC:
942 case SRUN:
943 case SIDL:
944 /*
945 * SONPROC: We're running, notice the signal when
946 * we return back to userspace.
947 *
948 * SRUN, SIDL: Notice the signal when we run again
949 	 * and return back to userspace.
950 */
951 signotify(p);
952 goto out;
953
954 default:
955 /*
956 * SDEAD, SZOMB: The signal will never be noticed.
957 */
958 goto out;
959 #else /* ! __HAVE_AST_PERPROC */
960 case SONPROC:
961 /*
962 * We're running; notice the signal.
963 */
964 signotify(p);
965 goto out;
966
967 default:
968 /*
969 * SRUN, SIDL, SDEAD, SZOMB do nothing with the signal.
970 * It will either never be noticed, or noticed very soon.
971 */
972 goto out;
973 #endif /* __HAVE_AST_PERPROC */
974 }
975 /*NOTREACHED*/
976
977 runfast:
978 /*
979 * Raise priority to at least PUSER.
980 */
981 if (p->p_priority > PUSER)
982 p->p_priority = PUSER;
983 run:
984 setrunnable(p); /* XXXSMP: recurse? */
985 out:
986 /* XXXSMP: works, but icky */
987 if (dolock)
988 SCHED_UNLOCK(s);
989 }
990
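/*
 * Return the lowest-numbered signal contained in the set ss, or 0 if
 * the set is empty.
 */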
991 static __inline int firstsig __P((const sigset_t *));
992
993 static __inline int
994 firstsig(ss)
995 const sigset_t *ss;
996 {
997 int sig;
998
999 sig = ffs(ss->__bits[0]);
1000 if (sig != 0)
1001 return (sig);
1002 #if NSIG > 33
1003 sig = ffs(ss->__bits[1]);
1004 if (sig != 0)
1005 return (sig + 32);
1006 #endif
1007 #if NSIG > 65
1008 sig = ffs(ss->__bits[2]);
1009 if (sig != 0)
1010 return (sig + 64);
1011 #endif
1012 #if NSIG > 97
1013 sig = ffs(ss->__bits[3]);
1014 if (sig != 0)
1015 return (sig + 96);
1016 #endif
1017 return (0);
1018 }
1019
1020 /*
1021 * If the current process has received a signal (should be caught or cause
1022 * termination, should interrupt current syscall), return the signal number.
1023 * Stop signals with default action are processed immediately, then cleared;
1024 * they aren't returned. This is checked after each entry to the system for
1025 * a syscall or trap (though this can usually be done without calling issignal
1026 * by checking the pending signal masks in the CURSIG macro.) The normal call
1027 * sequence is
1028 *
1029 * while (signum = CURSIG(curproc))
1030 * postsig(signum);
1031 */
1032 int
1033 issignal(p)
1034 struct proc *p;
1035 {
1036 int s, signum, prop;
1037 sigset_t ss;
1038
1039 for (;;) {
1040 sigpending1(p, &ss);
1041 if (p->p_flag & P_PPWAIT)
1042 sigminusset(&stopsigmask, &ss);
1043 signum = firstsig(&ss);
1044 if (signum == 0) { /* no signal to send */
1045 p->p_sigctx.ps_sigcheck = 0;
1046 return (0);
1047 }
1048 sigdelset(&p->p_sigctx.ps_siglist, signum); /* take the signal! */
1049
1050 /*
1051 * We should see pending but ignored signals
1052 * only if P_TRACED was on when they were posted.
1053 */
1054 if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
1055 (p->p_flag & P_TRACED) == 0)
1056 continue;
1057
1058 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
1059 /*
1060 * If traced, always stop, and stay
1061 * stopped until released by the debugger.
1062 */
1063 p->p_xstat = signum;
1064 if ((p->p_flag & P_FSTRACE) == 0)
1065 psignal(p->p_pptr, SIGCHLD);
1066 do {
1067 SCHED_LOCK(s);
1068 proc_stop(p);
1069 mi_switch(p);
1070 SCHED_ASSERT_UNLOCKED();
1071 splx(s);
1072 } while (!trace_req(p) && p->p_flag & P_TRACED);
1073
1074 /*
1075 * If we are no longer being traced, or the parent
1076 * didn't give us a signal, look for more signals.
1077 */
1078 if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
1079 continue;
1080
1081 /*
1082 * If the new signal is being masked, look for other
1083 * signals.
1084 */
1085 signum = p->p_xstat;
1086 /* `p->p_sigctx.ps_siglist |= mask' is done in setrunnable(). */
1087 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1088 continue;
1089 sigdelset(&p->p_sigctx.ps_siglist, signum); /* take the signal! */
1090 }
1091
1092 prop = sigprop[signum];
1093
1094 /*
1095 * Decide whether the signal should be returned.
1096 * Return the signal's number, or fall through
1097 * to clear it from the pending mask.
1098 */
1099 switch ((long)SIGACTION(p, signum).sa_handler) {
1100
1101 case (long)SIG_DFL:
1102 /*
1103 * Don't take default actions on system processes.
1104 */
1105 if (p->p_pid <= 1) {
1106 #ifdef DIAGNOSTIC
1107 /*
1108 * Are you sure you want to ignore SIGSEGV
1109 * in init? XXX
1110 */
1111 printf("Process (pid %d) got signal %d\n",
1112 p->p_pid, signum);
1113 #endif
1114 break; /* == ignore */
1115 }
1116 /*
1117 * If there is a pending stop signal to process
1118 * with default action, stop here,
1119 * then clear the signal. However,
1120 * if process is member of an orphaned
1121 * process group, ignore tty stop signals.
1122 */
1123 if (prop & SA_STOP) {
1124 if (p->p_flag & P_TRACED ||
1125 (p->p_pgrp->pg_jobc == 0 &&
1126 prop & SA_TTYSTOP))
1127 break; /* == ignore */
1128 p->p_xstat = signum;
1129 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
1130 psignal(p->p_pptr, SIGCHLD);
1131 SCHED_LOCK(s);
1132 proc_stop(p);
1133 mi_switch(p);
1134 SCHED_ASSERT_UNLOCKED();
1135 splx(s);
1136 break;
1137 } else if (prop & SA_IGNORE) {
1138 /*
1139 * Except for SIGCONT, shouldn't get here.
1140 * Default action is to ignore; drop it.
1141 */
1142 break; /* == ignore */
1143 } else
1144 goto keep;
1145 /*NOTREACHED*/
1146
1147 case (long)SIG_IGN:
1148 /*
1149 * Masking above should prevent us ever trying
1150 * to take action on an ignored signal other
1151 * than SIGCONT, unless process is traced.
1152 */
1153 if ((prop & SA_CONT) == 0 &&
1154 (p->p_flag & P_TRACED) == 0)
1155 printf("issignal\n");
1156 break; /* == ignore */
1157
1158 default:
1159 /*
1160 * This signal has an action, let
1161 * postsig() process it.
1162 */
1163 goto keep;
1164 }
1165 }
1166 /* NOTREACHED */
1167
1168 keep:
1169 sigaddset(&p->p_sigctx.ps_siglist, signum); /* leave the signal for later */
1170 CHECKSIGS(p);
1171 return (signum);
1172 }
1173
1174 /*
1175 * Put the argument process into the stopped state and notify the parent
1176 * via wakeup. Signals are handled elsewhere. The process must not be
1177 * on the run queue.
1178 */
1179 static void
1180 proc_stop(p)
1181 struct proc *p;
1182 {
1183
1184 SCHED_ASSERT_LOCKED();
1185
1186 p->p_stat = SSTOP;
1187 p->p_flag &= ~P_WAITED;
1188 sched_wakeup((caddr_t)p->p_pptr);
1189 }
1190
1191 /*
1192 * Take the action for the specified signal
1193 * from the current set of pending signals.
1194 */
1195 void
1196 postsig(signum)
1197 int signum;
1198 {
1199 struct proc *p = curproc;
1200 struct sigacts *ps = p->p_sigacts;
1201 sig_t action;
1202 u_long code;
1203 sigset_t *returnmask;
1204
1205 #ifdef DIAGNOSTIC
1206 if (signum == 0)
1207 panic("postsig");
1208 #endif
1209
1210 KERNEL_PROC_LOCK(p);
1211
1212 sigdelset(&p->p_sigctx.ps_siglist, signum);
1213 action = SIGACTION_PS(ps, signum).sa_handler;
1214 #ifdef KTRACE
1215 if (KTRPOINT(p, KTR_PSIG))
1216 ktrpsig(p,
1217 signum, action, p->p_sigctx.ps_flags & SAS_OLDMASK ?
1218 &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask, 0);
1219 #endif
1220 if (action == SIG_DFL) {
1221 /*
1222 * Default action, where the default is to kill
1223 * the process. (Other cases were ignored above.)
1224 */
1225 sigexit(p, signum);
1226 /* NOTREACHED */
1227 } else {
1228 /*
1229 * If we get here, the signal must be caught.
1230 */
1231 #ifdef DIAGNOSTIC
1232 if (action == SIG_IGN || sigismember(&p->p_sigctx.ps_sigmask, signum))
1233 panic("postsig action");
1234 #endif
1235 /*
1236 * Set the new mask value and also defer further
1237 	 * occurrences of this signal.
1238 *
1239 * Special case: user has done a sigpause. Here the
1240 * current mask is not of interest, but rather the
1241 * mask from before the sigpause is what we want
1242 * restored after the signal processing is completed.
1243 */
1244 if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
1245 returnmask = &p->p_sigctx.ps_oldmask;
1246 p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
1247 } else
1248 returnmask = &p->p_sigctx.ps_sigmask;
1249 p->p_stats->p_ru.ru_nsignals++;
1250 if (p->p_sigctx.ps_sig != signum) {
1251 code = 0;
1252 } else {
1253 code = p->p_sigctx.ps_code;
1254 p->p_sigctx.ps_code = 0;
1255 p->p_sigctx.ps_sig = 0;
1256 }
1257 (*p->p_emul->e_sendsig)(action, signum, returnmask, code);
1258 (void) splsched(); /* XXXSMP */
1259 sigplusset(&SIGACTION_PS(ps, signum).sa_mask, &p->p_sigctx.ps_sigmask);
1260 if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
1261 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
1262 if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
1263 sigaddset(&p->p_sigctx.ps_sigignore, signum);
1264 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
1265 }
1266 (void) spl0(); /* XXXSMP */
1267 }
1268
1269 KERNEL_PROC_UNLOCK(p);
1270 }
1271
1272 /*
1273 * Kill the current process for stated reason.
1274 */
1275 void
1276 killproc(p, why)
1277 struct proc *p;
1278 char *why;
1279 {
1280
1281 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
1282 uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
1283 psignal(p, SIGKILL);
1284 }
1285
1286 /*
1287 * Force the current process to exit with the specified signal, dumping core
1288 * if appropriate. We bypass the normal tests for masked and caught signals,
1289 * allowing unrecoverable failures to terminate the process without changing
1290 * signal state. Mark the accounting record with the signal termination.
1291 * If dumping core, save the signal number for the debugger. Calls exit and
1292 * does not return.
1293 */
1294
1295 #if defined(DEBUG)
1296 int kern_logsigexit = 1; /* not static to make public for sysctl */
1297 #else
1298 int kern_logsigexit = 0; /* not static to make public for sysctl */
1299 #endif
1300
1301 static const char logcoredump[] =
1302 "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
1303 static const char lognocoredump[] =
1304 "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
1305
1306 void
1307 sigexit(p, signum)
1308 struct proc *p;
1309 int signum;
1310 {
1311 int error;
1312 int exitsig = signum;
1313
1314 p->p_acflag |= AXSIG;
1315 if (sigprop[signum] & SA_CORE) {
1316 p->p_sigctx.ps_sig = signum;
1317 if ((error = coredump(p)) == 0)
1318 exitsig |= WCOREFLAG;
1319
1320 if (kern_logsigexit) {
1321 int uid = p->p_cred && p->p_ucred ?
1322 p->p_ucred->cr_uid : -1;
1323
1324 if (error)
1325 log(LOG_INFO, lognocoredump, p->p_pid,
1326 p->p_comm, uid, signum, error);
1327 else
1328 log(LOG_INFO, logcoredump, p->p_pid,
1329 p->p_comm, uid, signum);
1330 }
1331
1332 }
1333
1334 exit1(p, W_EXITCODE(0, exitsig));
1335 /* NOTREACHED */
1336 }
1337
1338 /*
1339  * Dump core, into a file whose name is built from the process's core
1340  * name template by build_corename(), unless the process was setuid/setgid.
1341 */
1342 int
1343 coredump(p)
1344 struct proc *p;
1345 {
1346 struct vnode *vp;
1347 struct vmspace *vm = p->p_vmspace;
1348 struct ucred *cred = p->p_cred->pc_ucred;
1349 struct nameidata nd;
1350 struct vattr vattr;
1351 int error, error1;
1352 char name[MAXPATHLEN];
1353 struct core core;
1354
1355 /*
1356 * Make sure the process has not set-id, to prevent data leaks.
1357 */
1358 if (p->p_flag & P_SUGID)
1359 return (EPERM);
1360
1361 /*
1362 * Refuse to core if the data + stack + user size is larger than
1363 * the core dump limit. XXX THIS IS WRONG, because of mapped
1364 * data.
1365 */
1366 if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
1367 p->p_rlimit[RLIMIT_CORE].rlim_cur)
1368 return (EFBIG); /* better error code? */
1369
1370 /*
1371 * The core dump will go in the current working directory. Make
1372 * sure that the directory is still there and that the mount flags
1373 * allow us to write core dumps there.
1374 */
1375 vp = p->p_cwdi->cwdi_cdir;
1376 if (vp->v_mount == NULL ||
1377 (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
1378 return (EPERM);
1379
1380 error = build_corename(p, name);
1381 if (error)
1382 return error;
1383
1384 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
1385 error = vn_open(&nd, O_CREAT | FWRITE | FNOSYMLINK, S_IRUSR | S_IWUSR);
1386 if (error)
1387 return (error);
1388 vp = nd.ni_vp;
1389
1390 /* Don't dump to non-regular files or files with links. */
1391 if (vp->v_type != VREG ||
1392 VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
1393 error = EINVAL;
1394 goto out;
1395 }
1396 VATTR_NULL(&vattr);
1397 vattr.va_size = 0;
1398 VOP_LEASE(vp, p, cred, LEASE_WRITE);
1399 VOP_SETATTR(vp, &vattr, cred, p);
1400 p->p_acflag |= ACORE;
1401
1402 #if COMPAT_NETBSD32
1403 if (p->p_flag & P_32)
1404 return (coredump32(p, vp));
1405 #endif
1406 #if 0
1407 /*
1408 * XXX
1409 * It would be nice if we at least dumped the signal state (and made it
1410 * available at run time to the debugger, as well), but this code
1411 * hasn't actually had any effect for a long time, since we don't dump
1412 * the user area. For now, it's dead.
1413 */
1414 memcpy(&p->p_addr->u_kproc.kp_proc, p, sizeof(struct proc));
1415 fill_eproc(p, &p->p_addr->u_kproc.kp_eproc);
1416 #endif
1417
1418 core.c_midmag = 0;
1419 strncpy(core.c_name, p->p_comm, MAXCOMLEN);
1420 core.c_nseg = 0;
1421 core.c_signo = p->p_sigctx.ps_sig;
1422 core.c_ucode = p->p_sigctx.ps_code;
1423 core.c_cpusize = 0;
1424 core.c_tsize = (u_long)ctob(vm->vm_tsize);
1425 core.c_dsize = (u_long)ctob(vm->vm_dsize);
1426 core.c_ssize = (u_long)round_page(ctob(vm->vm_ssize));
1427 error = cpu_coredump(p, vp, cred, &core);
1428 if (error)
1429 goto out;
1430 if (core.c_midmag == 0) {
1431 /* XXX
1432 * cpu_coredump() didn't bother to set the magic; assume
1433 * this is a request to do a traditional dump. cpu_coredump()
1434 * is still responsible for setting sensible values in
1435 * the core header.
1436 */
1437 if (core.c_cpusize == 0)
1438 core.c_cpusize = USPACE; /* Just in case */
1439 error = vn_rdwr(UIO_WRITE, vp, vm->vm_daddr,
1440 (int)core.c_dsize,
1441 (off_t)core.c_cpusize, UIO_USERSPACE,
1442 IO_NODELOCKED|IO_UNIT, cred, NULL, p);
1443 if (error)
1444 goto out;
1445 error = vn_rdwr(UIO_WRITE, vp,
1446 (caddr_t)(u_long)trunc_page(USRSTACK - ctob(vm->vm_ssize)),
1447 core.c_ssize,
1448 (off_t)(core.c_cpusize + core.c_dsize), UIO_USERSPACE,
1449 IO_NODELOCKED|IO_UNIT, cred, NULL, p);
1450 } else {
1451 /*
1452 * uvm_coredump() spits out all appropriate segments.
1453 * All that's left to do is to write the core header.
1454 */
1455 error = uvm_coredump(p, vp, cred, &core);
1456 if (error)
1457 goto out;
1458 error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
1459 (int)core.c_hdrsize, (off_t)0,
1460 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p);
1461 }
1462 out:
1463 VOP_UNLOCK(vp, 0);
1464 error1 = vn_close(vp, FWRITE, cred, p);
1465 if (error == 0)
1466 error = error1;
1467 return (error);
1468 }
1469
1470 #if COMPAT_NETBSD32
1471 /*
1472 * Same as coredump, but generates a 32-bit image.
1473 */
1474 int
1475 coredump32(p, vp)
1476 struct proc *p;
1477 struct vnode *vp;
1478 {
1479 struct vmspace *vm = p->p_vmspace;
1480 struct ucred *cred = p->p_cred->pc_ucred;
1481 int error, error1;
1482 struct core32 core;
1483
1484 #if 0
1485 /*
1486 * XXX
1487 * It would be nice if we at least dumped the signal state (and made it
1488 * available at run time to the debugger, as well), but this code
1489 * hasn't actually had any effect for a long time, since we don't dump
1490 * the user area. For now, it's dead.
1491 */
1492 memcpy(&p->p_addr->u_kproc.kp_proc, p, sizeof(struct proc));
1493 fill_eproc(p, &p->p_addr->u_kproc.kp_eproc);
1494 #endif
1495
1496 core.c_midmag = 0;
1497 strncpy(core.c_name, p->p_comm, MAXCOMLEN);
1498 core.c_nseg = 0;
1499 core.c_signo = p->p_sigctx.ps_sig;
1500 core.c_ucode = p->p_sigctx.ps_code;
1501 core.c_cpusize = 0;
1502 core.c_tsize = (u_long)ctob(vm->vm_tsize);
1503 core.c_dsize = (u_long)ctob(vm->vm_dsize);
1504 core.c_ssize = (u_long)round_page(ctob(vm->vm_ssize));
1505 error = cpu_coredump32(p, vp, cred, &core);
1506 if (error)
1507 goto out;
1508 if (core.c_midmag == 0) {
1509 /* XXX
1510 	 * cpu_coredump32() didn't bother to set the magic; assume
1511 	 * this is a request to do a traditional dump. cpu_coredump32()
1512 * is still responsible for setting sensible values in
1513 * the core header.
1514 */
1515 if (core.c_cpusize == 0)
1516 core.c_cpusize = USPACE; /* Just in case */
1517 error = vn_rdwr(UIO_WRITE, vp, vm->vm_daddr,
1518 (int)core.c_dsize,
1519 (off_t)core.c_cpusize, UIO_USERSPACE,
1520 IO_NODELOCKED|IO_UNIT, cred, NULL, p);
1521 if (error)
1522 goto out;
1523 error = vn_rdwr(UIO_WRITE, vp,
1524 (caddr_t)(u_long)trunc_page(USRSTACK - ctob(vm->vm_ssize)),
1525 core.c_ssize,
1526 (off_t)(core.c_cpusize + core.c_dsize), UIO_USERSPACE,
1527 IO_NODELOCKED|IO_UNIT, cred, NULL, p);
1528 } else {
1529 /*
1530 * uvm_coredump() spits out all appropriate segments.
1531 * All that's left to do is to write the core header.
1532 */
1533 error = uvm_coredump32(p, vp, cred, &core);
1534 if (error)
1535 goto out;
1536 error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
1537 (int)core.c_hdrsize, (off_t)0,
1538 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p);
1539 }
1540 out:
1541 VOP_UNLOCK(vp, 0);
1542 error1 = vn_close(vp, FWRITE, cred, p);
1543 if (error == 0)
1544 error = error1;
1545 return (error);
1546 }
1547 #endif
1548
1549 /*
1550 * Nonexistent system call-- signal process (may want to handle it).
1551 * Flag error in case process won't see signal immediately (blocked or ignored).
1552 */
1553 /* ARGSUSED */
1554 int
1555 sys_nosys(p, v, retval)
1556 struct proc *p;
1557 void *v;
1558 register_t *retval;
1559 {
1560
1561 psignal(p, SIGSYS);
1562 return (ENOSYS);
1563 }
1564
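/*
 * Expand the per-process core name template (p_limit->pl_corename) into
 * dst.  Recognized escapes: %n = process name, %p = process id, %u =
 * login name, %t = process start time in seconds.  Returns ENAMETOOLONG
 * if the expansion would not fit in MAXPATHLEN.
 */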
1565 static int
1566 build_corename(p, dst)
1567 struct proc *p;
1568 char dst[MAXPATHLEN];
1569 {
1570 const char *s;
1571 char *d, *end;
1572 int i;
1573
1574 for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
1575 *s != '\0'; s++) {
1576 if (*s == '%') {
1577 switch (*(s + 1)) {
1578 case 'n':
1579 i = snprintf(d, end - d, "%s", p->p_comm);
1580 break;
1581 case 'p':
1582 i = snprintf(d, end - d, "%d", p->p_pid);
1583 break;
1584 case 'u':
1585 i = snprintf(d, end - d, "%s",
1586 p->p_pgrp->pg_session->s_login);
1587 break;
1588 case 't':
1589 i = snprintf(d, end - d, "%ld",
1590 p->p_stats->p_start.tv_sec);
1591 break;
1592 default:
1593 goto copy;
1594 }
1595 d += i;
1596 s++;
1597 } else {
1598 copy: *d = *s;
1599 d++;
1600 }
1601 if (d >= end)
1602 return (ENAMETOOLONG);
1603 }
1604 *d = '\0';
1605 return (0);
1606 }
1607
1608 /*
1609 * Returns true if signal is ignored or masked for passed process.
1610 */
1611 int
1612 sigismasked(p, sig)
1613 struct proc *p;
1614 int sig;
1615 {
1616 	return sigismember(&p->p_sigctx.ps_sigignore, sig)
1617 	    || sigismember(&p->p_sigctx.ps_sigmask, sig);
1618 }
1619