/*	$NetBSD: kern_sig.c,v 1.224 2006/07/23 22:06:11 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.224 2006/07/23 22:06:11 ad Exp $");

#include "opt_ktrace.h"
#include "opt_multiprocessor.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/exec.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <sys/user.h>		/* for coredump */

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

static int	build_corename(struct proc *, char *, const char *, size_t);
static void	ksiginfo_exithook(struct proc *, void *);
static void	ksiginfo_put(struct proc *, const ksiginfo_t *);
static ksiginfo_t *ksiginfo_get(struct proc *, int);
static void	kpsignal2(struct proc *, const ksiginfo_t *, int);

sigset_t contsigmask, stopsigmask, sigcantmask;

struct pool	sigacts_pool;	/* memory pool for sigacts structures */

/*
 * struct sigacts memory pool allocator.
 */

static void *
sigacts_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map,
	    (PAGE_SIZE)*2, (PAGE_SIZE)*2,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

static void
sigacts_poolpage_free(struct pool *pp, void *v)
{
	uvm_km_free(kernel_map, (vaddr_t)v, (PAGE_SIZE)*2, UVM_KMF_WIRED);
}

static struct pool_allocator sigactspool_allocator = {
	sigacts_poolpage_alloc, sigacts_poolpage_free,
};

POOL_INIT(siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
    &pool_allocator_nointr);
POOL_INIT(ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo", NULL);

/*
 * Remove and return the first ksiginfo element that matches our requested
 * signal, or return NULL if none is found.
 */
static ksiginfo_t *
ksiginfo_get(struct proc *p, int signo)
{
	ksiginfo_t *ksi;
	int s;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
	CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
		if (ksi->ksi_signo == signo) {
			CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
			goto out;
		}
	}
	ksi = NULL;
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
	return ksi;
}

/*
 * Append a new ksiginfo element to the list of pending ksiginfo's, if
 * we need to (SA_SIGINFO was requested).  A non-RT signal that already
 * has an entry in the queue has that entry replaced; new entries are
 * added for RT signals and for non-RT signals not yet queued.
 */
static void
ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
	int s;

	if ((sa->sa_flags & SA_SIGINFO) == 0)
		return;
	/*
	 * If there's no info, don't save it.
	 */
	if (KSI_EMPTY_P(ksi))
		return;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				KSI_COPY(ksi, kp);
				goto out;
			}
		}
	}
	kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating siginfo for pid %d\n",
		    p->p_pid);
#endif
		goto out;
	}
	*kp = *ksi;
	CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}

/*
 * Free all pending ksiginfo on exit.
 */
static void
ksiginfo_exithook(struct proc *p, void *v)
{
	int s;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
	while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
		ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
		CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
		pool_put(&ksiginfo_pool, ksi);
	}
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{

	sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    sizeof(struct sigacts) > PAGE_SIZE ?
	    &sigactspool_allocator : &pool_allocator_nointr);

	exithook_establish(ksiginfo_exithook, NULL);
	exechook_establish(ksiginfo_exithook, NULL);
}
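
/*
 * Note on the allocator choice above: if struct sigacts no longer fits
 * in a single hardware page, the pool falls back to the custom
 * sigactspool_allocator, which hands out wired two-page chunks from
 * kernel_map (see sigacts_poolpage_alloc()).  pa_pagesz must match that
 * allocation size, which is why it is set before pool_init() runs.
 */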

/*
 * Create an initial sigacts structure for process np, using the same
 * signal state as process pp.  If 'share' is set, share pp's sigacts;
 * otherwise just copy it from the parent.
 */
void
sigactsinit(struct proc *np, struct proc *pp, int share)
{
	struct sigacts *ps;

	if (share) {
		np->p_sigacts = pp->p_sigacts;
		pp->p_sigacts->sa_refcnt++;
	} else {
		ps = pool_get(&sigacts_pool, PR_WAITOK);
		if (pp)
			memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
		else
			memset(ps, '\0', sizeof(struct sigacts));
		ps->sa_refcnt = 1;
		np->p_sigacts = ps;
	}
}

/*
 * Make this process not share its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	sigactsinit(p, NULL, 0);

	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}

/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct sigacts *ps)
{

	if (--ps->sa_refcnt > 0)
		return;

	pool_put(&sigacts_pool, ps);
}
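
/*
 * Reference counting summary for struct sigacts: sigactsinit() either
 * bumps sa_refcnt (share) or allocates a fresh structure with
 * sa_refcnt == 1; sigactsunshare() and sigactsfree() each drop a
 * reference and return the structure to sigacts_pool once the count
 * reaches zero.
 */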

int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
    struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline.  Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline.  Only
	 * validate the vers if a new sigaction was supplied.  Emulations
	 * use legacy kernel trampolines with version 0; check for that
	 * as well.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
		(p->p_emul->e_sigcode == NULL) :
		!SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trusting it to reap the zombie), PID 1
				 * itself is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;

			if (nsa->sa_handler == SIG_IGN) {
				/*
				 * Paranoia: same as above.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_CLDSIGIGN;
				else
					p->p_flag |= P_CLDSIGIGN;
			} else
				p->p_flag &= ~P_CLDSIGIGN;

		}
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore.  However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
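
/*
 * Illustrative userland sketch (not part of this file): the checks in
 * sigaction1() are what a call like the following exercises, reaching
 * the kernel through the libc stub, which supplies a libc trampoline
 * and a non-zero trampoline ABI version.  "handler" is hypothetical:
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;	// siginfo queued via ksiginfo_put()
 *	sigemptyset(&sa.sa_mask);
 *	if (sigaction(SIGUSR1, &sa, NULL) == -1)
 *		err(1, "sigaction");
 */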

#ifdef COMPAT_16
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigaction14_args /* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
	} */ *uap = v;
	struct proc *p;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    NULL, 0);
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}
#endif

/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction_sigtramp_args /* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
		syscallarg(void *)			tramp;
		syscallarg(int)				vers;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default and disable the signal
 * stack.
 */
void
siginit(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signum = 1; signum < NSIG; signum++) {
		prop = sigprop[signum];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signum);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signum);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signum);
		if (prop & SA_IGNORE && signum != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = NULL;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	sigactsunshare(p);

	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigctx.ps_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	for (signum = 1; signum < NSIG; signum++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			prop = sigprop[signum];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signum);
				sigdelset(&p->p_sigctx.ps_siglist, signum);
			}
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = NULL;

	/*
	 * Reset the "no zombies if child dies" flag, as Solaris does.
	 */
	p->p_flag &= ~(P_NOCLDWAIT | P_CLDSIGIGN);
	if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
		SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;
}

int
sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigmask;

	if (nss) {
		(void)splsched();	/* XXXSMP */
		switch (how) {
		case SIG_BLOCK:
			sigplusset(nss, &p->p_sigctx.ps_sigmask);
			break;
		case SIG_UNBLOCK:
			sigminusset(nss, &p->p_sigctx.ps_sigmask);
			CHECKSIGS(p);
			break;
		case SIG_SETMASK:
			p->p_sigctx.ps_sigmask = *nss;
			CHECKSIGS(p);
			break;
		default:
			(void)spl0();	/* XXXSMP */
			return (EINVAL);
		}
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void)spl0();	/* XXXSMP */
	}

	return (0);
}
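
/*
 * Illustrative sketch (not part of this file): sigprocmask1() backs the
 * standard userland mask manipulation, e.g.
 *
 *	sigset_t set, oset;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oset);	// SIG_BLOCK -> sigplusset()
 *	...critical section...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);	// restore; CHECKSIGS() runs
 *
 * Note that SIGKILL and SIGSTOP can never be blocked: sigcantmask is
 * subtracted from the new mask before it takes effect.
 */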

/*
 * Manipulate the signal mask.  The new mask is copied in from the user
 * pointer and the old mask is copied back out; sigprocmask1() does the
 * actual work.
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigprocmask14_args /* {
		syscallarg(int)			how;
		syscallarg(const sigset_t *)	set;
		syscallarg(sigset_t *)		oset;
	} */ *uap = v;
	struct proc *p;
	sigset_t nss, oss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigprocmask1(p, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}

/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigpending14_args /* {
		syscallarg(sigset_t *)	set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;

	p = l->l_proc;
	sigpending1(p, &ss);
	return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}

int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished.  Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
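
/*
 * Illustrative sketch (not part of this file): the SAS_OLDMASK dance in
 * sigsuspend1() is what makes the classic race-free wait work in
 * userland.  "child_done" is a hypothetical flag set by a SIGCHLD
 * handler:
 *
 *	sigset_t block, unblock;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &unblock);
 *	while (!child_done)
 *		sigsuspend(&unblock);	// atomically unblock and sleep
 *
 * The pre-sigsuspend mask is restored only after the handler returns,
 * via ps_oldmask in postsig().
 */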

/*
 * Suspend the process until a signal arrives, installing the supplied
 * mask in the meantime.  The mask is copied in from the user pointer;
 * sigsuspend1() does the actual work.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigsuspend14_args /* {
		syscallarg(const sigset_t *)	set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return (error);
	}

	p = l->l_proc;
	return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
}

int
sigaltstack1(struct proc *p, const struct sigaltstack *nss,
    struct sigaltstack *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			return (EINVAL);

		if (nss->ss_flags & SS_DISABLE) {
			if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
				return (EINVAL);
		} else {
			if (nss->ss_size < MINSIGSTKSZ)
				return (ENOMEM);
		}
		p->p_sigctx.ps_sigstk = *nss;
	}

	return (0);
}
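
/*
 * Illustrative sketch (not part of this file): sigaltstack1() enforces
 * the rules behind a userland call such as
 *
 *	stack_t ss;
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;		// must be >= MINSIGSTKSZ
 *	ss.ss_flags = 0;		// SS_DISABLE while SS_ONSTACK
 *	sigaltstack(&ss, NULL);		// is rejected with EINVAL
 */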

/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaltstack14_args /* {
		syscallarg(const struct sigaltstack *)	nss;
		syscallarg(struct sigaltstack *)	oss;
	} */ *uap = v;
	struct proc *p;
	struct sigaltstack nss, oss;
	int error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaltstack1(p,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int)	pid;
		syscallarg(int)	signum;
	} */ *uap = v;
	struct proc *p;
	ksiginfo_t ksi;
	int signum = SCARG(uap, signum);
	int error;

	if ((u_int)signum >= NSIG)
		return (EINVAL);
	KSI_INIT(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = l->l_proc->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	if (SCARG(uap, pid) > 0) {
		/* kill single process */
		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_CANSIGNAL, p, (void *)(uintptr_t)signum,
		    NULL, NULL);
		if (error)
			return error;
		if (signum)
			kpsignal2(p, &ksi, 1);
		return (0);
	}
	switch (SCARG(uap, pid)) {
	case -1:		/* broadcast signal */
		return (killpg1(l, &ksi, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(l, &ksi, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(l, &ksi, -SCARG(uap, pid), 0));
	}
	/* NOTREACHED */
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is the calling process.
 */
int
killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p, *cp;
	kauth_cred_t pc;
	struct pgrp *pgrp;
	int nfound;
	int signum = ksi->ksi_signo;

	cp = l->l_proc;
	pc = l->l_cred;
	nfound = 0;
	if (all) {
		/*
		 * broadcast
		 */
		proclist_lock_read();
		PROCLIST_FOREACH(p, &allproc) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || p == cp ||
			    kauth_authorize_process(pc,
			    KAUTH_PROCESS_CANSIGNAL, p,
			    (void *)(uintptr_t)signum, NULL, NULL) != 0)
				continue;
			nfound++;
			if (signum)
				kpsignal2(p, ksi, 1);
		}
		proclist_unlock_read();
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    kauth_authorize_process(pc,
			    KAUTH_PROCESS_CANSIGNAL, p,
			    (void *)(uintptr_t)signum, NULL, NULL) != 0)
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				kpsignal2(p, ksi, 1);
		}
	}
	return (nfound ? 0 : ESRCH);
}
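
/*
 * For reference, the pid encodings accepted by kill(2) and dispatched
 * to killpg1() from sys_kill() above:
 *
 *	pid > 0		signal that single process
 *	pid == 0	signal the caller's own process group
 *	pid == -1	broadcast to all processes the caller may signal
 *	pid < -1	signal process group -pid
 */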

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	ksiginfo_t ksi;

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signum;
	kgsignal(pgid, &ksi, NULL);
}

void
kgsignal(int pgid, ksiginfo_t *ksi, void *data)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		kpgsignal(pgrp, ksi, data, 0);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(l, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi, 1);
	}
}

/*
 * Fill in signal information and signal the parent for a child status change.
 */
void
child_psignal(struct proc *p, int dolock)
{
	ksiginfo_t ksi;

	KSI_INIT(&ksi);
	ksi.ksi_signo = SIGCHLD;
	ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
	ksi.ksi_status = p->p_xstat;
	ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
	kpsignal2(p->p_pptr, &ksi, dolock);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (e.g., blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * XXXSMP: Invoked as psignal() or sched_psignal().
 */
void
psignal1(struct proc *p, int signum, int dolock)
{
	ksiginfo_t ksi;

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signum;
	kpsignal2(p, &ksi, dolock);
}

void
kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
{

	if ((p->p_flag & P_WEXIT) == 0 && data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;

		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];
			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	kpsignal2(p, ksi, dolock);
}

static void
kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
{
	struct lwp *l, *suspended = NULL;
	struct sadata_vp *vp;
	int s = 0, prop, allsusp;
	sig_t action;
	int signum = ksi->ksi_signo;

#ifdef DIAGNOSTIC
	if (signum <= 0 || signum >= NSIG)
		panic("psignal signal number %d", signum);

	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_ASSERT_UNLOCKED();
	else
		SCHED_ASSERT_LOCKED();
#endif

	/*
	 * Notify any interested parties in the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED) {
		action = SIG_DFL;

		/*
		 * If the process is being traced and the signal is being
		 * caught, make sure to save any ksiginfo.
		 */
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			ksiginfo_put(p, ksi);
	} else {
		/*
		 * If the signal was the result of a trap, reset it to the
		 * default action if it's currently masked, so that it
		 * would dump core immediately instead of spinning,
		 * repeatedly taking the signal.
		 */
		if (KSI_TRAP_P(ksi)
		    && sigismember(&p->p_sigctx.ps_sigmask, signum)
		    && !sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			sigdelset(&p->p_sigctx.ps_sigmask, signum);
			SIGACTION(p, signum).sa_handler = SIG_DFL;
		}

		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum))
			return;
		if (sigismember(&p->p_sigctx.ps_sigmask, signum))
			action = SIG_HOLD;
		else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	if (prop & SA_CONT)
		sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);

	if (prop & SA_STOP)
		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check whether anything waits for it.  If so, save the
	 * info into the provided ps_sigwaited and wake up the waiter.
	 * The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0
	    && p->p_sigctx.ps_sigwaited
	    && sigismember(p->p_sigctx.ps_sigwait, signum)
	    && p->p_stat != SSTOP) {
		p->p_sigctx.ps_sigwaited->ksi_info = ksi->ksi_info;
		p->p_sigctx.ps_sigwaited = NULL;
		if (dolock)
			wakeup_one(&p->p_sigctx.ps_sigwait);
		else
			sched_wakeup(&p->p_sigctx.ps_sigwait);
		return;
	}

	sigaddset(&p->p_sigctx.ps_siglist, signum);

	/* CHECKSIGS() is "inlined" here. */
	p->p_sigctx.ps_sigcheck = 1;

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
		ksiginfo_put(p, ksi);
		return;
	}
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_LOCK(s);

	if (p->p_flag & P_SA) {
		allsusp = 0;
		l = NULL;
		if (p->p_stat == SACTIVE) {
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				l = vp->savp_lwp;
				KDASSERT(l != NULL);
				if (l->l_flag & L_SA_IDLE) {
					/* wakeup idle LWP */
					goto found;
					/*NOTREACHED*/
				} else if (l->l_flag & L_SA_YIELD) {
					/* idle LWP is already waking up */
					goto out;
					/*NOTREACHED*/
				}
			}
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				l = vp->savp_lwp;
				if (l->l_stat == LSRUN ||
				    l->l_stat == LSONPROC) {
					signotify(p);
					goto out;
					/*NOTREACHED*/
				}
				if (l->l_stat == LSSLEEP &&
				    l->l_flag & L_SINTR) {
					/* ok to signal vp lwp */
					break;
				} else
					l = NULL;
			}
		} else if (p->p_stat == SSTOP) {
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				l = vp->savp_lwp;
				if (l->l_stat == LSSLEEP &&
				    (l->l_flag & L_SINTR) != 0)
					break;
				l = NULL;
			}
		}
	} else if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
		/*
		 * At least one LWP is running or on a run queue.
		 * The signal will be noticed when one of them returns
		 * to userspace.
		 */
		signotify(p);
		/*
		 * The signal will be noticed very soon.
		 */
		goto out;
		/*NOTREACHED*/
	} else {
		/*
		 * Find out if any of the sleeps are interruptible,
		 * and if all the live LWPs remaining are suspended.
		 */
		allsusp = 1;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_stat == LSSLEEP &&
			    l->l_flag & L_SINTR)
				break;
			if (l->l_stat == LSSUSPENDED)
				suspended = l;
			else if ((l->l_stat != LSZOMB) &&
			    (l->l_stat != LSDEAD))
				allsusp = 0;
		}
	}

found:
	switch (p->p_stat) {
	case SACTIVE:

		if (l != NULL && (p->p_flag & P_TRACED))
			goto run;

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			goto done;
		}

		/*
		 * When a sleeping process receives a stop
		 * signal, process it immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (p->p_flag & P_PPWAIT) {
				goto out;
			}
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			p->p_xstat = signum;
			if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
				/*
				 * XXXSMP: recursive call; don't lock
				 * the second time around.
				 */
				child_psignal(p, 0);
			}
			proc_stop(p, 1);	/* XXXSMP: recurse? */
			goto done;
		}

		if (l == NULL) {
			/*
			 * Special case: SIGKILL of a process
			 * which is entirely composed of
			 * suspended LWPs should succeed.  We
			 * make this happen by unsuspending one of
			 * them.
			 */
			if (allsusp && (signum == SIGKILL)) {
				lwp_continue(suspended);
			}
			goto done;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/
	case SSTOP:
		/* Process is stopped */
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (p->p_flag & P_TRACED)
			goto done;

		/*
		 * Kill signal always sets processes running,
		 * if possible.
		 */
		if (signum == SIGKILL) {
			l = proc_unstop(p);
			if (l)
				goto runfast;
			goto done;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored),
			 * we continue the process but don't
			 * leave the signal in ps_siglist, as
			 * it has no further action.  If
			 * SIGCONT is held, we continue the
			 * process and leave the signal in
			 * ps_siglist.  If the process catches
			 * SIGCONT, let it handle the signal
			 * itself.  If it isn't waiting on an
			 * event, then it goes back to run
			 * state.  Otherwise, process goes
			 * back to sleep state.
			 */
			if (action == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_siglist,
				    signum);
			l = proc_unstop(p);
			if (l && (action == SIG_CATCH))
				goto runfast;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			goto done;
		}

		/*
		 * If a lwp is sleeping interruptibly, then
		 * wake it up; it will run until the kernel
		 * boundary, where it will stop in issignal(),
		 * since p->p_stat is still SSTOP.  When the
		 * process is continued, it will be made
		 * runnable and can look at the signal.
		 */
		if (l)
			goto run;
		goto out;
	case SIDL:
		/* Process is being created by fork */
		/* XXX: We are not ready to receive signals yet */
		goto done;
	default:
		/* Else what? */
		panic("psignal: Invalid process state %d.", p->p_stat);
	}
	/*NOTREACHED*/

runfast:
	if (action == SIG_CATCH) {
		ksiginfo_put(p, ksi);
		action = SIG_HOLD;
	}
	/*
	 * Raise priority to at least PUSER.
	 */
	if (l->l_priority > PUSER)
		l->l_priority = PUSER;
run:
	if (action == SIG_CATCH) {
		ksiginfo_put(p, ksi);
		action = SIG_HOLD;
	}

	setrunnable(l);		/* XXXSMP: recurse? */
out:
	if (action == SIG_CATCH)
		ksiginfo_put(p, ksi);
done:
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_UNLOCK(s);
}

siginfo_t *
siginfo_alloc(int flags)
{

	return pool_get(&siginfo_pool, flags);
}

void
siginfo_free(void *arg)
{

	pool_put(&siginfo_pool, arg);
}

void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = ksi->ksi_info;
		le = li = NULL;
		if (KSI_TRAP_P(ksi))
			le = l;
		else
			li = l;
		if (sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			if (KSI_TRAP_P(ksi))
				/* XXX What do we do here?? */;
		}
		l->l_flag |= f;
		return;
	}

	(*p->p_emul->e_sendsig)(ksi, mask);
}

static inline int firstsig(const sigset_t *);

static inline int
firstsig(const sigset_t *ss)
{
	int sig;

	sig = ffs(ss->__bits[0]);
	if (sig != 0)
		return (sig);
#if NSIG > 33
	sig = ffs(ss->__bits[1]);
	if (sig != 0)
		return (sig + 32);
#endif
#if NSIG > 65
	sig = ffs(ss->__bits[2]);
	if (sig != 0)
		return (sig + 64);
#endif
#if NSIG > 97
	sig = ffs(ss->__bits[3]);
	if (sig != 0)
		return (sig + 96);
#endif
	return (0);
}
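
/*
 * Worked example: signal n occupies bit (n - 1) of the set, so for a
 * set with only SIGINT (2) and SIGTERM (15) pending,
 * __bits[0] == (1 << 1) | (1 << 14), ffs() returns 2, and firstsig()
 * reports SIGINT first.  The higher words cover signals above 32,
 * hence the "+ 32", "+ 64", "+ 96" adjustments.
 */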

/*
 * If the current process has received a signal that should be caught,
 * should cause termination, or should interrupt the current syscall,
 * return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling
 * issignal by checking the pending signal masks in the CURSIG macro).
 * The normal call sequence is
 *
 *	while (signum = CURSIG(curlwp))
 *		postsig(signum);
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	/* Bail out if we do not own the virtual processor */
	if (l->l_flag & L_SA && l->l_savp->savp_lwp != l)
		return 0;

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping.  Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			/* Emulation-specific handling of signal trace */
			if ((p->p_emul->e_tracesig != NULL) &&
			    ((*p->p_emul->e_tracesig)(p, signum) != 0))
				goto childresumed;

			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
		sigtraceswitch:
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

		childresumed:
			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if the process is a member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
			sigswitch:
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us from ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless the process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int dowakeup)
{
	struct lwp *l;
	struct proc *parent;
	struct sadata_vp *vp;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_flag &= ~P_WAITED;
	p->p_stat = SSTOP;
	parent = p->p_pptr;
	parent->p_nstopchild++;

	if (p->p_flag & P_SA) {
		/*
		 * Only (try to) put the LWP on the VP in stopped
		 * state.
		 * All other LWPs will suspend in sa_setwoken()
		 * because the VP-LWP in stopped state cannot be
		 * repossessed.
		 */
		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			l = vp->savp_lwp;
			if (l->l_stat == LSONPROC && l->l_cpu == curcpu()) {
				l->l_stat = LSSTOP;
				p->p_nrlwps--;
			} else if (l->l_stat == LSRUN) {
				/* Remove LWP from the run queue */
				remrunqueue(l);
				l->l_stat = LSSTOP;
				p->p_nrlwps--;
			} else if (l->l_stat == LSSLEEP &&
			    l->l_flag & L_SA_IDLE) {
				l->l_flag &= ~L_SA_IDLE;
				l->l_stat = LSSTOP;
			}
		}
		goto out;
	}

	/*
	 * Put as many LWPs as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSONPROC) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			    "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
	}

out:
	/* XXX unlock process LWP state */

	if (dowakeup)
		sched_wakeup((caddr_t)p->p_pptr);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 *
 * If no LWPs ended up runnable (and therefore able to take a signal),
 * return a LWP that is sleeping interruptibly.  The caller can wake
 * that LWP up to take a signal.
 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	struct sadata_vp *vp;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptible LWPs left after we have run, so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptible LWPs if this is the case.
	 */

	if (!(p->p_flag & P_WAITED))
		p->p_pptr->p_nstopchild--;
	p->p_stat = SACTIVE;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}
	if (p->p_flag & P_SA) {
		/* Only consider returning the LWP on the VP. */
		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			lr = vp->savp_lwp;
			if (lr->l_stat == LSSLEEP) {
				if (lr->l_flag & L_SA_YIELD) {
					setrunnable(lr);
					break;
				} else if (lr->l_flag & L_SINTR)
					return lr;
			}
		}
		return NULL;
	}
	return lr;
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

#ifdef MULTIPROCESSOR
	/*
	 * On MP, issignal() can return the same signal to multiple
	 * LWPs.  The LWPs will block above waiting for the kernel
	 * lock and the first LWP which gets through will then remove
	 * the signal from ps_siglist.  All other LWPs exit here.
	 */
	if (!sigismember(&p->p_sigctx.ps_siglist, signum)) {
		KERNEL_PROC_UNLOCK(l);
		return;
	}
#endif
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(l, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		ksi = ksiginfo_get(p, signum);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(l, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			KSI_INIT_EMPTY(&ksi1);
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			pool_put(&ksiginfo_pool, ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}

/*
 * Kill the current process for the stated reason.
 */
void
killproc(struct proc *p, const char *why)
{
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */

#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

static const char logcoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static const char lognocoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

/* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	lwp_exit(l);
}

void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		if ((error = coredump(l, NULL)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = l->l_cred ?
			    (int)kauth_cred_geteuid(l->l_cred) : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

struct coredump_iostate {
	struct lwp *io_lwp;
	struct vnode *io_vp;
	kauth_cred_t io_cred;
	off_t io_offset;
};

int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	int error;

	error = vn_rdwr(UIO_WRITE, io->io_vp, __UNCONST(data), len,
	    io->io_offset, segflg,
	    IO_NODELOCKED|IO_UNIT, io->io_cred, NULL,
	    segflg == UIO_USERSPACE ? io->io_lwp : NULL);
	if (error) {
		printf("pid %d (%s): %s write of %zu@%p at %lld failed: %d\n",
		    io->io_lwp->l_proc->p_pid, io->io_lwp->l_proc->p_comm,
		    segflg == UIO_USERSPACE ? "user" : "system",
		    len, data, (long long) io->io_offset, error);
		return (error);
	}

	io->io_offset += len;
	return (0);
}
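
/*
 * coredump_write() is the output callback handed (via the
 * coredump_iostate cookie) to the executable format's es_coredump
 * routine in coredump() below; io_offset advances by len on each
 * successful call, so successive writes produce a sequential file.
 */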

/*
 * Dump core, into a file named according to the core name pattern of
 * the process (e.g. "progname.core"), unless the process was
 * setuid/setgid.
 */
int
coredump(struct lwp *l, const char *pattern)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	kauth_cred_t cred;
	struct nameidata nd;
	struct vattr vattr;
	struct mount *mp;
	struct coredump_iostate io;
	int error, error1;
	char *name = NULL;

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = l->l_cred;

	/*
	 * Make sure the process is not set-id, to prevent data leaks,
	 * unless it was specifically requested to allow set-id coredumps.
	 */
	if ((p->p_flag & P_SUGID) && !security_setidcore_dump)
		return EPERM;

	/*
	 * Refuse to dump core if the data + stack + user size is larger
	 * than the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return EFBIG;		/* better error code? */

restart:
	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0) {
		error = EPERM;
		goto done;
	}

	if ((p->p_flag & P_SUGID) && security_setidcore_dump)
		pattern = security_setidcore_path;

	if (pattern == NULL)
		pattern = p->p_limit->pl_corename;
	if (name == NULL) {
		name = PNBUF_GET();
	}
	if ((error = build_corename(p, name, pattern, MAXPATHLEN)) != 0)
		goto done;
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, l);
	if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
	    S_IRUSR | S_IWUSR)) != 0)
		goto done;
	vp = nd.ni_vp;

	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		if ((error = vn_close(vp, FWRITE, cred, l)) != 0)
			goto done;
		if ((error = vn_start_write(NULL, &mp,
		    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
			goto done;
		goto restart;
	}

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, l) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;

	if ((p->p_flag & P_SUGID) && security_setidcore_dump) {
		vattr.va_uid = security_setidcore_owner;
		vattr.va_gid = security_setidcore_group;
		vattr.va_mode = security_setidcore_mode;
	}

	VOP_LEASE(vp, l, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, l);
	p->p_acflag |= ACORE;

	io.io_lwp = l;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, &io);
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	error1 = vn_close(vp, FWRITE, cred, l);
	if (error == 0)
		error = error1;
done:
	if (name != NULL)
		PNBUF_PUT(name);
	return error;
}
2196
/*
 * Nonexistent system call -- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p;

	p = l->l_proc;
	psignal(p, SIGSYS);
	return (ENOSYS);
}

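/*
 * Expand the core name pattern into dst.  Recognized escapes are
 * %n (process name), %p (pid), %u (login name) and %t (process start
 * time); any other %-escape is copied through literally.  Illustrative
 * expansions, assuming p_comm "sh" and pid 123: the pattern "%n.core"
 * yields "sh.core", and "/var/crash/%n.%p" yields "/var/crash/sh.123".
 */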
static int
build_corename(struct proc *p, char *dst, const char *src, size_t len)
{
	const char *s;
	char *d, *end;
	int i;

	for (s = src, d = dst, end = d + len; *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				goto copy;
			}
			d += i;
			s++;
		} else {
 copy:			*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}

/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getcontext_args /* {
		syscallarg(struct __ucontext *) ucp;
	} */ *uap = v;
	ucontext_t uc;

	getucontext(l, &uc);

	return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p;
	int error;

	p = l->l_proc;
	if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
		return (error);
	l->l_ctxlink = ucp->uc_link;

	if ((ucp->uc_flags & _UC_SIGMASK) != 0)
		sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);

	/*
	 * If there was stack information, update whether or not we are
	 * still running on an alternate signal stack.
	 */
	if ((ucp->uc_flags & _UC_STACK) != 0) {
		if (ucp->uc_stack.ss_flags & SS_ONSTACK)
			p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
		else
			p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
	}

	return 0;
}

/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setcontext_args /* {
		syscallarg(const ucontext_t *) ucp;
	} */ *uap = v;
	ucontext_t uc;
	int error;

	if (SCARG(uap, ucp) == NULL)	/* i.e. end of uc_link chain */
		exit1(l, W_EXITCODE(0, 0));
	else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
	    (error = setucontext(l, &uc)) != 0)
		return (error);

	return (EJUSTRETURN);
}
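
/*
 * Illustrative userland use of the two syscalls above (sketch only,
 * error handling omitted): getcontext(&uc) captures the current
 * context, and a later setcontext(&uc) resumes it.  Contexts may be
 * chained through uc_link; setcontext(NULL) marks the end of such a
 * chain, and as implemented above it exits the process with status 0.
 */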

/*
 * sigtimedwait(2) system call, also used for the implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles a single LWP in signal wait.  libpthread provides
 * its own sigtimedwait() wrapper to do the right thing with regard to
 * individual threads.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	return __sigtimedwait1(l, v, retval, copyout, copyin, copyout);
}
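
/*
 * Illustrative userland use (sketch, error handling omitted): the
 * caller is expected to block the signals it waits for first, e.g.
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 5, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		handle(info);	(info.si_* describes the delivery)
 */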

int
__sigtimedwait1(struct lwp *l, void *v, register_t *retval,
    copyout_t put_info, copyin_t fetch_timeout, copyout_t put_timeout)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t *waitset, twaitset;
	struct proc *p = l->l_proc;
	int error, signum;
	int timo = 0;
	struct timespec ts, tsstart;
	ksiginfo_t *ksi;

	memset(&tsstart, 0, sizeof tsstart);	/* XXX gcc */

	MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK);

	if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t)))) {
		FREE(waitset, M_TEMP);
		return (error);
	}

	/*
	 * Silently ignore SA_CANTMASK signals.  psignal1() would
	 * ignore SA_CANTMASK signals in waitset; we do this
	 * only for the siglist check below.
	 */
	sigminusset(&sigcantmask, waitset);

	/*
	 * First scan siglist and check if there is a signal from
	 * our waitset already pending.
	 */
	twaitset = *waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		ksi = ksiginfo_get(p, signum);
		if (!ksi) {
			/* No queued siginfo, manufacture one */
			ksi = pool_get(&ksiginfo_pool, PR_WAITOK);
			KSI_INIT(ksi);
			ksi->ksi_info._signo = signum;
			ksi->ksi_info._code = SI_USER;
		}

		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.  Don't leak waitset
	 * on the early-return paths.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = (*fetch_timeout)(SCARG(uap, timeout), &ts,
		    sizeof(ts)))) {
			FREE(waitset, M_TEMP);
			return (error);
		}

		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0) {
			FREE(waitset, M_TEMP);
			return (EAGAIN);
		}

		/*
		 * Remember the current uptime; it will be used in the
		 * ECANCELED/ERESTART case.
		 */
		getnanouptime(&tsstart);
	}
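
	/*
	 * Illustrative arithmetic for the conversion above: a timeout of
	 * ts = { 0 s, 500000000 ns } gives ms = 500, which mstohz() turns
	 * into clock ticks for tsleep() -- 50 ticks at the (assumed)
	 * traditional hz = 100.
	 */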

	/*
	 * Set up the ps_sigwait list.  Pass a pointer to malloced memory
	 * here; it's not possible to pass a pointer to a structure on the
	 * current process's stack, since the process might be swapped out
	 * at the time the signal is delivered.
	 */
	ksi = pool_get(&ksiginfo_pool, PR_WAITOK);
	p->p_sigctx.ps_sigwaited = ksi;
	p->p_sigctx.ps_sigwait = waitset;

	/*
	 * Wait for a signal to arrive.  We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Need to find out if we woke as a result of lwp_wakeup()
	 * or a signal outside our wait set.
	 */
	if (error == EINTR && p->p_sigctx.ps_sigwaited
	    && !firstsig(&p->p_sigctx.ps_siglist)) {
		/* wakeup via _lwp_wakeup() */
		error = ECANCELED;
	} else if (!error && p->p_sigctx.ps_sigwaited) {
		/* spurious wakeup - arrange for syscall restart */
		error = ERESTART;
		goto fail;
	}

	/*
	 * On error, clear the sigwait indication.  psignal1() clears it
	 * in the !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = NULL;

		/*
		 * If the sleep was interrupted (either by signal or wakeup),
		 * update the timeout and copy the new value back out.  It
		 * will be used when the syscall is restarted or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timespec tsnow;
			int err;

			getnanouptime(&tsnow);

			/* compute how much time has passed since start */
			timespecsub(&tsnow, &tsstart, &tsnow);
			/* subtract passed time from timeout */
			timespecsub(&ts, &tsnow, &ts);

			if (ts.tv_sec < 0) {
				error = EAGAIN;
				goto fail;
			}

			/* copy updated timeout to userland */
			if ((err = (*put_timeout)(&ts, SCARG(uap, timeout),
			    sizeof(ts)))) {
				error = err;
				goto fail;
			}
		}

		goto fail;
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo; the padding part is
	 * left unchanged (userland is not supposed to touch it anyway).
	 * Fall through to the cleanup below in either case.
	 */
 sig:
	error = (*put_info)(&ksi->ksi_info, SCARG(uap, info),
	    sizeof(ksi->ksi_info));

 fail:
	FREE(waitset, M_TEMP);
	pool_put(&ksiginfo_pool, ksi);
	p->p_sigctx.ps_sigwait = NULL;

	return (error);
}

/*
 * Returns true if the signal is ignored or masked for the given process.
 */
int
sigismasked(struct proc *p, int sig)
{

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&p->p_sigctx.ps_sigmask, sig));
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but it
 * probably isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};

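/*
 * Illustrative userland use of the signal filter above (sketch,
 * error handling omitted): register interest in SIGINT via kqueue(2),
 * then read the delivery count accumulated in kn_data from the
 * returned event's data field.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	signal(SIGINT, SIG_IGN);
 *	EV_SET(&kev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *	(kev.data holds the number of SIGINTs since the last check)
 */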