kern_sig.c revision 1.175 1 /* $NetBSD: kern_sig.c,v 1.175 2003/11/02 16:26:10 cl Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.175 2003/11/02 16:26:10 cl Exp $");
41
42 #include "opt_ktrace.h"
43 #include "opt_compat_sunos.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_compat_netbsd32.h"
46
47 #define SIGPROP /* include signal properties table */
48 #include <sys/param.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/systm.h>
55 #include <sys/timeb.h>
56 #include <sys/times.h>
57 #include <sys/buf.h>
58 #include <sys/acct.h>
59 #include <sys/file.h>
60 #include <sys/kernel.h>
61 #include <sys/wait.h>
62 #include <sys/ktrace.h>
63 #include <sys/syslog.h>
64 #include <sys/stat.h>
65 #include <sys/core.h>
66 #include <sys/filedesc.h>
67 #include <sys/malloc.h>
68 #include <sys/pool.h>
69 #include <sys/ucontext.h>
70 #include <sys/sa.h>
71 #include <sys/savar.h>
72 #include <sys/exec.h>
73
74 #include <sys/mount.h>
75 #include <sys/syscallargs.h>
76
77 #include <machine/cpu.h>
78
79 #include <sys/user.h> /* for coredump */
80
81 #include <uvm/uvm_extern.h>
82
/* Forward declarations of file-private helpers (defined below). */
static void	child_psignal(struct proc *, int);
static void	proc_stop(struct proc *);
static int	build_corename(struct proc *, char [MAXPATHLEN]);
static void	ksiginfo_exithook(struct proc *, void *);
static void	ksiginfo_put(struct proc *, const ksiginfo_t *);
static ksiginfo_t *ksiginfo_get(struct proc *, int);
static void	kpsignal2(struct proc *, const ksiginfo_t *, int);

/* Global signal classification masks, built from sigprop[] in siginit(). */
sigset_t contsigmask, stopsigmask, sigcantmask;

struct pool sigacts_pool;	/* memory pool for sigacts structures */
struct pool siginfo_pool;	/* memory pool for siginfo structures */
struct pool ksiginfo_pool;	/* memory pool for ksiginfo structures */
96
/*
 * Can process p, with pcred pc, send the signal signum to process q?
 *
 * Permitted when the sender is superuser, when any of the sender's
 * real/effective uids matches q's real/effective uid, or when the
 * signal is SIGCONT and q is in the sender's session.
 *
 * NOTE: function-like macro; arguments are evaluated more than once,
 * so callers must not pass expressions with side effects.
 */
#define CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))
107
108 /*
109 * Remove and return the first ksiginfo element that matches our requested
110 * signal, or return NULL if one not found.
111 */
112 static ksiginfo_t *
113 ksiginfo_get(struct proc *p, int signo)
114 {
115 ksiginfo_t *ksi;
116 int s;
117
118 s = splsoftclock();
119 simple_lock(&p->p_sigctx.ps_silock);
120 CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
121 if (ksi->ksi_signo == signo) {
122 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
123 goto out;
124 }
125 }
126 ksi = NULL;
127 out:
128 simple_unlock(&p->p_sigctx.ps_silock);
129 splx(s);
130 return ksi;
131 }
132
133 /*
134 * Append a new ksiginfo element to the list of pending ksiginfo's, if
135 * we need to (SA_SIGINFO was requested). We replace non RT signals if
136 * they already existed in the queue and we add new entries for RT signals,
137 * or for non RT signals with non-existing entries.
138 */
/*
 * Queue siginfo for later retrieval by ksiginfo_get(), but only if the
 * handler for this signal requested it (SA_SIGINFO).  A non-RT signal
 * already in the queue has its payload replaced in place; otherwise a
 * new entry is appended.  (True RT-signal queueing is not yet enabled —
 * see the "notyet" #ifdef below.)  Allocation failure is tolerated:
 * the signal is still delivered, just without its siginfo.
 */
static void
ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
	int s;

	/* Handler did not ask for siginfo — nothing to queue. */
	if ((sa->sa_flags & SA_SIGINFO) == 0)
		return;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		/* Non-RT signal: overwrite an existing entry if present. */
		CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				KSI_COPY(ksi, kp);
				goto out;
			}
		}
	}
	/* PR_NOWAIT: we hold a simple lock at raised spl, must not sleep. */
	kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating siginfo for pid %d\n",
		    p->p_pid);
#endif
		goto out;
	}
	*kp = *ksi;
	CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
 out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}
176
177 /*
178 * free all pending ksiginfo on exit
179 */
180 static void
181 ksiginfo_exithook(struct proc *p, void *v)
182 {
183 int s;
184
185 s = splsoftclock();
186 simple_lock(&p->p_sigctx.ps_silock);
187 while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
188 ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
189 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
190 pool_put(&ksiginfo_pool, ksi);
191 }
192 simple_unlock(&p->p_sigctx.ps_silock);
193 splx(s);
194 }
195
196 /*
197 * Initialize signal-related data structures.
198 */
void
signal_init(void)
{
	/* Pools backing sigacts, siginfo and ksiginfo allocations. */
	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
	pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
	    &pool_allocator_nointr);
	/*
	 * ksiginfo_pool uses the default allocator (NULL) — it is
	 * accessed at splsoftclock() by ksiginfo_get/put, so the
	 * nointr allocator is not appropriate here.
	 */
	pool_init(&ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
	    NULL);
	/* Drain queued ksiginfo when a process exits or execs. */
	exithook_establish(ksiginfo_exithook, NULL);
	exechook_establish(ksiginfo_exithook, NULL);
}
211
212 /*
213 * Create an initial sigctx structure, using the same signal state
214 * as p. If 'share' is set, share the sigctx_proc part, otherwise just
215 * copy it from parent.
216 */
217 void
218 sigactsinit(struct proc *np, struct proc *pp, int share)
219 {
220 struct sigacts *ps;
221
222 if (share) {
223 np->p_sigacts = pp->p_sigacts;
224 pp->p_sigacts->sa_refcnt++;
225 } else {
226 ps = pool_get(&sigacts_pool, PR_WAITOK);
227 if (pp)
228 memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
229 else
230 memset(ps, '\0', sizeof(struct sigacts));
231 ps->sa_refcnt = 1;
232 np->p_sigacts = ps;
233 }
234 }
235
/*
 * Make this process stop sharing its sigacts, giving it a private
 * copy.  Note: the replacement sigacts is zeroed (sigactsinit() with
 * a NULL parent), not copied from the shared one; the caller is
 * expected to reinitialize the signal state afterwards.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	/* Already unshared — nothing to do. */
	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	/*
	 * NOTE: sigactsinit() with a NULL parent zeroes the new sigacts
	 * rather than copying the old state; the caller (execsigs())
	 * resets the table immediately afterwards anyway.
	 */
	sigactsinit(p, NULL, 0);

	/* Drop our reference on the old, shared sigacts. */
	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}
254
255 /*
256 * Release a sigctx structure.
257 */
258 void
259 sigactsfree(struct proc *p)
260 {
261 struct sigacts *ps;
262
263 ps = p->p_sigacts;
264 if (--ps->sa_refcnt > 0)
265 return;
266
267 pool_put(&sigacts_pool, ps);
268 }
269
/*
 * sigaction1:
 *
 *	Common implementation for the sigaction(2) family.  Optionally
 *	reports the current action for `signum' via `osa', and
 *	optionally installs the new action `nsa' together with its
 *	userland trampoline (`tramp', ABI version `vers').  Returns 0
 *	or EINVAL.
 */
int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline. Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline. Only
	 * validate the vers if a new sigaction was supplied. Emulations
	 * use legacy kernel trampolines with version 0, alternatively
	 * check for that too.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
	    (p->p_emul->e_sigcode == NULL) :
	    !SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	/* Old action is reported before any change is applied. */
	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

#ifndef __HAVE_SIGINFO
		/* This port cannot deliver siginfo. */
		if (nsa->sa_flags & SA_SIGINFO)
			return (EINVAL);
#endif

		/* SIGKILL/SIGSTOP cannot have their action changed. */
		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		/* Unmaskable signals can never appear in sa_mask. */
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			/* SIGCHLD flags are mirrored into p_flag. */
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		/* Unless SA_NODEFER, a signal blocks itself while handled. */
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
372
373 #ifdef COMPAT_16
374 /* ARGSUSED */
375 int
376 compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
377 {
378 struct compat_16_sys___sigaction14_args /* {
379 syscallarg(int) signum;
380 syscallarg(const struct sigaction *) nsa;
381 syscallarg(struct sigaction *) osa;
382 } */ *uap = v;
383 struct proc *p;
384 struct sigaction nsa, osa;
385 int error;
386
387 if (SCARG(uap, nsa)) {
388 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
389 if (error)
390 return (error);
391 }
392 p = l->l_proc;
393 error = sigaction1(p, SCARG(uap, signum),
394 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
395 NULL, 0);
396 if (error)
397 return (error);
398 if (SCARG(uap, osa)) {
399 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
400 if (error)
401 return (error);
402 }
403 return (0);
404 }
405 #endif
406
407 /* ARGSUSED */
408 int
409 sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
410 {
411 struct sys___sigaction_sigtramp_args /* {
412 syscallarg(int) signum;
413 syscallarg(const struct sigaction *) nsa;
414 syscallarg(struct sigaction *) osa;
415 syscallarg(void *) tramp;
416 syscallarg(int) vers;
417 } */ *uap = v;
418 struct proc *p = l->l_proc;
419 struct sigaction nsa, osa;
420 int error;
421
422 if (SCARG(uap, nsa)) {
423 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
424 if (error)
425 return (error);
426 }
427 error = sigaction1(p, SCARG(uap, signum),
428 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
429 SCARG(uap, tramp), SCARG(uap, vers));
430 if (error)
431 return (error);
432 if (SCARG(uap, osa)) {
433 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
434 if (error)
435 return (error);
436 }
437 return (0);
438 }
439
440 /*
441 * Initialize signal state for process 0;
442 * set to ignore signals that are ignored by default and disable the signal
443 * stack.
444 */
void
siginit(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	/*
	 * Build the global classification masks from sigprop[] and
	 * install the default action (SA_RESTART, empty mask) for
	 * every signal.
	 */
	for (signum = 1; signum < NSIG; signum++) {
		prop = sigprop[signum];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signum);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signum);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signum);
		/* SIGCONT stays deliverable so it can restart the process. */
		if (prop & SA_IGNORE && signum != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = NULL;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;

	/* One reference. */
	ps->sa_refcnt = 1;
}
482
483 /*
484 * Reset signals for an exec of the specified process.
485 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	/* Get a private sigacts before modifying it. */
	sigactsunshare(p);

	ps = p->p_sigacts;

	/*
	 * Reset caught signals. Held signals remain held
	 * through p_sigctx.ps_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	for (signum = 1; signum < NSIG; signum++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			prop = sigprop[signum];
			if (prop & SA_IGNORE) {
				/* Default is ignore: discard any pending one. */
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signum);
				sigdelset(&p->p_sigctx.ps_siglist, signum);
			}
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = NULL;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;
}
526
/*
 * sigprocmask1:
 *
 *	Common implementation of sigprocmask(2).  Reports the old mask
 *	via `oss' (if non-NULL), then applies `nss' (if non-NULL)
 *	according to `how'.  Returns 0 or EINVAL.
 */
int
sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
{

	/* The old mask is reported before any change is applied. */
	if (oss)
		*oss = p->p_sigctx.ps_sigmask;

	if (nss) {
		(void)splsched();	/* XXXSMP */
		switch (how) {
		case SIG_BLOCK:
			sigplusset(nss, &p->p_sigctx.ps_sigmask);
			break;
		case SIG_UNBLOCK:
			sigminusset(nss, &p->p_sigctx.ps_sigmask);
			/* Unblocking may make a pending signal deliverable. */
			CHECKSIGS(p);
			break;
		case SIG_SETMASK:
			p->p_sigctx.ps_sigmask = *nss;
			CHECKSIGS(p);
			break;
		default:
			(void)spl0();	/* XXXSMP */
			return (EINVAL);
		}
		/* SIGKILL and friends (sigcantmask) can never be blocked. */
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void)spl0();	/* XXXSMP */
	}

	return (0);
}
558
559 /*
560 * Manipulate signal mask.
561 * Note that we receive new mask, not pointer,
562 * and return old mask as return value;
563 * the library stub does the rest.
564 */
565 int
566 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
567 {
568 struct sys___sigprocmask14_args /* {
569 syscallarg(int) how;
570 syscallarg(const sigset_t *) set;
571 syscallarg(sigset_t *) oset;
572 } */ *uap = v;
573 struct proc *p;
574 sigset_t nss, oss;
575 int error;
576
577 if (SCARG(uap, set)) {
578 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
579 if (error)
580 return (error);
581 }
582 p = l->l_proc;
583 error = sigprocmask1(p, SCARG(uap, how),
584 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
585 if (error)
586 return (error);
587 if (SCARG(uap, oset)) {
588 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
589 if (error)
590 return (error);
591 }
592 return (0);
593 }
594
595 void
596 sigpending1(struct proc *p, sigset_t *ss)
597 {
598
599 *ss = p->p_sigctx.ps_siglist;
600 sigminusset(&p->p_sigctx.ps_sigmask, ss);
601 }
602
603 /* ARGSUSED */
604 int
605 sys___sigpending14(struct lwp *l, void *v, register_t *retval)
606 {
607 struct sys___sigpending14_args /* {
608 syscallarg(sigset_t *) set;
609 } */ *uap = v;
610 struct proc *p;
611 sigset_t ss;
612
613 p = l->l_proc;
614 sigpending1(p, &ss);
615 return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
616 }
617
/*
 * sigsuspend1:
 *
 *	Common implementation of sigsuspend(2)/sigpause: optionally
 *	install a temporary signal mask (saving the old one for restore
 *	after handler delivery), then sleep until a signal wakes us.
 *	Always returns EINTR.
 */
int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished. Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		/* The unmaskable signals stay unmasked. */
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	/*
	 * Sleep (interruptibly, PCATCH) until a signal aborts the
	 * tsleep; a 0 return means we were woken without a signal,
	 * so go back to sleep.
	 */
	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
647
648 /*
649 * Suspend process until signal, providing mask to be set
650 * in the meantime. Note nonstandard calling convention:
651 * libc stub passes mask, not pointer, to save a copyin.
652 */
653 /* ARGSUSED */
654 int
655 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
656 {
657 struct sys___sigsuspend14_args /* {
658 syscallarg(const sigset_t *) set;
659 } */ *uap = v;
660 struct proc *p;
661 sigset_t ss;
662 int error;
663
664 if (SCARG(uap, set)) {
665 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
666 if (error)
667 return (error);
668 }
669
670 p = l->l_proc;
671 return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
672 }
673
674 int
675 sigaltstack1(struct proc *p, const struct sigaltstack *nss,
676 struct sigaltstack *oss)
677 {
678
679 if (oss)
680 *oss = p->p_sigctx.ps_sigstk;
681
682 if (nss) {
683 if (nss->ss_flags & ~SS_ALLBITS)
684 return (EINVAL);
685
686 if (nss->ss_flags & SS_DISABLE) {
687 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
688 return (EINVAL);
689 } else {
690 if (nss->ss_size < MINSIGSTKSZ)
691 return (ENOMEM);
692 }
693 p->p_sigctx.ps_sigstk = *nss;
694 }
695
696 return (0);
697 }
698
699 /* ARGSUSED */
700 int
701 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
702 {
703 struct sys___sigaltstack14_args /* {
704 syscallarg(const struct sigaltstack *) nss;
705 syscallarg(struct sigaltstack *) oss;
706 } */ *uap = v;
707 struct proc *p;
708 struct sigaltstack nss, oss;
709 int error;
710
711 if (SCARG(uap, nss)) {
712 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
713 if (error)
714 return (error);
715 }
716 p = l->l_proc;
717 error = sigaltstack1(p,
718 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
719 if (error)
720 return (error);
721 if (SCARG(uap, oss)) {
722 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
723 if (error)
724 return (error);
725 }
726 return (0);
727 }
728
729 /* ARGSUSED */
730 int
731 sys_kill(struct lwp *l, void *v, register_t *retval)
732 {
733 struct sys_kill_args /* {
734 syscallarg(int) pid;
735 syscallarg(int) signum;
736 } */ *uap = v;
737 struct proc *cp, *p;
738 struct pcred *pc;
739 ksiginfo_t ksi;
740
741 cp = l->l_proc;
742 pc = cp->p_cred;
743 if ((u_int)SCARG(uap, signum) >= NSIG)
744 return (EINVAL);
745 memset(&ksi, 0, sizeof(ksi));
746 ksi.ksi_signo = SCARG(uap, signum);
747 ksi.ksi_code = SI_USER;
748 ksi.ksi_pid = cp->p_pid;
749 ksi.ksi_uid = cp->p_ucred->cr_uid;
750 if (SCARG(uap, pid) > 0) {
751 /* kill single process */
752 if ((p = pfind(SCARG(uap, pid))) == NULL)
753 return (ESRCH);
754 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
755 return (EPERM);
756 if (SCARG(uap, signum))
757 kpsignal2(p, &ksi, 1);
758 return (0);
759 }
760 switch (SCARG(uap, pid)) {
761 case -1: /* broadcast signal */
762 return (killpg1(cp, &ksi, 0, 1));
763 case 0: /* signal own process group */
764 return (killpg1(cp, &ksi, 0, 0));
765 default: /* negative explicit process group */
766 return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
767 }
768 /* NOTREACHED */
769 }
770
771 /*
772 * Common code for kill process group/broadcast kill.
773 * cp is calling process.
774 */
/*
 * killpg1:
 *
 *	Common code for process-group kill and broadcast kill; cp is
 *	the calling process.  With `all' set, signal every process the
 *	caller may signal (except init, system processes and itself);
 *	otherwise signal the members of process group `pgid' (0 means
 *	the caller's own group).  Returns 0 if at least one target was
 *	found, else ESRCH.
 */
int
killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p;
	struct pcred *pc;
	struct pgrp *pgrp;
	int nfound;
	int signum = ksi->ksi_signo;

	pc = cp->p_cred;
	nfound = 0;
	if (all) {
		/*
		 * broadcast
		 */
		proclist_lock_read();
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == cp || !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			/* signum 0 only tests for existence/permission. */
			if (signum)
				kpsignal2(p, ksi, 1);
		}
		proclist_unlock_read();
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    !CANSIGNAL(cp, pc, p, signum))
				continue;
			nfound++;
			/*
			 * NOTE(review): only this loop skips zombies via
			 * P_ZOMBIE(); the broadcast loop above does not —
			 * confirm whether that asymmetry is intentional.
			 */
			if (signum && P_ZOMBIE(p) == 0)
				kpsignal2(p, ksi, 1);
		}
	}
	return (nfound ? 0 : ESRCH);
}
822
823 /*
824 * Send a signal to a process group.
825 */
826 void
827 gsignal(int pgid, int signum)
828 {
829 ksiginfo_t ksi;
830 memset(&ksi, 0, sizeof(ksi));
831 ksi.ksi_signo = signum;
832 kgsignal(pgid, &ksi, NULL);
833 }
834
835 void
836 kgsignal(int pgid, ksiginfo_t *ksi, void *data)
837 {
838 struct pgrp *pgrp;
839
840 if (pgid && (pgrp = pgfind(pgid)))
841 kpgsignal(pgrp, ksi, data, 0);
842 }
843
/*
 * Send a signal to a process group. If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
848 void
849 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
850 {
851 ksiginfo_t ksi;
852 memset(&ksi, 0, sizeof(ksi));
853 ksi.ksi_signo = sig;
854 kpgsignal(pgrp, &ksi, NULL, checkctty);
855 }
856
857 void
858 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
859 {
860 struct proc *p;
861
862 if (pgrp)
863 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
864 if (checkctty == 0 || p->p_flag & P_CONTROLT)
865 kpsignal(p, ksi, data);
866 }
867
868 /*
869 * Send a signal caused by a trap to the current process.
870 * If it will be caught immediately, deliver it with correct code.
871 * Otherwise, post it normally.
872 */
#ifndef __HAVE_SIGINFO
/*
 * Compatibility shim for ports without siginfo support: keep the old
 * three-argument trapsignal() entry point.  The #define below renames
 * every later occurrence of `trapsignal' in this file — including the
 * call just below AND the full definition that follows — to
 * `_trapsignal', so old-ABI callers reach this wrapper, which builds
 * a trap ksiginfo and forwards to the real implementation.
 */
void	_trapsignal(struct lwp *, const ksiginfo_t *);
void
trapsignal(struct lwp *l, int signum, u_long code)
{
#define trapsignal _trapsignal
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_trap = (int)code;
	trapsignal(l, &ksi);	/* expands to _trapsignal(l, &ksi) */
}
#endif
887
/*
 * trapsignal:
 *
 *	Deliver a trap-generated signal to the current LWP.  If the
 *	process catches it, is not traced, and does not have it masked,
 *	deliver it synchronously right here via kpsendsig(); otherwise
 *	record the trap details in the sigctx and post it through the
 *	normal kpsignal2() path.
 */
void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		/* Caught, unmasked, not traced: deliver immediately. */
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		/* Block the handler's sa_mask for the handler's duration. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* SA_RESETHAND: revert to the default action. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi, 1);
	}
}
927
928 /*
929 * Fill in signal information and signal the parent for a child status change.
930 */
931 static void
932 child_psignal(struct proc *p, int dolock)
933 {
934 ksiginfo_t ksi;
935
936 (void)memset(&ksi, 0, sizeof(ksi));
937 ksi.ksi_signo = SIGCHLD;
938 ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
939 ksi.ksi_pid = p->p_pid;
940 ksi.ksi_uid = p->p_ucred->cr_uid;
941 ksi.ksi_status = p->p_xstat;
942 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
943 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
944 kpsignal2(p->p_pptr, &ksi, dolock);
945 }
946
947 /*
948 * Send the signal to the process. If the signal has an action, the action
949 * is usually performed by the target process rather than the caller; we add
950 * the signal to the set of pending signals for the process.
951 *
952 * Exceptions:
953 * o When a stop signal is sent to a sleeping process that takes the
954 * default action, the process is stopped without awakening it.
955 * o SIGCONT restarts stopped processes (or puts them back to sleep)
956 * regardless of the signal action (eg, blocked or ignored).
957 *
958 * Other ignored signals are discarded immediately.
959 *
960 * XXXSMP: Invoked as psignal() or sched_psignal().
961 */
962 void
963 psignal1(struct proc *p, int signum, int dolock)
964 {
965 ksiginfo_t ksi;
966
967 memset(&ksi, 0, sizeof(ksi));
968 ksi.ksi_signo = signum;
969 kpsignal2(p, &ksi, dolock);
970 }
971
/*
 * kpsignal1:
 *
 *	Deliver ksi to process p.  If `data' is non-NULL (e.g. an
 *	object like a tty backing one of the process's descriptors),
 *	scan the file table for the descriptor whose f_data matches it
 *	and record that fd in ksi_fd (-1 if not found), so the handler
 *	can learn which descriptor the signal relates to.
 */
void
kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
{

	/* Skip the scan for exiting processes — fd table may be gone. */
	if ((p->p_flag & P_WEXIT) == 0 && data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;

		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];
			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	kpsignal2(p, ksi, dolock);
}
992
993 static void
994 kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
995 {
996 struct lwp *l, *suspended = NULL;
997 int s = 0, prop, allsusp;
998 sig_t action;
999 int signum = ksi->ksi_signo;
1000
1001 #ifdef DIAGNOSTIC
1002 if (signum <= 0 || signum >= NSIG)
1003 panic("psignal signal number %d", signum);
1004
1005 /* XXXSMP: works, but icky */
1006 if (dolock)
1007 SCHED_ASSERT_UNLOCKED();
1008 else
1009 SCHED_ASSERT_LOCKED();
1010 #endif
1011
1012
1013 /*
1014 * Notify any interested parties in the signal.
1015 */
1016 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1017
1018 prop = sigprop[signum];
1019
1020 /*
1021 * If proc is traced, always give parent a chance.
1022 */
1023 if (p->p_flag & P_TRACED)
1024 action = SIG_DFL;
1025 else {
1026 /*
1027 * If the signal is being ignored,
1028 * then we forget about it immediately.
1029 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
1030 * and if it is set to SIG_IGN,
1031 * action will be SIG_DFL here.)
1032 */
1033 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
1034 return;
1035 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1036 action = SIG_HOLD;
1037 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
1038 action = SIG_CATCH;
1039 else {
1040 action = SIG_DFL;
1041
1042 if (prop & SA_KILL && p->p_nice > NZERO)
1043 p->p_nice = NZERO;
1044
1045 /*
1046 * If sending a tty stop signal to a member of an
1047 * orphaned process group, discard the signal here if
1048 * the action is default; don't stop the process below
1049 * if sleeping, and don't clear any pending SIGCONT.
1050 */
1051 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
1052 return;
1053 }
1054 }
1055
1056 if (prop & SA_CONT)
1057 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
1058
1059 if (prop & SA_STOP)
1060 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
1061
1062 /*
1063 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
1064 * please!), check if anything waits on it. If yes, save the
1065 * info into provided ps_sigwaited, and wake-up the waiter.
1066 * The signal won't be processed further here.
1067 */
1068 if ((prop & SA_CANTMASK) == 0
1069 && p->p_sigctx.ps_sigwaited
1070 && sigismember(p->p_sigctx.ps_sigwait, signum)
1071 && p->p_stat != SSTOP) {
1072 p->p_sigctx.ps_sigwaited->ksi_info = ksi->ksi_info;
1073 p->p_sigctx.ps_sigwaited = NULL;
1074 if (dolock)
1075 wakeup_one(&p->p_sigctx.ps_sigwait);
1076 else
1077 sched_wakeup(&p->p_sigctx.ps_sigwait);
1078 return;
1079 }
1080
1081 sigaddset(&p->p_sigctx.ps_siglist, signum);
1082
1083 /* CHECKSIGS() is "inlined" here. */
1084 p->p_sigctx.ps_sigcheck = 1;
1085
1086 /*
1087 * Defer further processing for signals which are held,
1088 * except that stopped processes must be continued by SIGCONT.
1089 */
1090 if (action == SIG_HOLD &&
1091 ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1092 ksiginfo_put(p, ksi);
1093 return;
1094 }
1095 /* XXXSMP: works, but icky */
1096 if (dolock)
1097 SCHED_LOCK(s);
1098
1099 if (p->p_flag & P_SA) {
1100 l = p->p_sa->sa_vp;
1101 allsusp = 0;
1102 if (p->p_stat == SACTIVE) {
1103 KDASSERT(l != NULL);
1104 if (l->l_flag & L_SA_IDLE) {
1105 /* wakeup idle LWP */
1106 } else if (l->l_flag & L_SA_YIELD) {
1107 /* idle LWP is already waking up */
1108 goto out;
1109 /*NOTREACHED*/
1110 } else {
1111 if (l->l_stat == LSRUN ||
1112 l->l_stat == LSONPROC) {
1113 signotify(p);
1114 goto out;
1115 /*NOTREACHED*/
1116 }
1117 if (l->l_stat == LSSLEEP &&
1118 l->l_flag & L_SINTR) {
1119 /* ok to signal vp lwp */
1120 } else if (signum == SIGKILL) {
1121 /*
1122 * get a suspended lwp from
1123 * the cache to send KILL
1124 * signal
1125 * XXXcl add signal checks at resume points
1126 */
1127 suspended = sa_getcachelwp(p);
1128 allsusp = 1;
1129 } else
1130 l = NULL;
1131 }
1132 } else if (p->p_stat == SSTOP) {
1133 if (l->l_stat != LSSLEEP || (l->l_flag & L_SINTR) == 0)
1134 l = NULL;
1135 }
1136 } else if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
1137 /*
1138 * At least one LWP is running or on a run queue.
1139 * The signal will be noticed when one of them returns
1140 * to userspace.
1141 */
1142 signotify(p);
1143 /*
1144 * The signal will be noticed very soon.
1145 */
1146 goto out;
1147 /*NOTREACHED*/
1148 } else {
1149 /*
1150 * Find out if any of the sleeps are interruptable,
1151 * and if all the live LWPs remaining are suspended.
1152 */
1153 allsusp = 1;
1154 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1155 if (l->l_stat == LSSLEEP &&
1156 l->l_flag & L_SINTR)
1157 break;
1158 if (l->l_stat == LSSUSPENDED)
1159 suspended = l;
1160 else if ((l->l_stat != LSZOMB) &&
1161 (l->l_stat != LSDEAD))
1162 allsusp = 0;
1163 }
1164 }
1165
1166 { /* XXXcl wrong indent to keep diff small */
1167 if (p->p_stat == SACTIVE) {
1168
1169 if (l != NULL && (p->p_flag & P_TRACED))
1170 goto run;
1171
1172 /*
1173 * If SIGCONT is default (or ignored) and process is
1174 * asleep, we are finished; the process should not
1175 * be awakened.
1176 */
1177 if ((prop & SA_CONT) && action == SIG_DFL) {
1178 sigdelset(&p->p_sigctx.ps_siglist, signum);
1179 goto done;
1180 }
1181
1182 /*
1183 * When a sleeping process receives a stop
1184 * signal, process immediately if possible.
1185 */
1186 if ((prop & SA_STOP) && action == SIG_DFL) {
1187 /*
1188 * If a child holding parent blocked,
1189 * stopping could cause deadlock.
1190 */
1191 if (p->p_flag & P_PPWAIT) {
1192 goto out;
1193 }
1194 sigdelset(&p->p_sigctx.ps_siglist, signum);
1195 p->p_xstat = signum;
1196 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1197 /*
1198 * XXXSMP: recursive call; don't lock
1199 * the second time around.
1200 */
1201 child_psignal(p, 0);
1202 }
1203 proc_stop(p); /* XXXSMP: recurse? */
1204 goto done;
1205 }
1206
1207 if (l == NULL) {
1208 /*
1209 * Special case: SIGKILL of a process
1210 * which is entirely composed of
1211 * suspended LWPs should succeed. We
1212 * make this happen by unsuspending one of
1213 * them.
1214 */
1215 if (allsusp && (signum == SIGKILL))
1216 lwp_continue(suspended);
1217 goto done;
1218 }
1219 /*
1220 * All other (caught or default) signals
1221 * cause the process to run.
1222 */
1223 goto runfast;
1224 /*NOTREACHED*/
1225 } else if (p->p_stat == SSTOP) {
1226 /* Process is stopped */
1227 /*
1228 * If traced process is already stopped,
1229 * then no further action is necessary.
1230 */
1231 if (p->p_flag & P_TRACED)
1232 goto done;
1233
1234 /*
1235 * Kill signal always sets processes running,
1236 * if possible.
1237 */
1238 if (signum == SIGKILL) {
1239 l = proc_unstop(p);
1240 if (l)
1241 goto runfast;
1242 goto done;
1243 }
1244
1245 if (prop & SA_CONT) {
1246 /*
1247 * If SIGCONT is default (or ignored),
1248 * we continue the process but don't
1249 * leave the signal in ps_siglist, as
1250 * it has no further action. If
1251 * SIGCONT is held, we continue the
1252 * process and leave the signal in
1253 * ps_siglist. If the process catches
1254 * SIGCONT, let it handle the signal
1255 * itself. If it isn't waiting on an
1256 * event, then it goes back to run
1257 * state. Otherwise, process goes
1258 * back to sleep state.
1259 */
1260 if (action == SIG_DFL)
1261 sigdelset(&p->p_sigctx.ps_siglist,
1262 signum);
1263 l = proc_unstop(p);
1264 if (l && (action == SIG_CATCH))
1265 goto runfast;
1266 goto out;
1267 }
1268
1269 if (prop & SA_STOP) {
1270 /*
1271 * Already stopped, don't need to stop again.
1272 * (If we did the shell could get confused.)
1273 */
1274 sigdelset(&p->p_sigctx.ps_siglist, signum);
1275 goto done;
1276 }
1277
1278 /*
1279 * If a lwp is sleeping interruptibly, then
1280 * wake it up; it will run until the kernel
1281 * boundary, where it will stop in issignal(),
1282 * since p->p_stat is still SSTOP. When the
1283 * process is continued, it will be made
1284 * runnable and can look at the signal.
1285 */
1286 if (l)
1287 goto run;
1288 goto out;
1289 } else {
1290 /* Else what? */
1291 panic("psignal: Invalid process state %d.",
1292 p->p_stat);
1293 }
1294 } /* XXXcl change indent after commit */
1295 /*NOTREACHED*/
1296
1297 runfast:
1298 if (action == SIG_CATCH) {
1299 ksiginfo_put(p, ksi);
1300 action = SIG_HOLD;
1301 }
1302 /*
1303 * Raise priority to at least PUSER.
1304 */
1305 if (l->l_priority > PUSER)
1306 l->l_priority = PUSER;
1307 run:
1308 if (action == SIG_CATCH) {
1309 ksiginfo_put(p, ksi);
1310 action = SIG_HOLD;
1311 }
1312
1313 setrunnable(l); /* XXXSMP: recurse? */
1314 out:
1315 if (action == SIG_CATCH)
1316 ksiginfo_put(p, ksi);
1317 done:
1318 /* XXXSMP: works, but icky */
1319 if (dolock)
1320 SCHED_UNLOCK(s);
1321 }
1322
1323 void
1324 kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
1325 {
1326 struct proc *p = l->l_proc;
1327 struct lwp *le, *li;
1328 siginfo_t *si;
1329 int f;
1330
1331 if (p->p_flag & P_SA) {
1332
1333 /* XXXUPSXXX What if not on sa_vp ? */
1334
1335 f = l->l_flag & L_SA;
1336 l->l_flag &= ~L_SA;
1337 si = pool_get(&siginfo_pool, PR_WAITOK);
1338 si->_info = ksi->ksi_info;
1339 le = li = NULL;
1340 if (KSI_TRAP_P(ksi))
1341 le = l;
1342 else
1343 li = l;
1344
1345 sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
1346 sizeof(siginfo_t), si);
1347 l->l_flag |= f;
1348 return;
1349 }
1350
1351 #ifdef __HAVE_SIGINFO
1352 (*p->p_emul->e_sendsig)(ksi, mask);
1353 #else
1354 (*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, KSI_TRAPCODE(ksi));
1355 #endif
1356 }
1357
1358 static __inline int firstsig(const sigset_t *);
1359
1360 static __inline int
1361 firstsig(const sigset_t *ss)
1362 {
1363 int sig;
1364
1365 sig = ffs(ss->__bits[0]);
1366 if (sig != 0)
1367 return (sig);
1368 #if NSIG > 33
1369 sig = ffs(ss->__bits[1]);
1370 if (sig != 0)
1371 return (sig + 32);
1372 #endif
1373 #if NSIG > 65
1374 sig = ffs(ss->__bits[2]);
1375 if (sig != 0)
1376 return (sig + 64);
1377 #endif
1378 #if NSIG > 97
1379 sig = ffs(ss->__bits[3]);
1380 if (sig != 0)
1381 return (sig + 96);
1382 #endif
1383 return (0);
1384 }
1385
1386 /*
1387 * If the current process has received a signal (should be caught or cause
1388 * termination, should interrupt current syscall), return the signal number.
1389 * Stop signals with default action are processed immediately, then cleared;
1390 * they aren't returned. This is checked after each entry to the system for
1391 * a syscall or trap (though this can usually be done without calling issignal
1392 * by checking the pending signal masks in the CURSIG macro.) The normal call
1393 * sequence is
1394 *
1395 * while (signum = CURSIG(curlwp))
1396 * postsig(signum);
1397 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	/*
	 * Lock bookkeeping: if L_SINTR is set, the caller presumably
	 * already holds the scheduler lock (we were woken from an
	 * interruptible sleep), so "dolock" is false and "locked"
	 * records that we must re-take the lock before returning.
	 * NOTE(review): inferred from the paired SCHED_LOCK/splx uses
	 * below — confirm against callers of issignal()/CURSIG.
	 */
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		/*
		 * Jump into the middle of the signal-processing loop
		 * below to perform the actual context switch.
		 */
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) { 		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			/* Restore the lock state the caller expects. */
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			/* Switch away; we resume here when continued. */
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			/* mi_switch() released the sched lock for us. */
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
			sigswitch:
				/* Resumes here when the process continues. */
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	/* Restore the lock state the caller expects. */
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}
1583
1584 /*
1585 * Put the argument process into the stopped state and notify the parent
1586 * via wakeup. Signals are handled elsewhere. The process must not be
1587 * on the run queue.
1588 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	p->p_flag &= ~P_WAITED;

	if (p->p_flag & P_SA) {
		/*
		 * Only (try to) put the LWP on the VP in stopped
		 * state.
		 * All other LWPs will suspend in sa_vp_repossess()
		 * until the VP-LWP donates the VP.
		 */
		l = p->p_sa->sa_vp;
		if (l->l_stat == LSONPROC && l->l_cpu == curcpu()) {
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSSLEEP &&
		    l->l_flag & L_SA_IDLE) {
			/*
			 * NOTE(review): this branch does not decrement
			 * p_nrlwps — presumably an idle VP-LWP is not
			 * counted as running; confirm against the SA code.
			 */
			l->l_flag &= ~L_SA_IDLE;
			l->l_stat = LSSTOP;
		}
		goto out;
	}

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSONPROC) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			      "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
	}

 out:
	/* XXX unlock process LWP state */

	/* Notify the parent, which may be sleeping in wait(2). */
	sched_wakeup((caddr_t)p->p_pptr);
}
1679
1680 /*
1681 * Given a process in state SSTOP, set the state back to SACTIVE and
1682 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
1683 *
1684 * If no LWPs ended up runnable (and therefore able to take a signal),
1685 * return a LWP that is sleeping interruptably. The caller can wake
1686 * that LWP up to take a signal.
1687 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptable LWPs left after we have run so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptable LWPs if this is the case.
	 */

	p->p_stat = SACTIVE;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		/*
		 * Once any LWP can run (or is already runnable), no
		 * candidate needs to be returned: clear lr and note
		 * that the process can already take the signal.
		 */
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			/* Was sleeping when stopped; put it back to sleep. */
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			/* Not on a sleep queue; make it runnable. */
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}
	if (p->p_flag & P_SA) {
		/* Only consider returning the LWP on the VP. */
		lr = p->p_sa->sa_vp;
		if (lr->l_stat == LSSLEEP) {
			if (lr->l_flag & L_SA_YIELD)
				setrunnable(lr);
			else if (lr->l_flag & L_SINTR)
				return lr;
		}
		return NULL;
	}
	return lr;
}
1737
1738 /*
1739 * Take the action for the specified signal
1740 * from the current set of pending signals.
1741 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	/* The signal is now being handled; remove it from pending. */
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		/* Detach any queued siginfo for this signal. */
		ksi = ksiginfo_get(p, signum);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			(void)memset(&ksi1, 0, sizeof(ksi1));
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			pool_put(&ksiginfo_pool, ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		/* Block the scheduler while updating the signal mask. */
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* SA_RESETHAND: revert to SIG_DFL after delivery. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}
1841
1842 /*
1843 * Kill the current process for stated reason.
1844 */
void
killproc(struct proc *p, const char *why)
{
	/* Record the reason in the system log and on the user's tty. */
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}
1852
1853 /*
1854 * Force the current process to exit with the specified signal, dumping core
1855 * if appropriate. We bypass the normal tests for masked and caught signals,
1856 * allowing unrecoverable failures to terminate the process without changing
1857 * signal state. Mark the accounting record with the signal termination.
1858 * If dumping core, save the signal number for the debugger. Calls exit and
1859 * does not return.
1860 */
1861
/*
 * Whether sigexit() logs a message when a process exits on a signal;
 * defaults to on for DEBUG kernels and is exported via sysctl.
 */
#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

/* Log formats used by sigexit() when kern_logsigexit is set. */
static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
1872
/*
 * Wrapper function for use in p_userret.  Installed by sigexit() so
 * that every other LWP in the process parks itself (LSSUSPENDED) on
 * its way back to userspace, keeping its kernel stack and trapframe
 * intact for coredump(); the LWP exits once the dump is complete.
 * The arg parameter is unused (p_userret_arg is NULL).
 */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/* We only get here once the process resumes us; clean up. */
	lwp_exit(l);
}
1897
void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		/*
		 * Another LWP already started the exit; p_userret was
		 * set to lwp_coredump_hook below, which suspends and
		 * then exits this LWP — it does not return here.
		 */
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		/* WCOREFLAG in the exit status signals a successful dump. */
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
				(int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}
1957
1958 /*
1959 * Dump core, into a file named "progname.core" or "core" (depending on the
1960 * value of shortcorename), unless the process was setuid/setgid.
1961 */
int
coredump(struct lwp *l)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	struct nameidata nd;
	struct vattr vattr;
	struct mount *mp;
	int error, error1;
	char name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

 restart:
	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/*
	 * Filesystem not ready for writes right now: close the file,
	 * wait until writing is allowed, and retry from the top (the
	 * cwd and corename may have changed meanwhile).
	 */
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		if ((error = vn_close(vp, FWRITE, cred, p)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp,
		    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	/* Truncate any existing core file to zero length. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	/* Close errors matter only if the dump itself succeeded. */
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}
2047
2048 /*
2049 * Nonexistent system call-- signal process (may want to handle it).
2050 * Flag error in case process won't see signal immediately (blocked or ignored).
2051 */
2052 /* ARGSUSED */
2053 int
2054 sys_nosys(struct lwp *l, void *v, register_t *retval)
2055 {
2056 struct proc *p;
2057
2058 p = l->l_proc;
2059 psignal(p, SIGSYS);
2060 return (ENOSYS);
2061 }
2062
/*
 * Expand the per-process core name template (pl_corename) into dst.
 * Recognized escapes: %n = command name, %p = pid, %u = session login
 * name, %t = process start time (seconds).  Any other %X sequence is
 * copied literally.  Returns 0 on success or ENAMETOOLONG if the
 * expansion does not fit in MAXPATHLEN bytes.
 */
static int
build_corename(struct proc *p, char dst[MAXPATHLEN])
{
	const char *s;
	char *d, *end;
	int i;

	for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
	    *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				/* Unknown escape: copy the '%' literally. */
				goto copy;
			}
			/*
			 * On truncation snprintf returns the would-be
			 * length, pushing d past end; the d >= end check
			 * below then reports ENAMETOOLONG.
			 * NOTE(review): assumes the kernel snprintf never
			 * returns a negative value — confirm.
			 */
			d += i;
			s++;
		} else {
	copy:		*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}
2104
2105 void
2106 getucontext(struct lwp *l, ucontext_t *ucp)
2107 {
2108 struct proc *p;
2109
2110 p = l->l_proc;
2111
2112 ucp->uc_flags = 0;
2113 ucp->uc_link = l->l_ctxlink;
2114
2115 (void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
2116 ucp->uc_flags |= _UC_SIGMASK;
2117
2118 /*
2119 * The (unsupplied) definition of the `current execution stack'
2120 * in the System V Interface Definition appears to allow returning
2121 * the main context stack.
2122 */
2123 if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
2124 ucp->uc_stack.ss_sp = (void *)USRSTACK;
2125 ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
2126 ucp->uc_stack.ss_flags = 0; /* XXX, def. is Very Fishy */
2127 } else {
2128 /* Simply copy alternate signal execution stack. */
2129 ucp->uc_stack = p->p_sigctx.ps_sigstk;
2130 }
2131 ucp->uc_flags |= _UC_STACK;
2132
2133 cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
2134 }
2135
2136 /* ARGSUSED */
2137 int
2138 sys_getcontext(struct lwp *l, void *v, register_t *retval)
2139 {
2140 struct sys_getcontext_args /* {
2141 syscallarg(struct __ucontext *) ucp;
2142 } */ *uap = v;
2143 ucontext_t uc;
2144
2145 getucontext(l, &uc);
2146
2147 return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
2148 }
2149
2150 int
2151 setucontext(struct lwp *l, const ucontext_t *ucp)
2152 {
2153 struct proc *p;
2154 int error;
2155
2156 p = l->l_proc;
2157 if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
2158 return (error);
2159 l->l_ctxlink = ucp->uc_link;
2160 /*
2161 * We might want to take care of the stack portion here but currently
2162 * don't; see the comment in getucontext().
2163 */
2164 if ((ucp->uc_flags & _UC_SIGMASK) != 0)
2165 sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);
2166
2167 return 0;
2168 }
2169
2170 /* ARGSUSED */
2171 int
2172 sys_setcontext(struct lwp *l, void *v, register_t *retval)
2173 {
2174 struct sys_setcontext_args /* {
2175 syscallarg(const ucontext_t *) ucp;
2176 } */ *uap = v;
2177 ucontext_t uc;
2178 int error;
2179
2180 if (SCARG(uap, ucp) == NULL) /* i.e. end of uc_link chain */
2181 exit1(l, W_EXITCODE(0, 0));
2182 else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
2183 (error = setucontext(l, &uc)) != 0)
2184 return (error);
2185
2186 return (EJUSTRETURN);
2187 }
2188
2189 /*
2190 * sigtimedwait(2) system call, used also for implementation
2191 * of sigwaitinfo() and sigwait().
2192 *
2193 * This only handles single LWP in signal wait. libpthread provides
2194 * it's own sigtimedwait() wrapper to DTRT WRT individual threads.
2195 */
2196 int
2197 sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
2198 {
2199 struct sys___sigtimedwait_args /* {
2200 syscallarg(const sigset_t *) set;
2201 syscallarg(siginfo_t *) info;
2202 syscallarg(struct timespec *) timeout;
2203 } */ *uap = v;
2204 sigset_t *waitset, twaitset;
2205 struct proc *p = l->l_proc;
2206 int error, signum, s;
2207 int timo = 0;
2208 struct timeval tvstart;
2209 struct timespec ts;
2210 ksiginfo_t *ksi;
2211
2212 MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK);
2213
2214 if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t)))) {
2215 FREE(waitset, M_TEMP);
2216 return (error);
2217 }
2218
2219 /*
2220 * Silently ignore SA_CANTMASK signals. psignal1() would
2221 * ignore SA_CANTMASK signals in waitset, we do this
2222 * only for the below siglist check.
2223 */
2224 sigminusset(&sigcantmask, waitset);
2225
2226 /*
2227 * First scan siglist and check if there is signal from
2228 * our waitset already pending.
2229 */
2230 twaitset = *waitset;
2231 __sigandset(&p->p_sigctx.ps_siglist, &twaitset);
2232 if ((signum = firstsig(&twaitset))) {
2233 /* found pending signal */
2234 sigdelset(&p->p_sigctx.ps_siglist, signum);
2235 ksi = ksiginfo_get(p, signum);
2236 if (!ksi) {
2237 /* No queued siginfo, manufacture one */
2238 ksi = pool_get(&ksiginfo_pool, PR_WAITOK);
2239 KSI_INIT(ksi);
2240 ksi->ksi_info._signo = signum;
2241 ksi->ksi_info._code = SI_USER;
2242 }
2243
2244 goto sig;
2245 }
2246
2247 /*
2248 * Calculate timeout, if it was specified.
2249 */
2250 if (SCARG(uap, timeout)) {
2251 uint64_t ms;
2252
2253 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
2254 return (error);
2255
2256 ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
2257 timo = mstohz(ms);
2258 if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
2259 timo = 1;
2260 if (timo <= 0)
2261 return (EAGAIN);
2262
2263 /*
2264 * Remember current mono_time, it would be used in
2265 * ECANCELED/ERESTART case.
2266 */
2267 s = splclock();
2268 tvstart = mono_time;
2269 splx(s);
2270 }
2271
2272 /*
2273 * Setup ps_sigwait list. Pass pointer to malloced memory
2274 * here; it's not possible to pass pointer to a structure
2275 * on current process's stack, the current process might
2276 * be swapped out at the time the signal would get delivered.
2277 */
2278 ksi = pool_get(&ksiginfo_pool, PR_WAITOK);
2279 p->p_sigctx.ps_sigwaited = ksi;
2280 p->p_sigctx.ps_sigwait = waitset;
2281
2282 /*
2283 * Wait for signal to arrive. We can either be woken up or
2284 * time out.
2285 */
2286 error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);
2287
2288 /*
2289 * Need to find out if we woke as a result of lwp_wakeup()
2290 * or a signal outside our wait set.
2291 */
2292 if (error == EINTR && p->p_sigctx.ps_sigwaited
2293 && !firstsig(&p->p_sigctx.ps_siglist)) {
2294 /* wakeup via _lwp_wakeup() */
2295 error = ECANCELED;
2296 } else if (!error && p->p_sigctx.ps_sigwaited) {
2297 /* spurious wakeup - arrange for syscall restart */
2298 error = ERESTART;
2299 goto fail;
2300 }
2301
2302 /*
2303 * On error, clear sigwait indication. psignal1() clears it
2304 * in !error case.
2305 */
2306 if (error) {
2307 p->p_sigctx.ps_sigwaited = NULL;
2308
2309 /*
2310 * If the sleep was interrupted (either by signal or wakeup),
2311 * update the timeout and copyout new value back.
2312 * It would be used when the syscall would be restarted
2313 * or called again.
2314 */
2315 if (timo && (error == ERESTART || error == ECANCELED)) {
2316 struct timeval tvnow, tvtimo;
2317 int err;
2318
2319 s = splclock();
2320 tvnow = mono_time;
2321 splx(s);
2322
2323 TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);
2324
2325 /* compute how much time has passed since start */
2326 timersub(&tvnow, &tvstart, &tvnow);
2327 /* substract passed time from timeout */
2328 timersub(&tvtimo, &tvnow, &tvtimo);
2329
2330 if (tvtimo.tv_sec < 0) {
2331 error = EAGAIN;
2332 goto fail;
2333 }
2334
2335 TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);
2336
2337 /* copy updated timeout to userland */
2338 if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts)))) {
2339 error = err;
2340 goto fail;
2341 }
2342 }
2343
2344 goto fail;
2345 }
2346
2347 /*
2348 * If a signal from the wait set arrived, copy it to userland.
2349 * Copy only the used part of siginfo, the padding part is
2350 * left unchanged (userland is not supposed to touch it anyway).
2351 */
2352 sig:
2353 error = copyout(&ksi->ksi_info, SCARG(uap, info), sizeof(ksi->ksi_info));
2354
2355 fail:
2356 FREE(waitset, M_TEMP);
2357 pool_put(&ksiginfo_pool, ksi);
2358 p->p_sigctx.ps_sigwait = NULL;
2359
2360 return (error);
2361 }
2362
2363 /*
2364 * Returns true if signal is ignored or masked for passed process.
2365 */
2366 int
2367 sigismasked(struct proc *p, int sig)
2368 {
2369
2370 return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
2371 sigismember(&p->p_sigctx.ps_sigmask, sig));
2372 }
2373
2374 static int
2375 filt_sigattach(struct knote *kn)
2376 {
2377 struct proc *p = curproc;
2378
2379 kn->kn_ptr.p_proc = p;
2380 kn->kn_flags |= EV_CLEAR; /* automatically set */
2381
2382 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2383
2384 return (0);
2385 }
2386
2387 static void
2388 filt_sigdetach(struct knote *kn)
2389 {
2390 struct proc *p = kn->kn_ptr.p_proc;
2391
2392 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2393 }
2394
2395 /*
2396 * signal knotes are shared with proc knotes, so we apply a mask to
2397 * the hint in order to differentiate them from process hints. This
2398 * could be avoided by using a signal-specific knote list, but probably
2399 * isn't worth the trouble.
2400 */
2401 static int
2402 filt_signal(struct knote *kn, long hint)
2403 {
2404
2405 if (hint & NOTE_SIGNAL) {
2406 hint &= ~NOTE_SIGNAL;
2407
2408 if (kn->kn_id == hint)
2409 kn->kn_data++;
2410 }
2411 return (kn->kn_data != 0);
2412 }
2413
/* Filter ops for EVFILT_SIGNAL knotes (not fd-based, hence 0). */
const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
2417