kern_sig.c revision 1.170 1 /* $NetBSD: kern_sig.c,v 1.170 2003/10/25 09:06:51 christos Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.170 2003/10/25 09:06:51 christos Exp $");
41
42 #include "opt_ktrace.h"
43 #include "opt_compat_sunos.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_compat_netbsd32.h"
46
47 #define SIGPROP /* include signal properties table */
48 #include <sys/param.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/systm.h>
55 #include <sys/timeb.h>
56 #include <sys/times.h>
57 #include <sys/buf.h>
58 #include <sys/acct.h>
59 #include <sys/file.h>
60 #include <sys/kernel.h>
61 #include <sys/wait.h>
62 #include <sys/ktrace.h>
63 #include <sys/syslog.h>
64 #include <sys/stat.h>
65 #include <sys/core.h>
66 #include <sys/filedesc.h>
67 #include <sys/malloc.h>
68 #include <sys/pool.h>
69 #include <sys/ucontext.h>
70 #include <sys/sa.h>
71 #include <sys/savar.h>
72 #include <sys/exec.h>
73
74 #include <sys/mount.h>
75 #include <sys/syscallargs.h>
76
77 #include <machine/cpu.h>
78
79 #include <sys/user.h> /* for coredump */
80
81 #include <uvm/uvm_extern.h>
82
/*
 * Forward declarations for helpers private to this file.
 */
83 static void child_psignal(struct proc *, int);
84 static void proc_stop(struct proc *);
85 static int build_corename(struct proc *, char [MAXPATHLEN]);
86 static void ksiginfo_exithook(struct proc *, void *);
87 static void ksiginfo_put(struct proc *, const ksiginfo_t *);
88 static ksiginfo_t *ksiginfo_get(struct proc *, int);
89 static void kpsignal2(struct proc *, const ksiginfo_t *, int);
90
/*
 * Signal-classification masks, filled in once by siginit():
 * continue signals, stop signals, and signals that may never be
 * masked or caught.
 */
91 sigset_t contsigmask, stopsigmask, sigcantmask;
92
93 struct pool sigacts_pool; /* memory pool for sigacts structures */
94 struct pool siginfo_pool; /* memory pool for siginfo structures */
95 struct pool ksiginfo_pool; /* memory pool for ksiginfo structures */
96
97 /*
98 * Can process p, with pcred pc, send the signal signum to process q?
99 */
/*
 * Allowed when: the sender is the superuser; either of the sender's
 * real/effective uids matches either of the target's; or the signal
 * is SIGCONT and sender and target share a session.
 */
100 #define CANSIGNAL(p, pc, q, signum) \
101 ((pc)->pc_ucred->cr_uid == 0 || \
102 (pc)->p_ruid == (q)->p_cred->p_ruid || \
103 (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
104 (pc)->p_ruid == (q)->p_ucred->cr_uid || \
105 (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
106 ((signum) == SIGCONT && (q)->p_session == (p)->p_session))
107
108 /*
109 * Remove and return the first ksiginfo element that matches our requested
110 * signal, or return NULL if one not found.
111 */
112 static ksiginfo_t *
113 ksiginfo_get(struct proc *p, int signo)
114 {
115 ksiginfo_t *ksi;
116 int s;
117
118 s = splsoftclock();
119 simple_lock(&p->p_sigctx.ps_silock);
120 CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
121 if (ksi->ksi_signo == signo) {
122 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
123 goto out;
124 }
125 }
126 ksi = NULL;
127 out:
128 simple_unlock(&p->p_sigctx.ps_silock);
129 splx(s);
130 return ksi;
131 }
132
133 /*
134 * Append a new ksiginfo element to the list of pending ksiginfo's, if
135 * we need to (SA_SIGINFO was requested). We replace non RT signals if
136 * they already existed in the queue and we add new entries for RT signals,
137 * or for non RT signals with non-existing entries.
138 */
139 static void
140 ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
141 {
142 ksiginfo_t *kp;
143 struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
144 int s;
145
/* Nothing to record unless the handler asked for siginfo delivery. */
146 if ((sa->sa_flags & SA_SIGINFO) == 0)
147 return;
148
149 s = splsoftclock();
150 simple_lock(&p->p_sigctx.ps_silock);
/* Real-time signal queueing is not implemented yet; all signals take
 * the replace-existing-entry path below. */
151 #ifdef notyet /* XXX: QUEUING */
152 if (ksi->ksi_signo < SIGRTMIN)
153 #endif
154 {
155 CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
156 if (kp->ksi_signo == ksi->ksi_signo) {
/* Overwrite the payload in place, but save and restore the
 * CIRCLEQ linkage so the structure assignment does not corrupt
 * the queue. */
157 CIRCLEQ_ENTRY(ksiginfo) sv;
158 (void)memcpy(&sv, &kp->ksi_list, sizeof(sv));
159 *kp = *ksi;
160 (void)memcpy(&kp->ksi_list, &sv, sizeof(sv));
161 goto out;
162 }
163 }
164 }
/* We are at splsoftclock(), so we cannot sleep for memory; on
 * failure the siginfo is silently dropped (the signal itself is
 * still delivered, just without detail). */
165 kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
166 if (kp == NULL) {
167 #ifdef DIAGNOSTIC
168 printf("Out of memory allocating siginfo for pid %d\n",
169 p->p_pid);
170 #endif
171 goto out;
172 }
173 *kp = *ksi;
174 CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
175 out:
176 simple_unlock(&p->p_sigctx.ps_silock);
177 splx(s);
178 }
179
180 /*
181 * free all pending ksiginfo on exit
182 */
183 static void
184 ksiginfo_exithook(struct proc *p, void *v)
185 {
186 int s;
187
188 s = splsoftclock();
189 simple_lock(&p->p_sigctx.ps_silock);
190 while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
191 ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
192 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
193 pool_put(&ksiginfo_pool, ksi);
194 }
195 simple_unlock(&p->p_sigctx.ps_silock);
196 splx(s);
197 }
198
199 /*
200 * Initialize signal-related data structures.
201 */
/*
 * Called once at boot: set up the pools backing sigacts, siginfo and
 * ksiginfo allocations, and register ksiginfo_exithook() so pending
 * siginfo queues are drained on both exit and exec.
 */
202 void
203 signal_init(void)
204 {
/* sigacts and siginfo are allocated from process context only. */
205 pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
206 &pool_allocator_nointr);
207 pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
208 &pool_allocator_nointr);
/* ksiginfo's use the default allocator (NULL): ksiginfo_put()
 * grabs them with PR_NOWAIT at splsoftclock(). */
209 pool_init(&ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
210 NULL);
211 exithook_establish(ksiginfo_exithook, NULL);
212 exechook_establish(ksiginfo_exithook, NULL);
213 }
214
215 /*
216 * Create an initial sigctx structure, using the same signal state
217 * as p. If 'share' is set, share the sigctx_proc part, otherwise just
218 * copy it from parent.
219 */
220 void
221 sigactsinit(struct proc *np, struct proc *pp, int share)
222 {
223 struct sigacts *ps;
224
225 if (share) {
226 np->p_sigacts = pp->p_sigacts;
227 pp->p_sigacts->sa_refcnt++;
228 } else {
229 ps = pool_get(&sigacts_pool, PR_WAITOK);
230 if (pp)
231 memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
232 else
233 memset(ps, '\0', sizeof(struct sigacts));
234 ps->sa_refcnt = 1;
235 np->p_sigacts = ps;
236 }
237 }
238
239 /*
240 * Make this process not share its sigctx, maintaining all
241 * signal state.
242 */
243 void
244 sigactsunshare(struct proc *p)
245 {
246 struct sigacts *oldps;
247
248 if (p->p_sigacts->sa_refcnt == 1)
249 return;
250
251 oldps = p->p_sigacts;
252 sigactsinit(p, NULL, 0);
253
254 if (--oldps->sa_refcnt == 0)
255 pool_put(&sigacts_pool, oldps);
256 }
257
258 /*
259 * Release a sigctx structure.
260 */
261 void
262 sigactsfree(struct proc *p)
263 {
264 struct sigacts *ps;
265
266 ps = p->p_sigacts;
267 if (--ps->sa_refcnt > 0)
268 return;
269
270 pool_put(&sigacts_pool, ps);
271 }
272
/*
 * Install a new signal action for `signum' in process `p'.
 *
 * nsa   - new action, or NULL to only query
 * osa   - if non-NULL, receives the previous action
 * tramp - userland signal trampoline, or NULL for the legacy
 *         kernel-provided on-stack trampoline
 * vers  - trampoline ABI version; 0 means the legacy trampoline
 *
 * Returns 0 on success or EINVAL for a bad signal number, bad
 * flags, an inconsistent tramp/vers pair, or an attempt to change
 * an unmaskable signal.
 */
273 int
274 sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
275 struct sigaction *osa, const void *tramp, int vers)
276 {
277 struct sigacts *ps;
278 int prop;
279
280 ps = p->p_sigacts;
281 if (signum <= 0 || signum >= NSIG)
282 return (EINVAL);
283
284 /*
285 * Trampoline ABI version 0 is reserved for the legacy
286 * kernel-provided on-stack trampoline. Conversely, if we are
287 * using a non-0 ABI version, we must have a trampoline. Only
288 * validate the vers if a new sigaction was supplied. Emulations
289 * use legacy kernel trampolines with version 0, alternatively
290 * check for that too.
291 */
292 if ((vers != 0 && tramp == NULL) ||
293 #ifdef SIGTRAMP_VALID
294 (nsa != NULL &&
295 ((vers == 0) ?
296 (p->p_emul->e_sigcode == NULL) :
297 !SIGTRAMP_VALID(vers))) ||
298 #endif
299 (vers == 0 && tramp != NULL))
300 return (EINVAL);
301
/* Report the old action before any validation of the new one. */
302 if (osa)
303 *osa = SIGACTION_PS(ps, signum);
304
305 if (nsa) {
306 if (nsa->sa_flags & ~SA_ALLBITS)
307 return (EINVAL);
308
/* Ports without siginfo support cannot honor SA_SIGINFO. */
309 #ifndef __HAVE_SIGINFO
310 if (nsa->sa_flags & SA_SIGINFO)
311 return (EINVAL);
312 #endif
313
/* SIGKILL/SIGSTOP class signals cannot have their action changed. */
314 prop = sigprop[signum];
315 if (prop & SA_CANTMASK)
316 return (EINVAL);
317
318 (void) splsched(); /* XXXSMP */
319 SIGACTION_PS(ps, signum) = *nsa;
320 ps->sa_sigdesc[signum].sd_tramp = tramp;
321 ps->sa_sigdesc[signum].sd_vers = vers;
/* Never allow unmaskable signals into the handler's mask. */
322 sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
323 if ((prop & SA_NORESET) != 0)
324 SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
/* SIGCHLD carries two process-wide flags along with its action. */
325 if (signum == SIGCHLD) {
326 if (nsa->sa_flags & SA_NOCLDSTOP)
327 p->p_flag |= P_NOCLDSTOP;
328 else
329 p->p_flag &= ~P_NOCLDSTOP;
330 if (nsa->sa_flags & SA_NOCLDWAIT) {
331 /*
332 * Paranoia: since SA_NOCLDWAIT is implemented
333 * by reparenting the dying child to PID 1 (and
334 * trust it to reap the zombie), PID 1 itself
335 * is forbidden to set SA_NOCLDWAIT.
336 */
337 if (p->p_pid == 1)
338 p->p_flag &= ~P_NOCLDWAIT;
339 else
340 p->p_flag |= P_NOCLDWAIT;
341 } else
342 p->p_flag &= ~P_NOCLDWAIT;
343 }
/* Unless SA_NODEFER, the signal is masked while its handler runs. */
344 if ((nsa->sa_flags & SA_NODEFER) == 0)
345 sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
346 else
347 sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
348 /*
349 * Set bit in p_sigctx.ps_sigignore for signals that are set to
350 * SIG_IGN, and for signals set to SIG_DFL where the default is
351 * to ignore. However, don't put SIGCONT in
352 * p_sigctx.ps_sigignore, as we have to restart the process.
353 */
354 if (nsa->sa_handler == SIG_IGN ||
355 (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
356 /* never to be seen again */
357 sigdelset(&p->p_sigctx.ps_siglist, signum);
358 if (signum != SIGCONT) {
359 /* easier in psignal */
360 sigaddset(&p->p_sigctx.ps_sigignore, signum);
361 }
362 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
363 } else {
364 sigdelset(&p->p_sigctx.ps_sigignore, signum);
365 if (nsa->sa_handler == SIG_DFL)
366 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
367 else
368 sigaddset(&p->p_sigctx.ps_sigcatch, signum);
369 }
370 (void) spl0();
371 }
372
373 return (0);
374 }
375
#ifdef COMPAT_16
/*
 * Old __sigaction14 system call: no user-supplied trampoline, so
 * sigaction1() is invoked with the legacy trampoline (NULL, vers 0).
 */
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigaction14_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction nact, oact;
	int error;

	/* Fetch the new action, if one was supplied. */
	if (SCARG(uap, nsa) != NULL) {
		error = copyin(SCARG(uap, nsa), &nact, sizeof(nact));
		if (error != 0)
			return (error);
	}
	error = sigaction1(l->l_proc, SCARG(uap, signum),
	    SCARG(uap, nsa) != NULL ? &nact : NULL,
	    SCARG(uap, osa) != NULL ? &oact : NULL,
	    NULL, 0);
	if (error != 0)
		return (error);
	/* Return the previous action, if requested. */
	if (SCARG(uap, osa) != NULL)
		error = copyout(&oact, SCARG(uap, osa), sizeof(oact));
	return (error);
}
#endif
409
410 /* ARGSUSED */
411 int
412 sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
413 {
414 struct sys___sigaction_sigtramp_args /* {
415 syscallarg(int) signum;
416 syscallarg(const struct sigaction *) nsa;
417 syscallarg(struct sigaction *) osa;
418 syscallarg(void *) tramp;
419 syscallarg(int) vers;
420 } */ *uap = v;
421 struct proc *p = l->l_proc;
422 struct sigaction nsa, osa;
423 int error;
424
425 if (SCARG(uap, nsa)) {
426 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
427 if (error)
428 return (error);
429 }
430 error = sigaction1(p, SCARG(uap, signum),
431 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
432 SCARG(uap, tramp), SCARG(uap, vers));
433 if (error)
434 return (error);
435 if (SCARG(uap, osa)) {
436 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
437 if (error)
438 return (error);
439 }
440 return (0);
441 }
442
443 /*
444 * Initialize signal state for process 0;
445 * set to ignore signals that are ignored by default and disable the signal
446 * stack.
447 */
/*
 * Also builds the global contsigmask/stopsigmask/sigcantmask
 * classification sets as a side effect (they are derived from the
 * sigprop[] table walked here anyway).
 */
448 void
449 siginit(struct proc *p)
450 {
451 struct sigacts *ps;
452 int signum, prop;
453
454 ps = p->p_sigacts;
455 sigemptyset(&contsigmask);
456 sigemptyset(&stopsigmask);
457 sigemptyset(&sigcantmask);
458 for (signum = 1; signum < NSIG; signum++) {
459 prop = sigprop[signum];
460 if (prop & SA_CONT)
461 sigaddset(&contsigmask, signum);
462 if (prop & SA_STOP)
463 sigaddset(&stopsigmask, signum);
464 if (prop & SA_CANTMASK)
465 sigaddset(&sigcantmask, signum);
/* SIGCONT is never marked ignored: it must still restart the process. */
466 if (prop & SA_IGNORE && signum != SIGCONT)
467 sigaddset(&p->p_sigctx.ps_sigignore, signum);
468 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
469 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
470 }
471 sigemptyset(&p->p_sigctx.ps_sigcatch);
472 p->p_sigctx.ps_sigwaited = 0;
473 p->p_flag &= ~P_NOCLDSTOP;
474
475 /*
476 * Reset stack state to the user stack.
477 */
478 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
479 p->p_sigctx.ps_sigstk.ss_size = 0;
480 p->p_sigctx.ps_sigstk.ss_sp = 0;
481
482 /* One reference. */
483 ps->sa_refcnt = 1;
484 }
485
486 /*
487 * Reset signals for an exec of the specified process.
488 */
489 void
490 execsigs(struct proc *p)
491 {
492 struct sigacts *ps;
493 int signum, prop;
494
/* Make sure we modify a private copy, not a shared sigacts. */
495 sigactsunshare(p);
496
497 ps = p->p_sigacts;
498
499 /*
500 * Reset caught signals. Held signals remain held
501 * through p_sigctx.ps_sigmask (unless they were caught,
502 * and are now ignored by default).
503 */
504 for (signum = 1; signum < NSIG; signum++) {
505 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
506 prop = sigprop[signum];
507 if (prop & SA_IGNORE) {
508 if ((prop & SA_CONT) == 0)
509 sigaddset(&p->p_sigctx.ps_sigignore,
510 signum);
/* Drop the pending instance: default action is to ignore it. */
511 sigdelset(&p->p_sigctx.ps_siglist, signum);
512 }
513 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
514 }
515 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
516 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
517 }
518 sigemptyset(&p->p_sigctx.ps_sigcatch);
519 p->p_sigctx.ps_sigwaited = 0;
520 p->p_flag &= ~P_NOCLDSTOP;
521
522 /*
523 * Reset stack state to the user stack.
524 */
525 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
526 p->p_sigctx.ps_sigstk.ss_size = 0;
527 p->p_sigctx.ps_sigstk.ss_sp = 0;
528 }
529
530 int
531 sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
532 {
533
534 if (oss)
535 *oss = p->p_sigctx.ps_sigmask;
536
537 if (nss) {
538 (void)splsched(); /* XXXSMP */
539 switch (how) {
540 case SIG_BLOCK:
541 sigplusset(nss, &p->p_sigctx.ps_sigmask);
542 break;
543 case SIG_UNBLOCK:
544 sigminusset(nss, &p->p_sigctx.ps_sigmask);
545 CHECKSIGS(p);
546 break;
547 case SIG_SETMASK:
548 p->p_sigctx.ps_sigmask = *nss;
549 CHECKSIGS(p);
550 break;
551 default:
552 (void)spl0(); /* XXXSMP */
553 return (EINVAL);
554 }
555 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
556 (void)spl0(); /* XXXSMP */
557 }
558
559 return (0);
560 }
561
562 /*
563 * Manipulate signal mask.
564 * Note that we receive new mask, not pointer,
565 * and return old mask as return value;
566 * the library stub does the rest.
567 */
568 int
569 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
570 {
571 struct sys___sigprocmask14_args /* {
572 syscallarg(int) how;
573 syscallarg(const sigset_t *) set;
574 syscallarg(sigset_t *) oset;
575 } */ *uap = v;
576 struct proc *p;
577 sigset_t nss, oss;
578 int error;
579
580 if (SCARG(uap, set)) {
581 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
582 if (error)
583 return (error);
584 }
585 p = l->l_proc;
586 error = sigprocmask1(p, SCARG(uap, how),
587 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
588 if (error)
589 return (error);
590 if (SCARG(uap, oset)) {
591 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
592 if (error)
593 return (error);
594 }
595 return (0);
596 }
597
/*
 * Store into *ss the set of signals pending for delivery to `p':
 * everything on ps_siglist with the currently masked signals
 * removed.
 */
598 void
599 sigpending1(struct proc *p, sigset_t *ss)
600 {
601
602 *ss = p->p_sigctx.ps_siglist;
603 sigminusset(&p->p_sigctx.ps_sigmask, ss);
604 }
605
606 /* ARGSUSED */
607 int
608 sys___sigpending14(struct lwp *l, void *v, register_t *retval)
609 {
610 struct sys___sigpending14_args /* {
611 syscallarg(sigset_t *) set;
612 } */ *uap = v;
613 struct proc *p;
614 sigset_t ss;
615
616 p = l->l_proc;
617 sigpending1(p, &ss);
618 return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
619 }
620
/*
 * Common code for sigsuspend/sigpause: optionally install a
 * temporary signal mask (arranging for the old one to be restored
 * after any handler runs), then sleep until a signal wakes us.
 * Always returns EINTR.
 */
621 int
622 sigsuspend1(struct proc *p, const sigset_t *ss)
623 {
624 struct sigacts *ps;
625
626 ps = p->p_sigacts;
627 if (ss) {
628 /*
629 * When returning from sigpause, we want
630 * the old mask to be restored after the
631 * signal handler has finished. Thus, we
632 * save it here and mark the sigctx structure
633 * to indicate this.
634 */
635 p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
636 p->p_sigctx.ps_flags |= SAS_OLDMASK;
637 (void) splsched(); /* XXXSMP */
638 p->p_sigctx.ps_sigmask = *ss;
/* The new mask may expose previously blocked pending signals. */
639 CHECKSIGS(p);
640 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
641 (void) spl0(); /* XXXSMP */
642 }
643
/* Sleep until interrupted: PCATCH makes tsleep() return non-zero
 * when a signal arrives; a zero return (plain wakeup) just loops. */
644 while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
645 /* void */;
646
647 /* always return EINTR rather than ERESTART... */
648 return (EINTR);
649 }
650
651 /*
652 * Suspend process until signal, providing mask to be set
653 * in the meantime. Note nonstandard calling convention:
654 * libc stub passes mask, not pointer, to save a copyin.
655 */
656 /* ARGSUSED */
657 int
658 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
659 {
660 struct sys___sigsuspend14_args /* {
661 syscallarg(const sigset_t *) set;
662 } */ *uap = v;
663 struct proc *p;
664 sigset_t ss;
665 int error;
666
667 if (SCARG(uap, set)) {
668 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
669 if (error)
670 return (error);
671 }
672
673 p = l->l_proc;
674 return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
675 }
676
677 int
678 sigaltstack1(struct proc *p, const struct sigaltstack *nss,
679 struct sigaltstack *oss)
680 {
681
682 if (oss)
683 *oss = p->p_sigctx.ps_sigstk;
684
685 if (nss) {
686 if (nss->ss_flags & ~SS_ALLBITS)
687 return (EINVAL);
688
689 if (nss->ss_flags & SS_DISABLE) {
690 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
691 return (EINVAL);
692 } else {
693 if (nss->ss_size < MINSIGSTKSZ)
694 return (ENOMEM);
695 }
696 p->p_sigctx.ps_sigstk = *nss;
697 }
698
699 return (0);
700 }
701
702 /* ARGSUSED */
703 int
704 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
705 {
706 struct sys___sigaltstack14_args /* {
707 syscallarg(const struct sigaltstack *) nss;
708 syscallarg(struct sigaltstack *) oss;
709 } */ *uap = v;
710 struct proc *p;
711 struct sigaltstack nss, oss;
712 int error;
713
714 if (SCARG(uap, nss)) {
715 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
716 if (error)
717 return (error);
718 }
719 p = l->l_proc;
720 error = sigaltstack1(p,
721 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
722 if (error)
723 return (error);
724 if (SCARG(uap, oss)) {
725 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
726 if (error)
727 return (error);
728 }
729 return (0);
730 }
731
/*
 * kill system call.  pid > 0 names a single process; pid == 0 the
 * caller's process group; pid == -1 broadcasts; pid < -1 names the
 * process group -pid.  signum 0 performs only the validity and
 * permission checks without posting a signal.
 */
732 /* ARGSUSED */
733 int
734 sys_kill(struct lwp *l, void *v, register_t *retval)
735 {
736 struct sys_kill_args /* {
737 syscallarg(int) pid;
738 syscallarg(int) signum;
739 } */ *uap = v;
740 struct proc *cp, *p;
741 struct pcred *pc;
742 ksiginfo_t ksi;
743
744 cp = l->l_proc;
745 pc = cp->p_cred;
/* The u_int cast also rejects negative signal numbers. */
746 if ((u_int)SCARG(uap, signum) >= NSIG)
747 return (EINVAL);
/* Build the user-originated siginfo shared by all delivery paths. */
748 memset(&ksi, 0, sizeof(ksi));
749 ksi.ksi_signo = SCARG(uap, signum);
750 ksi.ksi_code = SI_USER;
751 ksi.ksi_pid = cp->p_pid;
752 ksi.ksi_uid = cp->p_ucred->cr_uid;
753 if (SCARG(uap, pid) > 0) {
754 /* kill single process */
755 if ((p = pfind(SCARG(uap, pid))) == NULL)
756 return (ESRCH);
757 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
758 return (EPERM);
/* signum 0: permission probe only, nothing is posted. */
759 if (SCARG(uap, signum))
760 kpsignal2(p, &ksi, 1);
761 return (0);
762 }
763 switch (SCARG(uap, pid)) {
764 case -1: /* broadcast signal */
765 return (killpg1(cp, &ksi, 0, 1));
766 case 0: /* signal own process group */
767 return (killpg1(cp, &ksi, 0, 0));
768 default: /* negative explicit process group */
769 return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
770 }
771 /* NOTREACHED */
772 }
773
774 /*
775 * Common code for kill process group/broadcast kill.
776 * cp is calling process.
777 */
/*
 * all != 0: broadcast to every process except system processes,
 * init (pid 1) and the caller itself.  all == 0: deliver to the
 * process group `pgid' (0 meaning the caller's own group).  A
 * signal number of 0 only counts matches.  Returns ESRCH when no
 * eligible process was found.
 */
778 int
779 killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
780 {
781 struct proc *p;
782 struct pcred *pc;
783 struct pgrp *pgrp;
784 int nfound;
785 int signum = ksi->ksi_signo;
786
787 pc = cp->p_cred;
788 nfound = 0;
789 if (all) {
790 /*
791 * broadcast
792 */
793 proclist_lock_read();
794 LIST_FOREACH(p, &allproc, p_list) {
795 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
796 p == cp || !CANSIGNAL(cp, pc, p, signum))
797 continue;
798 nfound++;
/* NOTE(review): unlike the pgrp path below, no P_ZOMBIE check
 * here — presumably allproc never contains zombies; confirm. */
799 if (signum)
800 kpsignal2(p, ksi, 1);
801 }
802 proclist_unlock_read();
803 } else {
804 if (pgid == 0)
805 /*
806 * zero pgid means send to my process group.
807 */
808 pgrp = cp->p_pgrp;
809 else {
810 pgrp = pgfind(pgid);
811 if (pgrp == NULL)
812 return (ESRCH);
813 }
814 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
815 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
816 !CANSIGNAL(cp, pc, p, signum))
817 continue;
818 nfound++;
819 if (signum && P_ZOMBIE(p) == 0)
820 kpsignal2(p, ksi, 1);
821 }
822 }
823 return (nfound ? 0 : ESRCH);
824 }
825
826 /*
827 * Send a signal to a process group.
828 */
829 void
830 gsignal(int pgid, int signum)
831 {
832 ksiginfo_t ksi;
833 memset(&ksi, 0, sizeof(ksi));
834 ksi.ksi_signo = signum;
835 kgsignal(pgid, &ksi, NULL);
836 }
837
838 void
839 kgsignal(int pgid, ksiginfo_t *ksi, void *data)
840 {
841 struct pgrp *pgrp;
842
843 if (pgid && (pgrp = pgfind(pgid)))
844 kpgsignal(pgrp, ksi, data, 0);
845 }
846
847 /*
848 * Send a signal to a process group. If checktty is 1,
849 * limit to members which have a controlling terminal.
850 */
851 void
852 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
853 {
854 ksiginfo_t ksi;
855 memset(&ksi, 0, sizeof(ksi));
856 ksi.ksi_signo = sig;
857 kpgsignal(pgrp, &ksi, NULL, checkctty);
858 }
859
860 void
861 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
862 {
863 struct proc *p;
864
865 if (pgrp)
866 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
867 if (checkctty == 0 || p->p_flag & P_CONTROLT)
868 kpsignal(p, ksi, data);
869 }
870
871 /*
872 * Send a signal caused by a trap to the current process.
873 * If it will be caught immediately, deliver it with correct code.
874 * Otherwise, post it normally.
875 */
/*
 * Compatibility shim for ports without siginfo support: the old
 * three-argument trapsignal(lwp, signum, code) entry point builds a
 * trap-style ksiginfo and forwards it.  The #define below renames
 * every later occurrence of `trapsignal' — both the call on the
 * last line of this function and the definition that follows — to
 * `_trapsignal', so the siginfo-style implementation is still
 * compiled, just under the private name.
 */
876 #ifndef __HAVE_SIGINFO
877 void _trapsignal(struct lwp *, const ksiginfo_t *);
878 void
879 trapsignal(struct lwp *l, int signum, u_long code)
880 {
881 #define trapsignal _trapsignal
882 ksiginfo_t ksi;
883
884 KSI_INIT_TRAP(&ksi);
885 ksi.ksi_signo = signum;
886 ksi.ksi_trap = (int)code;
/* Expands to _trapsignal(l, &ksi) because of the #define above. */
887 trapsignal(l, &ksi);
888 }
889 #endif
890
/*
 * Deliver a trap-generated signal to the current LWP.  If the
 * process catches it and it is neither traced nor masked, invoke
 * the handler directly via kpsendsig(); otherwise fall back to the
 * normal posting path (kpsignal2), recording the signal/trap code
 * for the debugger or a core dump.
 */
891 void
892 trapsignal(struct lwp *l, const ksiginfo_t *ksi)
893 {
894 struct proc *p;
895 struct sigacts *ps;
896 int signum = ksi->ksi_signo;
897
898 KASSERT(KSI_TRAP_P(ksi));
899
900 p = l->l_proc;
901 ps = p->p_sigacts;
/* Fast path: caught, not traced, not blocked -> run handler now. */
902 if ((p->p_flag & P_TRACED) == 0 &&
903 sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
904 !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
905 p->p_stats->p_ru.ru_nsignals++;
906 #ifdef KTRACE
907 if (KTRPOINT(p, KTR_PSIG))
908 ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
909 &p->p_sigctx.ps_sigmask, ksi);
910 #endif
911 kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
/* Block the handler's sa_mask for the duration of the handler. */
912 (void) splsched(); /* XXXSMP */
913 sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
914 &p->p_sigctx.ps_sigmask);
/* SA_RESETHAND: one-shot handler reverts to SIG_DFL after use. */
915 if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
916 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
917 if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
918 sigaddset(&p->p_sigctx.ps_sigignore, signum);
919 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
920 }
921 (void) spl0(); /* XXXSMP */
922 } else {
923 p->p_sigctx.ps_lwp = l->l_lid;
924 /* XXX for core dump/debugger */
925 p->p_sigctx.ps_signo = ksi->ksi_signo;
926 p->p_sigctx.ps_code = ksi->ksi_trap;
927 kpsignal2(p, ksi, 1);
928 }
929 }
930
931 /*
932 * Fill in signal information and signal the parent for a child status change.
933 */
934 static void
935 child_psignal(struct proc *p, int dolock)
936 {
937 ksiginfo_t ksi;
938
939 (void)memset(&ksi, 0, sizeof(ksi));
940 ksi.ksi_signo = SIGCHLD;
941 ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
942 ksi.ksi_pid = p->p_pid;
943 ksi.ksi_uid = p->p_ucred->cr_uid;
944 ksi.ksi_status = p->p_xstat;
945 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
946 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
947 kpsignal2(p->p_pptr, &ksi, dolock);
948 }
949
950 /*
951 * Send the signal to the process. If the signal has an action, the action
952 * is usually performed by the target process rather than the caller; we add
953 * the signal to the set of pending signals for the process.
954 *
955 * Exceptions:
956 * o When a stop signal is sent to a sleeping process that takes the
957 * default action, the process is stopped without awakening it.
958 * o SIGCONT restarts stopped processes (or puts them back to sleep)
959 * regardless of the signal action (eg, blocked or ignored).
960 *
961 * Other ignored signals are discarded immediately.
962 *
963 * XXXSMP: Invoked as psignal() or sched_psignal().
964 */
965 void
966 psignal1(struct proc *p, int signum, int dolock)
967 {
968 ksiginfo_t ksi;
969
970 memset(&ksi, 0, sizeof(ksi));
971 ksi.ksi_signo = signum;
972 kpsignal2(p, &ksi, dolock);
973 }
974
975 void
976 kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
977 {
978
979 if ((p->p_flag & P_WEXIT) == 0 && data) {
980 size_t fd;
981 struct filedesc *fdp = p->p_fd;
982
983 ksi->ksi_fd = -1;
984 for (fd = 0; fd < fdp->fd_nfiles; fd++) {
985 struct file *fp = fdp->fd_ofiles[fd];
986 /* XXX: lock? */
987 if (fp && fp->f_data == data) {
988 ksi->ksi_fd = fd;
989 break;
990 }
991 }
992 }
993 kpsignal2(p, ksi, dolock);
994 }
995
996 static void
997 kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
998 {
999 struct lwp *l, *suspended = NULL;
1000 int s = 0, prop, allsusp;
1001 sig_t action;
1002 int signum = ksi->ksi_signo;
1003
1004 #ifdef DIAGNOSTIC
1005 if (signum <= 0 || signum >= NSIG)
1006 panic("psignal signal number %d", signum);
1007
1008 /* XXXSMP: works, but icky */
1009 if (dolock)
1010 SCHED_ASSERT_UNLOCKED();
1011 else
1012 SCHED_ASSERT_LOCKED();
1013 #endif
1014
1015
1016 /*
1017 * Notify any interested parties in the signal.
1018 */
1019 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1020
1021 prop = sigprop[signum];
1022
1023 /*
1024 * If proc is traced, always give parent a chance.
1025 */
1026 if (p->p_flag & P_TRACED)
1027 action = SIG_DFL;
1028 else {
1029 /*
1030 * If the signal is being ignored,
1031 * then we forget about it immediately.
1032 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
1033 * and if it is set to SIG_IGN,
1034 * action will be SIG_DFL here.)
1035 */
1036 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
1037 return;
1038 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1039 action = SIG_HOLD;
1040 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
1041 action = SIG_CATCH;
1042 else {
1043 action = SIG_DFL;
1044
1045 if (prop & SA_KILL && p->p_nice > NZERO)
1046 p->p_nice = NZERO;
1047
1048 /*
1049 * If sending a tty stop signal to a member of an
1050 * orphaned process group, discard the signal here if
1051 * the action is default; don't stop the process below
1052 * if sleeping, and don't clear any pending SIGCONT.
1053 */
1054 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
1055 return;
1056 }
1057 }
1058
1059 if (prop & SA_CONT)
1060 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
1061
1062 if (prop & SA_STOP)
1063 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
1064
1065 sigaddset(&p->p_sigctx.ps_siglist, signum);
1066
1067 /* CHECKSIGS() is "inlined" here. */
1068 p->p_sigctx.ps_sigcheck = 1;
1069
1070 /*
1071 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
1072 * please!), check if anything waits on it. If yes, clear the
1073 * pending signal from siglist set, save it to ps_sigwaited,
1074 * clear sigwait list, and wakeup any sigwaiters.
1075 * The signal won't be processed further here.
1076 */
1077 if ((prop & SA_CANTMASK) == 0
1078 && p->p_sigctx.ps_sigwaited < 0
1079 && sigismember(&p->p_sigctx.ps_sigwait, signum)
1080 && p->p_stat != SSTOP) {
1081 if (action == SIG_CATCH)
1082 ksiginfo_put(p, ksi);
1083 sigdelset(&p->p_sigctx.ps_siglist, signum);
1084 p->p_sigctx.ps_sigwaited = signum;
1085 sigemptyset(&p->p_sigctx.ps_sigwait);
1086 if (dolock)
1087 wakeup_one(&p->p_sigctx.ps_sigwait);
1088 else
1089 sched_wakeup(&p->p_sigctx.ps_sigwait);
1090 return;
1091 }
1092
1093 /*
1094 * Defer further processing for signals which are held,
1095 * except that stopped processes must be continued by SIGCONT.
1096 */
1097 if (action == SIG_HOLD &&
1098 ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1099 ksiginfo_put(p, ksi);
1100 return;
1101 }
1102 /* XXXSMP: works, but icky */
1103 if (dolock)
1104 SCHED_LOCK(s);
1105
1106 /* XXXUPSXXX LWPs might go to sleep without passing signal handling */
1107 if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)
1108 && !((p->p_flag & P_SA) && (p->p_sa->sa_idle != NULL))) {
1109 /*
1110 * At least one LWP is running or on a run queue.
1111 * The signal will be noticed when one of them returns
1112 * to userspace.
1113 */
1114 signotify(p);
1115 /*
1116 * The signal will be noticed very soon.
1117 */
1118 goto out;
1119 } else {
1120 /* Process is sleeping or stopped */
1121 if (p->p_flag & P_SA) {
1122 struct lwp *l2 = p->p_sa->sa_vp;
1123 l = NULL;
1124 allsusp = 1;
1125
1126 if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
1127 l = l2;
1128 else if (l2->l_stat == LSSUSPENDED)
1129 suspended = l2;
1130 else if ((l2->l_stat != LSZOMB) &&
1131 (l2->l_stat != LSDEAD))
1132 allsusp = 0;
1133 } else {
1134 /*
1135 * Find out if any of the sleeps are interruptable,
1136 * and if all the live LWPs remaining are suspended.
1137 */
1138 allsusp = 1;
1139 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1140 if (l->l_stat == LSSLEEP &&
1141 l->l_flag & L_SINTR)
1142 break;
1143 if (l->l_stat == LSSUSPENDED)
1144 suspended = l;
1145 else if ((l->l_stat != LSZOMB) &&
1146 (l->l_stat != LSDEAD))
1147 allsusp = 0;
1148 }
1149 }
1150 if (p->p_stat == SACTIVE) {
1151
1152
1153 if (l != NULL && (p->p_flag & P_TRACED))
1154 goto run;
1155
1156 /*
1157 * If SIGCONT is default (or ignored) and process is
1158 * asleep, we are finished; the process should not
1159 * be awakened.
1160 */
1161 if ((prop & SA_CONT) && action == SIG_DFL) {
1162 sigdelset(&p->p_sigctx.ps_siglist, signum);
1163 goto done;
1164 }
1165
1166 /*
1167 * When a sleeping process receives a stop
1168 * signal, process immediately if possible.
1169 */
1170 if ((prop & SA_STOP) && action == SIG_DFL) {
1171 /*
1172 * If a child holding parent blocked,
1173 * stopping could cause deadlock.
1174 */
1175 if (p->p_flag & P_PPWAIT) {
1176 goto out;
1177 }
1178 sigdelset(&p->p_sigctx.ps_siglist, signum);
1179 p->p_xstat = signum;
1180 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1181 /*
1182 * XXXSMP: recursive call; don't lock
1183 * the second time around.
1184 */
1185 child_psignal(p, 0);
1186 }
1187 proc_stop(p); /* XXXSMP: recurse? */
1188 goto done;
1189 }
1190
1191 if (l == NULL) {
1192 /*
1193 * Special case: SIGKILL of a process
1194 * which is entirely composed of
1195 * suspended LWPs should succeed. We
1196 * make this happen by unsuspending one of
1197 * them.
1198 */
1199 if (allsusp && (signum == SIGKILL))
1200 lwp_continue(suspended);
1201 goto done;
1202 }
1203 /*
1204 * All other (caught or default) signals
1205 * cause the process to run.
1206 */
1207 goto runfast;
1208 /*NOTREACHED*/
1209 } else if (p->p_stat == SSTOP) {
1210 /* Process is stopped */
1211 /*
1212 * If traced process is already stopped,
1213 * then no further action is necessary.
1214 */
1215 if (p->p_flag & P_TRACED)
1216 goto done;
1217
1218 /*
1219 * Kill signal always sets processes running,
1220 * if possible.
1221 */
1222 if (signum == SIGKILL) {
1223 l = proc_unstop(p);
1224 if (l)
1225 goto runfast;
1226 goto done;
1227 }
1228
1229 if (prop & SA_CONT) {
1230 /*
1231 * If SIGCONT is default (or ignored),
1232 * we continue the process but don't
1233 * leave the signal in ps_siglist, as
1234 * it has no further action. If
1235 * SIGCONT is held, we continue the
1236 * process and leave the signal in
1237 * ps_siglist. If the process catches
1238 * SIGCONT, let it handle the signal
1239 * itself. If it isn't waiting on an
1240 * event, then it goes back to run
1241 * state. Otherwise, process goes
1242 * back to sleep state.
1243 */
1244 if (action == SIG_DFL)
1245 sigdelset(&p->p_sigctx.ps_siglist,
1246 signum);
1247 l = proc_unstop(p);
1248 if (l && (action == SIG_CATCH))
1249 goto runfast;
1250 goto out;
1251 }
1252
1253 if (prop & SA_STOP) {
1254 /*
1255 * Already stopped, don't need to stop again.
1256 * (If we did the shell could get confused.)
1257 */
1258 sigdelset(&p->p_sigctx.ps_siglist, signum);
1259 goto done;
1260 }
1261
1262 /*
1263 * If a lwp is sleeping interruptibly, then
1264 * wake it up; it will run until the kernel
1265 * boundary, where it will stop in issignal(),
1266 * since p->p_stat is still SSTOP. When the
1267 * process is continued, it will be made
1268 * runnable and can look at the signal.
1269 */
1270 if (l)
1271 goto run;
1272 goto out;
1273 } else {
1274 /* Else what? */
1275 panic("psignal: Invalid process state %d.",
1276 p->p_stat);
1277 }
1278 }
1279 /*NOTREACHED*/
1280
1281 runfast:
1282 if (action == SIG_CATCH) {
1283 ksiginfo_put(p, ksi);
1284 action = SIG_HOLD;
1285 }
1286 /*
1287 * Raise priority to at least PUSER.
1288 */
1289 if (l->l_priority > PUSER)
1290 l->l_priority = PUSER;
1291 run:
1292 if (action == SIG_CATCH) {
1293 ksiginfo_put(p, ksi);
1294 action = SIG_HOLD;
1295 }
1296
1297 setrunnable(l); /* XXXSMP: recurse? */
1298 out:
1299 if (action == SIG_CATCH)
1300 ksiginfo_put(p, ksi);
1301 done:
1302 /* XXXSMP: works, but icky */
1303 if (dolock)
1304 SCHED_UNLOCK(s);
1305 }
1306
/*
 * Deliver the signal described by ksi to LWP l, arranging for the
 * signal mask *mask to be restored when the handler returns.
 *
 * For scheduler-activations (P_SA) processes this queues a SIGNAL
 * upcall carrying a pool-allocated siginfo instead of invoking the
 * emulation's sendsig hook.
 */
void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		/*
		 * Temporarily clear L_SA while setting up the upcall;
		 * restored (if it was set) before returning.
		 */
		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = ksi->ksi_info;
		/*
		 * A trap-generated signal reports this LWP as the "event"
		 * LWP (le); otherwise it is the "interrupted" LWP (li).
		 */
		le = li = NULL;
		if (KSI_TRAP_P(ksi))
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= f;
		return;
	}

#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, KSI_TRAPCODE(ksi));
#endif
}
1341
1342 static __inline int firstsig(const sigset_t *);
1343
1344 static __inline int
1345 firstsig(const sigset_t *ss)
1346 {
1347 int sig;
1348
1349 sig = ffs(ss->__bits[0]);
1350 if (sig != 0)
1351 return (sig);
1352 #if NSIG > 33
1353 sig = ffs(ss->__bits[1]);
1354 if (sig != 0)
1355 return (sig + 32);
1356 #endif
1357 #if NSIG > 65
1358 sig = ffs(ss->__bits[2]);
1359 if (sig != 0)
1360 return (sig + 64);
1361 #endif
1362 #if NSIG > 97
1363 sig = ffs(ss->__bits[3]);
1364 if (sig != 0)
1365 return (sig + 96);
1366 #endif
1367 return (0);
1368 }
1369
1370 /*
1371 * If the current process has received a signal (should be caught or cause
1372 * termination, should interrupt current syscall), return the signal number.
1373 * Stop signals with default action are processed immediately, then cleared;
1374 * they aren't returned. This is checked after each entry to the system for
1375 * a syscall or trap (though this can usually be done without calling issignal
1376 * by checking the pending signal masks in the CURSIG macro.) The normal call
1377 * sequence is
1378 *
1379 * while (signum = CURSIG(curlwp))
1380 * postsig(signum);
1381 */
1382 int
1383 issignal(struct lwp *l)
1384 {
1385 struct proc *p = l->l_proc;
1386 int s = 0, signum, prop;
1387 int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
1388 sigset_t ss;
1389
1390 if (l->l_flag & L_SA) {
1391 struct sadata *sa = p->p_sa;
1392
1393 /* Bail out if we do not own the virtual processor */
1394 if (sa->sa_vp != l)
1395 return 0;
1396 }
1397
1398 if (p->p_stat == SSTOP) {
1399 /*
1400 * The process is stopped/stopping. Stop ourselves now that
1401 * we're on the kernel/userspace boundary.
1402 */
1403 if (dolock)
1404 SCHED_LOCK(s);
1405 l->l_stat = LSSTOP;
1406 p->p_nrlwps--;
1407 if (p->p_flag & P_TRACED)
1408 goto sigtraceswitch;
1409 else
1410 goto sigswitch;
1411 }
1412 for (;;) {
1413 sigpending1(p, &ss);
1414 if (p->p_flag & P_PPWAIT)
1415 sigminusset(&stopsigmask, &ss);
1416 signum = firstsig(&ss);
1417 if (signum == 0) { /* no signal to send */
1418 p->p_sigctx.ps_sigcheck = 0;
1419 if (locked && dolock)
1420 SCHED_LOCK(s);
1421 return (0);
1422 }
1423 /* take the signal! */
1424 sigdelset(&p->p_sigctx.ps_siglist, signum);
1425
1426 /*
1427 * We should see pending but ignored signals
1428 * only if P_TRACED was on when they were posted.
1429 */
1430 if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
1431 (p->p_flag & P_TRACED) == 0)
1432 continue;
1433
1434 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
1435 /*
1436 * If traced, always stop, and stay
1437 * stopped until released by the debugger.
1438 */
1439 p->p_xstat = signum;
1440 if ((p->p_flag & P_FSTRACE) == 0)
1441 child_psignal(p, dolock);
1442 if (dolock)
1443 SCHED_LOCK(s);
1444 proc_stop(p);
1445 sigtraceswitch:
1446 mi_switch(l, NULL);
1447 SCHED_ASSERT_UNLOCKED();
1448 if (dolock)
1449 splx(s);
1450 else
1451 dolock = 1;
1452
1453 /*
1454 * If we are no longer being traced, or the parent
1455 * didn't give us a signal, look for more signals.
1456 */
1457 if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
1458 continue;
1459
1460 /*
1461 * If the new signal is being masked, look for other
1462 * signals.
1463 */
1464 signum = p->p_xstat;
1465 p->p_xstat = 0;
1466 /*
1467 * `p->p_sigctx.ps_siglist |= mask' is done
1468 * in setrunnable().
1469 */
1470 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1471 continue;
1472 /* take the signal! */
1473 sigdelset(&p->p_sigctx.ps_siglist, signum);
1474 }
1475
1476 prop = sigprop[signum];
1477
1478 /*
1479 * Decide whether the signal should be returned.
1480 * Return the signal's number, or fall through
1481 * to clear it from the pending mask.
1482 */
1483 switch ((long)SIGACTION(p, signum).sa_handler) {
1484
1485 case (long)SIG_DFL:
1486 /*
1487 * Don't take default actions on system processes.
1488 */
1489 if (p->p_pid <= 1) {
1490 #ifdef DIAGNOSTIC
1491 /*
1492 * Are you sure you want to ignore SIGSEGV
1493 * in init? XXX
1494 */
1495 printf("Process (pid %d) got signal %d\n",
1496 p->p_pid, signum);
1497 #endif
1498 break; /* == ignore */
1499 }
1500 /*
1501 * If there is a pending stop signal to process
1502 * with default action, stop here,
1503 * then clear the signal. However,
1504 * if process is member of an orphaned
1505 * process group, ignore tty stop signals.
1506 */
1507 if (prop & SA_STOP) {
1508 if (p->p_flag & P_TRACED ||
1509 (p->p_pgrp->pg_jobc == 0 &&
1510 prop & SA_TTYSTOP))
1511 break; /* == ignore */
1512 p->p_xstat = signum;
1513 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
1514 child_psignal(p, dolock);
1515 if (dolock)
1516 SCHED_LOCK(s);
1517 proc_stop(p);
1518 sigswitch:
1519 mi_switch(l, NULL);
1520 SCHED_ASSERT_UNLOCKED();
1521 if (dolock)
1522 splx(s);
1523 else
1524 dolock = 1;
1525 break;
1526 } else if (prop & SA_IGNORE) {
1527 /*
1528 * Except for SIGCONT, shouldn't get here.
1529 * Default action is to ignore; drop it.
1530 */
1531 break; /* == ignore */
1532 } else
1533 goto keep;
1534 /*NOTREACHED*/
1535
1536 case (long)SIG_IGN:
1537 /*
1538 * Masking above should prevent us ever trying
1539 * to take action on an ignored signal other
1540 * than SIGCONT, unless process is traced.
1541 */
1542 #ifdef DEBUG_ISSIGNAL
1543 if ((prop & SA_CONT) == 0 &&
1544 (p->p_flag & P_TRACED) == 0)
1545 printf("issignal\n");
1546 #endif
1547 break; /* == ignore */
1548
1549 default:
1550 /*
1551 * This signal has an action, let
1552 * postsig() process it.
1553 */
1554 goto keep;
1555 }
1556 }
1557 /* NOTREACHED */
1558
1559 keep:
1560 /* leave the signal for later */
1561 sigaddset(&p->p_sigctx.ps_siglist, signum);
1562 CHECKSIGS(p);
1563 if (locked && dolock)
1564 SCHED_LOCK(s);
1565 return (signum);
1566 }
1567
1568 /*
1569 * Put the argument process into the stopped state and notify the parent
1570 * via wakeup. Signals are handled elsewhere. The process must not be
1571 * on the run queue.
1572 */
1573 static void
1574 proc_stop(struct proc *p)
1575 {
1576 struct lwp *l;
1577
1578 SCHED_ASSERT_LOCKED();
1579
1580 /* XXX lock process LWP state */
1581 p->p_stat = SSTOP;
1582 p->p_flag &= ~P_WAITED;
1583
1584 /*
1585 * Put as many LWP's as possible in stopped state.
1586 * Sleeping ones will notice the stopped state as they try to
1587 * return to userspace.
1588 */
1589
1590 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1591 if ((l->l_stat == LSONPROC) && (l == curlwp)) {
1592 /* XXX SMP this assumes that a LWP that is LSONPROC
1593 * is curlwp and hence is about to be mi_switched
1594 * away; the only callers of proc_stop() are:
1595 * - psignal
1596 * - issignal()
1597 * For the former, proc_stop() is only called when
1598 * no processes are running, so we don't worry.
1599 * For the latter, proc_stop() is called right
1600 * before mi_switch().
1601 */
1602 l->l_stat = LSSTOP;
1603 p->p_nrlwps--;
1604 }
1605 else if ( (l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
1606 setrunnable(l);
1607 }
1608
1609 /* !!!UPS!!! FIX ME */
1610 #if 0
1611 else if (l->l_stat == LSRUN) {
1612 /* Remove LWP from the run queue */
1613 remrunqueue(l);
1614 l->l_stat = LSSTOP;
1615 p->p_nrlwps--;
1616 } else if ((l->l_stat == LSSLEEP) ||
1617 (l->l_stat == LSSUSPENDED) ||
1618 (l->l_stat == LSZOMB) ||
1619 (l->l_stat == LSDEAD)) {
1620 /*
1621 * Don't do anything; let sleeping LWPs
1622 * discover the stopped state of the process
1623 * on their way out of the kernel; otherwise,
1624 * things like NFS threads that sleep with
1625 * locks will block the rest of the system
1626 * from getting any work done.
1627 *
1628 * Suspended/dead/zombie LWPs aren't going
1629 * anywhere, so we don't need to touch them.
1630 */
1631 }
1632 #ifdef DIAGNOSTIC
1633 else {
1634 panic("proc_stop: process %d lwp %d "
1635 "in unstoppable state %d.\n",
1636 p->p_pid, l->l_lid, l->l_stat);
1637 }
1638 #endif
1639 #endif
1640 }
1641 /* XXX unlock process LWP state */
1642
1643 sched_wakeup((caddr_t)p->p_pptr);
1644 }
1645
1646 /*
1647 * Given a process in state SSTOP, set the state back to SACTIVE and
1648 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
1649 *
1650 * If no LWPs ended up runnable (and therefore able to take a signal),
1651 * return a LWP that is sleeping interruptably. The caller can wake
1652 * that LWP up to take a signal.
1653 */
1654 struct lwp *
1655 proc_unstop(struct proc *p)
1656 {
1657 struct lwp *l, *lr = NULL;
1658 int cantake = 0;
1659
1660 SCHED_ASSERT_LOCKED();
1661
1662 /*
1663 * Our caller wants to be informed if there are only sleeping
1664 * and interruptable LWPs left after we have run so that it
1665 * can invoke setrunnable() if required - return one of the
1666 * interruptable LWPs if this is the case.
1667 */
1668
1669 p->p_stat = SACTIVE;
1670 if (p->p_flag & P_SA) {
1671 /*
1672 * Preferentially select the idle LWP as the interruptable
1673 * LWP to return if it exists.
1674 */
1675 lr = p->p_sa->sa_idle;
1676 if (lr != NULL)
1677 cantake = 1;
1678 }
1679 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1680 if (l->l_stat == LSRUN) {
1681 lr = NULL;
1682 cantake = 1;
1683 }
1684 if (l->l_stat != LSSTOP)
1685 continue;
1686
1687 if (l->l_wchan != NULL) {
1688 l->l_stat = LSSLEEP;
1689 if ((cantake == 0) && (l->l_flag & L_SINTR)) {
1690 lr = l;
1691 cantake = 1;
1692 }
1693 } else {
1694 setrunnable(l);
1695 lr = NULL;
1696 cantake = 1;
1697 }
1698 }
1699
1700 return lr;
1701 }
1702
1703 /*
1704 * Take the action for the specified signal
1705 * from the current set of pending signals.
1706 */
1707 void
1708 postsig(int signum)
1709 {
1710 struct lwp *l;
1711 struct proc *p;
1712 struct sigacts *ps;
1713 sig_t action;
1714 sigset_t *returnmask;
1715
1716 l = curlwp;
1717 p = l->l_proc;
1718 ps = p->p_sigacts;
1719 #ifdef DIAGNOSTIC
1720 if (signum == 0)
1721 panic("postsig");
1722 #endif
1723
1724 KERNEL_PROC_LOCK(l);
1725
1726 sigdelset(&p->p_sigctx.ps_siglist, signum);
1727 action = SIGACTION_PS(ps, signum).sa_handler;
1728 if (action == SIG_DFL) {
1729 #ifdef KTRACE
1730 if (KTRPOINT(p, KTR_PSIG))
1731 ktrpsig(p, signum, action,
1732 p->p_sigctx.ps_flags & SAS_OLDMASK ?
1733 &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
1734 NULL);
1735 #endif
1736 /*
1737 * Default action, where the default is to kill
1738 * the process. (Other cases were ignored above.)
1739 */
1740 sigexit(l, signum);
1741 /* NOTREACHED */
1742 } else {
1743 ksiginfo_t *ksi;
1744 /*
1745 * If we get here, the signal must be caught.
1746 */
1747 #ifdef DIAGNOSTIC
1748 if (action == SIG_IGN ||
1749 sigismember(&p->p_sigctx.ps_sigmask, signum))
1750 panic("postsig action");
1751 #endif
1752 /*
1753 * Set the new mask value and also defer further
1754 * occurrences of this signal.
1755 *
1756 * Special case: user has done a sigpause. Here the
1757 * current mask is not of interest, but rather the
1758 * mask from before the sigpause is what we want
1759 * restored after the signal processing is completed.
1760 */
1761 if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
1762 returnmask = &p->p_sigctx.ps_oldmask;
1763 p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
1764 } else
1765 returnmask = &p->p_sigctx.ps_sigmask;
1766 p->p_stats->p_ru.ru_nsignals++;
1767 ksi = ksiginfo_get(p, signum);
1768 #ifdef KTRACE
1769 if (KTRPOINT(p, KTR_PSIG))
1770 ktrpsig(p, signum, action,
1771 p->p_sigctx.ps_flags & SAS_OLDMASK ?
1772 &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
1773 ksi);
1774 #endif
1775 if (ksi == NULL) {
1776 ksiginfo_t ksi1;
1777 /*
1778 * we did not save any siginfo for this, either
1779 * because the signal was not caught, or because the
1780 * user did not request SA_SIGINFO
1781 */
1782 (void)memset(&ksi1, 0, sizeof(ksi1));
1783 ksi1.ksi_signo = signum;
1784 kpsendsig(l, &ksi1, returnmask);
1785 } else {
1786 kpsendsig(l, ksi, returnmask);
1787 pool_put(&ksiginfo_pool, ksi);
1788 }
1789 p->p_sigctx.ps_lwp = 0;
1790 p->p_sigctx.ps_code = 0;
1791 p->p_sigctx.ps_signo = 0;
1792 (void) splsched(); /* XXXSMP */
1793 sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
1794 &p->p_sigctx.ps_sigmask);
1795 if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
1796 sigdelset(&p->p_sigctx.ps_sigcatch, signum);
1797 if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
1798 sigaddset(&p->p_sigctx.ps_sigignore, signum);
1799 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
1800 }
1801 (void) spl0(); /* XXXSMP */
1802 }
1803
1804 KERNEL_PROC_UNLOCK(l);
1805 }
1806
1807 /*
1808 * Kill the current process for stated reason.
1809 */
1810 void
1811 killproc(struct proc *p, const char *why)
1812 {
1813 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
1814 uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
1815 psignal(p, SIGKILL);
1816 }
1817
1818 /*
1819 * Force the current process to exit with the specified signal, dumping core
1820 * if appropriate. We bypass the normal tests for masked and caught signals,
1821 * allowing unrecoverable failures to terminate the process without changing
1822 * signal state. Mark the accounting record with the signal termination.
1823 * If dumping core, save the signal number for the debugger. Calls exit and
1824 * does not return.
1825 */
1826
/* Log fatal-signal exits by default only in DEBUG kernels. */
#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

/* Log formats used by sigexit() with and without a successful coredump. */
static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
1837
/*
 * Wrapper function for use in p_userret.  Installed by sigexit() so
 * that the process's other LWPs park themselves (LSSUSPENDED) instead
 * of exiting, keeping their trapframes available for coredump(), and
 * then exit once resumed.
 */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/* Resumed after the dump: this LWP is done. */
	lwp_exit(l);
}
1862
/*
 * Force the current process to exit with the specified signal, dumping
 * core if the signal's properties call for it (SA_CORE).  Does not
 * return: finishes via exit1().  See the block comment above for the
 * full contract.
 */
void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		/*
		 * Another LWP already started the exit; park this one via
		 * the userret hook (lwp_coredump_hook), which does not
		 * return here.
		 */
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		/* WCOREFLAG in the exit status marks a successful dump. */
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
				(int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}
1922
1923 /*
1924 * Dump core, into a file named "progname.core" or "core" (depending on the
1925 * value of shortcorename), unless the process was setuid/setgid.
1926 */
1927 int
1928 coredump(struct lwp *l)
1929 {
1930 struct vnode *vp;
1931 struct proc *p;
1932 struct vmspace *vm;
1933 struct ucred *cred;
1934 struct nameidata nd;
1935 struct vattr vattr;
1936 struct mount *mp;
1937 int error, error1;
1938 char name[MAXPATHLEN];
1939
1940 p = l->l_proc;
1941 vm = p->p_vmspace;
1942 cred = p->p_cred->pc_ucred;
1943
1944 /*
1945 * Make sure the process has not set-id, to prevent data leaks.
1946 */
1947 if (p->p_flag & P_SUGID)
1948 return (EPERM);
1949
1950 /*
1951 * Refuse to core if the data + stack + user size is larger than
1952 * the core dump limit. XXX THIS IS WRONG, because of mapped
1953 * data.
1954 */
1955 if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
1956 p->p_rlimit[RLIMIT_CORE].rlim_cur)
1957 return (EFBIG); /* better error code? */
1958
1959 restart:
1960 /*
1961 * The core dump will go in the current working directory. Make
1962 * sure that the directory is still there and that the mount flags
1963 * allow us to write core dumps there.
1964 */
1965 vp = p->p_cwdi->cwdi_cdir;
1966 if (vp->v_mount == NULL ||
1967 (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
1968 return (EPERM);
1969
1970 error = build_corename(p, name);
1971 if (error)
1972 return error;
1973
1974 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
1975 error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
1976 if (error)
1977 return (error);
1978 vp = nd.ni_vp;
1979
1980 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1981 VOP_UNLOCK(vp, 0);
1982 if ((error = vn_close(vp, FWRITE, cred, p)) != 0)
1983 return (error);
1984 if ((error = vn_start_write(NULL, &mp,
1985 V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
1986 return (error);
1987 goto restart;
1988 }
1989
1990 /* Don't dump to non-regular files or files with links. */
1991 if (vp->v_type != VREG ||
1992 VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
1993 error = EINVAL;
1994 goto out;
1995 }
1996 VATTR_NULL(&vattr);
1997 vattr.va_size = 0;
1998 VOP_LEASE(vp, p, cred, LEASE_WRITE);
1999 VOP_SETATTR(vp, &vattr, cred, p);
2000 p->p_acflag |= ACORE;
2001
2002 /* Now dump the actual core file. */
2003 error = (*p->p_execsw->es_coredump)(l, vp, cred);
2004 out:
2005 VOP_UNLOCK(vp, 0);
2006 vn_finished_write(mp, 0);
2007 error1 = vn_close(vp, FWRITE, cred, p);
2008 if (error == 0)
2009 error = error1;
2010 return (error);
2011 }
2012
2013 /*
2014 * Nonexistent system call-- signal process (may want to handle it).
2015 * Flag error in case process won't see signal immediately (blocked or ignored).
2016 */
2017 /* ARGSUSED */
2018 int
2019 sys_nosys(struct lwp *l, void *v, register_t *retval)
2020 {
2021 struct proc *p;
2022
2023 p = l->l_proc;
2024 psignal(p, SIGSYS);
2025 return (ENOSYS);
2026 }
2027
/*
 * Expand the per-process corename template (pl_corename) into dst.
 * Recognized escapes: %n = program name, %p = pid, %u = login name,
 * %t = process start time (seconds); any other %x is copied verbatim.
 * Returns 0 or ENAMETOOLONG if the result won't fit in MAXPATHLEN.
 */
static int
build_corename(struct proc *p, char dst[MAXPATHLEN])
{
	const char *s;
	char *d, *end;
	int i;

	for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
	    *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				/* Unknown escape: copy the '%' literally. */
				goto copy;
			}
			/*
			 * NOTE(review): on truncation d may advance past
			 * end; the d >= end check below catches it.
			 */
			d += i;
			s++;
		} else {
copy:			*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}
2069
/*
 * Fill in *ucp with LWP l's current user context: link pointer, signal
 * mask, execution stack and machine context.  Sets the corresponding
 * _UC_* validity flags in uc_flags.
 */
void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc	*p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}
2100
2101 /* ARGSUSED */
2102 int
2103 sys_getcontext(struct lwp *l, void *v, register_t *retval)
2104 {
2105 struct sys_getcontext_args /* {
2106 syscallarg(struct __ucontext *) ucp;
2107 } */ *uap = v;
2108 ucontext_t uc;
2109
2110 getucontext(l, &uc);
2111
2112 return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
2113 }
2114
2115 int
2116 setucontext(struct lwp *l, const ucontext_t *ucp)
2117 {
2118 struct proc *p;
2119 int error;
2120
2121 p = l->l_proc;
2122 if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
2123 return (error);
2124 l->l_ctxlink = ucp->uc_link;
2125 /*
2126 * We might want to take care of the stack portion here but currently
2127 * don't; see the comment in getucontext().
2128 */
2129 if ((ucp->uc_flags & _UC_SIGMASK) != 0)
2130 sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);
2131
2132 return 0;
2133 }
2134
2135 /* ARGSUSED */
2136 int
2137 sys_setcontext(struct lwp *l, void *v, register_t *retval)
2138 {
2139 struct sys_setcontext_args /* {
2140 syscallarg(const ucontext_t *) ucp;
2141 } */ *uap = v;
2142 ucontext_t uc;
2143 int error;
2144
2145 if (SCARG(uap, ucp) == NULL) /* i.e. end of uc_link chain */
2146 exit1(l, W_EXITCODE(0, 0));
2147 else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
2148 (error = setucontext(l, &uc)) != 0)
2149 return (error);
2150
2151 return (EJUSTRETURN);
2152 }
2153
2154 /*
2155 * sigtimedwait(2) system call, used also for implementation
2156 * of sigwaitinfo() and sigwait().
2157 *
2158 * This only handles single LWP in signal wait. libpthread provides
2159 * it's own sigtimedwait() wrapper to DTRT WRT individual threads.
2160 *
2161 * XXX no support for queued signals, si_code is always SI_USER.
2162 */
2163 int
2164 sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
2165 {
2166 struct sys___sigtimedwait_args /* {
2167 syscallarg(const sigset_t *) set;
2168 syscallarg(siginfo_t *) info;
2169 syscallarg(struct timespec *) timeout;
2170 } */ *uap = v;
2171 sigset_t waitset, twaitset;
2172 struct proc *p = l->l_proc;
2173 int error, signum, s;
2174 int timo = 0;
2175 struct timeval tvstart;
2176 struct timespec ts;
2177
2178 if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
2179 return (error);
2180
2181 /*
2182 * Silently ignore SA_CANTMASK signals. psignal1() would
2183 * ignore SA_CANTMASK signals in waitset, we do this
2184 * only for the below siglist check.
2185 */
2186 sigminusset(&sigcantmask, &waitset);
2187
2188 /*
2189 * First scan siglist and check if there is signal from
2190 * our waitset already pending.
2191 */
2192 twaitset = waitset;
2193 __sigandset(&p->p_sigctx.ps_siglist, &twaitset);
2194 if ((signum = firstsig(&twaitset))) {
2195 /* found pending signal */
2196 sigdelset(&p->p_sigctx.ps_siglist, signum);
2197 goto sig;
2198 }
2199
2200 /*
2201 * Calculate timeout, if it was specified.
2202 */
2203 if (SCARG(uap, timeout)) {
2204 uint64_t ms;
2205
2206 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
2207 return (error);
2208
2209 ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
2210 timo = mstohz(ms);
2211 if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
2212 timo = 1;
2213 if (timo <= 0)
2214 return (EAGAIN);
2215
2216 /*
2217 * Remember current mono_time, it would be used in
2218 * ECANCELED/ERESTART case.
2219 */
2220 s = splclock();
2221 tvstart = mono_time;
2222 splx(s);
2223 }
2224
2225 /*
2226 * Setup ps_sigwait list.
2227 */
2228 p->p_sigctx.ps_sigwaited = -1;
2229 p->p_sigctx.ps_sigwait = waitset;
2230
2231 /*
2232 * Wait for signal to arrive. We can either be woken up or
2233 * time out.
2234 */
2235 error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);
2236
2237 /*
2238 * Check if a signal from our wait set has arrived, or if it
2239 * was mere wakeup.
2240 */
2241 if (!error) {
2242 if ((signum = p->p_sigctx.ps_sigwaited) <= 0) {
2243 /* wakeup via _lwp_wakeup() */
2244 error = ECANCELED;
2245 }
2246 }
2247
2248 /*
2249 * On error, clear sigwait indication. psignal1() sets it
2250 * in !error case.
2251 */
2252 if (error) {
2253 p->p_sigctx.ps_sigwaited = 0;
2254
2255 /*
2256 * If the sleep was interrupted (either by signal or wakeup),
2257 * update the timeout and copyout new value back.
2258 * It would be used when the syscall would be restarted
2259 * or called again.
2260 */
2261 if (timo && (error == ERESTART || error == ECANCELED)) {
2262 struct timeval tvnow, tvtimo;
2263 int err;
2264
2265 s = splclock();
2266 tvnow = mono_time;
2267 splx(s);
2268
2269 TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);
2270
2271 /* compute how much time has passed since start */
2272 timersub(&tvnow, &tvstart, &tvnow);
2273 /* substract passed time from timeout */
2274 timersub(&tvtimo, &tvnow, &tvtimo);
2275
2276 if (tvtimo.tv_sec < 0)
2277 return (EAGAIN);
2278
2279 TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);
2280
2281 /* copy updated timeout to userland */
2282 if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts))))
2283 return (err);
2284 }
2285
2286 return (error);
2287 }
2288
2289 /*
2290 * If a signal from the wait set arrived, copy it to userland.
2291 * XXX no queued signals for now
2292 */
2293 if (signum > 0) {
2294 siginfo_t si;
2295
2296 sig:
2297 memset(&si, 0, sizeof(si));
2298 si.si_signo = signum;
2299 si.si_code = SI_USER;
2300
2301 error = copyout(&si, SCARG(uap, info), sizeof(si));
2302 if (error)
2303 return (error);
2304 }
2305
2306 return (0);
2307 }
2308
2309 /*
2310 * Returns true if signal is ignored or masked for passed process.
2311 */
2312 int
2313 sigismasked(struct proc *p, int sig)
2314 {
2315
2316 return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
2317 sigismember(&p->p_sigctx.ps_sigmask, sig));
2318 }
2319
2320 static int
2321 filt_sigattach(struct knote *kn)
2322 {
2323 struct proc *p = curproc;
2324
2325 kn->kn_ptr.p_proc = p;
2326 kn->kn_flags |= EV_CLEAR; /* automatically set */
2327
2328 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2329
2330 return (0);
2331 }
2332
2333 static void
2334 filt_sigdetach(struct knote *kn)
2335 {
2336 struct proc *p = kn->kn_ptr.p_proc;
2337
2338 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2339 }
2340
2341 /*
2342 * signal knotes are shared with proc knotes, so we apply a mask to
2343 * the hint in order to differentiate them from process hints. This
2344 * could be avoided by using a signal-specific knote list, but probably
2345 * isn't worth the trouble.
2346 */
2347 static int
2348 filt_signal(struct knote *kn, long hint)
2349 {
2350
2351 if (hint & NOTE_SIGNAL) {
2352 hint &= ~NOTE_SIGNAL;
2353
2354 if (kn->kn_id == hint)
2355 kn->kn_data++;
2356 }
2357 return (kn->kn_data != 0);
2358 }
2359
/*
 * Filter operations for EVFILT_SIGNAL.  First member 0: presumably
 * f_isfd (not fd-based) — confirm against <sys/event.h>.
 */
const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
2363