/*	$NetBSD: kern_sig.c,v 1.171 2003/10/25 16:50:37 jdolecek Exp $	*/
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.171 2003/10/25 16:50:37 jdolecek Exp $");
41
42 #include "opt_ktrace.h"
43 #include "opt_compat_sunos.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_compat_netbsd32.h"
46
47 #define SIGPROP /* include signal properties table */
48 #include <sys/param.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/systm.h>
55 #include <sys/timeb.h>
56 #include <sys/times.h>
57 #include <sys/buf.h>
58 #include <sys/acct.h>
59 #include <sys/file.h>
60 #include <sys/kernel.h>
61 #include <sys/wait.h>
62 #include <sys/ktrace.h>
63 #include <sys/syslog.h>
64 #include <sys/stat.h>
65 #include <sys/core.h>
66 #include <sys/filedesc.h>
67 #include <sys/malloc.h>
68 #include <sys/pool.h>
69 #include <sys/ucontext.h>
70 #include <sys/sa.h>
71 #include <sys/savar.h>
72 #include <sys/exec.h>
73
74 #include <sys/mount.h>
75 #include <sys/syscallargs.h>
76
77 #include <machine/cpu.h>
78
79 #include <sys/user.h> /* for coredump */
80
81 #include <uvm/uvm_extern.h>
82
83 static void child_psignal(struct proc *, int);
84 static void proc_stop(struct proc *);
85 static int build_corename(struct proc *, char [MAXPATHLEN]);
86 static void ksiginfo_exithook(struct proc *, void *);
87 static void ksiginfo_put(struct proc *, const ksiginfo_t *);
88 static ksiginfo_t *ksiginfo_get(struct proc *, int);
89 static void kpsignal2(struct proc *, const ksiginfo_t *, int);
90
91 sigset_t contsigmask, stopsigmask, sigcantmask;
92
93 struct pool sigacts_pool; /* memory pool for sigacts structures */
94 struct pool siginfo_pool; /* memory pool for siginfo structures */
95 struct pool ksiginfo_pool; /* memory pool for ksiginfo structures */
96
97 /*
98 * Can process p, with pcred pc, send the signal signum to process q?
99 */
100 #define CANSIGNAL(p, pc, q, signum) \
101 ((pc)->pc_ucred->cr_uid == 0 || \
102 (pc)->p_ruid == (q)->p_cred->p_ruid || \
103 (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
104 (pc)->p_ruid == (q)->p_ucred->cr_uid || \
105 (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
106 ((signum) == SIGCONT && (q)->p_session == (p)->p_session))
107
108 /*
109 * Remove and return the first ksiginfo element that matches our requested
110 * signal, or return NULL if one not found.
111 */
112 static ksiginfo_t *
113 ksiginfo_get(struct proc *p, int signo)
114 {
115 ksiginfo_t *ksi;
116 int s;
117
118 s = splsoftclock();
119 simple_lock(&p->p_sigctx.ps_silock);
120 CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
121 if (ksi->ksi_signo == signo) {
122 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
123 goto out;
124 }
125 }
126 ksi = NULL;
127 out:
128 simple_unlock(&p->p_sigctx.ps_silock);
129 splx(s);
130 return ksi;
131 }
132
133 /*
134 * Append a new ksiginfo element to the list of pending ksiginfo's, if
135 * we need to (SA_SIGINFO was requested). We replace non RT signals if
136 * they already existed in the queue and we add new entries for RT signals,
137 * or for non RT signals with non-existing entries.
138 */
139 static void
140 ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
141 {
142 ksiginfo_t *kp;
143 struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
144 int s;
145
146 if ((sa->sa_flags & SA_SIGINFO) == 0)
147 return;
148
149 s = splsoftclock();
150 simple_lock(&p->p_sigctx.ps_silock);
151 #ifdef notyet /* XXX: QUEUING */
152 if (ksi->ksi_signo < SIGRTMIN)
153 #endif
154 {
155 CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
156 if (kp->ksi_signo == ksi->ksi_signo) {
157 CIRCLEQ_ENTRY(ksiginfo) sv;
158 (void)memcpy(&sv, &kp->ksi_list, sizeof(sv));
159 *kp = *ksi;
160 (void)memcpy(&kp->ksi_list, &sv, sizeof(sv));
161 goto out;
162 }
163 }
164 }
165 kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
166 if (kp == NULL) {
167 #ifdef DIAGNOSTIC
168 printf("Out of memory allocating siginfo for pid %d\n",
169 p->p_pid);
170 #endif
171 goto out;
172 }
173 *kp = *ksi;
174 CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
175 out:
176 simple_unlock(&p->p_sigctx.ps_silock);
177 splx(s);
178 }
179
180 /*
181 * free all pending ksiginfo on exit
182 */
183 static void
184 ksiginfo_exithook(struct proc *p, void *v)
185 {
186 int s;
187
188 s = splsoftclock();
189 simple_lock(&p->p_sigctx.ps_silock);
190 while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
191 ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
192 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
193 pool_put(&ksiginfo_pool, ksi);
194 }
195 simple_unlock(&p->p_sigctx.ps_silock);
196 splx(s);
197 }
198
199 /*
200 * Initialize signal-related data structures.
201 */
202 void
203 signal_init(void)
204 {
205 pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
206 &pool_allocator_nointr);
207 pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
208 &pool_allocator_nointr);
209 pool_init(&ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
210 NULL);
211 exithook_establish(ksiginfo_exithook, NULL);
212 exechook_establish(ksiginfo_exithook, NULL);
213 }
214
215 /*
216 * Create an initial sigctx structure, using the same signal state
217 * as p. If 'share' is set, share the sigctx_proc part, otherwise just
218 * copy it from parent.
219 */
220 void
221 sigactsinit(struct proc *np, struct proc *pp, int share)
222 {
223 struct sigacts *ps;
224
225 if (share) {
226 np->p_sigacts = pp->p_sigacts;
227 pp->p_sigacts->sa_refcnt++;
228 } else {
229 ps = pool_get(&sigacts_pool, PR_WAITOK);
230 if (pp)
231 memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
232 else
233 memset(ps, '\0', sizeof(struct sigacts));
234 ps->sa_refcnt = 1;
235 np->p_sigacts = ps;
236 }
237 }
238
239 /*
240 * Make this process not share its sigctx, maintaining all
241 * signal state.
242 */
243 void
244 sigactsunshare(struct proc *p)
245 {
246 struct sigacts *oldps;
247
248 if (p->p_sigacts->sa_refcnt == 1)
249 return;
250
251 oldps = p->p_sigacts;
252 sigactsinit(p, NULL, 0);
253
254 if (--oldps->sa_refcnt == 0)
255 pool_put(&sigacts_pool, oldps);
256 }
257
258 /*
259 * Release a sigctx structure.
260 */
261 void
262 sigactsfree(struct proc *p)
263 {
264 struct sigacts *ps;
265
266 ps = p->p_sigacts;
267 if (--ps->sa_refcnt > 0)
268 return;
269
270 pool_put(&sigacts_pool, ps);
271 }
272
/*
 * Common code for sigaction(2) variants: install a new disposition for
 * 'signum' (if nsa != NULL) and/or report the old one (if osa != NULL).
 * 'tramp'/'vers' describe the userland signal trampoline ABI.
 * Returns 0 or EINVAL. Updates the per-process catch/ignore sets and
 * the SIGCHLD-related process flags as a side effect.
 */
int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline. Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline. Only
	 * validate the vers if a new sigaction was supplied. Emulations
	 * use legacy kernel trampolines with version 0, alternatively
	 * check for that too.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
	    (p->p_emul->e_sigcode == NULL) :
	    !SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

#ifndef __HAVE_SIGINFO
		/* This port cannot deliver siginfo; reject SA_SIGINFO. */
		if (nsa->sa_flags & SA_SIGINFO)
			return (EINVAL);
#endif

		prop = sigprop[signum];
		/* SIGKILL/SIGSTOP dispositions cannot be changed. */
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		/* Unmaskable signals may never appear in a handler mask. */
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		/* Unless SA_NODEFER, the signal is blocked in its handler. */
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
375
376 #ifdef COMPAT_16
377 /* ARGSUSED */
378 int
379 compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
380 {
381 struct compat_16_sys___sigaction14_args /* {
382 syscallarg(int) signum;
383 syscallarg(const struct sigaction *) nsa;
384 syscallarg(struct sigaction *) osa;
385 } */ *uap = v;
386 struct proc *p;
387 struct sigaction nsa, osa;
388 int error;
389
390 if (SCARG(uap, nsa)) {
391 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
392 if (error)
393 return (error);
394 }
395 p = l->l_proc;
396 error = sigaction1(p, SCARG(uap, signum),
397 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
398 NULL, 0);
399 if (error)
400 return (error);
401 if (SCARG(uap, osa)) {
402 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
403 if (error)
404 return (error);
405 }
406 return (0);
407 }
408 #endif
409
410 /* ARGSUSED */
411 int
412 sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
413 {
414 struct sys___sigaction_sigtramp_args /* {
415 syscallarg(int) signum;
416 syscallarg(const struct sigaction *) nsa;
417 syscallarg(struct sigaction *) osa;
418 syscallarg(void *) tramp;
419 syscallarg(int) vers;
420 } */ *uap = v;
421 struct proc *p = l->l_proc;
422 struct sigaction nsa, osa;
423 int error;
424
425 if (SCARG(uap, nsa)) {
426 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
427 if (error)
428 return (error);
429 }
430 error = sigaction1(p, SCARG(uap, signum),
431 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
432 SCARG(uap, tramp), SCARG(uap, vers));
433 if (error)
434 return (error);
435 if (SCARG(uap, osa)) {
436 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
437 if (error)
438 return (error);
439 }
440 return (0);
441 }
442
443 /*
444 * Initialize signal state for process 0;
445 * set to ignore signals that are ignored by default and disable the signal
446 * stack.
447 */
448 void
449 siginit(struct proc *p)
450 {
451 struct sigacts *ps;
452 int signum, prop;
453
454 ps = p->p_sigacts;
455 sigemptyset(&contsigmask);
456 sigemptyset(&stopsigmask);
457 sigemptyset(&sigcantmask);
458 for (signum = 1; signum < NSIG; signum++) {
459 prop = sigprop[signum];
460 if (prop & SA_CONT)
461 sigaddset(&contsigmask, signum);
462 if (prop & SA_STOP)
463 sigaddset(&stopsigmask, signum);
464 if (prop & SA_CANTMASK)
465 sigaddset(&sigcantmask, signum);
466 if (prop & SA_IGNORE && signum != SIGCONT)
467 sigaddset(&p->p_sigctx.ps_sigignore, signum);
468 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
469 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
470 }
471 sigemptyset(&p->p_sigctx.ps_sigcatch);
472 p->p_sigctx.ps_sigwaited = NULL;
473 p->p_flag &= ~P_NOCLDSTOP;
474
475 /*
476 * Reset stack state to the user stack.
477 */
478 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
479 p->p_sigctx.ps_sigstk.ss_size = 0;
480 p->p_sigctx.ps_sigstk.ss_sp = 0;
481
482 /* One reference. */
483 ps->sa_refcnt = 1;
484 }
485
486 /*
487 * Reset signals for an exec of the specified process.
488 */
489 void
490 execsigs(struct proc *p)
491 {
492 struct sigacts *ps;
493 int signum, prop;
494
495 sigactsunshare(p);
496
497 ps = p->p_sigacts;
498
499 /*
500 * Reset caught signals. Held signals remain held
501 * through p_sigctx.ps_sigmask (unless they were caught,
502 * and are now ignored by default).
503 */
504 for (signum = 1; signum < NSIG; signum++) {
505 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
506 prop = sigprop[signum];
507 if (prop & SA_IGNORE) {
508 if ((prop & SA_CONT) == 0)
509 sigaddset(&p->p_sigctx.ps_sigignore,
510 signum);
511 sigdelset(&p->p_sigctx.ps_siglist, signum);
512 }
513 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
514 }
515 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
516 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
517 }
518 sigemptyset(&p->p_sigctx.ps_sigcatch);
519 p->p_sigctx.ps_sigwaited = NULL;
520 p->p_flag &= ~P_NOCLDSTOP;
521
522 /*
523 * Reset stack state to the user stack.
524 */
525 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
526 p->p_sigctx.ps_sigstk.ss_size = 0;
527 p->p_sigctx.ps_sigstk.ss_sp = 0;
528 }
529
530 int
531 sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
532 {
533
534 if (oss)
535 *oss = p->p_sigctx.ps_sigmask;
536
537 if (nss) {
538 (void)splsched(); /* XXXSMP */
539 switch (how) {
540 case SIG_BLOCK:
541 sigplusset(nss, &p->p_sigctx.ps_sigmask);
542 break;
543 case SIG_UNBLOCK:
544 sigminusset(nss, &p->p_sigctx.ps_sigmask);
545 CHECKSIGS(p);
546 break;
547 case SIG_SETMASK:
548 p->p_sigctx.ps_sigmask = *nss;
549 CHECKSIGS(p);
550 break;
551 default:
552 (void)spl0(); /* XXXSMP */
553 return (EINVAL);
554 }
555 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
556 (void)spl0(); /* XXXSMP */
557 }
558
559 return (0);
560 }
561
562 /*
563 * Manipulate signal mask.
564 * Note that we receive new mask, not pointer,
565 * and return old mask as return value;
566 * the library stub does the rest.
567 */
568 int
569 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
570 {
571 struct sys___sigprocmask14_args /* {
572 syscallarg(int) how;
573 syscallarg(const sigset_t *) set;
574 syscallarg(sigset_t *) oset;
575 } */ *uap = v;
576 struct proc *p;
577 sigset_t nss, oss;
578 int error;
579
580 if (SCARG(uap, set)) {
581 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
582 if (error)
583 return (error);
584 }
585 p = l->l_proc;
586 error = sigprocmask1(p, SCARG(uap, how),
587 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
588 if (error)
589 return (error);
590 if (SCARG(uap, oset)) {
591 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
592 if (error)
593 return (error);
594 }
595 return (0);
596 }
597
/*
 * Compute the set of signals pending delivery to p: the posted set
 * (ps_siglist) minus the currently blocked set (ps_sigmask).
 */
void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}
605
606 /* ARGSUSED */
607 int
608 sys___sigpending14(struct lwp *l, void *v, register_t *retval)
609 {
610 struct sys___sigpending14_args /* {
611 syscallarg(sigset_t *) set;
612 } */ *uap = v;
613 struct proc *p;
614 sigset_t ss;
615
616 p = l->l_proc;
617 sigpending1(p, &ss);
618 return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
619 }
620
/*
 * Common code for sigsuspend(2)/sigpause: install the temporary mask
 * 'ss' (if non-NULL), remembering the old one for restoration after
 * the handler runs, then sleep until a signal interrupts us.
 * Always returns EINTR.
 */
int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished. Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	/*
	 * Sleep until PCATCH makes tsleep return non-zero; the loop
	 * guards against spurious zero returns (presumably nothing
	 * issues a plain wakeup on ps -- confirm).
	 */
	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
650
651 /*
652 * Suspend process until signal, providing mask to be set
653 * in the meantime. Note nonstandard calling convention:
654 * libc stub passes mask, not pointer, to save a copyin.
655 */
656 /* ARGSUSED */
657 int
658 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
659 {
660 struct sys___sigsuspend14_args /* {
661 syscallarg(const sigset_t *) set;
662 } */ *uap = v;
663 struct proc *p;
664 sigset_t ss;
665 int error;
666
667 if (SCARG(uap, set)) {
668 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
669 if (error)
670 return (error);
671 }
672
673 p = l->l_proc;
674 return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
675 }
676
677 int
678 sigaltstack1(struct proc *p, const struct sigaltstack *nss,
679 struct sigaltstack *oss)
680 {
681
682 if (oss)
683 *oss = p->p_sigctx.ps_sigstk;
684
685 if (nss) {
686 if (nss->ss_flags & ~SS_ALLBITS)
687 return (EINVAL);
688
689 if (nss->ss_flags & SS_DISABLE) {
690 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
691 return (EINVAL);
692 } else {
693 if (nss->ss_size < MINSIGSTKSZ)
694 return (ENOMEM);
695 }
696 p->p_sigctx.ps_sigstk = *nss;
697 }
698
699 return (0);
700 }
701
702 /* ARGSUSED */
703 int
704 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
705 {
706 struct sys___sigaltstack14_args /* {
707 syscallarg(const struct sigaltstack *) nss;
708 syscallarg(struct sigaltstack *) oss;
709 } */ *uap = v;
710 struct proc *p;
711 struct sigaltstack nss, oss;
712 int error;
713
714 if (SCARG(uap, nss)) {
715 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
716 if (error)
717 return (error);
718 }
719 p = l->l_proc;
720 error = sigaltstack1(p,
721 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
722 if (error)
723 return (error);
724 if (SCARG(uap, oss)) {
725 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
726 if (error)
727 return (error);
728 }
729 return (0);
730 }
731
732 /* ARGSUSED */
733 int
734 sys_kill(struct lwp *l, void *v, register_t *retval)
735 {
736 struct sys_kill_args /* {
737 syscallarg(int) pid;
738 syscallarg(int) signum;
739 } */ *uap = v;
740 struct proc *cp, *p;
741 struct pcred *pc;
742 ksiginfo_t ksi;
743
744 cp = l->l_proc;
745 pc = cp->p_cred;
746 if ((u_int)SCARG(uap, signum) >= NSIG)
747 return (EINVAL);
748 memset(&ksi, 0, sizeof(ksi));
749 ksi.ksi_signo = SCARG(uap, signum);
750 ksi.ksi_code = SI_USER;
751 ksi.ksi_pid = cp->p_pid;
752 ksi.ksi_uid = cp->p_ucred->cr_uid;
753 if (SCARG(uap, pid) > 0) {
754 /* kill single process */
755 if ((p = pfind(SCARG(uap, pid))) == NULL)
756 return (ESRCH);
757 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
758 return (EPERM);
759 if (SCARG(uap, signum))
760 kpsignal2(p, &ksi, 1);
761 return (0);
762 }
763 switch (SCARG(uap, pid)) {
764 case -1: /* broadcast signal */
765 return (killpg1(cp, &ksi, 0, 1));
766 case 0: /* signal own process group */
767 return (killpg1(cp, &ksi, 0, 0));
768 default: /* negative explicit process group */
769 return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
770 }
771 /* NOTREACHED */
772 }
773
774 /*
775 * Common code for kill process group/broadcast kill.
776 * cp is calling process.
777 */
778 int
779 killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
780 {
781 struct proc *p;
782 struct pcred *pc;
783 struct pgrp *pgrp;
784 int nfound;
785 int signum = ksi->ksi_signo;
786
787 pc = cp->p_cred;
788 nfound = 0;
789 if (all) {
790 /*
791 * broadcast
792 */
793 proclist_lock_read();
794 LIST_FOREACH(p, &allproc, p_list) {
795 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
796 p == cp || !CANSIGNAL(cp, pc, p, signum))
797 continue;
798 nfound++;
799 if (signum)
800 kpsignal2(p, ksi, 1);
801 }
802 proclist_unlock_read();
803 } else {
804 if (pgid == 0)
805 /*
806 * zero pgid means send to my process group.
807 */
808 pgrp = cp->p_pgrp;
809 else {
810 pgrp = pgfind(pgid);
811 if (pgrp == NULL)
812 return (ESRCH);
813 }
814 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
815 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
816 !CANSIGNAL(cp, pc, p, signum))
817 continue;
818 nfound++;
819 if (signum && P_ZOMBIE(p) == 0)
820 kpsignal2(p, ksi, 1);
821 }
822 }
823 return (nfound ? 0 : ESRCH);
824 }
825
826 /*
827 * Send a signal to a process group.
828 */
829 void
830 gsignal(int pgid, int signum)
831 {
832 ksiginfo_t ksi;
833 memset(&ksi, 0, sizeof(ksi));
834 ksi.ksi_signo = signum;
835 kgsignal(pgid, &ksi, NULL);
836 }
837
838 void
839 kgsignal(int pgid, ksiginfo_t *ksi, void *data)
840 {
841 struct pgrp *pgrp;
842
843 if (pgid && (pgrp = pgfind(pgid)))
844 kpgsignal(pgrp, ksi, data, 0);
845 }
846
847 /*
848 * Send a signal to a process group. If checktty is 1,
849 * limit to members which have a controlling terminal.
850 */
851 void
852 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
853 {
854 ksiginfo_t ksi;
855 memset(&ksi, 0, sizeof(ksi));
856 ksi.ksi_signo = sig;
857 kpgsignal(pgrp, &ksi, NULL, checkctty);
858 }
859
860 void
861 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
862 {
863 struct proc *p;
864
865 if (pgrp)
866 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
867 if (checkctty == 0 || p->p_flag & P_CONTROLT)
868 kpsignal(p, ksi, data);
869 }
870
871 /*
872 * Send a signal caused by a trap to the current process.
873 * If it will be caught immediately, deliver it with correct code.
874 * Otherwise, post it normally.
875 */
876 #ifndef __HAVE_SIGINFO
877 void _trapsignal(struct lwp *, const ksiginfo_t *);
878 void
879 trapsignal(struct lwp *l, int signum, u_long code)
880 {
881 #define trapsignal _trapsignal
882 ksiginfo_t ksi;
883
884 KSI_INIT_TRAP(&ksi);
885 ksi.ksi_signo = signum;
886 ksi.ksi_trap = (int)code;
887 trapsignal(l, &ksi);
888 }
889 #endif
890
/*
 * Deliver a trap-generated signal. If the process catches the signal,
 * is not traced, and does not have it blocked, the handler is invoked
 * directly (with correct siginfo); otherwise the signal is posted via
 * the normal kpsignal2() path.
 */
void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		/* Push the handler frame onto the LWP immediately. */
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		/* Block the handler's sa_mask for the handler's duration. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* One-shot handler: revert to the default action. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi, 1);
	}
}
930
931 /*
932 * Fill in signal information and signal the parent for a child status change.
933 */
934 static void
935 child_psignal(struct proc *p, int dolock)
936 {
937 ksiginfo_t ksi;
938
939 (void)memset(&ksi, 0, sizeof(ksi));
940 ksi.ksi_signo = SIGCHLD;
941 ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
942 ksi.ksi_pid = p->p_pid;
943 ksi.ksi_uid = p->p_ucred->cr_uid;
944 ksi.ksi_status = p->p_xstat;
945 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
946 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
947 kpsignal2(p->p_pptr, &ksi, dolock);
948 }
949
950 /*
951 * Send the signal to the process. If the signal has an action, the action
952 * is usually performed by the target process rather than the caller; we add
953 * the signal to the set of pending signals for the process.
954 *
955 * Exceptions:
956 * o When a stop signal is sent to a sleeping process that takes the
957 * default action, the process is stopped without awakening it.
958 * o SIGCONT restarts stopped processes (or puts them back to sleep)
959 * regardless of the signal action (eg, blocked or ignored).
960 *
961 * Other ignored signals are discarded immediately.
962 *
963 * XXXSMP: Invoked as psignal() or sched_psignal().
964 */
965 void
966 psignal1(struct proc *p, int signum, int dolock)
967 {
968 ksiginfo_t ksi;
969
970 memset(&ksi, 0, sizeof(ksi));
971 ksi.ksi_signo = signum;
972 kpsignal2(p, &ksi, dolock);
973 }
974
975 void
976 kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
977 {
978
979 if ((p->p_flag & P_WEXIT) == 0 && data) {
980 size_t fd;
981 struct filedesc *fdp = p->p_fd;
982
983 ksi->ksi_fd = -1;
984 for (fd = 0; fd < fdp->fd_nfiles; fd++) {
985 struct file *fp = fdp->fd_ofiles[fd];
986 /* XXX: lock? */
987 if (fp && fp->f_data == data) {
988 ksi->ksi_fd = fd;
989 break;
990 }
991 }
992 }
993 kpsignal2(p, ksi, dolock);
994 }
995
996 static void
997 kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
998 {
999 struct lwp *l, *suspended = NULL;
1000 int s = 0, prop, allsusp;
1001 sig_t action;
1002 int signum = ksi->ksi_signo;
1003
1004 #ifdef DIAGNOSTIC
1005 if (signum <= 0 || signum >= NSIG)
1006 panic("psignal signal number %d", signum);
1007
1008 /* XXXSMP: works, but icky */
1009 if (dolock)
1010 SCHED_ASSERT_UNLOCKED();
1011 else
1012 SCHED_ASSERT_LOCKED();
1013 #endif
1014
1015
1016 /*
1017 * Notify any interested parties in the signal.
1018 */
1019 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1020
1021 prop = sigprop[signum];
1022
1023 /*
1024 * If proc is traced, always give parent a chance.
1025 */
1026 if (p->p_flag & P_TRACED)
1027 action = SIG_DFL;
1028 else {
1029 /*
1030 * If the signal is being ignored,
1031 * then we forget about it immediately.
1032 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
1033 * and if it is set to SIG_IGN,
1034 * action will be SIG_DFL here.)
1035 */
1036 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
1037 return;
1038 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1039 action = SIG_HOLD;
1040 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
1041 action = SIG_CATCH;
1042 else {
1043 action = SIG_DFL;
1044
1045 if (prop & SA_KILL && p->p_nice > NZERO)
1046 p->p_nice = NZERO;
1047
1048 /*
1049 * If sending a tty stop signal to a member of an
1050 * orphaned process group, discard the signal here if
1051 * the action is default; don't stop the process below
1052 * if sleeping, and don't clear any pending SIGCONT.
1053 */
1054 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
1055 return;
1056 }
1057 }
1058
1059 if (prop & SA_CONT)
1060 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
1061
1062 if (prop & SA_STOP)
1063 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
1064
1065 /*
1066 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
1067 * please!), check if anything waits on it. If yes, save the
1068 * info into provided ps_sigwaited, and wake-up the waiter.
1069 * The signal won't be processed further here.
1070 */
1071 if ((prop & SA_CANTMASK) == 0
1072 && p->p_sigctx.ps_sigwaited
1073 && sigismember(p->p_sigctx.ps_sigwait, signum)
1074 && p->p_stat != SSTOP) {
1075 p->p_sigctx.ps_sigwaited->ksi_info = ksi->ksi_info;
1076 p->p_sigctx.ps_sigwaited = NULL;
1077 if (dolock)
1078 wakeup_one(&p->p_sigctx.ps_sigwait);
1079 else
1080 sched_wakeup(&p->p_sigctx.ps_sigwait);
1081 return;
1082 }
1083
1084 sigaddset(&p->p_sigctx.ps_siglist, signum);
1085
1086 /* CHECKSIGS() is "inlined" here. */
1087 p->p_sigctx.ps_sigcheck = 1;
1088
1089 /*
1090 * Defer further processing for signals which are held,
1091 * except that stopped processes must be continued by SIGCONT.
1092 */
1093 if (action == SIG_HOLD &&
1094 ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1095 ksiginfo_put(p, ksi);
1096 return;
1097 }
1098 /* XXXSMP: works, but icky */
1099 if (dolock)
1100 SCHED_LOCK(s);
1101
1102 /* XXXUPSXXX LWPs might go to sleep without passing signal handling */
1103 if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)
1104 && !((p->p_flag & P_SA) && (p->p_sa->sa_idle != NULL))) {
1105 /*
1106 * At least one LWP is running or on a run queue.
1107 * The signal will be noticed when one of them returns
1108 * to userspace.
1109 */
1110 signotify(p);
1111 /*
1112 * The signal will be noticed very soon.
1113 */
1114 goto out;
1115 } else {
1116 /* Process is sleeping or stopped */
1117 if (p->p_flag & P_SA) {
1118 struct lwp *l2 = p->p_sa->sa_vp;
1119 l = NULL;
1120 allsusp = 1;
1121
1122 if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
1123 l = l2;
1124 else if (l2->l_stat == LSSUSPENDED)
1125 suspended = l2;
1126 else if ((l2->l_stat != LSZOMB) &&
1127 (l2->l_stat != LSDEAD))
1128 allsusp = 0;
1129 } else {
1130 /*
1131 * Find out if any of the sleeps are interruptable,
1132 * and if all the live LWPs remaining are suspended.
1133 */
1134 allsusp = 1;
1135 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1136 if (l->l_stat == LSSLEEP &&
1137 l->l_flag & L_SINTR)
1138 break;
1139 if (l->l_stat == LSSUSPENDED)
1140 suspended = l;
1141 else if ((l->l_stat != LSZOMB) &&
1142 (l->l_stat != LSDEAD))
1143 allsusp = 0;
1144 }
1145 }
1146 if (p->p_stat == SACTIVE) {
1147
1148
1149 if (l != NULL && (p->p_flag & P_TRACED))
1150 goto run;
1151
1152 /*
1153 * If SIGCONT is default (or ignored) and process is
1154 * asleep, we are finished; the process should not
1155 * be awakened.
1156 */
1157 if ((prop & SA_CONT) && action == SIG_DFL) {
1158 sigdelset(&p->p_sigctx.ps_siglist, signum);
1159 goto done;
1160 }
1161
1162 /*
1163 * When a sleeping process receives a stop
1164 * signal, process immediately if possible.
1165 */
1166 if ((prop & SA_STOP) && action == SIG_DFL) {
1167 /*
1168 * If a child holding parent blocked,
1169 * stopping could cause deadlock.
1170 */
1171 if (p->p_flag & P_PPWAIT) {
1172 goto out;
1173 }
1174 sigdelset(&p->p_sigctx.ps_siglist, signum);
1175 p->p_xstat = signum;
1176 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1177 /*
1178 * XXXSMP: recursive call; don't lock
1179 * the second time around.
1180 */
1181 child_psignal(p, 0);
1182 }
1183 proc_stop(p); /* XXXSMP: recurse? */
1184 goto done;
1185 }
1186
1187 if (l == NULL) {
1188 /*
1189 * Special case: SIGKILL of a process
1190 * which is entirely composed of
1191 * suspended LWPs should succeed. We
1192 * make this happen by unsuspending one of
1193 * them.
1194 */
1195 if (allsusp && (signum == SIGKILL))
1196 lwp_continue(suspended);
1197 goto done;
1198 }
1199 /*
1200 * All other (caught or default) signals
1201 * cause the process to run.
1202 */
1203 goto runfast;
1204 /*NOTREACHED*/
1205 } else if (p->p_stat == SSTOP) {
1206 /* Process is stopped */
1207 /*
1208 * If traced process is already stopped,
1209 * then no further action is necessary.
1210 */
1211 if (p->p_flag & P_TRACED)
1212 goto done;
1213
1214 /*
1215 * Kill signal always sets processes running,
1216 * if possible.
1217 */
1218 if (signum == SIGKILL) {
1219 l = proc_unstop(p);
1220 if (l)
1221 goto runfast;
1222 goto done;
1223 }
1224
1225 if (prop & SA_CONT) {
1226 /*
1227 * If SIGCONT is default (or ignored),
1228 * we continue the process but don't
1229 * leave the signal in ps_siglist, as
1230 * it has no further action. If
1231 * SIGCONT is held, we continue the
1232 * process and leave the signal in
1233 * ps_siglist. If the process catches
1234 * SIGCONT, let it handle the signal
1235 * itself. If it isn't waiting on an
1236 * event, then it goes back to run
1237 * state. Otherwise, process goes
1238 * back to sleep state.
1239 */
1240 if (action == SIG_DFL)
1241 sigdelset(&p->p_sigctx.ps_siglist,
1242 signum);
1243 l = proc_unstop(p);
1244 if (l && (action == SIG_CATCH))
1245 goto runfast;
1246 goto out;
1247 }
1248
1249 if (prop & SA_STOP) {
1250 /*
1251 * Already stopped, don't need to stop again.
1252 * (If we did the shell could get confused.)
1253 */
1254 sigdelset(&p->p_sigctx.ps_siglist, signum);
1255 goto done;
1256 }
1257
1258 /*
1259 * If a lwp is sleeping interruptibly, then
1260 * wake it up; it will run until the kernel
1261 * boundary, where it will stop in issignal(),
1262 * since p->p_stat is still SSTOP. When the
1263 * process is continued, it will be made
1264 * runnable and can look at the signal.
1265 */
1266 if (l)
1267 goto run;
1268 goto out;
1269 } else {
1270 /* Else what? */
1271 panic("psignal: Invalid process state %d.",
1272 p->p_stat);
1273 }
1274 }
1275 /*NOTREACHED*/
1276
1277 runfast:
1278 if (action == SIG_CATCH) {
1279 ksiginfo_put(p, ksi);
1280 action = SIG_HOLD;
1281 }
1282 /*
1283 * Raise priority to at least PUSER.
1284 */
1285 if (l->l_priority > PUSER)
1286 l->l_priority = PUSER;
1287 run:
1288 if (action == SIG_CATCH) {
1289 ksiginfo_put(p, ksi);
1290 action = SIG_HOLD;
1291 }
1292
1293 setrunnable(l); /* XXXSMP: recurse? */
1294 out:
1295 if (action == SIG_CATCH)
1296 ksiginfo_put(p, ksi);
1297 done:
1298 /* XXXSMP: works, but icky */
1299 if (dolock)
1300 SCHED_UNLOCK(s);
1301 }
1302
/*
 * Deliver signal info for ksi to LWP l, either via a scheduler-activations
 * upcall (P_SA processes) or via the emulation's sendsig hook.
 * `mask' is the signal mask to be restored when the handler returns.
 */
void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		/*
		 * Temporarily clear L_SA so the upcall machinery below
		 * (which may sleep in pool_get) does not treat this LWP
		 * as running SA code; the saved bit is restored after
		 * sa_upcall() returns.
		 */
		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = ksi->ksi_info;
		/*
		 * Trap-generated signals report this LWP as the "event"
		 * LWP (le); asynchronous ones as the "interrupted" LWP (li).
		 */
		le = li = NULL;
		if (KSI_TRAP_P(ksi))
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= f;
		return;
	}

	/* Non-SA process: hand off to the emulation-specific sendsig. */
#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, KSI_TRAPCODE(ksi));
#endif
}
1337
1338 static __inline int firstsig(const sigset_t *);
1339
1340 static __inline int
1341 firstsig(const sigset_t *ss)
1342 {
1343 int sig;
1344
1345 sig = ffs(ss->__bits[0]);
1346 if (sig != 0)
1347 return (sig);
1348 #if NSIG > 33
1349 sig = ffs(ss->__bits[1]);
1350 if (sig != 0)
1351 return (sig + 32);
1352 #endif
1353 #if NSIG > 65
1354 sig = ffs(ss->__bits[2]);
1355 if (sig != 0)
1356 return (sig + 64);
1357 #endif
1358 #if NSIG > 97
1359 sig = ffs(ss->__bits[3]);
1360 if (sig != 0)
1361 return (sig + 96);
1362 #endif
1363 return (0);
1364 }
1365
1366 /*
1367 * If the current process has received a signal (should be caught or cause
1368 * termination, should interrupt current syscall), return the signal number.
1369 * Stop signals with default action are processed immediately, then cleared;
1370 * they aren't returned. This is checked after each entry to the system for
1371 * a syscall or trap (though this can usually be done without calling issignal
1372 * by checking the pending signal masks in the CURSIG macro.) The normal call
1373 * sequence is
1374 *
1375 * while (signum = CURSIG(curlwp))
1376 * postsig(signum);
1377 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	/*
	 * dolock: take SCHED_LOCK ourselves only if the caller did not
	 * already hold it (L_SINTR set implies caller holds it).
	 * locked: on return, leave the lock in the state the caller expects.
	 */
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 *
		 * NOTE(review): this jumps into the middle of the traced /
		 * stop-signal handling below (sigtraceswitch / sigswitch),
		 * re-using their mi_switch() + lock-release sequence.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	/* Scan pending signals until one must be delivered or none remain. */
	for (;;) {
		sigpending1(p, &ss);
		/* Don't stop while the parent is waiting on us (vfork). */
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {			/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
							/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			/* Notify the tracing parent unless /proc-traced. */
			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			/* Block here until the debugger continues us. */
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
			sigswitch:
				/* Sleep until the process is continued. */
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}
1563
1564 /*
1565 * Put the argument process into the stopped state and notify the parent
1566 * via wakeup. Signals are handled elsewhere. The process must not be
1567 * on the run queue.
1568 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	/* Caller must hold the scheduler lock. */
	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	p->p_flag &= ~P_WAITED;	/* so the parent's wait() sees this stop */

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		else if ( (l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
			/*
			 * Wake interruptibly-sleeping LWPs; they will
			 * observe SSTOP on their way out of the kernel
			 * and stop there.
			 */
			setrunnable(l);
		}

		/* !!!UPS!!! FIX ME */
#if 0
		else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			      "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
#endif
	}
	/* XXX unlock process LWP state */

	/* Let a parent blocked in wait() notice the state change. */
	sched_wakeup((caddr_t)p->p_pptr);
}
1641
1642 /*
1643 * Given a process in state SSTOP, set the state back to SACTIVE and
1644 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
1645 *
1646 * If no LWPs ended up runnable (and therefore able to take a signal),
1647 * return a LWP that is sleeping interruptably. The caller can wake
1648 * that LWP up to take a signal.
1649 */
1650 struct lwp *
1651 proc_unstop(struct proc *p)
1652 {
1653 struct lwp *l, *lr = NULL;
1654 int cantake = 0;
1655
1656 SCHED_ASSERT_LOCKED();
1657
1658 /*
1659 * Our caller wants to be informed if there are only sleeping
1660 * and interruptable LWPs left after we have run so that it
1661 * can invoke setrunnable() if required - return one of the
1662 * interruptable LWPs if this is the case.
1663 */
1664
1665 p->p_stat = SACTIVE;
1666 if (p->p_flag & P_SA) {
1667 /*
1668 * Preferentially select the idle LWP as the interruptable
1669 * LWP to return if it exists.
1670 */
1671 lr = p->p_sa->sa_idle;
1672 if (lr != NULL)
1673 cantake = 1;
1674 }
1675 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1676 if (l->l_stat == LSRUN) {
1677 lr = NULL;
1678 cantake = 1;
1679 }
1680 if (l->l_stat != LSSTOP)
1681 continue;
1682
1683 if (l->l_wchan != NULL) {
1684 l->l_stat = LSSLEEP;
1685 if ((cantake == 0) && (l->l_flag & L_SINTR)) {
1686 lr = l;
1687 cantake = 1;
1688 }
1689 } else {
1690 setrunnable(l);
1691 lr = NULL;
1692 cantake = 1;
1693 }
1694 }
1695
1696 return lr;
1697 }
1698
1699 /*
1700 * Take the action for the specified signal
1701 * from the current set of pending signals.
1702 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	/* The signal is being handled now; remove it from the pending set. */
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		ksi = ksiginfo_get(p, signum);
#ifdef KTRACE
		/*
		 * NOTE(review): SAS_OLDMASK was already cleared above when
		 * it was set, so this ktrpsig always logs ps_sigmask in
		 * that case - confirm this is the intended trace output.
		 */
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			(void)memset(&ksi1, 0, sizeof(ksi1));
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			/* ksiginfo_get handed us the queued entry; free it. */
			pool_put(&ksiginfo_pool, ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		(void) splsched();	/* XXXSMP */
		/* Block this signal (and sa_mask) until the handler returns. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* One-shot handler: revert to SIG_DFL after use. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}
1802
1803 /*
1804 * Kill the current process for stated reason.
1805 */
1806 void
1807 killproc(struct proc *p, const char *why)
1808 {
1809 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
1810 uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
1811 psignal(p, SIGKILL);
1812 }
1813
1814 /*
1815 * Force the current process to exit with the specified signal, dumping core
1816 * if appropriate. We bypass the normal tests for masked and caught signals,
1817 * allowing unrecoverable failures to terminate the process without changing
1818 * signal state. Mark the accounting record with the signal termination.
1819 * If dumping core, save the signal number for the debugger. Calls exit and
1820 * does not return.
1821 */
1822
1823 #if defined(DEBUG)
1824 int kern_logsigexit = 1; /* not static to make public for sysctl */
1825 #else
1826 int kern_logsigexit = 0; /* not static to make public for sysctl */
1827 #endif
1828
1829 static const char logcoredump[] =
1830 "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
1831 static const char lognocoredump[] =
1832 "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
1833
1834 /* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	/* Keep the LWP around (not auto-reaped) while the dump runs. */
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	/* Switch away; we resume here only after the dump is complete. */
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/* Dump finished; this LWP can now exit.  Does not return. */
	lwp_exit(l);
}
1858
void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		/*
		 * NOTE(review): no return statement here - presumably
		 * p_userret (lwp_coredump_hook, installed below by the
		 * first caller) never returns; confirm.
		 */
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;	/* mark the accounting record */
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		/* On a successful dump, report "core dumped" via wait(). */
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
				(int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}
1918
1919 /*
1920 * Dump core, into a file named "progname.core" or "core" (depending on the
1921 * value of shortcorename), unless the process was setuid/setgid.
1922 */
int
coredump(struct lwp *l)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	struct nameidata nd;
	struct vattr vattr;
	struct mount *mp;
	int error, error1;
	char name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

restart:
	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	/* Expand the corename template (%n, %p, %u, %t) into `name'. */
	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/*
	 * If the filesystem is being suspended, close the file and retry
	 * from the top once writing is possible again.
	 */
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		if ((error = vn_close(vp, FWRITE, cred, p)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp,
		    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	/* Truncate any existing core file before writing the new one. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	/* Report the close error only if the dump itself succeeded. */
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}
2008
2009 /*
2010 * Nonexistent system call-- signal process (may want to handle it).
2011 * Flag error in case process won't see signal immediately (blocked or ignored).
2012 */
2013 /* ARGSUSED */
2014 int
2015 sys_nosys(struct lwp *l, void *v, register_t *retval)
2016 {
2017 struct proc *p;
2018
2019 p = l->l_proc;
2020 psignal(p, SIGSYS);
2021 return (ENOSYS);
2022 }
2023
2024 static int
2025 build_corename(struct proc *p, char dst[MAXPATHLEN])
2026 {
2027 const char *s;
2028 char *d, *end;
2029 int i;
2030
2031 for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
2032 *s != '\0'; s++) {
2033 if (*s == '%') {
2034 switch (*(s + 1)) {
2035 case 'n':
2036 i = snprintf(d, end - d, "%s", p->p_comm);
2037 break;
2038 case 'p':
2039 i = snprintf(d, end - d, "%d", p->p_pid);
2040 break;
2041 case 'u':
2042 i = snprintf(d, end - d, "%.*s",
2043 (int)sizeof p->p_pgrp->pg_session->s_login,
2044 p->p_pgrp->pg_session->s_login);
2045 break;
2046 case 't':
2047 i = snprintf(d, end - d, "%ld",
2048 p->p_stats->p_start.tv_sec);
2049 break;
2050 default:
2051 goto copy;
2052 }
2053 d += i;
2054 s++;
2055 } else {
2056 copy: *d = *s;
2057 d++;
2058 }
2059 if (d >= end)
2060 return (ENAMETOOLONG);
2061 }
2062 *d = '\0';
2063 return 0;
2064 }
2065
2066 void
2067 getucontext(struct lwp *l, ucontext_t *ucp)
2068 {
2069 struct proc *p;
2070
2071 p = l->l_proc;
2072
2073 ucp->uc_flags = 0;
2074 ucp->uc_link = l->l_ctxlink;
2075
2076 (void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
2077 ucp->uc_flags |= _UC_SIGMASK;
2078
2079 /*
2080 * The (unsupplied) definition of the `current execution stack'
2081 * in the System V Interface Definition appears to allow returning
2082 * the main context stack.
2083 */
2084 if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
2085 ucp->uc_stack.ss_sp = (void *)USRSTACK;
2086 ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
2087 ucp->uc_stack.ss_flags = 0; /* XXX, def. is Very Fishy */
2088 } else {
2089 /* Simply copy alternate signal execution stack. */
2090 ucp->uc_stack = p->p_sigctx.ps_sigstk;
2091 }
2092 ucp->uc_flags |= _UC_STACK;
2093
2094 cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
2095 }
2096
2097 /* ARGSUSED */
2098 int
2099 sys_getcontext(struct lwp *l, void *v, register_t *retval)
2100 {
2101 struct sys_getcontext_args /* {
2102 syscallarg(struct __ucontext *) ucp;
2103 } */ *uap = v;
2104 ucontext_t uc;
2105
2106 getucontext(l, &uc);
2107
2108 return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
2109 }
2110
2111 int
2112 setucontext(struct lwp *l, const ucontext_t *ucp)
2113 {
2114 struct proc *p;
2115 int error;
2116
2117 p = l->l_proc;
2118 if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
2119 return (error);
2120 l->l_ctxlink = ucp->uc_link;
2121 /*
2122 * We might want to take care of the stack portion here but currently
2123 * don't; see the comment in getucontext().
2124 */
2125 if ((ucp->uc_flags & _UC_SIGMASK) != 0)
2126 sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);
2127
2128 return 0;
2129 }
2130
2131 /* ARGSUSED */
2132 int
2133 sys_setcontext(struct lwp *l, void *v, register_t *retval)
2134 {
2135 struct sys_setcontext_args /* {
2136 syscallarg(const ucontext_t *) ucp;
2137 } */ *uap = v;
2138 ucontext_t uc;
2139 int error;
2140
2141 if (SCARG(uap, ucp) == NULL) /* i.e. end of uc_link chain */
2142 exit1(l, W_EXITCODE(0, 0));
2143 else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
2144 (error = setucontext(l, &uc)) != 0)
2145 return (error);
2146
2147 return (EJUSTRETURN);
2148 }
2149
2150 /*
2151 * sigtimedwait(2) system call, used also for implementation
2152 * of sigwaitinfo() and sigwait().
2153 *
2154 * This only handles single LWP in signal wait. libpthread provides
2155 * it's own sigtimedwait() wrapper to DTRT WRT individual threads.
2156 *
2157 * XXX no support for queued signals, si_code is always SI_USER.
2158 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t waitset, twaitset;
	struct proc *p = l->l_proc;
	int error, signum, s;
	int timo = 0;
	struct timeval tvstart;
	struct timespec ts;
	ksiginfo_t *ksi;

	if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
		return (error);

	/*
	 * Silently ignore SA_CANTMASK signals. psignal1() would
	 * ignore SA_CANTMASK signals in waitset, we do this
	 * only for the below siglist check.
	 */
	sigminusset(&sigcantmask, &waitset);

	/*
	 * First scan siglist and check if there is signal from
	 * our waitset already pending.
	 */
	twaitset = waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		ksi = ksiginfo_get(p, signum);
		if (!ksi) {
			/* No queued siginfo, manufacture one */
			ksi = pool_get(&ksiginfo_pool, PR_WAITOK);
			KSI_INIT(ksi);
			ksi->ksi_info._signo = signum;
			ksi->ksi_info._code = SI_USER;
		}

		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
			return (error);

		/*
		 * NOTE(review): ts.tv_sec * 1000 can overflow for very
		 * large timeouts before being clamped by mstohz() - the
		 * timeout is not range-checked here; confirm acceptable.
		 */
		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		/* Sub-tick but nonzero timeouts round up to one tick. */
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return (EAGAIN);

		/*
		 * Remember current mono_time, it would be used in
		 * ECANCELED/ERESTART case.
		 */
		s = splclock();
		tvstart = mono_time;
		splx(s);
	}

	/*
	 * Setup ps_sigwait list.  psignal1() fills *ksi and clears
	 * ps_sigwaited when a signal from `waitset' arrives.
	 */
	ksi = pool_get(&ksiginfo_pool, PR_WAITOK);
	p->p_sigctx.ps_sigwaited = ksi;
	p->p_sigctx.ps_sigwait = &waitset;

	/*
	 * Wait for signal to arrive. We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Need to find out if we woke as a result of lwp_wakeup()
	 * or a signal outside our wait set.
	 */
	if (error == EINTR && p->p_sigctx.ps_sigwaited
	    && !firstsig(&p->p_sigctx.ps_siglist)) {
		/* wakeup via _lwp_wakeup() */
		error = ECANCELED;
	} else if (!error && p->p_sigctx.ps_sigwaited) {
		/* spurious wakeup - arrange for syscall restart */
		error = ERESTART;
		goto fail;
	}

	/*
	 * On error, clear sigwait indication. psignal1() clears it
	 * in !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = NULL;

		/*
		 * If the sleep was interrupted (either by signal or wakeup),
		 * update the timeout and copyout new value back.
		 * It would be used when the syscall would be restarted
		 * or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timeval tvnow, tvtimo;
			int err;

			s = splclock();
			tvnow = mono_time;
			splx(s);

			TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);

			/* compute how much time has passed since start */
			timersub(&tvnow, &tvstart, &tvnow);
			/* substract passed time from timeout */
			timersub(&tvtimo, &tvnow, &tvtimo);

			if (tvtimo.tv_sec < 0) {
				/* Timeout fully consumed while we slept. */
				error = EAGAIN;
				goto fail;
			}

			TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);

			/* copy updated timeout to userland */
			if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts)))) {
				error = err;
				goto fail;
			}
		}

		goto fail;
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo, the padding part is
	 * left unchanged (userland is not supposed to touch it anyway).
	 */
 sig:
	error = copyout(&ksi->ksi_info, SCARG(uap, info), sizeof(ksi->ksi_info));

 fail:
	/* Both the preallocated and the queued ksi come from this pool. */
	pool_put(&ksiginfo_pool, ksi);
	p->p_sigctx.ps_sigwait = NULL;

	return (error);
}
2317
2318 /*
2319 * Returns true if signal is ignored or masked for passed process.
2320 */
2321 int
2322 sigismasked(struct proc *p, int sig)
2323 {
2324
2325 return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
2326 sigismember(&p->p_sigctx.ps_sigmask, sig));
2327 }
2328
2329 static int
2330 filt_sigattach(struct knote *kn)
2331 {
2332 struct proc *p = curproc;
2333
2334 kn->kn_ptr.p_proc = p;
2335 kn->kn_flags |= EV_CLEAR; /* automatically set */
2336
2337 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2338
2339 return (0);
2340 }
2341
2342 static void
2343 filt_sigdetach(struct knote *kn)
2344 {
2345 struct proc *p = kn->kn_ptr.p_proc;
2346
2347 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2348 }
2349
2350 /*
2351 * signal knotes are shared with proc knotes, so we apply a mask to
2352 * the hint in order to differentiate them from process hints. This
2353 * could be avoided by using a signal-specific knote list, but probably
2354 * isn't worth the trouble.
2355 */
2356 static int
2357 filt_signal(struct knote *kn, long hint)
2358 {
2359
2360 if (hint & NOTE_SIGNAL) {
2361 hint &= ~NOTE_SIGNAL;
2362
2363 if (kn->kn_id == hint)
2364 kn->kn_data++;
2365 }
2366 return (kn->kn_data != 0);
2367 }
2368
/*
 * Filter ops for EVFILT_SIGNAL knotes.  The leading 0 marks this as a
 * non-file-descriptor filter (presumably the f_isfd member of struct
 * filterops - confirm against <sys/event.h>).
 */
const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
2372