/*	$NetBSD: kern_sig.c,v 1.163 2003/10/03 17:51:13 christos Exp $	*/
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.163 2003/10/03 17:51:13 christos Exp $");
41
42 #include "opt_ktrace.h"
43 #include "opt_compat_sunos.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_compat_netbsd32.h"
46
47 #define SIGPROP /* include signal properties table */
48 #include <sys/param.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/systm.h>
55 #include <sys/timeb.h>
56 #include <sys/times.h>
57 #include <sys/buf.h>
58 #include <sys/acct.h>
59 #include <sys/file.h>
60 #include <sys/kernel.h>
61 #include <sys/wait.h>
62 #include <sys/ktrace.h>
63 #include <sys/syslog.h>
64 #include <sys/stat.h>
65 #include <sys/core.h>
66 #include <sys/filedesc.h>
67 #include <sys/malloc.h>
68 #include <sys/pool.h>
69 #include <sys/ucontext.h>
70 #include <sys/sa.h>
71 #include <sys/savar.h>
72 #include <sys/exec.h>
73
74 #include <sys/mount.h>
75 #include <sys/syscallargs.h>
76
77 #include <machine/cpu.h>
78
79 #include <sys/user.h> /* for coredump */
80
81 #include <uvm/uvm_extern.h>
82
/* Forward declarations for file-local helpers. */
static void	child_psignal(struct proc *, int);
static void	proc_stop(struct proc *);
static int	build_corename(struct proc *, char [MAXPATHLEN]);
static void	ksiginfo_exithook(struct proc *, void *);
static void	ksiginfo_put(struct proc *, const ksiginfo_t *);
static ksiginfo_t *ksiginfo_get(struct proc *, int);
static void	kpsignal2(struct proc *, const ksiginfo_t *, int);

/* Signal classification masks; built once by siginit() from sigprop[]. */
sigset_t contsigmask, stopsigmask, sigcantmask;

struct pool sigacts_pool;	/* memory pool for sigacts structures */
struct pool siginfo_pool;	/* memory pool for siginfo structures */
struct pool ksiginfo_pool;	/* memory pool for ksiginfo structures */
96
/*
 * Can process p, with pcred pc, send the signal signum to process q?
 *
 * Permission is granted when any of the following holds:
 *  - the sender has superuser credentials (effective uid 0);
 *  - any of the four real/effective uid combinations between sender
 *    and target match; or
 *  - the signal is SIGCONT and both processes are in the same session.
 */
#define CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))
107
108 /*
109 * Remove and return the first ksiginfo element that matches our requested
110 * signal, or return NULL if one not found.
111 */
112 static ksiginfo_t *
113 ksiginfo_get(struct proc *p, int signo)
114 {
115 ksiginfo_t *ksi;
116
117 simple_lock(&p->p_sigctx.ps_silock);
118 CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
119 if (ksi->ksi_signo == signo) {
120 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
121 simple_unlock(&p->p_sigctx.ps_silock);
122 return ksi;
123 }
124 }
125 simple_unlock(&p->p_sigctx.ps_silock);
126 return NULL;
127 }
128
129 /*
130 * Append a new ksiginfo element to the list of pending ksiginfo's, if
131 * we need to (SA_SIGINFO was requested). We replace non RT signals if
132 * they already existed in the queue and we add new entries for RT signals,
133 * or for non RT signals with non-existing entries.
134 */
135 static void
136 ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
137 {
138 ksiginfo_t *kp;
139 struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
140
141 if ((sa->sa_flags & SA_SIGINFO) == 0)
142 return;
143
144 simple_lock(&p->p_sigctx.ps_silock);
145 #ifdef notyet /* XXX: QUEUING */
146 if (ksi->ksi_signo < SIGRTMIN)
147 #endif
148 {
149 CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
150 if (kp->ksi_signo == ksi->ksi_signo) {
151 CIRCLEQ_ENTRY(ksiginfo) sv;
152 (void)memcpy(&sv, &kp->ksi_list, sizeof(sv));
153 *kp = *ksi;
154 (void)memcpy(&kp->ksi_list, &sv, sizeof(sv));
155 simple_unlock(&p->p_sigctx.ps_silock);
156 return;
157 }
158 }
159 }
160 kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
161 if (kp == NULL) {
162 #ifdef DIAGNOSTIC
163 printf("Out of memory allocating siginfo for pid %d\n",
164 p->p_pid);
165 #endif
166 return;
167 }
168 *kp = *ksi;
169 CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
170 simple_unlock(&p->p_sigctx.ps_silock);
171 }
172
173 /*
174 * free all pending ksiginfo on exit
175 */
176 static void
177 ksiginfo_exithook(struct proc *p, void *v)
178 {
179
180 simple_lock(&p->p_sigctx.ps_silock);
181 while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
182 ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
183 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
184 pool_put(&ksiginfo_pool, ksi);
185 }
186 simple_unlock(&p->p_sigctx.ps_silock);
187 }
188
189 /*
190 * Initialize signal-related data structures.
191 */
void
signal_init(void)
{
	/*
	 * sigacts and siginfo pools use the non-interrupt-safe
	 * allocator; the ksiginfo pool takes the default allocator
	 * since ksiginfo_put() allocates from it with PR_NOWAIT.
	 */
	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
	pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
	    &pool_allocator_nointr);
	pool_init(&ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
	    NULL);
	/* Discard any queued ksiginfo on both exit and exec. */
	exithook_establish(ksiginfo_exithook, NULL);
	exechook_establish(ksiginfo_exithook, NULL);
}
204
205 /*
206 * Create an initial sigctx structure, using the same signal state
207 * as p. If 'share' is set, share the sigctx_proc part, otherwise just
208 * copy it from parent.
209 */
210 void
211 sigactsinit(struct proc *np, struct proc *pp, int share)
212 {
213 struct sigacts *ps;
214
215 if (share) {
216 np->p_sigacts = pp->p_sigacts;
217 pp->p_sigacts->sa_refcnt++;
218 } else {
219 ps = pool_get(&sigacts_pool, PR_WAITOK);
220 if (pp)
221 memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
222 else
223 memset(ps, '\0', sizeof(struct sigacts));
224 ps->sa_refcnt = 1;
225 np->p_sigacts = ps;
226 }
227 }
228
229 /*
230 * Make this process not share its sigctx, maintaining all
231 * signal state.
232 */
233 void
234 sigactsunshare(struct proc *p)
235 {
236 struct sigacts *oldps;
237
238 if (p->p_sigacts->sa_refcnt == 1)
239 return;
240
241 oldps = p->p_sigacts;
242 sigactsinit(p, NULL, 0);
243
244 if (--oldps->sa_refcnt == 0)
245 pool_put(&sigacts_pool, oldps);
246 }
247
248 /*
249 * Release a sigctx structure.
250 */
251 void
252 sigactsfree(struct proc *p)
253 {
254 struct sigacts *ps;
255
256 ps = p->p_sigacts;
257 if (--ps->sa_refcnt > 0)
258 return;
259
260 pool_put(&sigacts_pool, ps);
261 }
262
int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	/* Valid signal numbers are 1 .. NSIG-1. */
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline. Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline. Only
	 * validate the vers if a new sigaction was supplied. Emulations
	 * use legacy kernel trampolines with version 0, alternatively
	 * check for that too.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
	    (p->p_emul->e_sigcode == NULL) :
	    !SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	/* Hand back the previous disposition, if the caller asked. */
	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		/* Reject unknown sa_flags bits. */
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

#ifndef __HAVE_SIGINFO
		/* This architecture cannot deliver siginfo. */
		if (nsa->sa_flags & SA_SIGINFO)
			return (EINVAL);
#endif

		prop = sigprop[signum];
		/* Dispositions of SA_CANTMASK signals cannot be changed. */
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		/* SA_CANTMASK signals can never be blocked by sa_mask. */
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			/* Mirror the SIGCHLD-specific flags into p_flag. */
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		/* Unless SA_NODEFER, a signal blocks itself while handled. */
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
365
#ifdef COMPAT_16
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigaction14_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction nsa, osa;
	int error;

	/* Copy in the new action, if one was supplied. */
	if (SCARG(uap, nsa) != NULL &&
	    (error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa))) != 0)
		return (error);
	/* Legacy entry point: always trampoline version 0, no trampoline. */
	error = sigaction1(l->l_proc, SCARG(uap, signum),
	    SCARG(uap, nsa) != NULL ? &nsa : NULL,
	    SCARG(uap, osa) != NULL ? &osa : NULL, NULL, 0);
	/* Copy the previous action back out, if requested. */
	if (error == 0 && SCARG(uap, osa) != NULL)
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
	return (error);
}
#endif
399
400 /* ARGSUSED */
401 int
402 sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
403 {
404 struct sys___sigaction_sigtramp_args /* {
405 syscallarg(int) signum;
406 syscallarg(const struct sigaction *) nsa;
407 syscallarg(struct sigaction *) osa;
408 syscallarg(void *) tramp;
409 syscallarg(int) vers;
410 } */ *uap = v;
411 struct proc *p = l->l_proc;
412 struct sigaction nsa, osa;
413 int error;
414
415 if (SCARG(uap, nsa)) {
416 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
417 if (error)
418 return (error);
419 }
420 error = sigaction1(p, SCARG(uap, signum),
421 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
422 SCARG(uap, tramp), SCARG(uap, vers));
423 if (error)
424 return (error);
425 if (SCARG(uap, osa)) {
426 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
427 if (error)
428 return (error);
429 }
430 return (0);
431 }
432
433 /*
434 * Initialize signal state for process 0;
435 * set to ignore signals that are ignored by default and disable the signal
436 * stack.
437 */
438 void
439 siginit(struct proc *p)
440 {
441 struct sigacts *ps;
442 int signum, prop;
443
444 ps = p->p_sigacts;
445 sigemptyset(&contsigmask);
446 sigemptyset(&stopsigmask);
447 sigemptyset(&sigcantmask);
448 for (signum = 1; signum < NSIG; signum++) {
449 prop = sigprop[signum];
450 if (prop & SA_CONT)
451 sigaddset(&contsigmask, signum);
452 if (prop & SA_STOP)
453 sigaddset(&stopsigmask, signum);
454 if (prop & SA_CANTMASK)
455 sigaddset(&sigcantmask, signum);
456 if (prop & SA_IGNORE && signum != SIGCONT)
457 sigaddset(&p->p_sigctx.ps_sigignore, signum);
458 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
459 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
460 }
461 sigemptyset(&p->p_sigctx.ps_sigcatch);
462 p->p_sigctx.ps_sigwaited = 0;
463 p->p_flag &= ~P_NOCLDSTOP;
464
465 /*
466 * Reset stack state to the user stack.
467 */
468 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
469 p->p_sigctx.ps_sigstk.ss_size = 0;
470 p->p_sigctx.ps_sigstk.ss_sp = 0;
471
472 /* One reference. */
473 ps->sa_refcnt = 1;
474 }
475
476 /*
477 * Reset signals for an exec of the specified process.
478 */
479 void
480 execsigs(struct proc *p)
481 {
482 struct sigacts *ps;
483 int signum, prop;
484
485 sigactsunshare(p);
486
487 ps = p->p_sigacts;
488
489 /*
490 * Reset caught signals. Held signals remain held
491 * through p_sigctx.ps_sigmask (unless they were caught,
492 * and are now ignored by default).
493 */
494 for (signum = 1; signum < NSIG; signum++) {
495 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
496 prop = sigprop[signum];
497 if (prop & SA_IGNORE) {
498 if ((prop & SA_CONT) == 0)
499 sigaddset(&p->p_sigctx.ps_sigignore,
500 signum);
501 sigdelset(&p->p_sigctx.ps_siglist, signum);
502 }
503 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
504 }
505 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
506 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
507 }
508 sigemptyset(&p->p_sigctx.ps_sigcatch);
509 p->p_sigctx.ps_sigwaited = 0;
510 p->p_flag &= ~P_NOCLDSTOP;
511
512 /*
513 * Reset stack state to the user stack.
514 */
515 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
516 p->p_sigctx.ps_sigstk.ss_size = 0;
517 p->p_sigctx.ps_sigstk.ss_sp = 0;
518 }
519
520 int
521 sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
522 {
523
524 if (oss)
525 *oss = p->p_sigctx.ps_sigmask;
526
527 if (nss) {
528 (void)splsched(); /* XXXSMP */
529 switch (how) {
530 case SIG_BLOCK:
531 sigplusset(nss, &p->p_sigctx.ps_sigmask);
532 break;
533 case SIG_UNBLOCK:
534 sigminusset(nss, &p->p_sigctx.ps_sigmask);
535 CHECKSIGS(p);
536 break;
537 case SIG_SETMASK:
538 p->p_sigctx.ps_sigmask = *nss;
539 CHECKSIGS(p);
540 break;
541 default:
542 (void)spl0(); /* XXXSMP */
543 return (EINVAL);
544 }
545 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
546 (void)spl0(); /* XXXSMP */
547 }
548
549 return (0);
550 }
551
552 /*
553 * Manipulate signal mask.
554 * Note that we receive new mask, not pointer,
555 * and return old mask as return value;
556 * the library stub does the rest.
557 */
558 int
559 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
560 {
561 struct sys___sigprocmask14_args /* {
562 syscallarg(int) how;
563 syscallarg(const sigset_t *) set;
564 syscallarg(sigset_t *) oset;
565 } */ *uap = v;
566 struct proc *p;
567 sigset_t nss, oss;
568 int error;
569
570 if (SCARG(uap, set)) {
571 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
572 if (error)
573 return (error);
574 }
575 p = l->l_proc;
576 error = sigprocmask1(p, SCARG(uap, how),
577 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
578 if (error)
579 return (error);
580 if (SCARG(uap, oset)) {
581 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
582 if (error)
583 return (error);
584 }
585 return (0);
586 }
587
/*
 * Compute the set of deliverable pending signals:
 * pending-but-blocked signals are excluded (siglist & ~sigmask).
 */
void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}
595
596 /* ARGSUSED */
597 int
598 sys___sigpending14(struct lwp *l, void *v, register_t *retval)
599 {
600 struct sys___sigpending14_args /* {
601 syscallarg(sigset_t *) set;
602 } */ *uap = v;
603 struct proc *p;
604 sigset_t ss;
605
606 p = l->l_proc;
607 sigpending1(p, &ss);
608 return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
609 }
610
int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished. Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		/* SIGKILL/SIGSTOP can never be blocked. */
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	/* Sleep until a caught signal interrupts us (PCATCH). */
	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
640
641 /*
642 * Suspend process until signal, providing mask to be set
643 * in the meantime. Note nonstandard calling convention:
644 * libc stub passes mask, not pointer, to save a copyin.
645 */
646 /* ARGSUSED */
647 int
648 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
649 {
650 struct sys___sigsuspend14_args /* {
651 syscallarg(const sigset_t *) set;
652 } */ *uap = v;
653 struct proc *p;
654 sigset_t ss;
655 int error;
656
657 if (SCARG(uap, set)) {
658 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
659 if (error)
660 return (error);
661 }
662
663 p = l->l_proc;
664 return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
665 }
666
667 int
668 sigaltstack1(struct proc *p, const struct sigaltstack *nss,
669 struct sigaltstack *oss)
670 {
671
672 if (oss)
673 *oss = p->p_sigctx.ps_sigstk;
674
675 if (nss) {
676 if (nss->ss_flags & ~SS_ALLBITS)
677 return (EINVAL);
678
679 if (nss->ss_flags & SS_DISABLE) {
680 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
681 return (EINVAL);
682 } else {
683 if (nss->ss_size < MINSIGSTKSZ)
684 return (ENOMEM);
685 }
686 p->p_sigctx.ps_sigstk = *nss;
687 }
688
689 return (0);
690 }
691
692 /* ARGSUSED */
693 int
694 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
695 {
696 struct sys___sigaltstack14_args /* {
697 syscallarg(const struct sigaltstack *) nss;
698 syscallarg(struct sigaltstack *) oss;
699 } */ *uap = v;
700 struct proc *p;
701 struct sigaltstack nss, oss;
702 int error;
703
704 if (SCARG(uap, nss)) {
705 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
706 if (error)
707 return (error);
708 }
709 p = l->l_proc;
710 error = sigaltstack1(p,
711 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
712 if (error)
713 return (error);
714 if (SCARG(uap, oss)) {
715 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
716 if (error)
717 return (error);
718 }
719 return (0);
720 }
721
722 /* ARGSUSED */
723 int
724 sys_kill(struct lwp *l, void *v, register_t *retval)
725 {
726 struct sys_kill_args /* {
727 syscallarg(int) pid;
728 syscallarg(int) signum;
729 } */ *uap = v;
730 struct proc *cp, *p;
731 struct pcred *pc;
732 ksiginfo_t ksi;
733
734 cp = l->l_proc;
735 pc = cp->p_cred;
736 if ((u_int)SCARG(uap, signum) >= NSIG)
737 return (EINVAL);
738 memset(&ksi, 0, sizeof(ksi));
739 ksi.ksi_signo = SCARG(uap, signum);
740 ksi.ksi_code = SI_USER;
741 ksi.ksi_pid = cp->p_pid;
742 ksi.ksi_uid = cp->p_ucred->cr_uid;
743 if (SCARG(uap, pid) > 0) {
744 /* kill single process */
745 if ((p = pfind(SCARG(uap, pid))) == NULL)
746 return (ESRCH);
747 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
748 return (EPERM);
749 if (SCARG(uap, signum))
750 kpsignal2(p, &ksi, 1);
751 return (0);
752 }
753 switch (SCARG(uap, pid)) {
754 case -1: /* broadcast signal */
755 return (killpg1(cp, &ksi, 0, 1));
756 case 0: /* signal own process group */
757 return (killpg1(cp, &ksi, 0, 0));
758 default: /* negative explicit process group */
759 return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
760 }
761 /* NOTREACHED */
762 }
763
764 /*
765 * Common code for kill process group/broadcast kill.
766 * cp is calling process.
767 */
768 int
769 killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
770 {
771 struct proc *p;
772 struct pcred *pc;
773 struct pgrp *pgrp;
774 int nfound;
775 int signum = ksi->ksi_signo;
776
777 pc = cp->p_cred;
778 nfound = 0;
779 if (all) {
780 /*
781 * broadcast
782 */
783 proclist_lock_read();
784 LIST_FOREACH(p, &allproc, p_list) {
785 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
786 p == cp || !CANSIGNAL(cp, pc, p, signum))
787 continue;
788 nfound++;
789 if (signum)
790 kpsignal2(p, ksi, 1);
791 }
792 proclist_unlock_read();
793 } else {
794 if (pgid == 0)
795 /*
796 * zero pgid means send to my process group.
797 */
798 pgrp = cp->p_pgrp;
799 else {
800 pgrp = pgfind(pgid);
801 if (pgrp == NULL)
802 return (ESRCH);
803 }
804 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
805 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
806 !CANSIGNAL(cp, pc, p, signum))
807 continue;
808 nfound++;
809 if (signum && P_ZOMBIE(p) == 0)
810 kpsignal2(p, ksi, 1);
811 }
812 }
813 return (nfound ? 0 : ESRCH);
814 }
815
816 /*
817 * Send a signal to a process group.
818 */
819 void
820 gsignal(int pgid, int signum)
821 {
822 ksiginfo_t ksi;
823 memset(&ksi, 0, sizeof(ksi));
824 ksi.ksi_signo = signum;
825 kgsignal(pgid, &ksi, NULL);
826 }
827
828 void
829 kgsignal(int pgid, ksiginfo_t *ksi, void *data)
830 {
831 struct pgrp *pgrp;
832
833 if (pgid && (pgrp = pgfind(pgid)))
834 kpgsignal(pgrp, ksi, data, 0);
835 }
836
837 /*
838 * Send a signal to a process group. If checktty is 1,
839 * limit to members which have a controlling terminal.
840 */
841 void
842 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
843 {
844 ksiginfo_t ksi;
845 memset(&ksi, 0, sizeof(ksi));
846 ksi.ksi_signo = sig;
847 kpgsignal(pgrp, &ksi, NULL, checkctty);
848 }
849
850 void
851 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
852 {
853 struct proc *p;
854
855 if (pgrp)
856 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
857 if (checkctty == 0 || p->p_flag & P_CONTROLT)
858 kpsignal(p, ksi, data);
859 }
860
861 /*
862 * Send a signal caused by a trap to the current process.
863 * If it will be caught immediately, deliver it with correct code.
864 * Otherwise, post it normally.
865 */
#ifndef __HAVE_SIGINFO
void _trapsignal(struct lwp *, const ksiginfo_t *);
void
trapsignal(struct lwp *l, int signum, u_long code)
{
#define trapsignal _trapsignal
	/*
	 * On non-siginfo architectures this wrapper keeps the old
	 * (signum, code) interface.  The #define above renames the
	 * ksiginfo_t-taking definition that follows this function to
	 * _trapsignal(), so the call below reaches it.
	 */
	ksiginfo_t ksi;
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	ksi.ksi_trap = (int)code;
	trapsignal(l, &ksi);
}
#endif
879
void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	p = l->l_proc;
	ps = p->p_sigacts;
	/*
	 * Deliver directly only when the process is not being traced,
	 * has a handler installed for the signal, and is not currently
	 * blocking it; otherwise fall through to normal posting.
	 */
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		/* Push the signal frame for immediate handler dispatch. */
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		/* Block sa_mask while the handler runs. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* One-shot handler: revert to SIG_DFL after use. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi, 1);
	}
}
917
918 /*
919 * Fill in signal information and signal the parent for a child status change.
920 */
921 static void
922 child_psignal(struct proc *p, int dolock)
923 {
924 ksiginfo_t ksi;
925
926 (void)memset(&ksi, 0, sizeof(ksi));
927 ksi.ksi_signo = SIGCHLD;
928 ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
929 ksi.ksi_pid = p->p_pid;
930 ksi.ksi_uid = p->p_ucred->cr_uid;
931 ksi.ksi_status = p->p_xstat;
932 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
933 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
934 kpsignal2(p->p_pptr, &ksi, dolock);
935 }
936
937 /*
938 * Send the signal to the process. If the signal has an action, the action
939 * is usually performed by the target process rather than the caller; we add
940 * the signal to the set of pending signals for the process.
941 *
942 * Exceptions:
943 * o When a stop signal is sent to a sleeping process that takes the
944 * default action, the process is stopped without awakening it.
945 * o SIGCONT restarts stopped processes (or puts them back to sleep)
946 * regardless of the signal action (eg, blocked or ignored).
947 *
948 * Other ignored signals are discarded immediately.
949 *
950 * XXXSMP: Invoked as psignal() or sched_psignal().
951 */
952 void
953 psignal1(struct proc *p, int signum, int dolock)
954 {
955 ksiginfo_t ksi;
956 memset(&ksi, 0, sizeof(ksi));
957 ksi.ksi_signo = signum;
958 kpsignal2(p, &ksi, dolock);
959 }
960
961 void
962 kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
963 {
964 if (data) {
965 size_t fd;
966 struct filedesc *fdp = p->p_fd;
967 ksi->ksi_fd = -1;
968 for (fd = 0; fd < fdp->fd_nfiles; fd++) {
969 struct file *fp = fdp->fd_ofiles[fd];
970 /* XXX: lock? */
971 if (fp && fp->f_data == data) {
972 ksi->ksi_fd = fd;
973 break;
974 }
975 }
976 }
977 kpsignal2(p, ksi, dolock);
978 }
979
980 static void
981 kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
982 {
983 struct lwp *l, *suspended;
984 int s = 0, prop, allsusp;
985 sig_t action;
986 int signum = ksi->ksi_signo;
987
988 #ifdef DIAGNOSTIC
989 if (signum <= 0 || signum >= NSIG)
990 panic("psignal signal number %d", signum);
991
992 /* XXXSMP: works, but icky */
993 if (dolock)
994 SCHED_ASSERT_UNLOCKED();
995 else
996 SCHED_ASSERT_LOCKED();
997 #endif
998
999
1000 /*
1001 * Notify any interested parties in the signal.
1002 */
1003 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1004
1005 prop = sigprop[signum];
1006
1007 /*
1008 * If proc is traced, always give parent a chance.
1009 */
1010 if (p->p_flag & P_TRACED)
1011 action = SIG_DFL;
1012 else {
1013 /*
1014 * If the signal is being ignored,
1015 * then we forget about it immediately.
1016 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
1017 * and if it is set to SIG_IGN,
1018 * action will be SIG_DFL here.)
1019 */
1020 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
1021 return;
1022 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1023 action = SIG_HOLD;
1024 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
1025 action = SIG_CATCH;
1026 else {
1027 action = SIG_DFL;
1028
1029 if (prop & SA_KILL && p->p_nice > NZERO)
1030 p->p_nice = NZERO;
1031
1032 /*
1033 * If sending a tty stop signal to a member of an
1034 * orphaned process group, discard the signal here if
1035 * the action is default; don't stop the process below
1036 * if sleeping, and don't clear any pending SIGCONT.
1037 */
1038 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
1039 return;
1040 }
1041 }
1042
1043 if (prop & SA_CONT)
1044 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
1045
1046 if (prop & SA_STOP)
1047 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
1048
1049 sigaddset(&p->p_sigctx.ps_siglist, signum);
1050
1051 /* CHECKSIGS() is "inlined" here. */
1052 p->p_sigctx.ps_sigcheck = 1;
1053
1054 /*
1055 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
1056 * please!), check if anything waits on it. If yes, clear the
1057 * pending signal from siglist set, save it to ps_sigwaited,
1058 * clear sigwait list, and wakeup any sigwaiters.
1059 * The signal won't be processed further here.
1060 */
1061 if ((prop & SA_CANTMASK) == 0
1062 && p->p_sigctx.ps_sigwaited < 0
1063 && sigismember(&p->p_sigctx.ps_sigwait, signum)
1064 && p->p_stat != SSTOP) {
1065 if (action == SIG_CATCH)
1066 ksiginfo_put(p, ksi);
1067 sigdelset(&p->p_sigctx.ps_siglist, signum);
1068 p->p_sigctx.ps_sigwaited = signum;
1069 sigemptyset(&p->p_sigctx.ps_sigwait);
1070 if (dolock)
1071 wakeup_one(&p->p_sigctx.ps_sigwait);
1072 else
1073 sched_wakeup(&p->p_sigctx.ps_sigwait);
1074 return;
1075 }
1076
1077 /*
1078 * Defer further processing for signals which are held,
1079 * except that stopped processes must be continued by SIGCONT.
1080 */
1081 if (action == SIG_HOLD &&
1082 ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1083 ksiginfo_put(p, ksi);
1084 return;
1085 }
1086 /* XXXSMP: works, but icky */
1087 if (dolock)
1088 SCHED_LOCK(s);
1089
1090 /* XXXUPSXXX LWPs might go to sleep without passing signal handling */
1091 if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)
1092 && !((p->p_flag & P_SA) && (p->p_sa->sa_idle != NULL))) {
1093 /*
1094 * At least one LWP is running or on a run queue.
1095 * The signal will be noticed when one of them returns
1096 * to userspace.
1097 */
1098 signotify(p);
1099 /*
1100 * The signal will be noticed very soon.
1101 */
1102 goto out;
1103 } else {
1104 /* Process is sleeping or stopped */
1105 if (p->p_flag & P_SA) {
1106 struct lwp *l2 = p->p_sa->sa_vp;
1107 l = NULL;
1108 allsusp = 1;
1109
1110 if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
1111 l = l2;
1112 else if (l2->l_stat == LSSUSPENDED)
1113 suspended = l2;
1114 else if ((l2->l_stat != LSZOMB) &&
1115 (l2->l_stat != LSDEAD))
1116 allsusp = 0;
1117 } else {
1118 /*
1119 * Find out if any of the sleeps are interruptable,
1120 * and if all the live LWPs remaining are suspended.
1121 */
1122 allsusp = 1;
1123 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1124 if (l->l_stat == LSSLEEP &&
1125 l->l_flag & L_SINTR)
1126 break;
1127 if (l->l_stat == LSSUSPENDED)
1128 suspended = l;
1129 else if ((l->l_stat != LSZOMB) &&
1130 (l->l_stat != LSDEAD))
1131 allsusp = 0;
1132 }
1133 }
1134 if (p->p_stat == SACTIVE) {
1135
1136
1137 if (l != NULL && (p->p_flag & P_TRACED))
1138 goto run;
1139
1140 /*
1141 * If SIGCONT is default (or ignored) and process is
1142 * asleep, we are finished; the process should not
1143 * be awakened.
1144 */
1145 if ((prop & SA_CONT) && action == SIG_DFL) {
1146 sigdelset(&p->p_sigctx.ps_siglist, signum);
1147 goto done;
1148 }
1149
1150 /*
1151 * When a sleeping process receives a stop
1152 * signal, process immediately if possible.
1153 */
1154 if ((prop & SA_STOP) && action == SIG_DFL) {
1155 /*
1156 * If a child holding parent blocked,
1157 * stopping could cause deadlock.
1158 */
1159 if (p->p_flag & P_PPWAIT) {
1160 goto out;
1161 }
1162 sigdelset(&p->p_sigctx.ps_siglist, signum);
1163 p->p_xstat = signum;
1164 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1165 /*
1166 * XXXSMP: recursive call; don't lock
1167 * the second time around.
1168 */
1169 child_psignal(p, 0);
1170 }
1171 proc_stop(p); /* XXXSMP: recurse? */
1172 goto done;
1173 }
1174
1175 if (l == NULL) {
1176 /*
1177 * Special case: SIGKILL of a process
1178 * which is entirely composed of
1179 * suspended LWPs should succeed. We
1180 * make this happen by unsuspending one of
1181 * them.
1182 */
1183 if (allsusp && (signum == SIGKILL))
1184 lwp_continue(suspended);
1185 goto done;
1186 }
1187 /*
1188 * All other (caught or default) signals
1189 * cause the process to run.
1190 */
1191 goto runfast;
1192 /*NOTREACHED*/
1193 } else if (p->p_stat == SSTOP) {
1194 /* Process is stopped */
1195 /*
1196 * If traced process is already stopped,
1197 * then no further action is necessary.
1198 */
1199 if (p->p_flag & P_TRACED)
1200 goto done;
1201
1202 /*
1203 * Kill signal always sets processes running,
1204 * if possible.
1205 */
1206 if (signum == SIGKILL) {
1207 l = proc_unstop(p);
1208 if (l)
1209 goto runfast;
1210 goto done;
1211 }
1212
1213 if (prop & SA_CONT) {
1214 /*
1215 * If SIGCONT is default (or ignored),
1216 * we continue the process but don't
1217 * leave the signal in ps_siglist, as
1218 * it has no further action. If
1219 * SIGCONT is held, we continue the
1220 * process and leave the signal in
1221 * ps_siglist. If the process catches
1222 * SIGCONT, let it handle the signal
1223 * itself. If it isn't waiting on an
1224 * event, then it goes back to run
1225 * state. Otherwise, process goes
1226 * back to sleep state.
1227 */
1228 if (action == SIG_DFL)
1229 sigdelset(&p->p_sigctx.ps_siglist,
1230 signum);
1231 l = proc_unstop(p);
1232 if (l && (action == SIG_CATCH))
1233 goto runfast;
1234 goto out;
1235 }
1236
1237 if (prop & SA_STOP) {
1238 /*
1239 * Already stopped, don't need to stop again.
1240 * (If we did the shell could get confused.)
1241 */
1242 sigdelset(&p->p_sigctx.ps_siglist, signum);
1243 goto done;
1244 }
1245
1246 /*
1247 * If a lwp is sleeping interruptibly, then
1248 * wake it up; it will run until the kernel
1249 * boundary, where it will stop in issignal(),
1250 * since p->p_stat is still SSTOP. When the
1251 * process is continued, it will be made
1252 * runnable and can look at the signal.
1253 */
1254 if (l)
1255 goto run;
1256 goto out;
1257 } else {
1258 /* Else what? */
1259 panic("psignal: Invalid process state %d.",
1260 p->p_stat);
1261 }
1262 }
1263 /*NOTREACHED*/
1264
1265 runfast:
1266 if (action == SIG_CATCH) {
1267 ksiginfo_put(p, ksi);
1268 action = SIG_HOLD;
1269 }
1270 /*
1271 * Raise priority to at least PUSER.
1272 */
1273 if (l->l_priority > PUSER)
1274 l->l_priority = PUSER;
1275 run:
1276 if (action == SIG_CATCH) {
1277 ksiginfo_put(p, ksi);
1278 action = SIG_HOLD;
1279 }
1280
1281 setrunnable(l); /* XXXSMP: recurse? */
1282 out:
1283 if (action == SIG_CATCH)
1284 ksiginfo_put(p, ksi);
1285 done:
1286 /* XXXSMP: works, but icky */
1287 if (dolock)
1288 SCHED_UNLOCK(s);
1289 }
1290
/*
 * Deliver a signal (with accompanying siginfo) to an LWP.
 *
 * For scheduler-activations (P_SA) processes the signal is handed to
 * userland via an SA upcall; otherwise it goes through the emulation's
 * sendsig hook.  The siginfo for the upcall is heap-allocated because
 * the upcall is delivered asynchronously.
 */
void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		/*
		 * Save and clear L_SA around the upcall setup, then
		 * restore it below; presumably this keeps sa_upcall
		 * machinery from treating us as an SA LWP re-entrantly
		 * — TODO confirm.
		 */
		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = *ksi;
		le = li = NULL;
		/*
		 * Trap-generated signals report this LWP as the "event"
		 * LWP, others as the "interrupted" LWP (sa_upcall's
		 * third/fourth arguments).
		 */
		if (ksi->ksi_trap)
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= f;		/* restore saved L_SA bit */
		return;
	}

#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, ksi->ksi_trap);
#endif
}
1325
1326 static __inline int firstsig(const sigset_t *);
1327
1328 static __inline int
1329 firstsig(const sigset_t *ss)
1330 {
1331 int sig;
1332
1333 sig = ffs(ss->__bits[0]);
1334 if (sig != 0)
1335 return (sig);
1336 #if NSIG > 33
1337 sig = ffs(ss->__bits[1]);
1338 if (sig != 0)
1339 return (sig + 32);
1340 #endif
1341 #if NSIG > 65
1342 sig = ffs(ss->__bits[2]);
1343 if (sig != 0)
1344 return (sig + 64);
1345 #endif
1346 #if NSIG > 97
1347 sig = ffs(ss->__bits[3]);
1348 if (sig != 0)
1349 return (sig + 96);
1350 #endif
1351 return (0);
1352 }
1353
1354 /*
1355 * If the current process has received a signal (should be caught or cause
1356 * termination, should interrupt current syscall), return the signal number.
1357 * Stop signals with default action are processed immediately, then cleared;
1358 * they aren't returned. This is checked after each entry to the system for
1359 * a syscall or trap (though this can usually be done without calling issignal
1360 * by checking the pending signal masks in the CURSIG macro.) The normal call
1361 * sequence is
1362 *
1363 * while (signum = CURSIG(curlwp))
1364 * postsig(signum);
1365 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	/*
	 * If L_SINTR is set the LWP presumably already holds the
	 * scheduler lock (dolock == 0) — TODO confirm against callers.
	 * "locked" remembers whether the lock was held on entry so we
	 * can reacquire it before returning (dolock may be flipped to 1
	 * after mi_switch below).
	 */
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		/* Jump into the switch code inside the main loop below. */
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		/* Defer stop signals while the parent is in vfork wait. */
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {	 	/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			/* Reacquire the lock if the caller held it. */
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			/* Entered via goto from the SSTOP check above too. */
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
			sigswitch:
				/* Also entered via goto from the SSTOP path. */
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}
1551
1552 /*
1553 * Put the argument process into the stopped state and notify the parent
1554 * via wakeup. Signals are handled elsewhere. The process must not be
1555 * on the run queue.
1556 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	/*
	 * Clear P_WAITED so this stop is reported to the parent again;
	 * presumably wait(2) sets it once the stop has been collected —
	 * TODO confirm.
	 */
	p->p_flag &= ~P_WAITED;

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		else if ( (l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
			/*
			 * Kick interruptably-sleeping LWPs; they will see
			 * p_stat == SSTOP on the way back to userspace and
			 * stop themselves (see issignal()).
			 */
			setrunnable(l);
		}

		/* !!!UPS!!! FIX ME */
#if 0
		else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			      "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
#endif
	}
	/* XXX unlock process LWP state */

	/* Notify the parent, which may be sleeping in wait(2). */
	sched_wakeup((caddr_t)p->p_pptr);
}
1629
1630 /*
1631 * Given a process in state SSTOP, set the state back to SACTIVE and
1632 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
1633 *
1634 * If no LWPs ended up runnable (and therefore able to take a signal),
1635 * return a LWP that is sleeping interruptably. The caller can wake
1636 * that LWP up to take a signal.
1637 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	int cantake = 0;	/* set once some LWP can take the signal */

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptable LWPs left after we have run so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptable LWPs if this is the case.
	 */

	p->p_stat = SACTIVE;
	if (p->p_flag & P_SA) {
		/*
		 * Preferentially select the idle LWP as the interruptable
		 * LWP to return if it exists.
		 */
		lr = p->p_sa->sa_idle;
		if (lr != NULL)
			cantake = 1;
	}
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		/* A runnable LWP can take the signal; no wakeup needed. */
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			/* Was sleeping when stopped; put it back to sleep. */
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			/* Not waiting on anything; make it runnable. */
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}

	return lr;
}
1686
1687 /*
1688 * Take the action for the specified signal
1689 * from the current set of pending signals.
1690 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	/* Signal is being handled now; clear it from the pending set. */
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		ksi = ksiginfo_get(p, signum);
#ifdef KTRACE
		/*
		 * NOTE(review): SAS_OLDMASK may already have been cleared
		 * just above, so this trace records ps_sigmask even on the
		 * sigpause path — confirm whether that is intentional.
		 */
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			(void)memset(&ksi1, 0, sizeof(ksi1));
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			pool_put(&ksiginfo_pool, ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		(void) splsched();	/* XXXSMP */
		/* Block this signal (and sa_mask) for the handler's duration. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* SA_RESETHAND: revert to default disposition. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}
1790
1791 /*
1792 * Kill the current process for stated reason.
1793 */
1794 void
1795 killproc(struct proc *p, const char *why)
1796 {
1797 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
1798 uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
1799 psignal(p, SIGKILL);
1800 }
1801
1802 /*
1803 * Force the current process to exit with the specified signal, dumping core
1804 * if appropriate. We bypass the normal tests for masked and caught signals,
1805 * allowing unrecoverable failures to terminate the process without changing
1806 * signal state. Mark the accounting record with the signal termination.
1807 * If dumping core, save the signal number for the debugger. Calls exit and
1808 * does not return.
1809 */
1810
/*
 * Whether to log a message when a process exits on a core-dump signal;
 * on by default only for DEBUG kernels.  Exported (non-static) so it
 * can be exposed through sysctl.
 */
#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

/* log(9) formats used by sigexit() when kern_logsigexit is set. */
static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
1821
1822 /* Wrapper function for use in p_userret */
1823 static void
1824 lwp_coredump_hook(struct lwp *l, void *arg)
1825 {
1826 int s;
1827
1828 /*
1829 * Suspend ourselves, so that the kernel stack and therefore
1830 * the userland registers saved in the trapframe are around
1831 * for coredump() to write them out.
1832 */
1833 KERNEL_PROC_LOCK(l);
1834 l->l_flag &= ~L_DETACHED;
1835 SCHED_LOCK(s);
1836 l->l_stat = LSSUSPENDED;
1837 l->l_proc->p_nrlwps--;
1838 /* XXX NJWLWP check if this makes sense here: */
1839 l->l_proc->p_stats->p_ru.ru_nvcsw++;
1840 mi_switch(l, NULL);
1841 SCHED_ASSERT_UNLOCKED();
1842 splx(s);
1843
1844 lwp_exit(l);
1845 }
1846
void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int	error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		/*
		 * NOTE(review): this relies on p_userret (set to
		 * lwp_coredump_hook below) never returning; there is no
		 * explicit return here after the call — confirm.
		 */
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		/* WCOREFLAG tells wait(2) consumers a core was dumped. */
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
				(int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}
1906
1907 /*
1908 * Dump core, into a file named "progname.core" or "core" (depending on the
1909 * value of shortcorename), unless the process was setuid/setgid.
1910 */
int
coredump(struct lwp *l)
{
	struct vnode		*vp;
	struct proc		*p;
	struct vmspace		*vm;
	struct ucred		*cred;
	struct nameidata	nd;
	struct vattr		vattr;
	int			error, error1;
	char			name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	/* Expand the core name template (see build_corename()). */
	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	/* Truncate any existing core file before writing. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	/* Close regardless; preserve the first error seen. */
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}
1983
1984 /*
1985 * Nonexistent system call-- signal process (may want to handle it).
1986 * Flag error in case process won't see signal immediately (blocked or ignored).
1987 */
1988 /* ARGSUSED */
1989 int
1990 sys_nosys(struct lwp *l, void *v, register_t *retval)
1991 {
1992 struct proc *p;
1993
1994 p = l->l_proc;
1995 psignal(p, SIGSYS);
1996 return (ENOSYS);
1997 }
1998
1999 static int
2000 build_corename(struct proc *p, char dst[MAXPATHLEN])
2001 {
2002 const char *s;
2003 char *d, *end;
2004 int i;
2005
2006 for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
2007 *s != '\0'; s++) {
2008 if (*s == '%') {
2009 switch (*(s + 1)) {
2010 case 'n':
2011 i = snprintf(d, end - d, "%s", p->p_comm);
2012 break;
2013 case 'p':
2014 i = snprintf(d, end - d, "%d", p->p_pid);
2015 break;
2016 case 'u':
2017 i = snprintf(d, end - d, "%.*s",
2018 (int)sizeof p->p_pgrp->pg_session->s_login,
2019 p->p_pgrp->pg_session->s_login);
2020 break;
2021 case 't':
2022 i = snprintf(d, end - d, "%ld",
2023 p->p_stats->p_start.tv_sec);
2024 break;
2025 default:
2026 goto copy;
2027 }
2028 d += i;
2029 s++;
2030 } else {
2031 copy: *d = *s;
2032 d++;
2033 }
2034 if (d >= end)
2035 return (ENAMETOOLONG);
2036 }
2037 *d = '\0';
2038 return 0;
2039 }
2040
2041 void
2042 getucontext(struct lwp *l, ucontext_t *ucp)
2043 {
2044 struct proc *p;
2045
2046 p = l->l_proc;
2047
2048 ucp->uc_flags = 0;
2049 ucp->uc_link = l->l_ctxlink;
2050
2051 (void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
2052 ucp->uc_flags |= _UC_SIGMASK;
2053
2054 /*
2055 * The (unsupplied) definition of the `current execution stack'
2056 * in the System V Interface Definition appears to allow returning
2057 * the main context stack.
2058 */
2059 if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
2060 ucp->uc_stack.ss_sp = (void *)USRSTACK;
2061 ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
2062 ucp->uc_stack.ss_flags = 0; /* XXX, def. is Very Fishy */
2063 } else {
2064 /* Simply copy alternate signal execution stack. */
2065 ucp->uc_stack = p->p_sigctx.ps_sigstk;
2066 }
2067 ucp->uc_flags |= _UC_STACK;
2068
2069 cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
2070 }
2071
2072 /* ARGSUSED */
2073 int
2074 sys_getcontext(struct lwp *l, void *v, register_t *retval)
2075 {
2076 struct sys_getcontext_args /* {
2077 syscallarg(struct __ucontext *) ucp;
2078 } */ *uap = v;
2079 ucontext_t uc;
2080
2081 getucontext(l, &uc);
2082
2083 return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
2084 }
2085
2086 int
2087 setucontext(struct lwp *l, const ucontext_t *ucp)
2088 {
2089 struct proc *p;
2090 int error;
2091
2092 p = l->l_proc;
2093 if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
2094 return (error);
2095 l->l_ctxlink = ucp->uc_link;
2096 /*
2097 * We might want to take care of the stack portion here but currently
2098 * don't; see the comment in getucontext().
2099 */
2100 if ((ucp->uc_flags & _UC_SIGMASK) != 0)
2101 sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);
2102
2103 return 0;
2104 }
2105
2106 /* ARGSUSED */
2107 int
2108 sys_setcontext(struct lwp *l, void *v, register_t *retval)
2109 {
2110 struct sys_setcontext_args /* {
2111 syscallarg(const ucontext_t *) ucp;
2112 } */ *uap = v;
2113 ucontext_t uc;
2114 int error;
2115
2116 if (SCARG(uap, ucp) == NULL) /* i.e. end of uc_link chain */
2117 exit1(l, W_EXITCODE(0, 0));
2118 else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
2119 (error = setucontext(l, &uc)) != 0)
2120 return (error);
2121
2122 return (EJUSTRETURN);
2123 }
2124
2125 /*
2126 * sigtimedwait(2) system call, used also for implementation
2127 * of sigwaitinfo() and sigwait().
2128 *
2129 * This only handles single LWP in signal wait. libpthread provides
2130 * it's own sigtimedwait() wrapper to DTRT WRT individual threads.
2131 *
2132 * XXX no support for queued signals, si_code is always SI_USER.
2133 */
2134 int
2135 sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
2136 {
2137 struct sys___sigtimedwait_args /* {
2138 syscallarg(const sigset_t *) set;
2139 syscallarg(siginfo_t *) info;
2140 syscallarg(struct timespec *) timeout;
2141 } */ *uap = v;
2142 sigset_t waitset, twaitset;
2143 struct proc *p = l->l_proc;
2144 int error, signum, s;
2145 int timo = 0;
2146 struct timeval tvstart;
2147 struct timespec ts;
2148
2149 if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
2150 return (error);
2151
2152 /*
2153 * Silently ignore SA_CANTMASK signals. psignal1() would
2154 * ignore SA_CANTMASK signals in waitset, we do this
2155 * only for the below siglist check.
2156 */
2157 sigminusset(&sigcantmask, &waitset);
2158
2159 /*
2160 * First scan siglist and check if there is signal from
2161 * our waitset already pending.
2162 */
2163 twaitset = waitset;
2164 __sigandset(&p->p_sigctx.ps_siglist, &twaitset);
2165 if ((signum = firstsig(&twaitset))) {
2166 /* found pending signal */
2167 sigdelset(&p->p_sigctx.ps_siglist, signum);
2168 goto sig;
2169 }
2170
2171 /*
2172 * Calculate timeout, if it was specified.
2173 */
2174 if (SCARG(uap, timeout)) {
2175 uint64_t ms;
2176
2177 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
2178 return (error);
2179
2180 ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
2181 timo = mstohz(ms);
2182 if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
2183 timo = 1;
2184 if (timo <= 0)
2185 return (EAGAIN);
2186
2187 /*
2188 * Remember current mono_time, it would be used in
2189 * ECANCELED/ERESTART case.
2190 */
2191 s = splclock();
2192 tvstart = mono_time;
2193 splx(s);
2194 }
2195
2196 /*
2197 * Setup ps_sigwait list.
2198 */
2199 p->p_sigctx.ps_sigwaited = -1;
2200 p->p_sigctx.ps_sigwait = waitset;
2201
2202 /*
2203 * Wait for signal to arrive. We can either be woken up or
2204 * time out.
2205 */
2206 error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);
2207
2208 /*
2209 * Check if a signal from our wait set has arrived, or if it
2210 * was mere wakeup.
2211 */
2212 if (!error) {
2213 if ((signum = p->p_sigctx.ps_sigwaited) <= 0) {
2214 /* wakeup via _lwp_wakeup() */
2215 error = ECANCELED;
2216 }
2217 }
2218
2219 /*
2220 * On error, clear sigwait indication. psignal1() sets it
2221 * in !error case.
2222 */
2223 if (error) {
2224 p->p_sigctx.ps_sigwaited = 0;
2225
2226 /*
2227 * If the sleep was interrupted (either by signal or wakeup),
2228 * update the timeout and copyout new value back.
2229 * It would be used when the syscall would be restarted
2230 * or called again.
2231 */
2232 if (timo && (error == ERESTART || error == ECANCELED)) {
2233 struct timeval tvnow, tvtimo;
2234 int err;
2235
2236 s = splclock();
2237 tvnow = mono_time;
2238 splx(s);
2239
2240 TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);
2241
2242 /* compute how much time has passed since start */
2243 timersub(&tvnow, &tvstart, &tvnow);
2244 /* substract passed time from timeout */
2245 timersub(&tvtimo, &tvnow, &tvtimo);
2246
2247 if (tvtimo.tv_sec < 0)
2248 return (EAGAIN);
2249
2250 TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);
2251
2252 /* copy updated timeout to userland */
2253 if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts))))
2254 return (err);
2255 }
2256
2257 return (error);
2258 }
2259
2260 /*
2261 * If a signal from the wait set arrived, copy it to userland.
2262 * XXX no queued signals for now
2263 */
2264 if (signum > 0) {
2265 siginfo_t si;
2266
2267 sig:
2268 memset(&si, 0, sizeof(si));
2269 si.si_signo = signum;
2270 si.si_code = SI_USER;
2271
2272 error = copyout(&si, SCARG(uap, info), sizeof(si));
2273 if (error)
2274 return (error);
2275 }
2276
2277 return (0);
2278 }
2279
2280 /*
2281 * Returns true if signal is ignored or masked for passed process.
2282 */
2283 int
2284 sigismasked(struct proc *p, int sig)
2285 {
2286
2287 return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
2288 sigismember(&p->p_sigctx.ps_sigmask, sig));
2289 }
2290
2291 static int
2292 filt_sigattach(struct knote *kn)
2293 {
2294 struct proc *p = curproc;
2295
2296 kn->kn_ptr.p_proc = p;
2297 kn->kn_flags |= EV_CLEAR; /* automatically set */
2298
2299 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2300
2301 return (0);
2302 }
2303
2304 static void
2305 filt_sigdetach(struct knote *kn)
2306 {
2307 struct proc *p = kn->kn_ptr.p_proc;
2308
2309 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2310 }
2311
2312 /*
2313 * signal knotes are shared with proc knotes, so we apply a mask to
2314 * the hint in order to differentiate them from process hints. This
2315 * could be avoided by using a signal-specific knote list, but probably
2316 * isn't worth the trouble.
2317 */
2318 static int
2319 filt_signal(struct knote *kn, long hint)
2320 {
2321
2322 if (hint & NOTE_SIGNAL) {
2323 hint &= ~NOTE_SIGNAL;
2324
2325 if (kn->kn_id == hint)
2326 kn->kn_data++;
2327 }
2328 return (kn->kn_data != 0);
2329 }
2330
/*
 * Filter ops for EVFILT_SIGNAL knotes.  The leading 0 is presumably the
 * "is a file descriptor" flag (this filter is not fd-based) — confirm
 * against the struct filterops definition.
 */
const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
2334