kern_sig.c revision 1.167 1 /* $NetBSD: kern_sig.c,v 1.167 2003/10/12 14:32:05 pk Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.167 2003/10/12 14:32:05 pk Exp $");
41
42 #include "opt_ktrace.h"
43 #include "opt_compat_sunos.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_compat_netbsd32.h"
46
47 #define SIGPROP /* include signal properties table */
48 #include <sys/param.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/systm.h>
55 #include <sys/timeb.h>
56 #include <sys/times.h>
57 #include <sys/buf.h>
58 #include <sys/acct.h>
59 #include <sys/file.h>
60 #include <sys/kernel.h>
61 #include <sys/wait.h>
62 #include <sys/ktrace.h>
63 #include <sys/syslog.h>
64 #include <sys/stat.h>
65 #include <sys/core.h>
66 #include <sys/filedesc.h>
67 #include <sys/malloc.h>
68 #include <sys/pool.h>
69 #include <sys/ucontext.h>
70 #include <sys/sa.h>
71 #include <sys/savar.h>
72 #include <sys/exec.h>
73
74 #include <sys/mount.h>
75 #include <sys/syscallargs.h>
76
77 #include <machine/cpu.h>
78
79 #include <sys/user.h> /* for coredump */
80
81 #include <uvm/uvm_extern.h>
82
/* Forward declarations for functions private to this file. */
static void child_psignal(struct proc *, int);
static void proc_stop(struct proc *);
static int build_corename(struct proc *, char [MAXPATHLEN]);
static void ksiginfo_exithook(struct proc *, void *);
static void ksiginfo_put(struct proc *, const ksiginfo_t *);
static ksiginfo_t *ksiginfo_get(struct proc *, int);
static void kpsignal2(struct proc *, const ksiginfo_t *, int);

/* Signal classification masks; built once in siginit(). */
sigset_t contsigmask, stopsigmask, sigcantmask;

struct pool sigacts_pool;	/* memory pool for sigacts structures */
struct pool siginfo_pool;	/* memory pool for siginfo structures */
struct pool ksiginfo_pool;	/* memory pool for ksiginfo structures */

/*
 * Can process p, with pcred pc, send the signal signum to process q?
 * Root (uid 0) may always signal; otherwise any match between the
 * sender's real/effective uid and the target's real/effective uid
 * suffices, and SIGCONT is additionally permitted within the same
 * session.
 */
#define CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))
107
108 /*
109 * Remove and return the first ksiginfo element that matches our requested
110 * signal, or return NULL if one not found.
111 */
112 static ksiginfo_t *
113 ksiginfo_get(struct proc *p, int signo)
114 {
115 ksiginfo_t *ksi;
116
117 simple_lock(&p->p_sigctx.ps_silock);
118 CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
119 if (ksi->ksi_signo == signo) {
120 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
121 simple_unlock(&p->p_sigctx.ps_silock);
122 return ksi;
123 }
124 }
125 simple_unlock(&p->p_sigctx.ps_silock);
126 return NULL;
127 }
128
129 /*
130 * Append a new ksiginfo element to the list of pending ksiginfo's, if
131 * we need to (SA_SIGINFO was requested). We replace non RT signals if
132 * they already existed in the queue and we add new entries for RT signals,
133 * or for non RT signals with non-existing entries.
134 */
static void
ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
	int s;

	/* Queue siginfo only if the handler was installed with SA_SIGINFO. */
	if ((sa->sa_flags & SA_SIGINFO) == 0)
		return;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		/*
		 * Non-RT semantics: if an entry for this signal is
		 * already pending, replace its payload in place.  The
		 * list linkage is saved and restored around the struct
		 * copy so the queue stays intact.
		 */
		CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				CIRCLEQ_ENTRY(ksiginfo) sv;
				(void)memcpy(&sv, &kp->ksi_list, sizeof(sv));
				*kp = *ksi;
				(void)memcpy(&kp->ksi_list, &sv, sizeof(sv));
				goto out;
			}
		}
	}
	/* No entry to replace: append a fresh one (best effort, no sleep). */
	kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating siginfo for pid %d\n",
		    p->p_pid);
#endif
		/* Drop the siginfo payload; the signal itself still pends. */
		goto out;
	}
	*kp = *ksi;
	CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}
175
176 /*
177 * free all pending ksiginfo on exit
178 */
179 static void
180 ksiginfo_exithook(struct proc *p, void *v)
181 {
182
183 simple_lock(&p->p_sigctx.ps_silock);
184 while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
185 ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
186 CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
187 pool_put(&ksiginfo_pool, ksi);
188 }
189 simple_unlock(&p->p_sigctx.ps_silock);
190 }
191
192 /*
193 * Initialize signal-related data structures.
194 */
195 void
196 signal_init(void)
197 {
198 pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
199 &pool_allocator_nointr);
200 pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
201 &pool_allocator_nointr);
202 pool_init(&ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
203 NULL);
204 exithook_establish(ksiginfo_exithook, NULL);
205 exechook_establish(ksiginfo_exithook, NULL);
206 }
207
208 /*
209 * Create an initial sigctx structure, using the same signal state
210 * as p. If 'share' is set, share the sigctx_proc part, otherwise just
211 * copy it from parent.
212 */
213 void
214 sigactsinit(struct proc *np, struct proc *pp, int share)
215 {
216 struct sigacts *ps;
217
218 if (share) {
219 np->p_sigacts = pp->p_sigacts;
220 pp->p_sigacts->sa_refcnt++;
221 } else {
222 ps = pool_get(&sigacts_pool, PR_WAITOK);
223 if (pp)
224 memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
225 else
226 memset(ps, '\0', sizeof(struct sigacts));
227 ps->sa_refcnt = 1;
228 np->p_sigacts = ps;
229 }
230 }
231
232 /*
233 * Make this process not share its sigctx, maintaining all
234 * signal state.
235 */
236 void
237 sigactsunshare(struct proc *p)
238 {
239 struct sigacts *oldps;
240
241 if (p->p_sigacts->sa_refcnt == 1)
242 return;
243
244 oldps = p->p_sigacts;
245 sigactsinit(p, NULL, 0);
246
247 if (--oldps->sa_refcnt == 0)
248 pool_put(&sigacts_pool, oldps);
249 }
250
251 /*
252 * Release a sigctx structure.
253 */
254 void
255 sigactsfree(struct proc *p)
256 {
257 struct sigacts *ps;
258
259 ps = p->p_sigacts;
260 if (--ps->sa_refcnt > 0)
261 return;
262
263 pool_put(&sigacts_pool, ps);
264 }
265
/*
 * Common code for sigaction(2)-family system calls: optionally report
 * the old action for 'signum' via *osa, and optionally install a new
 * action *nsa with user trampoline 'tramp' of ABI version 'vers'.
 * Returns 0 or EINVAL.
 */
int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline. Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline. Only
	 * validate the vers if a new sigaction was supplied. Emulations
	 * use legacy kernel trampolines with version 0, alternatively
	 * check for that too.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
		(p->p_emul->e_sigcode == NULL) :
		!SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		/* Reject flag bits we do not implement. */
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

#ifndef __HAVE_SIGINFO
		/* This port cannot deliver siginfo; refuse SA_SIGINFO. */
		if (nsa->sa_flags & SA_SIGINFO)
			return (EINVAL);
#endif

		/* SIGKILL/SIGSTOP actions may never be changed. */
		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		/* Unblockable signals can never appear in the handler mask. */
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			/* Track the SIGCHLD child-handling flags on p_flag. */
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		/* Unless SA_NODEFER, block the signal during its handler. */
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
368
369 #ifdef COMPAT_16
370 /* ARGSUSED */
371 int
372 compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
373 {
374 struct compat_16_sys___sigaction14_args /* {
375 syscallarg(int) signum;
376 syscallarg(const struct sigaction *) nsa;
377 syscallarg(struct sigaction *) osa;
378 } */ *uap = v;
379 struct proc *p;
380 struct sigaction nsa, osa;
381 int error;
382
383 if (SCARG(uap, nsa)) {
384 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
385 if (error)
386 return (error);
387 }
388 p = l->l_proc;
389 error = sigaction1(p, SCARG(uap, signum),
390 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
391 NULL, 0);
392 if (error)
393 return (error);
394 if (SCARG(uap, osa)) {
395 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
396 if (error)
397 return (error);
398 }
399 return (0);
400 }
401 #endif
402
403 /* ARGSUSED */
404 int
405 sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
406 {
407 struct sys___sigaction_sigtramp_args /* {
408 syscallarg(int) signum;
409 syscallarg(const struct sigaction *) nsa;
410 syscallarg(struct sigaction *) osa;
411 syscallarg(void *) tramp;
412 syscallarg(int) vers;
413 } */ *uap = v;
414 struct proc *p = l->l_proc;
415 struct sigaction nsa, osa;
416 int error;
417
418 if (SCARG(uap, nsa)) {
419 error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
420 if (error)
421 return (error);
422 }
423 error = sigaction1(p, SCARG(uap, signum),
424 SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
425 SCARG(uap, tramp), SCARG(uap, vers));
426 if (error)
427 return (error);
428 if (SCARG(uap, osa)) {
429 error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
430 if (error)
431 return (error);
432 }
433 return (0);
434 }
435
436 /*
437 * Initialize signal state for process 0;
438 * set to ignore signals that are ignored by default and disable the signal
439 * stack.
440 */
441 void
442 siginit(struct proc *p)
443 {
444 struct sigacts *ps;
445 int signum, prop;
446
447 ps = p->p_sigacts;
448 sigemptyset(&contsigmask);
449 sigemptyset(&stopsigmask);
450 sigemptyset(&sigcantmask);
451 for (signum = 1; signum < NSIG; signum++) {
452 prop = sigprop[signum];
453 if (prop & SA_CONT)
454 sigaddset(&contsigmask, signum);
455 if (prop & SA_STOP)
456 sigaddset(&stopsigmask, signum);
457 if (prop & SA_CANTMASK)
458 sigaddset(&sigcantmask, signum);
459 if (prop & SA_IGNORE && signum != SIGCONT)
460 sigaddset(&p->p_sigctx.ps_sigignore, signum);
461 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
462 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
463 }
464 sigemptyset(&p->p_sigctx.ps_sigcatch);
465 p->p_sigctx.ps_sigwaited = 0;
466 p->p_flag &= ~P_NOCLDSTOP;
467
468 /*
469 * Reset stack state to the user stack.
470 */
471 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
472 p->p_sigctx.ps_sigstk.ss_size = 0;
473 p->p_sigctx.ps_sigstk.ss_sp = 0;
474
475 /* One reference. */
476 ps->sa_refcnt = 1;
477 }
478
479 /*
480 * Reset signals for an exec of the specified process.
481 */
482 void
483 execsigs(struct proc *p)
484 {
485 struct sigacts *ps;
486 int signum, prop;
487
488 sigactsunshare(p);
489
490 ps = p->p_sigacts;
491
492 /*
493 * Reset caught signals. Held signals remain held
494 * through p_sigctx.ps_sigmask (unless they were caught,
495 * and are now ignored by default).
496 */
497 for (signum = 1; signum < NSIG; signum++) {
498 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
499 prop = sigprop[signum];
500 if (prop & SA_IGNORE) {
501 if ((prop & SA_CONT) == 0)
502 sigaddset(&p->p_sigctx.ps_sigignore,
503 signum);
504 sigdelset(&p->p_sigctx.ps_siglist, signum);
505 }
506 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
507 }
508 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
509 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
510 }
511 sigemptyset(&p->p_sigctx.ps_sigcatch);
512 p->p_sigctx.ps_sigwaited = 0;
513 p->p_flag &= ~P_NOCLDSTOP;
514
515 /*
516 * Reset stack state to the user stack.
517 */
518 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
519 p->p_sigctx.ps_sigstk.ss_size = 0;
520 p->p_sigctx.ps_sigstk.ss_sp = 0;
521 }
522
523 int
524 sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
525 {
526
527 if (oss)
528 *oss = p->p_sigctx.ps_sigmask;
529
530 if (nss) {
531 (void)splsched(); /* XXXSMP */
532 switch (how) {
533 case SIG_BLOCK:
534 sigplusset(nss, &p->p_sigctx.ps_sigmask);
535 break;
536 case SIG_UNBLOCK:
537 sigminusset(nss, &p->p_sigctx.ps_sigmask);
538 CHECKSIGS(p);
539 break;
540 case SIG_SETMASK:
541 p->p_sigctx.ps_sigmask = *nss;
542 CHECKSIGS(p);
543 break;
544 default:
545 (void)spl0(); /* XXXSMP */
546 return (EINVAL);
547 }
548 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
549 (void)spl0(); /* XXXSMP */
550 }
551
552 return (0);
553 }
554
555 /*
556 * Manipulate signal mask.
557 * Note that we receive new mask, not pointer,
558 * and return old mask as return value;
559 * the library stub does the rest.
560 */
561 int
562 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
563 {
564 struct sys___sigprocmask14_args /* {
565 syscallarg(int) how;
566 syscallarg(const sigset_t *) set;
567 syscallarg(sigset_t *) oset;
568 } */ *uap = v;
569 struct proc *p;
570 sigset_t nss, oss;
571 int error;
572
573 if (SCARG(uap, set)) {
574 error = copyin(SCARG(uap, set), &nss, sizeof(nss));
575 if (error)
576 return (error);
577 }
578 p = l->l_proc;
579 error = sigprocmask1(p, SCARG(uap, how),
580 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
581 if (error)
582 return (error);
583 if (SCARG(uap, oset)) {
584 error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
585 if (error)
586 return (error);
587 }
588 return (0);
589 }
590
591 void
592 sigpending1(struct proc *p, sigset_t *ss)
593 {
594
595 *ss = p->p_sigctx.ps_siglist;
596 sigminusset(&p->p_sigctx.ps_sigmask, ss);
597 }
598
599 /* ARGSUSED */
600 int
601 sys___sigpending14(struct lwp *l, void *v, register_t *retval)
602 {
603 struct sys___sigpending14_args /* {
604 syscallarg(sigset_t *) set;
605 } */ *uap = v;
606 struct proc *p;
607 sigset_t ss;
608
609 p = l->l_proc;
610 sigpending1(p, &ss);
611 return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
612 }
613
614 int
615 sigsuspend1(struct proc *p, const sigset_t *ss)
616 {
617 struct sigacts *ps;
618
619 ps = p->p_sigacts;
620 if (ss) {
621 /*
622 * When returning from sigpause, we want
623 * the old mask to be restored after the
624 * signal handler has finished. Thus, we
625 * save it here and mark the sigctx structure
626 * to indicate this.
627 */
628 p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
629 p->p_sigctx.ps_flags |= SAS_OLDMASK;
630 (void) splsched(); /* XXXSMP */
631 p->p_sigctx.ps_sigmask = *ss;
632 CHECKSIGS(p);
633 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
634 (void) spl0(); /* XXXSMP */
635 }
636
637 while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
638 /* void */;
639
640 /* always return EINTR rather than ERESTART... */
641 return (EINTR);
642 }
643
644 /*
645 * Suspend process until signal, providing mask to be set
646 * in the meantime. Note nonstandard calling convention:
647 * libc stub passes mask, not pointer, to save a copyin.
648 */
649 /* ARGSUSED */
650 int
651 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
652 {
653 struct sys___sigsuspend14_args /* {
654 syscallarg(const sigset_t *) set;
655 } */ *uap = v;
656 struct proc *p;
657 sigset_t ss;
658 int error;
659
660 if (SCARG(uap, set)) {
661 error = copyin(SCARG(uap, set), &ss, sizeof(ss));
662 if (error)
663 return (error);
664 }
665
666 p = l->l_proc;
667 return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
668 }
669
670 int
671 sigaltstack1(struct proc *p, const struct sigaltstack *nss,
672 struct sigaltstack *oss)
673 {
674
675 if (oss)
676 *oss = p->p_sigctx.ps_sigstk;
677
678 if (nss) {
679 if (nss->ss_flags & ~SS_ALLBITS)
680 return (EINVAL);
681
682 if (nss->ss_flags & SS_DISABLE) {
683 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
684 return (EINVAL);
685 } else {
686 if (nss->ss_size < MINSIGSTKSZ)
687 return (ENOMEM);
688 }
689 p->p_sigctx.ps_sigstk = *nss;
690 }
691
692 return (0);
693 }
694
695 /* ARGSUSED */
696 int
697 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
698 {
699 struct sys___sigaltstack14_args /* {
700 syscallarg(const struct sigaltstack *) nss;
701 syscallarg(struct sigaltstack *) oss;
702 } */ *uap = v;
703 struct proc *p;
704 struct sigaltstack nss, oss;
705 int error;
706
707 if (SCARG(uap, nss)) {
708 error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
709 if (error)
710 return (error);
711 }
712 p = l->l_proc;
713 error = sigaltstack1(p,
714 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
715 if (error)
716 return (error);
717 if (SCARG(uap, oss)) {
718 error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
719 if (error)
720 return (error);
721 }
722 return (0);
723 }
724
725 /* ARGSUSED */
726 int
727 sys_kill(struct lwp *l, void *v, register_t *retval)
728 {
729 struct sys_kill_args /* {
730 syscallarg(int) pid;
731 syscallarg(int) signum;
732 } */ *uap = v;
733 struct proc *cp, *p;
734 struct pcred *pc;
735 ksiginfo_t ksi;
736
737 cp = l->l_proc;
738 pc = cp->p_cred;
739 if ((u_int)SCARG(uap, signum) >= NSIG)
740 return (EINVAL);
741 memset(&ksi, 0, sizeof(ksi));
742 ksi.ksi_signo = SCARG(uap, signum);
743 ksi.ksi_code = SI_USER;
744 ksi.ksi_pid = cp->p_pid;
745 ksi.ksi_uid = cp->p_ucred->cr_uid;
746 if (SCARG(uap, pid) > 0) {
747 /* kill single process */
748 if ((p = pfind(SCARG(uap, pid))) == NULL)
749 return (ESRCH);
750 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum)))
751 return (EPERM);
752 if (SCARG(uap, signum))
753 kpsignal2(p, &ksi, 1);
754 return (0);
755 }
756 switch (SCARG(uap, pid)) {
757 case -1: /* broadcast signal */
758 return (killpg1(cp, &ksi, 0, 1));
759 case 0: /* signal own process group */
760 return (killpg1(cp, &ksi, 0, 0));
761 default: /* negative explicit process group */
762 return (killpg1(cp, &ksi, -SCARG(uap, pid), 0));
763 }
764 /* NOTREACHED */
765 }
766
767 /*
768 * Common code for kill process group/broadcast kill.
769 * cp is calling process.
770 */
771 int
772 killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all)
773 {
774 struct proc *p;
775 struct pcred *pc;
776 struct pgrp *pgrp;
777 int nfound;
778 int signum = ksi->ksi_signo;
779
780 pc = cp->p_cred;
781 nfound = 0;
782 if (all) {
783 /*
784 * broadcast
785 */
786 proclist_lock_read();
787 LIST_FOREACH(p, &allproc, p_list) {
788 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
789 p == cp || !CANSIGNAL(cp, pc, p, signum))
790 continue;
791 nfound++;
792 if (signum)
793 kpsignal2(p, ksi, 1);
794 }
795 proclist_unlock_read();
796 } else {
797 if (pgid == 0)
798 /*
799 * zero pgid means send to my process group.
800 */
801 pgrp = cp->p_pgrp;
802 else {
803 pgrp = pgfind(pgid);
804 if (pgrp == NULL)
805 return (ESRCH);
806 }
807 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
808 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
809 !CANSIGNAL(cp, pc, p, signum))
810 continue;
811 nfound++;
812 if (signum && P_ZOMBIE(p) == 0)
813 kpsignal2(p, ksi, 1);
814 }
815 }
816 return (nfound ? 0 : ESRCH);
817 }
818
819 /*
820 * Send a signal to a process group.
821 */
822 void
823 gsignal(int pgid, int signum)
824 {
825 ksiginfo_t ksi;
826 memset(&ksi, 0, sizeof(ksi));
827 ksi.ksi_signo = signum;
828 kgsignal(pgid, &ksi, NULL);
829 }
830
831 void
832 kgsignal(int pgid, ksiginfo_t *ksi, void *data)
833 {
834 struct pgrp *pgrp;
835
836 if (pgid && (pgrp = pgfind(pgid)))
837 kpgsignal(pgrp, ksi, data, 0);
838 }
839
840 /*
841 * Send a signal to a process group. If checktty is 1,
842 * limit to members which have a controlling terminal.
843 */
844 void
845 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
846 {
847 ksiginfo_t ksi;
848 memset(&ksi, 0, sizeof(ksi));
849 ksi.ksi_signo = sig;
850 kpgsignal(pgrp, &ksi, NULL, checkctty);
851 }
852
853 void
854 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
855 {
856 struct proc *p;
857
858 if (pgrp)
859 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
860 if (checkctty == 0 || p->p_flag & P_CONTROLT)
861 kpsignal(p, ksi, data);
862 }
863
/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
#ifndef __HAVE_SIGINFO
/*
 * Compatibility shim for ports without siginfo delivery: wrap the
 * old (lwp, signum, code) interface into a trap ksiginfo and forward
 * to the real implementation.  The in-body #define renames every
 * later use of "trapsignal" (including the definition below) to
 * _trapsignal, so the old name keeps the old signature.
 */
void _trapsignal(struct lwp *, const ksiginfo_t *);
void
trapsignal(struct lwp *l, int signum, u_long code)
{
#define trapsignal _trapsignal
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_trap = (int)code;
	trapsignal(l, &ksi);
}
#endif

void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	p = l->l_proc;
	ps = p->p_sigacts;
	/*
	 * Deliver directly when the process is not traced, catches the
	 * signal, and does not currently block it; otherwise fall back
	 * to the normal posting path.
	 */
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		/* Block the handler's mask for the duration of the handler. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* One-shot handler: revert to the default action. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi, 1);
	}
}
923
924 /*
925 * Fill in signal information and signal the parent for a child status change.
926 */
927 static void
928 child_psignal(struct proc *p, int dolock)
929 {
930 ksiginfo_t ksi;
931
932 (void)memset(&ksi, 0, sizeof(ksi));
933 ksi.ksi_signo = SIGCHLD;
934 ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
935 ksi.ksi_pid = p->p_pid;
936 ksi.ksi_uid = p->p_ucred->cr_uid;
937 ksi.ksi_status = p->p_xstat;
938 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
939 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
940 kpsignal2(p->p_pptr, &ksi, dolock);
941 }
942
943 /*
944 * Send the signal to the process. If the signal has an action, the action
945 * is usually performed by the target process rather than the caller; we add
946 * the signal to the set of pending signals for the process.
947 *
948 * Exceptions:
949 * o When a stop signal is sent to a sleeping process that takes the
950 * default action, the process is stopped without awakening it.
951 * o SIGCONT restarts stopped processes (or puts them back to sleep)
952 * regardless of the signal action (eg, blocked or ignored).
953 *
954 * Other ignored signals are discarded immediately.
955 *
956 * XXXSMP: Invoked as psignal() or sched_psignal().
957 */
958 void
959 psignal1(struct proc *p, int signum, int dolock)
960 {
961 ksiginfo_t ksi;
962
963 memset(&ksi, 0, sizeof(ksi));
964 ksi.ksi_signo = signum;
965 kpsignal2(p, &ksi, dolock);
966 }
967
968 void
969 kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
970 {
971
972 if ((p->p_flag & P_WEXIT) == 0 && data) {
973 size_t fd;
974 struct filedesc *fdp = p->p_fd;
975
976 ksi->ksi_fd = -1;
977 for (fd = 0; fd < fdp->fd_nfiles; fd++) {
978 struct file *fp = fdp->fd_ofiles[fd];
979 /* XXX: lock? */
980 if (fp && fp->f_data == data) {
981 ksi->ksi_fd = fd;
982 break;
983 }
984 }
985 }
986 kpsignal2(p, ksi, dolock);
987 }
988
989 static void
990 kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
991 {
992 struct lwp *l, *suspended;
993 int s = 0, prop, allsusp;
994 sig_t action;
995 int signum = ksi->ksi_signo;
996
997 #ifdef DIAGNOSTIC
998 if (signum <= 0 || signum >= NSIG)
999 panic("psignal signal number %d", signum);
1000
1001 /* XXXSMP: works, but icky */
1002 if (dolock)
1003 SCHED_ASSERT_UNLOCKED();
1004 else
1005 SCHED_ASSERT_LOCKED();
1006 #endif
1007
1008
1009 /*
1010 * Notify any interested parties in the signal.
1011 */
1012 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1013
1014 prop = sigprop[signum];
1015
1016 /*
1017 * If proc is traced, always give parent a chance.
1018 */
1019 if (p->p_flag & P_TRACED)
1020 action = SIG_DFL;
1021 else {
1022 /*
1023 * If the signal is being ignored,
1024 * then we forget about it immediately.
1025 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
1026 * and if it is set to SIG_IGN,
1027 * action will be SIG_DFL here.)
1028 */
1029 if (sigismember(&p->p_sigctx.ps_sigignore, signum))
1030 return;
1031 if (sigismember(&p->p_sigctx.ps_sigmask, signum))
1032 action = SIG_HOLD;
1033 else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
1034 action = SIG_CATCH;
1035 else {
1036 action = SIG_DFL;
1037
1038 if (prop & SA_KILL && p->p_nice > NZERO)
1039 p->p_nice = NZERO;
1040
1041 /*
1042 * If sending a tty stop signal to a member of an
1043 * orphaned process group, discard the signal here if
1044 * the action is default; don't stop the process below
1045 * if sleeping, and don't clear any pending SIGCONT.
1046 */
1047 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
1048 return;
1049 }
1050 }
1051
1052 if (prop & SA_CONT)
1053 sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);
1054
1055 if (prop & SA_STOP)
1056 sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
1057
1058 sigaddset(&p->p_sigctx.ps_siglist, signum);
1059
1060 /* CHECKSIGS() is "inlined" here. */
1061 p->p_sigctx.ps_sigcheck = 1;
1062
1063 /*
1064 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
1065 * please!), check if anything waits on it. If yes, clear the
1066 * pending signal from siglist set, save it to ps_sigwaited,
1067 * clear sigwait list, and wakeup any sigwaiters.
1068 * The signal won't be processed further here.
1069 */
1070 if ((prop & SA_CANTMASK) == 0
1071 && p->p_sigctx.ps_sigwaited < 0
1072 && sigismember(&p->p_sigctx.ps_sigwait, signum)
1073 && p->p_stat != SSTOP) {
1074 if (action == SIG_CATCH)
1075 ksiginfo_put(p, ksi);
1076 sigdelset(&p->p_sigctx.ps_siglist, signum);
1077 p->p_sigctx.ps_sigwaited = signum;
1078 sigemptyset(&p->p_sigctx.ps_sigwait);
1079 if (dolock)
1080 wakeup_one(&p->p_sigctx.ps_sigwait);
1081 else
1082 sched_wakeup(&p->p_sigctx.ps_sigwait);
1083 return;
1084 }
1085
1086 /*
1087 * Defer further processing for signals which are held,
1088 * except that stopped processes must be continued by SIGCONT.
1089 */
1090 if (action == SIG_HOLD &&
1091 ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1092 ksiginfo_put(p, ksi);
1093 return;
1094 }
1095 /* XXXSMP: works, but icky */
1096 if (dolock)
1097 SCHED_LOCK(s);
1098
1099 /* XXXUPSXXX LWPs might go to sleep without passing signal handling */
1100 if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)
1101 && !((p->p_flag & P_SA) && (p->p_sa->sa_idle != NULL))) {
1102 /*
1103 * At least one LWP is running or on a run queue.
1104 * The signal will be noticed when one of them returns
1105 * to userspace.
1106 */
1107 signotify(p);
1108 /*
1109 * The signal will be noticed very soon.
1110 */
1111 goto out;
1112 } else {
1113 /* Process is sleeping or stopped */
1114 if (p->p_flag & P_SA) {
1115 struct lwp *l2 = p->p_sa->sa_vp;
1116 l = NULL;
1117 allsusp = 1;
1118
1119 if ((l2->l_stat == LSSLEEP) && (l2->l_flag & L_SINTR))
1120 l = l2;
1121 else if (l2->l_stat == LSSUSPENDED)
1122 suspended = l2;
1123 else if ((l2->l_stat != LSZOMB) &&
1124 (l2->l_stat != LSDEAD))
1125 allsusp = 0;
1126 } else {
1127 /*
1128 * Find out if any of the sleeps are interruptable,
1129 * and if all the live LWPs remaining are suspended.
1130 */
1131 allsusp = 1;
1132 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1133 if (l->l_stat == LSSLEEP &&
1134 l->l_flag & L_SINTR)
1135 break;
1136 if (l->l_stat == LSSUSPENDED)
1137 suspended = l;
1138 else if ((l->l_stat != LSZOMB) &&
1139 (l->l_stat != LSDEAD))
1140 allsusp = 0;
1141 }
1142 }
1143 if (p->p_stat == SACTIVE) {
1144
1145
1146 if (l != NULL && (p->p_flag & P_TRACED))
1147 goto run;
1148
1149 /*
1150 * If SIGCONT is default (or ignored) and process is
1151 * asleep, we are finished; the process should not
1152 * be awakened.
1153 */
1154 if ((prop & SA_CONT) && action == SIG_DFL) {
1155 sigdelset(&p->p_sigctx.ps_siglist, signum);
1156 goto done;
1157 }
1158
1159 /*
1160 * When a sleeping process receives a stop
1161 * signal, process immediately if possible.
1162 */
1163 if ((prop & SA_STOP) && action == SIG_DFL) {
1164 /*
1165 * If a child holding parent blocked,
1166 * stopping could cause deadlock.
1167 */
1168 if (p->p_flag & P_PPWAIT) {
1169 goto out;
1170 }
1171 sigdelset(&p->p_sigctx.ps_siglist, signum);
1172 p->p_xstat = signum;
1173 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1174 /*
1175 * XXXSMP: recursive call; don't lock
1176 * the second time around.
1177 */
1178 child_psignal(p, 0);
1179 }
1180 proc_stop(p); /* XXXSMP: recurse? */
1181 goto done;
1182 }
1183
1184 if (l == NULL) {
1185 /*
1186 * Special case: SIGKILL of a process
1187 * which is entirely composed of
1188 * suspended LWPs should succeed. We
1189 * make this happen by unsuspending one of
1190 * them.
1191 */
1192 if (allsusp && (signum == SIGKILL))
1193 lwp_continue(suspended);
1194 goto done;
1195 }
1196 /*
1197 * All other (caught or default) signals
1198 * cause the process to run.
1199 */
1200 goto runfast;
1201 /*NOTREACHED*/
1202 } else if (p->p_stat == SSTOP) {
1203 /* Process is stopped */
1204 /*
1205 * If traced process is already stopped,
1206 * then no further action is necessary.
1207 */
1208 if (p->p_flag & P_TRACED)
1209 goto done;
1210
1211 /*
1212 * Kill signal always sets processes running,
1213 * if possible.
1214 */
1215 if (signum == SIGKILL) {
1216 l = proc_unstop(p);
1217 if (l)
1218 goto runfast;
1219 goto done;
1220 }
1221
1222 if (prop & SA_CONT) {
1223 /*
1224 * If SIGCONT is default (or ignored),
1225 * we continue the process but don't
1226 * leave the signal in ps_siglist, as
1227 * it has no further action. If
1228 * SIGCONT is held, we continue the
1229 * process and leave the signal in
1230 * ps_siglist. If the process catches
1231 * SIGCONT, let it handle the signal
1232 * itself. If it isn't waiting on an
1233 * event, then it goes back to run
1234 * state. Otherwise, process goes
1235 * back to sleep state.
1236 */
1237 if (action == SIG_DFL)
1238 sigdelset(&p->p_sigctx.ps_siglist,
1239 signum);
1240 l = proc_unstop(p);
1241 if (l && (action == SIG_CATCH))
1242 goto runfast;
1243 goto out;
1244 }
1245
1246 if (prop & SA_STOP) {
1247 /*
1248 * Already stopped, don't need to stop again.
1249 * (If we did the shell could get confused.)
1250 */
1251 sigdelset(&p->p_sigctx.ps_siglist, signum);
1252 goto done;
1253 }
1254
1255 /*
1256 * If a lwp is sleeping interruptibly, then
1257 * wake it up; it will run until the kernel
1258 * boundary, where it will stop in issignal(),
1259 * since p->p_stat is still SSTOP. When the
1260 * process is continued, it will be made
1261 * runnable and can look at the signal.
1262 */
1263 if (l)
1264 goto run;
1265 goto out;
1266 } else {
1267 /* Else what? */
1268 panic("psignal: Invalid process state %d.",
1269 p->p_stat);
1270 }
1271 }
1272 /*NOTREACHED*/
1273
1274 runfast:
1275 if (action == SIG_CATCH) {
1276 ksiginfo_put(p, ksi);
1277 action = SIG_HOLD;
1278 }
1279 /*
1280 * Raise priority to at least PUSER.
1281 */
1282 if (l->l_priority > PUSER)
1283 l->l_priority = PUSER;
1284 run:
1285 if (action == SIG_CATCH) {
1286 ksiginfo_put(p, ksi);
1287 action = SIG_HOLD;
1288 }
1289
1290 setrunnable(l); /* XXXSMP: recurse? */
1291 out:
1292 if (action == SIG_CATCH)
1293 ksiginfo_put(p, ksi);
1294 done:
1295 /* XXXSMP: works, but icky */
1296 if (dolock)
1297 SCHED_UNLOCK(s);
1298 }
1299
/*
 * Deliver signal information to userland for LWP l: for SA (scheduler
 * activations) processes this is done via a SIGNAL upcall carrying a
 * pool-allocated siginfo; otherwise via the emulation's sendsig hook.
 * mask is the signal mask to be restored when the handler returns.
 */
void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		/*
		 * Temporarily clear L_SA around the upcall setup, then
		 * restore whatever value it had.
		 */
		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = ksi->ksi_info;
		le = li = NULL;
		/*
		 * Trap-generated signals report this LWP as the "event"
		 * LWP; others report it as the "interrupted" LWP.
		 */
		if (KSI_TRAP_P(ksi))
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= f;
		return;
	}

	/* Non-SA process: hand off to the emulation-specific sendsig. */
#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, KSI_TRAPCODE(ksi));
#endif
}
1334
1335 static __inline int firstsig(const sigset_t *);
1336
1337 static __inline int
1338 firstsig(const sigset_t *ss)
1339 {
1340 int sig;
1341
1342 sig = ffs(ss->__bits[0]);
1343 if (sig != 0)
1344 return (sig);
1345 #if NSIG > 33
1346 sig = ffs(ss->__bits[1]);
1347 if (sig != 0)
1348 return (sig + 32);
1349 #endif
1350 #if NSIG > 65
1351 sig = ffs(ss->__bits[2]);
1352 if (sig != 0)
1353 return (sig + 64);
1354 #endif
1355 #if NSIG > 97
1356 sig = ffs(ss->__bits[3]);
1357 if (sig != 0)
1358 return (sig + 96);
1359 #endif
1360 return (0);
1361 }
1362
1363 /*
1364 * If the current process has received a signal (should be caught or cause
1365 * termination, should interrupt current syscall), return the signal number.
1366 * Stop signals with default action are processed immediately, then cleared;
1367 * they aren't returned. This is checked after each entry to the system for
1368 * a syscall or trap (though this can usually be done without calling issignal
1369 * by checking the pending signal masks in the CURSIG macro.) The normal call
1370 * sequence is
1371 *
1372 * while (signum = CURSIG(curlwp))
1373 * postsig(signum);
1374 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	/*
	 * dolock: we must acquire the scheduler lock ourselves before
	 * touching scheduler state; when L_SINTR is set the caller
	 * apparently already holds it (NOTE(review): confirm).
	 * locked: the inverse, used to re-take the lock before return.
	 */
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	/*
	 * Scan pending signals until one is found that should be
	 * returned to the caller, or none remain.
	 */
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {	 		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
							/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;
			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p);
		sigtraceswitch:
			/* Entered directly above when already SSTOP+traced. */
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
							/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p);
			sigswitch:
				/* Entered directly above when already SSTOP. */
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
						/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}
1560
1561 /*
1562 * Put the argument process into the stopped state and notify the parent
1563 * via wakeup. Signals are handled elsewhere. The process must not be
1564 * on the run queue.
1565 */
static void
proc_stop(struct proc *p)
{
	struct lwp *l;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_stat = SSTOP;
	p->p_flag &= ~P_WAITED;

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		else if ( (l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
			/*
			 * Interruptibly sleeping LWPs are woken so they
			 * can notice the SSTOP state on the way out of
			 * the kernel.
			 */
			setrunnable(l);
		}

		/* !!!UPS!!! FIX ME */
#if 0
		else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			      "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
#endif
	}
	/* XXX unlock process LWP state */

	/* Notify the parent, which may be sleeping in wait4(). */
	sched_wakeup((caddr_t)p->p_pptr);
}
1638
1639 /*
1640 * Given a process in state SSTOP, set the state back to SACTIVE and
1641 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
1642 *
1643 * If no LWPs ended up runnable (and therefore able to take a signal),
1644 * return a LWP that is sleeping interruptably. The caller can wake
1645 * that LWP up to take a signal.
1646 */
1647 struct lwp *
1648 proc_unstop(struct proc *p)
1649 {
1650 struct lwp *l, *lr = NULL;
1651 int cantake = 0;
1652
1653 SCHED_ASSERT_LOCKED();
1654
1655 /*
1656 * Our caller wants to be informed if there are only sleeping
1657 * and interruptable LWPs left after we have run so that it
1658 * can invoke setrunnable() if required - return one of the
1659 * interruptable LWPs if this is the case.
1660 */
1661
1662 p->p_stat = SACTIVE;
1663 if (p->p_flag & P_SA) {
1664 /*
1665 * Preferentially select the idle LWP as the interruptable
1666 * LWP to return if it exists.
1667 */
1668 lr = p->p_sa->sa_idle;
1669 if (lr != NULL)
1670 cantake = 1;
1671 }
1672 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1673 if (l->l_stat == LSRUN) {
1674 lr = NULL;
1675 cantake = 1;
1676 }
1677 if (l->l_stat != LSSTOP)
1678 continue;
1679
1680 if (l->l_wchan != NULL) {
1681 l->l_stat = LSSLEEP;
1682 if ((cantake == 0) && (l->l_flag & L_SINTR)) {
1683 lr = l;
1684 cantake = 1;
1685 }
1686 } else {
1687 setrunnable(l);
1688 lr = NULL;
1689 cantake = 1;
1690 }
1691 }
1692
1693 return lr;
1694 }
1695
1696 /*
1697 * Take the action for the specified signal
1698 * from the current set of pending signals.
1699 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	/* The signal is being acted upon; remove it from the pending set. */
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		ksi = ksiginfo_get(p, signum);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			(void)memset(&ksi1, 0, sizeof(ksi1));
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			pool_put(&ksiginfo_pool, ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		/*
		 * Block the handled signal (plus sa_mask) and apply any
		 * SA_RESETHAND semantics, at splsched to keep the mask
		 * update atomic with respect to signal posting.
		 */
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}
1799
1800 /*
1801 * Kill the current process for stated reason.
1802 */
void
killproc(struct proc *p, const char *why)
{
	/* Record the reason in the system log and on the user's tty. */
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}
1810
1811 /*
1812 * Force the current process to exit with the specified signal, dumping core
1813 * if appropriate. We bypass the normal tests for masked and caught signals,
1814 * allowing unrecoverable failures to terminate the process without changing
1815 * signal state. Mark the accounting record with the signal termination.
1816 * If dumping core, save the signal number for the debugger. Calls exit and
1817 * does not return.
1818 */
1819
/*
 * When nonzero, sigexit() logs a message whenever a process exits on a
 * signal that dumps core; enabled by default only on DEBUG kernels.
 */
#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

/* Log formats used by sigexit() for the core / no-core cases. */
static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
1830
1831 /* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/*
	 * We only get here once resumed; presumably after the dump is
	 * written (NOTE(review): confirm who continues us).
	 */
	lwp_exit(l);
}
1855
void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		/*
		 * NOTE(review): this branch relies on p_userret
		 * (lwp_coredump_hook, which ends in lwp_exit()) never
		 * returning — confirm, since there is no return here.
		 */
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		/* WCOREFLAG marks the wait status as "core dumped". */
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
				(int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}
1915
1916 /*
1917 * Dump core, into a file named "progname.core" or "core" (depending on the
1918 * value of shortcorename), unless the process was setuid/setgid.
1919 */
int
coredump(struct lwp *l)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1;
	char name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	/* Expand the corename template (%n/%p/%u/%t) into name[]. */
	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	/* Truncate any existing file before writing the new dump. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	/* Close the vnode on all paths; preserve the first error seen. */
	VOP_UNLOCK(vp, 0);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}
1992
1993 /*
1994 * Nonexistent system call-- signal process (may want to handle it).
1995 * Flag error in case process won't see signal immediately (blocked or ignored).
1996 */
1997 /* ARGSUSED */
1998 int
1999 sys_nosys(struct lwp *l, void *v, register_t *retval)
2000 {
2001 struct proc *p;
2002
2003 p = l->l_proc;
2004 psignal(p, SIGSYS);
2005 return (ENOSYS);
2006 }
2007
2008 static int
2009 build_corename(struct proc *p, char dst[MAXPATHLEN])
2010 {
2011 const char *s;
2012 char *d, *end;
2013 int i;
2014
2015 for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
2016 *s != '\0'; s++) {
2017 if (*s == '%') {
2018 switch (*(s + 1)) {
2019 case 'n':
2020 i = snprintf(d, end - d, "%s", p->p_comm);
2021 break;
2022 case 'p':
2023 i = snprintf(d, end - d, "%d", p->p_pid);
2024 break;
2025 case 'u':
2026 i = snprintf(d, end - d, "%.*s",
2027 (int)sizeof p->p_pgrp->pg_session->s_login,
2028 p->p_pgrp->pg_session->s_login);
2029 break;
2030 case 't':
2031 i = snprintf(d, end - d, "%ld",
2032 p->p_stats->p_start.tv_sec);
2033 break;
2034 default:
2035 goto copy;
2036 }
2037 d += i;
2038 s++;
2039 } else {
2040 copy: *d = *s;
2041 d++;
2042 }
2043 if (d >= end)
2044 return (ENAMETOOLONG);
2045 }
2046 *d = '\0';
2047 return 0;
2048 }
2049
/*
 * Fill in *ucp with LWP l's current user context: signal mask, stack
 * description, and machine context.
 */
void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	/* Machine-dependent register context last; may set more uc_flags. */
	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}
2080
2081 /* ARGSUSED */
2082 int
2083 sys_getcontext(struct lwp *l, void *v, register_t *retval)
2084 {
2085 struct sys_getcontext_args /* {
2086 syscallarg(struct __ucontext *) ucp;
2087 } */ *uap = v;
2088 ucontext_t uc;
2089
2090 getucontext(l, &uc);
2091
2092 return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
2093 }
2094
2095 int
2096 setucontext(struct lwp *l, const ucontext_t *ucp)
2097 {
2098 struct proc *p;
2099 int error;
2100
2101 p = l->l_proc;
2102 if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
2103 return (error);
2104 l->l_ctxlink = ucp->uc_link;
2105 /*
2106 * We might want to take care of the stack portion here but currently
2107 * don't; see the comment in getucontext().
2108 */
2109 if ((ucp->uc_flags & _UC_SIGMASK) != 0)
2110 sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);
2111
2112 return 0;
2113 }
2114
2115 /* ARGSUSED */
2116 int
2117 sys_setcontext(struct lwp *l, void *v, register_t *retval)
2118 {
2119 struct sys_setcontext_args /* {
2120 syscallarg(const ucontext_t *) ucp;
2121 } */ *uap = v;
2122 ucontext_t uc;
2123 int error;
2124
2125 if (SCARG(uap, ucp) == NULL) /* i.e. end of uc_link chain */
2126 exit1(l, W_EXITCODE(0, 0));
2127 else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
2128 (error = setucontext(l, &uc)) != 0)
2129 return (error);
2130
2131 return (EJUSTRETURN);
2132 }
2133
2134 /*
2135 * sigtimedwait(2) system call, used also for implementation
2136 * of sigwaitinfo() and sigwait().
2137 *
2138 * This only handles single LWP in signal wait. libpthread provides
2139 * it's own sigtimedwait() wrapper to DTRT WRT individual threads.
2140 *
2141 * XXX no support for queued signals, si_code is always SI_USER.
2142 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t waitset, twaitset;
	struct proc *p = l->l_proc;
	int error, signum, s;
	int timo = 0;
	struct timeval tvstart;
	struct timespec ts;

	if ((error = copyin(SCARG(uap, set), &waitset, sizeof(waitset))))
		return (error);

	/*
	 * Silently ignore SA_CANTMASK signals. psignal1() would
	 * ignore SA_CANTMASK signals in waitset, we do this
	 * only for the below siglist check.
	 */
	sigminusset(&sigcantmask, &waitset);

	/*
	 * First scan siglist and check if there is signal from
	 * our waitset already pending.
	 */
	twaitset = waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))))
			return (error);

		/*
		 * Round a nonzero sub-tick timeout up to one tick;
		 * a zero or negative timeout means "don't wait".
		 * NOTE(review): ts.tv_sec * 1000 can overflow for huge
		 * tv_sec values before widening to uint64_t — confirm.
		 */
		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return (EAGAIN);

		/*
		 * Remember current mono_time, it would be used in
		 * ECANCELED/ERESTART case.
		 */
		s = splclock();
		tvstart = mono_time;
		splx(s);
	}

	/*
	 * Setup ps_sigwait list.
	 */
	p->p_sigctx.ps_sigwaited = -1;
	p->p_sigctx.ps_sigwait = waitset;

	/*
	 * Wait for signal to arrive. We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Check if a signal from our wait set has arrived, or if it
	 * was mere wakeup.
	 */
	if (!error) {
		if ((signum = p->p_sigctx.ps_sigwaited) <= 0) {
			/* wakeup via _lwp_wakeup() */
			error = ECANCELED;
		}
	}

	/*
	 * On error, clear sigwait indication. psignal1() sets it
	 * in !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = 0;

		/*
		 * If the sleep was interrupted (either by signal or wakeup),
		 * update the timeout and copyout new value back.
		 * It would be used when the syscall would be restarted
		 * or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timeval tvnow, tvtimo;
			int err;

			s = splclock();
			tvnow = mono_time;
			splx(s);

			TIMESPEC_TO_TIMEVAL(&tvtimo, &ts);

			/* compute how much time has passed since start */
			timersub(&tvnow, &tvstart, &tvnow);
			/* substract passed time from timeout */
			timersub(&tvtimo, &tvnow, &tvtimo);

			if (tvtimo.tv_sec < 0)
				return (EAGAIN);

			TIMEVAL_TO_TIMESPEC(&tvtimo, &ts);

			/* copy updated timeout to userland */
			if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts))))
				return (err);
		}

		return (error);
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * XXX no queued signals for now
	 */
	if (signum > 0) {
		siginfo_t si;

	sig:
		memset(&si, 0, sizeof(si));
		si.si_signo = signum;
		si.si_code = SI_USER;

		error = copyout(&si, SCARG(uap, info), sizeof(si));
		if (error)
			return (error);
	}

	return (0);
}
2288
2289 /*
2290 * Returns true if signal is ignored or masked for passed process.
2291 */
2292 int
2293 sigismasked(struct proc *p, int sig)
2294 {
2295
2296 return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
2297 sigismember(&p->p_sigctx.ps_sigmask, sig));
2298 }
2299
2300 static int
2301 filt_sigattach(struct knote *kn)
2302 {
2303 struct proc *p = curproc;
2304
2305 kn->kn_ptr.p_proc = p;
2306 kn->kn_flags |= EV_CLEAR; /* automatically set */
2307
2308 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2309
2310 return (0);
2311 }
2312
2313 static void
2314 filt_sigdetach(struct knote *kn)
2315 {
2316 struct proc *p = kn->kn_ptr.p_proc;
2317
2318 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2319 }
2320
2321 /*
2322 * signal knotes are shared with proc knotes, so we apply a mask to
2323 * the hint in order to differentiate them from process hints. This
2324 * could be avoided by using a signal-specific knote list, but probably
2325 * isn't worth the trouble.
2326 */
2327 static int
2328 filt_signal(struct knote *kn, long hint)
2329 {
2330
2331 if (hint & NOTE_SIGNAL) {
2332 hint &= ~NOTE_SIGNAL;
2333
2334 if (kn->kn_id == hint)
2335 kn->kn_data++;
2336 }
2337 return (kn->kn_data != 0);
2338 }
2339
/* Filter operations for signal knotes (see filt_* above). */
const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
2343