/*	$NetBSD: kern_sig.c,v 1.261 2007/12/03 20:26:25 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.261 2007/12/03 20:26:25 ad Exp $");

#include "opt_ptrace.h"
#include "opt_multiprocessor.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"
#include "opt_pax.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/exec.h>
#include <sys/kauth.h>
#include <sys/acct.h>
#include <sys/callout.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#ifdef PAX_SEGVGUARD
#include <sys/pax.h>
#endif /* PAX_SEGVGUARD */

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

static void	ksiginfo_exechook(struct proc *, void *);
static void	proc_stop_callout(void *);

int	sigunwait(struct proc *, const ksiginfo_t *);
void	sigput(sigpend_t *, struct proc *, ksiginfo_t *);
int	sigpost(struct lwp *, sig_t, int, int);
int	sigchecktrace(sigpend_t **);
void	sigswitch(bool, int, int);
void	sigrealloc(ksiginfo_t *);

sigset_t	contsigmask, stopsigmask, sigcantmask;
struct pool	sigacts_pool;	/* memory pool for sigacts structures */
static void	sigacts_poolpage_free(struct pool *, void *);
static void	*sigacts_poolpage_alloc(struct pool *, int);
static callout_t proc_stop_ch;

static struct pool_allocator sigactspool_allocator = {
	.pa_alloc = sigacts_poolpage_alloc,
	.pa_free = sigacts_poolpage_free,
};

#ifdef DEBUG
int	kern_logsigexit = 1;
#else
int	kern_logsigexit = 0;
#endif

static const char logcoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static const char lognocoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

POOL_INIT(siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
    NULL, IPL_VM);

/*
 * signal_init:
 *
 *	Initialize global signal-related data structures.
 */
void
signal_init(void)
{

	sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    sizeof(struct sigacts) > PAGE_SIZE ?
	    &sigactspool_allocator : &pool_allocator_nointr,
	    IPL_NONE);

	exechook_establish(ksiginfo_exechook, NULL);

	callout_init(&proc_stop_ch, 0);
	callout_setfunc(&proc_stop_ch, proc_stop_callout, NULL);
}

/*
 * sigacts_poolpage_alloc:
 *
 *	Allocate a page for the sigacts memory pool.
 */
static void *
sigacts_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map,
	    (PAGE_SIZE)*2, (PAGE_SIZE)*2,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

/*
 * sigacts_poolpage_free:
 *
 *	Free a page on behalf of the sigacts memory pool.
 */
static void
sigacts_poolpage_free(struct pool *pp, void *v)
{
	uvm_km_free(kernel_map, (vaddr_t)v, (PAGE_SIZE)*2, UVM_KMF_WIRED);
}

/*
 * sigactsinit:
 *
 *	Create an initial sigacts structure, using the same signal state
 *	as the parent process 'pp'.  If 'share' is set, share the parent's
 *	sigacts; otherwise just copy it.
 */
struct sigacts *
sigactsinit(struct proc *pp, int share)
{
	struct sigacts *ps, *ps2;

	ps = pp->p_sigacts;

	if (share) {
		mutex_enter(&ps->sa_mutex);
		ps->sa_refcnt++;
		mutex_exit(&ps->sa_mutex);
		ps2 = ps;
	} else {
		ps2 = pool_get(&sigacts_pool, PR_WAITOK);
		mutex_init(&ps2->sa_mutex, MUTEX_SPIN, IPL_SCHED);
		mutex_enter(&ps->sa_mutex);
		memcpy(&ps2->sa_sigdesc, ps->sa_sigdesc,
		    sizeof(ps2->sa_sigdesc));
		mutex_exit(&ps->sa_mutex);
		ps2->sa_refcnt = 1;
	}

	return ps2;
}

/*
 * sigactsunshare:
 *
 *	Make this process not share its sigacts, maintaining all
 *	signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *ps, *oldps;

	oldps = p->p_sigacts;
	if (oldps->sa_refcnt == 1)
		return;
	ps = pool_get(&sigacts_pool, PR_WAITOK);
	mutex_init(&ps->sa_mutex, MUTEX_SPIN, IPL_SCHED);
	memset(&ps->sa_sigdesc, 0, sizeof(ps->sa_sigdesc));
	p->p_sigacts = ps;
	sigactsfree(oldps);
}

/*
 * sigactsfree:
 *
 *	Release a reference to a sigacts structure, freeing it when the
 *	last reference is dropped.
 */
void
sigactsfree(struct sigacts *ps)
{
	int refcnt;

	mutex_enter(&ps->sa_mutex);
	refcnt = --ps->sa_refcnt;
	mutex_exit(&ps->sa_mutex);

	if (refcnt == 0) {
		mutex_destroy(&ps->sa_mutex);
		pool_put(&sigacts_pool, ps);
	}
}
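
/*
 * Usage sketch (illustrative only; the real callers live in the fork
 * and exit paths, not in this file).  The reference count managed
 * above pairs sigactsinit() with sigactsfree():
 *
 *	child->p_sigacts = sigactsinit(parent, flags & FORK_SHARESIGS);
 *		...
 *	sigactsfree(child->p_sigacts);	// during exit processing
 *
 * FORK_SHARESIGS is shown only as an assumed caller-side flag that
 * selects the 'share' behaviour.
 */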

/*
 * siginit:
 *
 *	Initialize signal state for process 0; set to ignore signals that
 *	are ignored by default and disable the signal stack.  Locking not
 *	required as the system is still cold.
 */
void
siginit(struct proc *p)
{
	struct lwp *l;
	struct sigacts *ps;
	int signo, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signo = 1; signo < NSIG; signo++) {
		prop = sigprop[signo];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signo);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signo);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signo);
		if (prop & SA_IGNORE && signo != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signo);
		sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
		SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sflag &= ~PS_NOCLDSTOP;

	ksiginfo_queue_init(&p->p_sigpend.sp_info);
	sigemptyset(&p->p_sigpend.sp_set);

	/*
	 * Reset per LWP state.
	 */
	l = LIST_FIRST(&p->p_lwps);
	l->l_sigwaited = NULL;
	l->l_sigstk.ss_flags = SS_DISABLE;
	l->l_sigstk.ss_size = 0;
	l->l_sigstk.ss_sp = 0;
	ksiginfo_queue_init(&l->l_sigpend.sp_info);
	sigemptyset(&l->l_sigpend.sp_set);

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * execsigs:
 *
 *	Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct lwp *l;
	int signo, prop;
	sigset_t tset;
	ksiginfoq_t kq;

	KASSERT(p->p_nlwps == 1);

	sigactsunshare(p);
	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held through
	 * l->l_sigmask (unless they were caught, and are now ignored
	 * by default).
	 *
	 * No need to lock yet, the process has only one LWP and
	 * at this point the sigacts are private to the process.
	 */
	sigemptyset(&tset);
	for (signo = 1; signo < NSIG; signo++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signo)) {
			prop = sigprop[signo];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signo);
				sigaddset(&tset, signo);
			}
			SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
		SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
	}
	ksiginfo_queue_init(&kq);

	mutex_enter(&p->p_smutex);
	sigclearall(p, &tset, &kq);
	sigemptyset(&p->p_sigctx.ps_sigcatch);

	/*
	 * Reset the "no zombies if child dies" flag, as Solaris does.
	 */
	p->p_flag &= ~(PK_NOCLDWAIT | PK_CLDSIGIGN);
	if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
		SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;

	/*
	 * Reset per-LWP state.
	 */
	l = LIST_FIRST(&p->p_lwps);
	l->l_sigwaited = NULL;
	l->l_sigstk.ss_flags = SS_DISABLE;
	l->l_sigstk.ss_size = 0;
	l->l_sigstk.ss_sp = 0;
	ksiginfo_queue_init(&l->l_sigpend.sp_info);
	sigemptyset(&l->l_sigpend.sp_set);
	mutex_exit(&p->p_smutex);

	ksiginfo_queue_drain(&kq);
}

/*
 * ksiginfo_exechook:
 *
 *	Free all pending ksiginfo entries from a process on exec.
 *	Additionally, drain any unused ksiginfo structures in the
 *	system back to the pool.
 *
 *	XXX This should not be a hook, every process has signals.
 */
static void
ksiginfo_exechook(struct proc *p, void *v)
{
	ksiginfoq_t kq;

	ksiginfo_queue_init(&kq);

	mutex_enter(&p->p_smutex);
	sigclearall(p, NULL, &kq);
	mutex_exit(&p->p_smutex);

	ksiginfo_queue_drain(&kq);
}

/*
 * ksiginfo_alloc:
 *
 *	Allocate a new ksiginfo structure from the pool, and optionally copy
 *	an existing one.  If the existing ksiginfo_t is from the pool, and
 *	has not been queued somewhere, then just return it.  Additionally,
 *	if the existing ksiginfo_t does not contain any information beyond
 *	the signal number, then just return it.
 */
ksiginfo_t *
ksiginfo_alloc(struct proc *p, ksiginfo_t *ok, int flags)
{
	ksiginfo_t *kp;
	int s;

	if (ok != NULL) {
		if ((ok->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) ==
		    KSI_FROMPOOL)
			return ok;
		if (KSI_EMPTY_P(ok))
			return ok;
	}

	s = splvm();
	kp = pool_get(&ksiginfo_pool, flags);
	splx(s);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating ksiginfo for pid %d\n",
		    p->p_pid);
#endif
		return NULL;
	}

	if (ok != NULL) {
		memcpy(kp, ok, sizeof(*kp));
		kp->ksi_flags &= ~KSI_QUEUED;
	} else
		KSI_INIT_EMPTY(kp);

	kp->ksi_flags |= KSI_FROMPOOL;

	return kp;
}

/*
 * ksiginfo_free:
 *
 *	If the given ksiginfo_t is from the pool and has not been queued,
 *	then free it.
 */
void
ksiginfo_free(ksiginfo_t *kp)
{
	int s;

	if ((kp->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) != KSI_FROMPOOL)
		return;
	s = splvm();
	pool_put(&ksiginfo_pool, kp);
	splx(s);
}
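
/*
 * Usage sketch (illustrative only): KSI_FROMPOOL and KSI_QUEUED encode
 * ownership, so a producer can free unconditionally.  kpsignal2()
 * below follows this pattern:
 *
 *	ksiginfo_t *kp;
 *
 *	if ((kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
 *		return;
 *	sigput(&p->p_sigpend, p, kp);	// sets KSI_QUEUED if it is kept
 *	ksiginfo_free(kp);		// no-op if the entry was queued
 */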

/*
 * ksiginfo_queue_drain:
 *
 *	Drain a non-empty ksiginfo_t queue.
 */
void
ksiginfo_queue_drain0(ksiginfoq_t *kq)
{
	ksiginfo_t *ksi;
	int s;

	KASSERT(!CIRCLEQ_EMPTY(kq));

	KERNEL_LOCK(1, curlwp);		/* XXXSMP */
	while (!CIRCLEQ_EMPTY(kq)) {
		ksi = CIRCLEQ_FIRST(kq);
		CIRCLEQ_REMOVE(kq, ksi, ksi_list);
		s = splvm();
		pool_put(&ksiginfo_pool, ksi);
		splx(s);
	}
	KERNEL_UNLOCK_ONE(curlwp);	/* XXXSMP */
}

/*
 * sigget:
 *
 *	Fetch the first pending signal from a set.  Optionally, also fetch
 *	or manufacture a ksiginfo element.  Returns the number of the first
 *	pending signal, or zero.
 */
int
sigget(sigpend_t *sp, ksiginfo_t *out, int signo, sigset_t *mask)
{
	ksiginfo_t *ksi;
	sigset_t tset;

	/* If there's no pending set, the signal is from the debugger. */
	if (sp == NULL) {
		if (out != NULL) {
			KSI_INIT(out);
			out->ksi_info._signo = signo;
			out->ksi_info._code = SI_USER;
		}
		return signo;
	}

	/* Construct mask from signo and 'mask'. */
	if (signo == 0) {
		if (mask != NULL) {
			tset = *mask;
			__sigandset(&sp->sp_set, &tset);
		} else
			tset = sp->sp_set;

		/* If there are no signals pending, that's it. */
		if ((signo = firstsig(&tset)) == 0)
			return 0;
	} else {
		KASSERT(sigismember(&sp->sp_set, signo));
	}

	sigdelset(&sp->sp_set, signo);

	/* Find siginfo and copy it out. */
	CIRCLEQ_FOREACH(ksi, &sp->sp_info, ksi_list) {
		if (ksi->ksi_signo == signo) {
			CIRCLEQ_REMOVE(&sp->sp_info, ksi, ksi_list);
			KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
			KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
			ksi->ksi_flags &= ~KSI_QUEUED;
			if (out != NULL) {
				memcpy(out, ksi, sizeof(*out));
				out->ksi_flags &= ~(KSI_FROMPOOL | KSI_QUEUED);
			}
			ksiginfo_free(ksi);
			return signo;
		}
	}

	/* If there's no siginfo, then manufacture it. */
	if (out != NULL) {
		KSI_INIT(out);
		out->ksi_info._signo = signo;
		out->ksi_info._code = SI_USER;
	}

	return signo;
}
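
/*
 * Usage sketch (illustrative only): a consumer such as postsig() asks
 * issignal() to choose a signal number and then dequeues the matching
 * siginfo with sigget():
 *
 *	ksiginfo_t ksi;
 *	int signo;
 *
 *	if ((signo = issignal(l)) != 0) {
 *		sigget(l->l_sigpendset, &ksi, signo, NULL);
 *		// ... deliver ksi ...
 *	}
 *
 * Passing signo == 0 instead makes sigget() pick the first pending
 * signal allowed by 'mask'.
 */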

/*
 * sigput:
 *
 *	Append a new ksiginfo element to the list of pending ksiginfo
 *	records, if we need to (e.g. SA_SIGINFO was requested).
 */
void
sigput(sigpend_t *sp, struct proc *p, ksiginfo_t *ksi)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);

	sigaddset(&sp->sp_set, ksi->ksi_signo);

	/*
	 * If siginfo is not required, or there is none, then just mark the
	 * signal as pending.
	 */
	if ((sa->sa_flags & SA_SIGINFO) == 0 || KSI_EMPTY_P(ksi))
		return;

	KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);

#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		CIRCLEQ_FOREACH(kp, &sp->sp_info, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				KSI_COPY(ksi, kp);
				kp->ksi_flags |= KSI_QUEUED;
				return;
			}
		}
	}

	ksi->ksi_flags |= KSI_QUEUED;
	CIRCLEQ_INSERT_TAIL(&sp->sp_info, ksi, ksi_list);
}

/*
 * sigclear:
 *
 *	Clear all pending signals in the specified set.
 */
void
sigclear(sigpend_t *sp, sigset_t *mask, ksiginfoq_t *kq)
{
	ksiginfo_t *ksi, *next;

	if (mask == NULL)
		sigemptyset(&sp->sp_set);
	else
		sigminusset(mask, &sp->sp_set);

	ksi = CIRCLEQ_FIRST(&sp->sp_info);
	for (; ksi != (void *)&sp->sp_info; ksi = next) {
		next = CIRCLEQ_NEXT(ksi, ksi_list);
		if (mask == NULL || sigismember(mask, ksi->ksi_signo)) {
			CIRCLEQ_REMOVE(&sp->sp_info, ksi, ksi_list);
			KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
			KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
			CIRCLEQ_INSERT_TAIL(kq, ksi, ksi_list);
		}
	}
}

/*
 * sigclearall:
 *
 *	Clear all pending signals in the specified set from a process and
 *	its LWPs.
 */
void
sigclearall(struct proc *p, sigset_t *mask, ksiginfoq_t *kq)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	sigclear(&p->p_sigpend, mask, kq);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		sigclear(&l->l_sigpend, mask, kq);
	}
}

/*
 * sigispending:
 *
 *	Return true if there are pending signals for the current LWP.  May
 *	be called unlocked provided that LW_PENDSIG is set, and that the
 *	signal has been posted to the appropriate queue before LW_PENDSIG
 *	is set.
 */
int
sigispending(struct lwp *l, int signo)
{
	struct proc *p = l->l_proc;
	sigset_t tset;

	membar_consumer();

	tset = l->l_sigpend.sp_set;
	sigplusset(&p->p_sigpend.sp_set, &tset);
	sigminusset(&p->p_sigctx.ps_sigignore, &tset);
	sigminusset(&l->l_sigmask, &tset);

	if (signo == 0) {
		if (firstsig(&tset) != 0)
			return EINTR;
	} else if (sigismember(&tset, signo))
		return EINTR;

	return 0;
}
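
/*
 * Usage sketch (illustrative only): because the check is unlocked,
 * sleep paths typically test LW_PENDSIG first and then confirm with
 * sigispending():
 *
 *	if ((l->l_flag & LW_PENDSIG) != 0 &&
 *	    (error = sigispending(l, 0)) != 0)
 *		return error;		// interrupted: EINTR
 *
 * The membar_consumer() above pairs with the membar_producer() issued
 * in kpsignal2() before sigpost() sets LW_PENDSIG.
 */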

/*
 * siginfo_alloc:
 *
 *	Allocate a new siginfo_t structure from the pool.
 */
siginfo_t *
siginfo_alloc(int flags)
{

	return pool_get(&siginfo_pool, flags);
}

/*
 * siginfo_free:
 *
 *	Return a siginfo_t structure to the pool.
 */
void
siginfo_free(void *arg)
{

	pool_put(&siginfo_pool, arg);
}

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&p->p_smutex));

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	ucp->uc_sigmask = l->l_sigmask;
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((l->l_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(l->l_proc->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = l->l_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;
	mutex_exit(&p->p_smutex);
	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
	mutex_enter(&p->p_smutex);
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(&p->p_smutex));

	if ((ucp->uc_flags & _UC_SIGMASK) != 0) {
		error = sigprocmask1(l, SIG_SETMASK, &ucp->uc_sigmask, NULL);
		if (error != 0)
			return error;
	}

	mutex_exit(&p->p_smutex);
	error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags);
	mutex_enter(&p->p_smutex);
	if (error != 0)
		return (error);

	l->l_ctxlink = ucp->uc_link;

	/*
	 * If there was stack information, update whether or not we are
	 * still running on an alternate signal stack.
	 */
	if ((ucp->uc_flags & _UC_STACK) != 0) {
		if (ucp->uc_stack.ss_flags & SS_ONSTACK)
			l->l_sigstk.ss_flags |= SS_ONSTACK;
		else
			l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	}

	return 0;
}

/*
 * Common code for kill process group/broadcast kill.  cp is the
 * calling process.
 */
int
killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p, *cp;
	kauth_cred_t pc;
	struct pgrp *pgrp;
	int nfound;
	int signo = ksi->ksi_signo;

	cp = l->l_proc;
	pc = l->l_cred;
	nfound = 0;

	mutex_enter(&proclist_lock);
	if (all) {
		/*
		 * broadcast
		 */
		PROCLIST_FOREACH(p, &allproc) {
			if (p->p_pid <= 1 || p->p_flag & PK_SYSTEM || p == cp)
				continue;
			mutex_enter(&p->p_mutex);
			if (kauth_authorize_process(pc,
			    KAUTH_PROCESS_CANSIGNAL, p,
			    (void *)(uintptr_t)signo, NULL, NULL) == 0) {
				nfound++;
				if (signo) {
					mutex_enter(&proclist_mutex);
					mutex_enter(&p->p_smutex);
					kpsignal2(p, ksi);
					mutex_exit(&p->p_smutex);
					mutex_exit(&proclist_mutex);
				}
			}
			mutex_exit(&p->p_mutex);
		}
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pg_find(pgid, PFIND_LOCKED);
			if (pgrp == NULL)
				goto out;
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & PK_SYSTEM)
				continue;
			mutex_enter(&p->p_mutex);
			if (kauth_authorize_process(pc, KAUTH_PROCESS_CANSIGNAL,
			    p, (void *)(uintptr_t)signo, NULL, NULL) == 0) {
				nfound++;
				if (signo) {
					mutex_enter(&proclist_mutex);
					mutex_enter(&p->p_smutex);
					if (P_ZOMBIE(p) == 0)
						kpsignal2(p, ksi);
					mutex_exit(&p->p_smutex);
					mutex_exit(&proclist_mutex);
				}
			}
			mutex_exit(&p->p_mutex);
		}
	}
 out:
	mutex_exit(&proclist_lock);
	return (nfound ? 0 : ESRCH);
}

/*
 * Send a signal to a process group.  If checkctty is set, limit to
 * members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;

	KASSERT(mutex_owned(&proclist_mutex));

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	KASSERT(mutex_owned(&proclist_mutex));

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_lflag & PL_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current LWP.  If it will be
 * caught immediately, deliver it with correct code.  Otherwise, post it
 * normally.
 */
void
trapsignal(struct lwp *l, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signo = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	ksi->ksi_lid = l->l_lid;
	p = l->l_proc;

	mutex_enter(&proclist_mutex);
	mutex_enter(&p->p_smutex);
	ps = p->p_sigacts;
	if ((p->p_slflag & PSL_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signo) &&
	    !sigismember(&l->l_sigmask, signo)) {
		mutex_exit(&proclist_mutex);
		p->p_stats->p_ru.ru_nsignals++;
		kpsendsig(l, ksi, &l->l_sigmask);
		mutex_exit(&p->p_smutex);
		ktrpsig(signo, SIGACTION_PS(ps, signo).sa_handler,
		    &l->l_sigmask, ksi);
	} else {
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_lwp = l->l_lid;
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi);
		mutex_exit(&proclist_mutex);
		mutex_exit(&p->p_smutex);
	}
}

/*
 * Fill in signal information and signal the parent for a child status change.
 */
void
child_psignal(struct proc *p, int mask)
{
	ksiginfo_t ksi;
	struct proc *q;
	int xstat;

	KASSERT(mutex_owned(&proclist_mutex));
	KASSERT(mutex_owned(&p->p_smutex));

	xstat = p->p_xstat;

	KSI_INIT(&ksi);
	ksi.ksi_signo = SIGCHLD;
	ksi.ksi_code = (xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED);
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
	ksi.ksi_status = xstat;
	ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;

	q = p->p_pptr;

	mutex_exit(&p->p_smutex);
	mutex_enter(&q->p_smutex);

	if ((q->p_sflag & mask) == 0)
		kpsignal2(q, &ksi);

	mutex_exit(&q->p_smutex);
	mutex_enter(&p->p_smutex);
}

void
psignal(struct proc *p, int signo)
{
	ksiginfo_t ksi;

	KASSERT(mutex_owned(&proclist_mutex));

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signo;
	mutex_enter(&p->p_smutex);
	kpsignal2(p, &ksi);
	mutex_exit(&p->p_smutex);
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	KASSERT(mutex_owned(&proclist_mutex));

	/* XXXSMP Why is this here? */
	if ((p->p_sflag & PS_WEXIT) == 0 && data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;

		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];
			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	mutex_enter(&p->p_smutex);
	kpsignal2(p, ksi);
	mutex_exit(&p->p_smutex);
}

/*
 * sigismasked:
 *
 *	Returns true if the signal is ignored or masked for the
 *	specified LWP.
 */
int
sigismasked(struct lwp *l, int sig)
{
	struct proc *p = l->l_proc;

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&l->l_sigmask, sig));
}

/*
 * sigpost:
 *
 *	Post a pending signal to an LWP.  Returns non-zero if the LWP was
 *	able to take the signal.
 */
int
sigpost(struct lwp *l, sig_t action, int prop, int sig)
{
	int rv, masked;

	KASSERT(mutex_owned(&l->l_proc->p_smutex));

	/*
	 * If the LWP is on the way out, sigclear() will be busy draining all
	 * pending signals.  Don't give it more.
	 */
	if (l->l_refcnt == 0)
		return 0;

	lwp_lock(l);

	/*
	 * Have the LWP check for signals.  This ensures that even if no LWP
	 * is found to take the signal immediately, it should be taken soon.
	 */
	l->l_flag |= LW_PENDSIG;

	/*
	 * SIGCONT can be masked, but must always restart stopped LWPs.
	 */
	masked = sigismember(&l->l_sigmask, sig);
	if (masked && ((prop & SA_CONT) == 0 || l->l_stat != LSSTOP)) {
		lwp_unlock(l);
		return 0;
	}

	/*
	 * If killing the process, make it run fast.
	 */
	if (__predict_false((prop & SA_KILL) != 0) &&
	    action == SIG_DFL && l->l_priority > PUSER)
		lwp_changepri(l, PUSER);

	/*
	 * If the LWP is running or on a run queue, then we win.  If it's
	 * sleeping interruptibly, wake it and make it take the signal.  If
	 * the sleep isn't interruptible, then the chances are it will get
	 * to see the signal soon anyhow.  If suspended, it can't take the
	 * signal right now.  If it's LWP private or for all LWPs, save it
	 * for later; otherwise punt.
	 */
	rv = 0;

	switch (l->l_stat) {
	case LSRUN:
	case LSONPROC:
		lwp_need_userret(l);
		rv = 1;
		break;

	case LSSLEEP:
		if ((l->l_flag & LW_SINTR) != 0) {
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		}
		break;

	case LSSUSPENDED:
		if ((prop & SA_KILL) != 0) {
			/* lwp_continue() will release the lock. */
			lwp_continue(l);
			return 1;
		}
		break;

	case LSSTOP:
		if ((prop & SA_STOP) != 0)
			break;

		/*
		 * If the LWP is stopped and we are sending a continue
		 * signal, then start it again.
		 */
		if ((prop & SA_CONT) != 0) {
			if (l->l_wchan != NULL) {
				l->l_stat = LSSLEEP;
				l->l_proc->p_nrlwps++;
				rv = 1;
				break;
			}
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		} else if (l->l_wchan == NULL || (l->l_flag & LW_SINTR) != 0) {
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		}
		break;

	default:
		break;
	}

	lwp_unlock(l);
	return rv;
}

/*
 * Notify an LWP that it has a pending signal.
 */
void
signotify(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	l->l_flag |= LW_PENDSIG;
	lwp_need_userret(l);
}

/*
 * Find an LWP within process p that is waiting on signal ksi, and hand
 * it on.
 */
int
sigunwait(struct proc *p, const ksiginfo_t *ksi)
{
	struct lwp *l;
	int signo;

	KASSERT(mutex_owned(&p->p_smutex));

	signo = ksi->ksi_signo;

	if (ksi->ksi_lid != 0) {
		/*
		 * Signal came via _lwp_kill().  Find the LWP and see if
		 * it's interested.
		 */
		if ((l = lwp_find(p, ksi->ksi_lid)) == NULL)
			return 0;
		if (l->l_sigwaited == NULL ||
		    !sigismember(&l->l_sigwaitset, signo))
			return 0;
	} else {
		/*
		 * Look for any LWP that may be interested.
		 */
		LIST_FOREACH(l, &p->p_sigwaiters, l_sigwaiter) {
			KASSERT(l->l_sigwaited != NULL);
			if (sigismember(&l->l_sigwaitset, signo))
				break;
		}
	}

	if (l != NULL) {
		l->l_sigwaited->ksi_info = ksi->ksi_info;
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
		cv_signal(&l->l_sigcv);
		return 1;
	}

	return 0;
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (e.g., blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
kpsignal2(struct proc *p, ksiginfo_t *ksi)
{
	int prop, lid, toall, signo = ksi->ksi_signo;
	struct sigacts *sa;
	struct lwp *l;
	ksiginfo_t *kp;
	ksiginfoq_t kq;
	sig_t action;

	KASSERT(mutex_owned(&proclist_mutex));
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);
	KASSERT(signo > 0 && signo < NSIG);

	/*
	 * If the process is being created by fork, is a zombie or is
	 * exiting, then just drop the signal here and bail out.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return;

	/*
	 * Notify any interested parties of the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signo);

	/*
	 * Some signals including SIGKILL must act on the entire process.
	 */
	kp = NULL;
	prop = sigprop[signo];
	toall = ((prop & SA_TOALL) != 0);

	if (toall)
		lid = 0;
	else
		lid = ksi->ksi_lid;

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_slflag & PSL_TRACED) {
		action = SIG_DFL;

		if (lid == 0) {
			/*
			 * If the process is being traced and the signal
			 * is being caught, make sure to save any ksiginfo.
			 */
			if ((kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
				return;
			sigput(&p->p_sigpend, p, kp);
		}
	} else {
		/*
		 * If the signal was the result of a trap and is not being
		 * caught, then reset it to default action so that the
		 * process dumps core immediately.
		 */
		if (KSI_TRAP_P(ksi)) {
			sa = p->p_sigacts;
			mutex_enter(&sa->sa_mutex);
			if (!sigismember(&p->p_sigctx.ps_sigcatch, signo)) {
				sigdelset(&p->p_sigctx.ps_sigignore, signo);
				SIGACTION(p, signo).sa_handler = SIG_DFL;
			}
			mutex_exit(&sa->sa_mutex);
		}

		/*
		 * If the signal is being ignored, then drop it.  Note: we
		 * don't set SIGCONT in ps_sigignore, and if it is set to
		 * SIG_IGN, action will be SIG_DFL here.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signo))
			return;

		else if (sigismember(&p->p_sigctx.ps_sigcatch, signo))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP &&
			    (p->p_sflag & PS_ORPHANPG) != 0)
				return;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;
		}
	}

	/*
	 * If stopping or continuing a process, discard any pending
	 * signals that would do the inverse.
	 */
	if ((prop & (SA_CONT | SA_STOP)) != 0) {
		ksiginfo_queue_init(&kq);
		if ((prop & SA_CONT) != 0)
			sigclear(&p->p_sigpend, &stopsigmask, &kq);
		if ((prop & SA_STOP) != 0)
			sigclear(&p->p_sigpend, &contsigmask, &kq);
		ksiginfo_queue_drain(&kq);	/* XXXSMP */
	}

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if any LWPs are waiting on it.  If yes, pass on
	 * the signal info.  The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0 && !LIST_EMPTY(&p->p_sigwaiters) &&
	    p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0 &&
	    sigunwait(p, ksi))
		return;

	/*
	 * XXXSMP Should be allocated by the caller, we're holding locks
	 * here.
	 */
	if (kp == NULL && (kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
		return;

	/*
	 * LWP private signals are easy - just find the LWP and post
	 * the signal to it.
	 */
	if (lid != 0) {
		l = lwp_find(p, lid);
		if (l != NULL) {
			sigput(&l->l_sigpend, p, kp);
			membar_producer();
			(void)sigpost(l, action, prop, kp->ksi_signo);
		}
		goto out;
	}

	/*
	 * Some signals go to all LWPs, even if posted with _lwp_kill().
	 */
	if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
		if ((p->p_slflag & PSL_TRACED) != 0)
			goto deliver;

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) != 0 && action == SIG_DFL)
			goto out;

		if ((prop & SA_STOP) != 0 && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked, stopping could
			 * cause deadlock: discard the signal.
			 */
			if ((p->p_sflag & PS_PPWAIT) == 0) {
				p->p_xstat = signo;
				proc_stop(p, 1, signo);
			}
			goto out;
		} else {
			/*
			 * Stop signals with the default action are handled
			 * specially in issignal(), and so are not enqueued.
			 */
			sigput(&p->p_sigpend, p, kp);
		}
	} else {
		/*
		 * Process is stopped or stopping.  If traced, then no
		 * further action is necessary.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && signo != SIGKILL)
			goto out;

		if ((prop & (SA_CONT | SA_KILL)) != 0) {
			/*
			 * Re-adjust p_nstopchild if the process wasn't
			 * collected by its parent.
			 */
			p->p_stat = SACTIVE;
			p->p_sflag &= ~PS_STOPPING;
			if (!p->p_waited)
				p->p_pptr->p_nstopchild--;

			/*
			 * If SIGCONT is default (or ignored), we continue
			 * the process but don't leave the signal in the
			 * pending set, as it has no further action.  If
			 * SIGCONT is held, we continue the process and
			 * leave the signal in the pending set.  If the
			 * process catches SIGCONT, let it handle the signal
			 * itself.  If it isn't waiting on an event, then it
			 * goes back to run state.  Otherwise, process goes
			 * back to sleep state.
			 */
			if ((prop & SA_CONT) == 0 || action != SIG_DFL)
				sigput(&p->p_sigpend, p, kp);
		} else if ((prop & SA_STOP) != 0) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			goto out;
		} else
			sigput(&p->p_sigpend, p, kp);
	}

 deliver:
	/*
	 * Before we set LW_PENDSIG on any LWP, ensure that the signal is
	 * visible on the per-process list (for sigispending()).  This
	 * is unlikely to be needed in practice, but...
	 */
	membar_producer();

	/*
	 * Try to find an LWP that can take the signal.
	 */
	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		if (sigpost(l, action, prop, kp->ksi_signo) && !toall)
			break;

 out:
	/*
	 * If the ksiginfo wasn't used, then bin it.  XXXSMP freeing memory
	 * with locks held.  The caller should take care of this.
	 */
	ksiginfo_free(kp);
}

void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(&p->p_smutex));

	(*p->p_emul->e_sendsig)(ksi, mask);
}

/*
 * Stop the current process and switch away when being stopped or traced.
 */
void
sigswitch(bool ppsig, int ppmask, int signo)
{
	struct lwp *l = curlwp, *l2;
	struct proc *p = l->l_proc;
#ifdef MULTIPROCESSOR
	int biglocks;
#endif

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(p->p_nrlwps > 0);

	/*
	 * On entry we know that the process needs to stop.  If it's
	 * the result of a 'sideways' stop signal that has been sourced
	 * through issignal(), then stop other LWPs in the process too.
	 */
	if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
		/*
		 * Set the stopping indicator and bring all sleeping LWPs
		 * to a halt so they are included in p->p_nrlwps.
		 */
		p->p_sflag |= (PS_STOPPING | PS_NOTIFYSTOP);
		membar_producer();

		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			if (l2->l_stat == LSSLEEP &&
			    (l2->l_flag & LW_SINTR) != 0) {
				l2->l_stat = LSSTOP;
				p->p_nrlwps--;
			}
			lwp_unlock(l2);
		}

		/*
		 * Have the remaining LWPs come to a halt, and trigger
		 * proc_stop_callout() to ensure that they do.
		 */
		KASSERT(signo != 0);
		KASSERT(p->p_nrlwps > 0);

		if (p->p_nrlwps > 1) {
			LIST_FOREACH(l2, &p->p_lwps, l_sibling)
				sigpost(l2, SIG_DFL, SA_STOP, signo);
			callout_schedule(&proc_stop_ch, 1);
		}
	}

	/*
	 * If we are the last live LWP, and the stop was a result of
	 * a new signal, then signal the parent.
	 */
	if ((p->p_sflag & PS_STOPPING) != 0) {
		if (!mutex_tryenter(&proclist_mutex)) {
			mutex_exit(&p->p_smutex);
			mutex_enter(&proclist_mutex);
			mutex_enter(&p->p_smutex);
		}

		if (p->p_nrlwps == 1 && (p->p_sflag & PS_STOPPING) != 0) {
			p->p_sflag &= ~PS_STOPPING;
			p->p_stat = SSTOP;
			p->p_waited = 0;
			p->p_pptr->p_nstopchild++;
			if ((p->p_sflag & PS_NOTIFYSTOP) != 0) {
				/*
				 * Note that child_psignal() will drop
				 * p->p_smutex briefly.
				 */
				if (ppsig)
					child_psignal(p, ppmask);
				cv_broadcast(&p->p_pptr->p_waitcv);
			}
		}

		mutex_exit(&proclist_mutex);
	}

	/*
	 * Unlock and switch away.
	 */
	KERNEL_UNLOCK_ALL(l, &biglocks);
	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
		p->p_nrlwps--;
		lwp_lock(l);
		KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSLEEP);
		l->l_stat = LSSTOP;
		lwp_unlock(l);
	}

	mutex_exit(&p->p_smutex);
	lwp_lock(l);
	mi_switch(l);
	KERNEL_LOCK(biglocks, l);
	mutex_enter(&p->p_smutex);
}

/*
 * Check for a signal from the debugger.
 */
int
sigchecktrace(sigpend_t **spp)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	int signo;

	KASSERT(mutex_owned(&p->p_smutex));

	/*
	 * If we are no longer being traced, or the parent didn't
	 * give us a signal, look for more signals.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 || p->p_xstat == 0)
		return 0;

	/* If there's a pending SIGKILL, process it immediately. */
	if (sigismember(&p->p_sigpend.sp_set, SIGKILL))
		return 0;

	/*
	 * If the new signal is being masked, look for other signals.
	 * `p->p_sigctx.ps_siglist |= mask' is done in setrunnable().
	 */
	signo = p->p_xstat;
	p->p_xstat = 0;
	if ((sigprop[signo] & SA_TOLWP) != 0)
		*spp = &l->l_sigpend;
	else
		*spp = &p->p_sigpend;
	if (sigismember(&l->l_sigmask, signo))
		signo = 0;

	return signo;
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 *
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap.
 *
 * We will also return -1 if the process is exiting and the current LWP must
 * follow suit.
 *
 * Note that we may be called while on a sleep queue, so MUST NOT sleep.  We
 * can switch away, though.
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int signo = 0, prop;
	sigpend_t *sp = NULL;
	sigset_t ss;

	KASSERT(mutex_owned(&p->p_smutex));

	for (;;) {
		/* Discard any signals that we have decided not to take. */
		if (signo != 0)
			(void)sigget(sp, NULL, signo, NULL);

		/*
		 * If the process is stopped/stopping, then stop ourselves
		 * now that we're on the kernel/userspace boundary.  When
		 * we awaken, check for a signal from the debugger.
		 */
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			sigswitch(true, PS_NOCLDSTOP, 0);
			signo = sigchecktrace(&sp);
		} else
			signo = 0;

		/*
		 * If the debugger didn't provide a signal, find a pending
		 * signal from our set.  Check per-LWP signals first, and
		 * then per-process.
		 */
		if (signo == 0) {
			sp = &l->l_sigpend;
			ss = sp->sp_set;
			if ((p->p_sflag & PS_PPWAIT) != 0)
				sigminusset(&stopsigmask, &ss);
			sigminusset(&l->l_sigmask, &ss);

			if ((signo = firstsig(&ss)) == 0) {
				sp = &p->p_sigpend;
				ss = sp->sp_set;
				if ((p->p_sflag & PS_PPWAIT) != 0)
					sigminusset(&stopsigmask, &ss);
				sigminusset(&l->l_sigmask, &ss);

				if ((signo = firstsig(&ss)) == 0) {
					/*
					 * No signal pending - clear the
					 * indicator and bail out.
					 */
					lwp_lock(l);
					l->l_flag &= ~LW_PENDSIG;
					lwp_unlock(l);
					sp = NULL;
					break;
				}
			}
		}

		/*
		 * We should see pending but ignored signals only if
		 * we are being traced.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signo) &&
		    (p->p_slflag & PSL_TRACED) == 0) {
			/* Discard the signal. */
			continue;
		}

		/*
		 * If traced, always stop, and stay stopped until released
		 * by the debugger.  If our parent process is waiting for
		 * us, don't hang as we could deadlock.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 &&
		    (p->p_sflag & PS_PPWAIT) == 0 && signo != SIGKILL) {
			/* Take the signal. */
			(void)sigget(sp, NULL, signo, NULL);
			p->p_xstat = signo;

			/* Emulation-specific handling of signal trace */
			if (p->p_emul->e_tracesig == NULL ||
			    (*p->p_emul->e_tracesig)(p, signo) == 0)
				sigswitch(!(p->p_slflag & PSL_FSTRACE), 0,
				    signo);

			/* Check for a signal from the debugger. */
			if ((signo = sigchecktrace(&sp)) == 0)
				continue;
		}

		prop = sigprop[signo];

		/*
		 * Decide whether the signal should be returned.
		 */
		switch ((long)SIGACTION(p, signo).sa_handler) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf_nolog("Process (pid %d) got sig %d\n",
				    p->p_pid, signo);
#endif
				continue;
			}

			/*
			 * If there is a pending stop signal to process with
			 * default action, stop here, then clear the signal.
			 * However, if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_slflag & PSL_TRACED ||
				    ((p->p_sflag & PS_ORPHANPG) != 0 &&
				    prop & SA_TTYSTOP)) {
					/* Ignore the signal. */
					continue;
				}
				/* Take the signal. */
				(void)sigget(sp, NULL, signo, NULL);
				p->p_xstat = signo;
				signo = 0;
				sigswitch(true, PS_NOCLDSTOP, p->p_xstat);
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				continue;
			}
			break;

		case (long)SIG_IGN:
#ifdef DEBUG_ISSIGNAL
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_slflag & PSL_TRACED) == 0)
				printf_nolog("issignal\n");
#endif
			continue;

		default:
			/*
			 * This signal has an action, let postsig() process
			 * it.
			 */
			break;
		}

		break;
	}

	l->l_sigpendset = sp;
	return signo;
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signo)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;
	ksiginfo_t ksi;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(signo > 0);

	/*
	 * Set the new mask value and also defer further occurrences of this
	 * signal.
	 *
	 * Special case: user has done a sigpause.  Here the current mask is
	 * not of interest, but rather the mask from before the sigpause is
	 * what we want restored after the signal processing is completed.
	 */
	if (l->l_sigrestore) {
		returnmask = &l->l_sigoldmask;
		l->l_sigrestore = 0;
	} else
		returnmask = &l->l_sigmask;

	/*
	 * Commit to taking the signal before releasing the mutex.
	 */
	action = SIGACTION_PS(ps, signo).sa_handler;
	p->p_stats->p_ru.ru_nsignals++;
	sigget(l->l_sigpendset, &ksi, signo, NULL);

	if (ktrpoint(KTR_PSIG)) {
		mutex_exit(&p->p_smutex);
		ktrpsig(signo, action, returnmask, NULL);
		mutex_enter(&p->p_smutex);
	}

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signo);
		return;
	}

	/*
	 * If we get here, the signal must be caught.
	 */
#ifdef DIAGNOSTIC
	if (action == SIG_IGN || sigismember(&l->l_sigmask, signo))
		panic("postsig action");
#endif

	kpsendsig(l, &ksi, returnmask);
}

/*
 * sendsig_reset:
 *
 *	Reset the signal action.  Called from emulation specific sendsig()
 *	before unlocking to deliver the signal.
 */
void
sendsig_reset(struct lwp *l, int signo)
{
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_sigctx.ps_lwp = 0;
	p->p_sigctx.ps_code = 0;
	p->p_sigctx.ps_signo = 0;

	mutex_enter(&ps->sa_mutex);
	sigplusset(&SIGACTION_PS(ps, signo).sa_mask, &l->l_sigmask);
	if (SIGACTION_PS(ps, signo).sa_flags & SA_RESETHAND) {
		sigdelset(&p->p_sigctx.ps_sigcatch, signo);
		if (signo != SIGCONT && sigprop[signo] & SA_IGNORE)
			sigaddset(&p->p_sigctx.ps_sigignore, signo);
		SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
	}
	mutex_exit(&ps->sa_mutex);
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(struct proc *p, const char *why)
{
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf_locked("sorry, pid %d was killed: %s\n", p->p_pid, why);
	mutex_enter(&proclist_mutex);	/* XXXSMP */
	psignal(p, SIGKILL);
	mutex_exit(&proclist_mutex);	/* XXXSMP */
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught
 * signals, allowing unrecoverable failures to terminate the process without
 * changing signal state.  Mark the accounting record with the signal
 * termination.  If dumping core, save the signal number for the debugger.
 * Calls exit and does not return.
 */
void
sigexit(struct lwp *l, int signo)
{
	int exitsig, error, docore;
	struct proc *p;
	struct lwp *t;

	p = l->l_proc;

	KASSERT(mutex_owned(&p->p_smutex));
	KERNEL_UNLOCK_ALL(l, NULL);

	/*
	 * Don't permit coredump() multiple times in the same process.
	 * Call back into sigexit, where we will be suspended until
	 * the deed is done.  Note that this is a recursive call, but
	 * LW_WCORE will prevent us from coming back this way.
	 */
	if ((p->p_sflag & PS_WCORE) != 0) {
		lwp_lock(l);
		l->l_flag |= (LW_WCORE | LW_WEXIT | LW_WSUSPEND);
		lwp_unlock(l);
		mutex_exit(&p->p_smutex);
		lwp_userret(l);
#ifdef DIAGNOSTIC
		panic("sigexit");
#endif
		/* NOTREACHED */
	}

	/*
	 * Prepare all other LWPs for exit.  If dumping core, suspend them
	 * so that their registers are available long enough to be dumped.
	 */
	if ((docore = (sigprop[signo] & SA_CORE)) != 0) {
		p->p_sflag |= PS_WCORE;
		for (;;) {
			LIST_FOREACH(t, &p->p_lwps, l_sibling) {
				lwp_lock(t);
				if (t == l) {
					t->l_flag &= ~LW_WSUSPEND;
					lwp_unlock(t);
					continue;
				}
				t->l_flag |= (LW_WCORE | LW_WEXIT);
				lwp_suspend(l, t);
			}

			if (p->p_nrlwps == 1)
				break;

			/*
			 * Kick any LWPs sitting in lwp_wait1(), and wait
			 * for everyone else to stop before proceeding.
			 */
			p->p_nlwpwait++;
			cv_broadcast(&p->p_lwpcv);
			cv_wait(&p->p_lwpcv, &p->p_smutex);
			p->p_nlwpwait--;
		}
	}

	exitsig = signo;
	p->p_acflag |= AXSIG;
	p->p_sigctx.ps_signo = signo;
	mutex_exit(&p->p_smutex);

	KERNEL_LOCK(1, l);

	if (docore) {
		if ((error = coredump(l, NULL)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			int uid = l->l_cred ?
			    (int)kauth_cred_geteuid(l->l_cred) : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signo, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signo);
		}

#ifdef PAX_SEGVGUARD
		pax_segvguard(l, p->p_textvp, p->p_comm, true);
#endif /* PAX_SEGVGUARD */
	}

	/* Acquire the sched state mutex.  exit1() will release it. */
	mutex_enter(&p->p_smutex);

	/* No longer dumping core. */
	p->p_sflag &= ~PS_WCORE;

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

/*
 * Put process 'p' into the stopped state and optionally, notify the parent.
 */
void
proc_stop(struct proc *p, int notify, int signo)
{
	struct lwp *l;

	KASSERT(mutex_owned(&proclist_mutex));
	KASSERT(mutex_owned(&p->p_smutex));

	/*
	 * First off, set the stopping indicator and bring all sleeping
	 * LWPs to a halt so they are included in p->p_nrlwps.  We mustn't
	 * unlock between here and the p->p_nrlwps check below.
	 */
	p->p_sflag |= PS_STOPPING;
	membar_producer();

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_stat == LSSLEEP && (l->l_flag & LW_SINTR) != 0) {
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		lwp_unlock(l);
	}

	/*
	 * If there are no LWPs available to take the signal, then we
	 * signal the parent process immediately.  Otherwise, the last
	 * LWP to stop will take care of it.
	 */
	if (notify)
		p->p_sflag |= PS_NOTIFYSTOP;
	else
		p->p_sflag &= ~PS_NOTIFYSTOP;

	if (p->p_nrlwps == 0) {
		p->p_sflag &= ~PS_STOPPING;
		p->p_stat = SSTOP;
		p->p_waited = 0;
		p->p_pptr->p_nstopchild++;

		if (notify) {
			child_psignal(p, PS_NOCLDSTOP);
			cv_broadcast(&p->p_pptr->p_waitcv);
		}
	} else {
		/*
		 * Have the remaining LWPs come to a halt, and trigger
		 * proc_stop_callout() to ensure that they do.
		 */
		LIST_FOREACH(l, &p->p_lwps, l_sibling)
			sigpost(l, SIG_DFL, SA_STOP, signo);
		callout_schedule(&proc_stop_ch, 1);
	}
}

/*
 * When stopping a process, we do not immediately set sleeping LWPs stopped,
 * but wait for them to come to a halt at the kernel-user boundary.  This is
 * to allow LWPs to release any locks that they may hold before stopping.
 *
 * Non-interruptible sleeps can be long, and there is the potential for an
 * LWP to begin sleeping interruptibly soon after the process has been set
 * stopping (PS_STOPPING).  These LWPs will not notice that the process is
 * stopping, and so complete halt of the process and the return of status
 * information to the parent could be delayed indefinitely.
 *
 * To handle this race, proc_stop_callout() runs once per tick while there
 * are stopping processes in the system.  It sets LWPs that are sleeping
 * interruptibly into the LSSTOP state.
 *
 * Note that we are not concerned about keeping all LWPs stopped while the
 * process is stopped: stopped LWPs can awaken briefly to handle signals.
 * What we do need to ensure is that all LWPs in a stopping process have
 * stopped at least once, so that notification can be sent to the parent
 * process.
 */
static void
proc_stop_callout(void *cookie)
{
	bool more, restart;
	struct proc *p;
	struct lwp *l;

	(void)cookie;

	do {
		restart = false;
		more = false;

		mutex_enter(&proclist_lock);
		mutex_enter(&proclist_mutex);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_smutex);

			if ((p->p_sflag & PS_STOPPING) == 0) {
				mutex_exit(&p->p_smutex);
				continue;
			}

			/* Stop any LWPs sleeping interruptibly. */
			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_stat == LSSLEEP &&
				    (l->l_flag & LW_SINTR) != 0) {
					l->l_stat = LSSTOP;
					p->p_nrlwps--;
				}
				lwp_unlock(l);
			}

			if (p->p_nrlwps == 0) {
				/*
				 * We brought the process to a halt.
				 * Mark it as stopped and notify the
				 * parent.
				 */
				p->p_sflag &= ~PS_STOPPING;
				p->p_stat = SSTOP;
				p->p_waited = 0;
				p->p_pptr->p_nstopchild++;
				if ((p->p_sflag & PS_NOTIFYSTOP) != 0) {
					/*
					 * Note that child_psignal() will
					 * drop p->p_smutex briefly.
					 * Arrange to restart and check
					 * all processes again.
					 */
					restart = true;
					child_psignal(p, PS_NOCLDSTOP);
					cv_broadcast(&p->p_pptr->p_waitcv);
				}
			} else
				more = true;

			mutex_exit(&p->p_smutex);
			if (restart)
				break;
		}
		mutex_exit(&proclist_mutex);
		mutex_exit(&proclist_lock);
	} while (restart);

	/*
	 * If we noted processes that are stopping but still have
	 * running LWPs, then arrange to check again in 1 tick.
	 */
	if (more)
		callout_schedule(&proc_stop_ch, 1);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 */
void
proc_unstop(struct proc *p)
{
	struct lwp *l;
	int sig;

	KASSERT(mutex_owned(&proclist_mutex));
	KASSERT(mutex_owned(&p->p_smutex));

	p->p_stat = SACTIVE;
	p->p_sflag &= ~PS_STOPPING;
	sig = p->p_xstat;

	if (!p->p_waited)
		p->p_pptr->p_nstopchild--;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_stat != LSSTOP) {
			lwp_unlock(l);
			continue;
		}
		if (l->l_wchan == NULL) {
			setrunnable(l);
			continue;
		}
		if (sig && (l->l_flag & LW_SINTR) != 0) {
			setrunnable(l);
			sig = 0;
		} else {
			l->l_stat = LSSLEEP;
			p->p_nrlwps++;
			lwp_unlock(l);
		}
	}
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};