/*	$NetBSD: pthread_atfork.c,v 1.19 2025/02/28 16:00:26 christos Exp $	*/
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #if defined(LIBC_SCCS) && !defined(lint)
34 __RCSID("$NetBSD: pthread_atfork.c,v 1.19 2025/02/28 16:00:26 christos Exp $");
35 #endif /* LIBC_SCCS and not lint */
36
37 #include "namespace.h"
38
39 #include <sys/queue.h>
40 #include <sys/mman.h>
41 #include <errno.h>
42 #include <stdlib.h>
43 #include <unistd.h>
44
45 #include "extern.h"
46 #include "reentrant.h"
47
#ifdef __weak_alias
/*
 * Export the standard names as weak aliases of the namespace-protected
 * internal names (namespace.h renames the definitions below), so a
 * stronger definition elsewhere (e.g. libpthread) can take precedence.
 */
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */
52
/*
 * __locked_fork: perform the actual fork system call.  Called from
 * fork() below while atfork_lock is held and all signals are handled
 * by the caller's context.  The my_errno argument is unused in this
 * (single-threaded libc) version; NOTE(review): presumably it exists
 * so an overriding threaded implementation can report the error
 * through a thread-specific errno — confirm against libpthread.
 */
pid_t
__locked_fork(int *my_errno)
{
	return __fork();
}
58
/* One registered atfork handler, linked into one of the queues below. */
struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;
	void (*fn)(void);	/* handler; NULL marks the builtin slot free */
};

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
static struct atfork_callback atfork_builtin;	/* static slot for the first registration */
#ifdef _REENTRANT
static mutex_t atfork_lock = MUTEX_INITIALIZER;	/* serializes pthread_atfork() and fork() */
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

/* One queue per handler phase: prepare (pre-fork), parent, child. */
static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);
79
80 static struct atfork_callback *
81 af_alloc(void)
82 {
83 void *rv;
84
85 if (atfork_builtin.fn == NULL)
86 return &atfork_builtin;
87
88 rv = mmap(0, sizeof(atfork_builtin), PROT_READ|PROT_WRITE, MAP_PRIVATE,
89 -1, 0);
90 return rv == MAP_FAILED ? NULL : rv;
91 }
92
93 static void
94 af_free(struct atfork_callback *af)
95 {
96
97 if (af != &atfork_builtin)
98 munmap(af, sizeof(*af));
99 }
100
/*
 * pthread_atfork: register prepare/parent/child handlers to run
 * around fork(), per POSIX.  Any of the three pointers may be NULL.
 * Returns 0 on success, or ENOMEM if a callback record cannot be
 * allocated (in which case nothing is registered).
 */
int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;
	sigset_t mask, omask;
	int error;

	newprepare = newparent = newchild = NULL;

	/*
	 * Block all signals while atfork_lock is held; a signal handler
	 * that called fork() (or this function) here would deadlock on
	 * the lock.
	 */
	sigfillset(&mask);
	thr_sigsetmask(SIG_SETMASK, &mask, &omask);

	mutex_lock(&atfork_lock);
	/*
	 * Allocate all needed records before touching the queues, so an
	 * allocation failure registers nothing at all.
	 */
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			error = ENOMEM;
			goto out;
		}
		newprepare->fn = prepare;
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			/* Undo the earlier allocation before failing. */
			if (newprepare != NULL)
				af_free(newprepare);
			error = ENOMEM;
			goto out;
		}
		newparent->fn = parent;
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			/* Undo both earlier allocations before failing. */
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			error = ENOMEM;
			goto out;
		}
		newchild->fn = child;
	}

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
	if (prepare)
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	if (parent)
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	if (child)
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	error = 0;

	/* Common exit: drop the lock, then restore the signal mask. */
out:	mutex_unlock(&atfork_lock);
	thr_sigsetmask(SIG_SETMASK, &omask, NULL);
	return error;
}
166
/*
 * fork: libc's fork() wrapper.  Runs registered prepare handlers
 * (LIFO — prepareq is built by head insertion), quiesces malloc via
 * _malloc_prefork(), performs the fork, then runs the parent handlers
 * in the parent and the child handlers in the child (both FIFO), with
 * the matching _malloc_postfork*() call first in each.  All of this
 * happens under atfork_lock so forks are serialized against handler
 * registration and each other.
 */
pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent. It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock. Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork(). Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state. There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
226