/*	$NetBSD: pthread_atfork.c,v 1.24 2025/03/02 22:46:23 riastradh Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: pthread_atfork.c,v 1.24 2025/03/02 22:46:23 riastradh Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"

#include <sys/queue.h>

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

#include "atfork.h"
#include "extern.h"
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */

pid_t
__locked_fork(int *my_errno)
{
	return __fork();
}
/*
 * Keep a cache of 3: one for prepare, one for parent, one for child.
 * This is so that we don't have to allocate memory for the call from the
 * pthread_tsd_init() constructor, where it is too early to call malloc(3).
 */
static struct atfork_callback atfork_builtin[3];

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
#ifdef _REENTRANT
static mutex_t atfork_lock = MUTEX_INITIALIZER;
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);

static struct atfork_callback *
af_alloc(void)
{

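	/* Prefer a free slot in the static pool; fall back to the heap. */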
	for (size_t i = 0; i < __arraycount(atfork_builtin); i++) {
		if (atfork_builtin[i].fn == NULL)
			return &atfork_builtin[i];
	}

	return malloc(sizeof(*atfork_builtin));
}

static void
af_free(struct atfork_callback *af)
{

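	/*
	 * Entries in the static pool are recycled by clearing fn (the
	 * free-slot marker that af_alloc() checks); anything else came
	 * from malloc() and must be freed.
	 */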
	if (af >= atfork_builtin &&
	    af < atfork_builtin + __arraycount(atfork_builtin))
		af->fn = NULL;
	else
		free(af);
}

static void
__libc_atfork_locked(
    struct atfork_callback *restrict newprepare, void (*prepare)(void),
    struct atfork_callback *restrict newparent, void (*parent)(void),
    struct atfork_callback *restrict newchild, void (*child)(void))
{

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
	if (prepare) {
		newprepare->fn = prepare;
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	}
	if (parent) {
		newparent->fn = parent;
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	}
	if (child) {
		newchild->fn = child;
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	}
}
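
/*
 * For illustration (a sketch with hypothetical handler names, not part
 * of this file): after two successive registrations
 *
 *	pthread_atfork(prepare_a, parent_a, child_a);
 *	pthread_atfork(prepare_b, parent_b, child_b);
 *
 * a fork() runs prepare_b then prepare_a (LIFO), but parent_a then
 * parent_b, and child_a then child_b (FIFO), matching the head/tail
 * insertion above.
 */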

void
__libc_atfork(
    struct atfork_callback *restrict newprepare, void (*prepare)(void),
    struct atfork_callback *restrict newparent, void (*parent)(void),
    struct atfork_callback *restrict newchild, void (*child)(void))
{
	sigset_t mask, omask;

	sigfillset(&mask);
	thr_sigsetmask(SIG_SETMASK, &mask, &omask);

	mutex_lock(&atfork_lock);
	__libc_atfork_locked(newprepare, prepare,
	    newparent, parent,
	    newchild, child);
	mutex_unlock(&atfork_lock);

	thr_sigsetmask(SIG_SETMASK, &omask, NULL);
}

int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;
	sigset_t mask, omask;
	int error;

	newprepare = newparent = newchild = NULL;

	sigfillset(&mask);
	thr_sigsetmask(SIG_SETMASK, &mask, &omask);

	mutex_lock(&atfork_lock);
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			error = ENOMEM;
			goto out;
		}
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			error = ENOMEM;
			goto out;
		}
	}

	__libc_atfork_locked(newprepare, prepare,
	    newparent, parent,
	    newchild, child);
	error = 0;

out:	mutex_unlock(&atfork_lock);
	thr_sigsetmask(SIG_SETMASK, &omask, NULL);
	return error;
}
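
/*
 * Example usage (an illustrative sketch, not part of this library):
 * the conventional pattern is to take a lock in the prepare handler
 * and release it on both sides of the fork, so the child inherits the
 * protected state unlocked and consistent.
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { pthread_mutex_lock(&lock); }
 *	static void parent(void)  { pthread_mutex_unlock(&lock); }
 *	static void child(void)   { pthread_mutex_unlock(&lock); }
 *
 *	...
 *	if (pthread_atfork(prepare, parent, child) != 0)
 *		abort();	(the only failure mode is ENOMEM)
 */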

pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent.  It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child. */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock.  Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork().  Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 *
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 *
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state.  There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
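
/*
 * The mutex_init() above is what lets a double fork work, e.g. the
 * classic daemonization sequence (an illustrative sketch, not part of
 * this file):
 *
 *	switch (fork()) {		(first fork: runs the handlers)
 *	case -1:
 *		err(1, "fork");
 *	case 0:
 *		if (fork() == 0) {	(second fork, in the child)
 *			... grandchild continues as the daemon ...
 *		}
 *		_exit(0);
 *	default:
 *		... parent reaps the intermediate child ...
 *	}
 *
 * Had the child left atfork_lock locked instead of reinitializing it,
 * the second fork() would deadlock on mutex_lock(&atfork_lock).
 */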