pthread_atfork.c revision 1.22 1 /* $NetBSD: pthread_atfork.c,v 1.22 2025/03/01 18:19:50 christos Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #if defined(LIBC_SCCS) && !defined(lint)
34 __RCSID("$NetBSD: pthread_atfork.c,v 1.22 2025/03/01 18:19:50 christos Exp $");
35 #endif /* LIBC_SCCS and not lint */
36
37 #include "namespace.h"
38
39 #include <errno.h>
40 #include <stdlib.h>
41 #include <unistd.h>
42 #include <sys/queue.h>
43 #include "extern.h"
44 #include "reentrant.h"
45
46 #ifdef __weak_alias
47 __weak_alias(pthread_atfork, _pthread_atfork)
48 __weak_alias(fork, _fork)
49 #endif /* __weak_alias */
50
51 pid_t
52 __locked_fork(int *my_errno)
53 {
54 return __fork();
55 }
56
/*
 * One registered atfork handler.  Each pthread_atfork() call allocates
 * up to three of these (prepare/parent/child), linked on the matching
 * queue below.
 */
struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;	/* queue linkage */
	void (*fn)(void);	/* handler; NULL marks a free builtin slot */
};
61
62
63 /*
64 * Keep a cache for of 3, one for prepare, one for parent, one for child.
65 * This is so that we don't have to allocate memory for the call from the
66 * pthread_tsd_init() constructor, where it is too early to call malloc(3).
67 */
68 static struct atfork_callback atfork_builtin[3];
69
70 /*
71 * Hypothetically, we could protect the queues with a rwlock which is
72 * write-locked by pthread_atfork() and read-locked by fork(), but
73 * since the intended use of the functions is obtaining locks to hold
74 * across the fork, forking is going to be serialized anyway.
75 */
76 #ifdef _REENTRANT
77 static mutex_t atfork_lock = MUTEX_INITIALIZER;
78 #endif
79 SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);
80
81 static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
82 static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
83 static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);
84
85 static struct atfork_callback *
86 af_alloc(void)
87 {
88 for (size_t i = 0; i < __arraycount(atfork_builtin); i++) {
89 if (atfork_builtin[i].fn == NULL)
90 return &atfork_builtin[i];
91 }
92
93 return malloc(sizeof(atfork_builtin));
94 }
95
96 static void
97 af_free(struct atfork_callback *af)
98 {
99 for (size_t i = 0; i < __arraycount(atfork_builtin); i++) {
100 if (af == &atfork_builtin[i]) {
101 atfork_builtin[i].fn = NULL;
102 return;
103 }
104 }
105
106 free(af);
107 }
108
109 int
110 pthread_atfork(void (*prepare)(void), void (*parent)(void),
111 void (*child)(void))
112 {
113 struct atfork_callback *newprepare, *newparent, *newchild;
114 sigset_t mask, omask;
115 int error;
116
117 newprepare = newparent = newchild = NULL;
118
119 sigfillset(&mask);
120 thr_sigsetmask(SIG_SETMASK, &mask, &omask);
121
122 mutex_lock(&atfork_lock);
123 if (prepare != NULL) {
124 newprepare = af_alloc();
125 if (newprepare == NULL) {
126 error = ENOMEM;
127 goto out;
128 }
129 newprepare->fn = prepare;
130 }
131
132 if (parent != NULL) {
133 newparent = af_alloc();
134 if (newparent == NULL) {
135 if (newprepare != NULL)
136 af_free(newprepare);
137 error = ENOMEM;
138 goto out;
139 }
140 newparent->fn = parent;
141 }
142
143 if (child != NULL) {
144 newchild = af_alloc();
145 if (newchild == NULL) {
146 if (newprepare != NULL)
147 af_free(newprepare);
148 if (newparent != NULL)
149 af_free(newparent);
150 error = ENOMEM;
151 goto out;
152 }
153 newchild->fn = child;
154 }
155
156 /*
157 * The order in which the functions are called is specified as
158 * LIFO for the prepare handler and FIFO for the others; insert
159 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
160 * produces the right order.
161 */
162 if (prepare)
163 SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
164 if (parent)
165 SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
166 if (child)
167 SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
168 error = 0;
169
170 out: mutex_unlock(&atfork_lock);
171 thr_sigsetmask(SIG_SETMASK, &omask, NULL);
172 return error;
173 }
174
/*
 * fork --
 *	libc fork(2) wrapper that runs the registered atfork handlers:
 *	prepare handlers (and _malloc_prefork()) before the syscall,
 *	then the parent or child handlers afterwards, all under
 *	atfork_lock so that concurrent pthread_atfork()/fork() calls
 *	are serialized.
 */
pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	/* prepareq was built with INSERT_HEAD, so iteration is LIFO. */
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent. It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock. Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork(). Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state. There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
234