/*	$NetBSD: pthread_atfork.c,v 1.23 2025/03/01 20:31:58 christos Exp $	*/
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #if defined(LIBC_SCCS) && !defined(lint)
34 __RCSID("$NetBSD: pthread_atfork.c,v 1.23 2025/03/01 20:31:58 christos Exp $");
35 #endif /* LIBC_SCCS and not lint */
36
37 #include "namespace.h"
38
39 #include <errno.h>
40 #include <stdlib.h>
41 #include <unistd.h>
42 #include <sys/queue.h>
43 #include "extern.h"
44 #include "reentrant.h"
45
46 #ifdef __weak_alias
47 __weak_alias(pthread_atfork, _pthread_atfork)
48 __weak_alias(fork, _fork)
49 #endif /* __weak_alias */
50
51 pid_t
52 __locked_fork(int *my_errno)
53 {
54 return __fork();
55 }
56
/*
 * One registered atfork handler.  Entries are linked onto exactly one of
 * the prepare/parent/child queues below.  A NULL fn marks an unused slot
 * in the static atfork_builtin[] cache.
 */
struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;	/* queue linkage */
	void (*fn)(void);			/* handler to invoke around fork() */
};
61
/*
 * Keep a cache of 3 entries, one for prepare, one for parent, one for
 * child.  This is so that we don't have to allocate memory for the call
 * from the pthread_tsd_init() constructor, where it is too early to call
 * malloc(3).
 */
static struct atfork_callback atfork_builtin[3];
69
/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
#ifdef _REENTRANT
static mutex_t atfork_lock = MUTEX_INITIALIZER;	/* guards the three queues */
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

/* Handler queues: prepare runs before fork(), parent/child after. */
static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);
84
85 static struct atfork_callback *
86 af_alloc(void)
87 {
88
89 for (size_t i = 0; i < __arraycount(atfork_builtin); i++) {
90 if (atfork_builtin[i].fn == NULL)
91 return &atfork_builtin[i];
92 }
93
94 return malloc(sizeof(atfork_builtin));
95 }
96
97 static void
98 af_free(struct atfork_callback *af)
99 {
100
101 if (af >= atfork_builtin
102 && af < atfork_builtin + __arraycount(atfork_builtin))
103 af->fn = NULL;
104 else
105 free(af);
106 }
107
108 int
109 pthread_atfork(void (*prepare)(void), void (*parent)(void),
110 void (*child)(void))
111 {
112 struct atfork_callback *newprepare, *newparent, *newchild;
113 sigset_t mask, omask;
114 int error;
115
116 newprepare = newparent = newchild = NULL;
117
118 sigfillset(&mask);
119 thr_sigsetmask(SIG_SETMASK, &mask, &omask);
120
121 mutex_lock(&atfork_lock);
122 if (prepare != NULL) {
123 newprepare = af_alloc();
124 if (newprepare == NULL) {
125 error = ENOMEM;
126 goto out;
127 }
128 newprepare->fn = prepare;
129 }
130
131 if (parent != NULL) {
132 newparent = af_alloc();
133 if (newparent == NULL) {
134 if (newprepare != NULL)
135 af_free(newprepare);
136 error = ENOMEM;
137 goto out;
138 }
139 newparent->fn = parent;
140 }
141
142 if (child != NULL) {
143 newchild = af_alloc();
144 if (newchild == NULL) {
145 if (newprepare != NULL)
146 af_free(newprepare);
147 if (newparent != NULL)
148 af_free(newparent);
149 error = ENOMEM;
150 goto out;
151 }
152 newchild->fn = child;
153 }
154
155 /*
156 * The order in which the functions are called is specified as
157 * LIFO for the prepare handler and FIFO for the others; insert
158 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
159 * produces the right order.
160 */
161 if (prepare)
162 SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
163 if (parent)
164 SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
165 if (child)
166 SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
167 error = 0;
168
169 out: mutex_unlock(&atfork_lock);
170 thr_sigsetmask(SIG_SETMASK, &omask, NULL);
171 return error;
172 }
173
/*
 * fork --
 *	Wrapper around the fork syscall that runs the registered atfork
 *	handlers: prepare handlers (and the malloc prefork hook) before
 *	forking, then the parent or child handlers afterwards depending
 *	on which process we are.  atfork_lock is held across the fork to
 *	serialize against pthread_atfork(); see the comment below for why
 *	the child reinitializes the lock instead of unlocking it.
 */
pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent. It doesn't matter here whether
		 * the fork call succeeded or failed (ret == -1): the
		 * parent-side hooks run and the lock is released either
		 * way.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock. Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork(). Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state. There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
233