/*	$NetBSD: pthread_atfork.c,v 1.14 2020/04/19 01:06:15 joerg Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: pthread_atfork.c,v 1.14 2020/04/19 01:06:15 joerg Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/queue.h>
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */

pid_t	__fork(void); /* XXX */
pid_t	__locked_fork(int *) __weak; /* XXX */

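/*
 * Weak default for __locked_fork(): just perform the raw __fork()
 * system call.  A stronger definition elsewhere may wrap the call in
 * whatever additional locking it needs.
 */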
pid_t
__locked_fork(int *my_errno)
{
	return __fork();
}

struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;
	void (*fn)(void);
};

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
static struct atfork_callback atfork_builtin;
#ifdef _REENTRANT
static mutex_t atfork_lock = MUTEX_INITIALIZER;
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);

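/*
 * Callback record allocation: the first record handed out is the
 * static atfork_builtin, which needs no malloc(); any further records
 * come from the heap, and af_free() only frees the heap-allocated ones.
 */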
static struct atfork_callback *
af_alloc(void)
{

	if (atfork_builtin.fn == NULL)
		return &atfork_builtin;

	return malloc(sizeof(atfork_builtin));
}

static void
af_free(struct atfork_callback *af)
{

	if (af != &atfork_builtin)
		free(af);
}

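/*
 * pthread_atfork(): register handlers to run around fork().  The
 * "prepare" handlers run in the parent before the fork, most recently
 * registered first; the "parent" and "child" handlers run after the
 * fork, in registration order, in the parent and child respectively.
 * Any of the three may be NULL.  Returns 0 on success, or ENOMEM if a
 * callback record cannot be allocated.
 */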
int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;

	newprepare = newparent = newchild = NULL;

	mutex_lock(&atfork_lock);
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newprepare->fn = prepare;
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newparent->fn = parent;
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newchild->fn = child;
	}

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
	if (prepare)
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	if (parent)
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	if (child)
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	mutex_unlock(&atfork_lock);

	return 0;
}

pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent.  It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock.  Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork().  Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state.  There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
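
/*
 * Illustrative sketch (not part of this file): the intended use of
 * pthread_atfork() is for a library to hold its own locks across
 * fork(), along the lines of
 *
 *	static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void lib_prepare(void) { pthread_mutex_lock(&lib_lock); }
 *	static void lib_parent(void)  { pthread_mutex_unlock(&lib_lock); }
 *	static void lib_child(void)   { pthread_mutex_unlock(&lib_lock); }
 *
 *	... pthread_atfork(lib_prepare, lib_parent, lib_child); ...
 *
 * so that the child never starts out with lib_lock held by a thread
 * that no longer exists in it.  The lib_* names are hypothetical.
 */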