/*	$NetBSD: pthread_atfork.c,v 1.9 2012/03/20 16:36:05 matt Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: pthread_atfork.c,v 1.9 2012/03/20 16:36:05 matt Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/queue.h>
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */

pid_t	__fork(void);	/* XXX */

struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;
	void (*fn)(void);
};

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
static struct atfork_callback atfork_builtin;
static mutex_t atfork_lock = MUTEX_INITIALIZER;
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);

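/*
 * af_alloc() hands out the single statically allocated record first
 * (presumably so that one handler can always be registered without
 * relying on malloc()); all later allocations come from the heap.
 * af_free() releases only the heap-allocated records.
 */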
static struct atfork_callback *
af_alloc(void)
{

	if (atfork_builtin.fn == NULL)
		return &atfork_builtin;

	return malloc(sizeof(atfork_builtin));
}

static void
af_free(struct atfork_callback *af)
{

	if (af != &atfork_builtin)
		free(af);
}

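/*
 * Register the prepare/parent/child fork handlers.  On failure the
 * error number (ENOMEM) is returned directly, as POSIX specifies for
 * pthread_atfork(), rather than being stored in errno.
 */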
int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;

	newprepare = newparent = newchild = NULL;

	mutex_lock(&atfork_lock);
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newprepare->fn = prepare;
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newparent->fn = parent;
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newchild->fn = child;
	}

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
	if (prepare)
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	if (parent)
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	if (child)
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	mutex_unlock(&atfork_lock);

	return 0;
}
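
/*
 * Example usage (illustrative sketch; the handler and lock names below
 * are hypothetical and not part of this file).  A library that owns a
 * lock typically registers handlers so that the lock is held across
 * fork() and released again on both sides:
 *
 *	static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void example_prepare(void) { pthread_mutex_lock(&example_lock); }
 *	static void example_parent(void) { pthread_mutex_unlock(&example_lock); }
 *	static void example_child(void) { pthread_mutex_unlock(&example_lock); }
 *
 *	pthread_atfork(example_prepare, example_parent, example_child);
 */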

pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();

	ret = __fork();

	if (ret != 0) {
		/*
		 * We are the parent.  It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock.  Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork().  Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible reinitialization of the lock.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state.  There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
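
/*
 * The double-fork pattern referred to in the comment above, sketched
 * here for reference (illustrative only; do_grandchild_work() is a
 * hypothetical placeholder):
 *
 *	switch (fork()) {
 *	case 0:
 *		if (fork() > 0)
 *			_exit(0);
 *		do_grandchild_work();
 *		break;
 *	case -1:
 *	default:
 *		break;
 *	}
 *
 * The second fork() call runs in the first child; it can take
 * atfork_lock again only because the child path above reinitializes
 * the mutex rather than unlocking it.
 */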