/* $NetBSD: pthread_atfork.c,v 1.9 2012/03/20 16:36:05 matt Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: pthread_atfork.c,v 1.9 2012/03/20 16:36:05 matt Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/queue.h>
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */

pid_t __fork(void); /* XXX */

struct atfork_callback {
        SIMPLEQ_ENTRY(atfork_callback) next;
        void (*fn)(void);
};

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
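/*
 * The first callback registered is stored in this static slot, so that
 * one handler can be installed without calling malloc(); any further
 * registrations are heap-allocated by af_alloc().
 */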
static struct atfork_callback atfork_builtin;
static mutex_t atfork_lock = MUTEX_INITIALIZER;
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);

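/*
 * Allocate a callback record: hand out the builtin slot while it is
 * still unused, otherwise fall back to malloc().
 */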
static struct atfork_callback *
af_alloc(void)
{

        if (atfork_builtin.fn == NULL)
                return &atfork_builtin;

        return malloc(sizeof(atfork_builtin));
}

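/*
 * Release a callback record obtained from af_alloc(); the builtin slot
 * is never handed to free().
 */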
static void
af_free(struct atfork_callback *af)
{

        if (af != &atfork_builtin)
                free(af);
}

int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
        struct atfork_callback *newprepare, *newparent, *newchild;

        newprepare = newparent = newchild = NULL;

        mutex_lock(&atfork_lock);
        if (prepare != NULL) {
                newprepare = af_alloc();
                if (newprepare == NULL) {
                        mutex_unlock(&atfork_lock);
                        return ENOMEM;
                }
                newprepare->fn = prepare;
        }

        if (parent != NULL) {
                newparent = af_alloc();
                if (newparent == NULL) {
                        if (newprepare != NULL)
                                af_free(newprepare);
                        mutex_unlock(&atfork_lock);
                        return ENOMEM;
                }
                newparent->fn = parent;
        }

        if (child != NULL) {
                newchild = af_alloc();
                if (newchild == NULL) {
                        if (newprepare != NULL)
                                af_free(newprepare);
                        if (newparent != NULL)
                                af_free(newparent);
                        mutex_unlock(&atfork_lock);
                        return ENOMEM;
                }
                newchild->fn = child;
        }

        /*
         * The order in which the functions are called is specified as
         * LIFO for the prepare handler and FIFO for the others; insert
         * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
         * produces the right order.
         */
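        /*
         * For example, if handlers A and then B are registered, fork()
         * runs the prepare handlers as B, A, and runs the parent and
         * child handlers as A, B.
         */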
        if (prepare)
                SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
        if (parent)
                SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
        if (child)
                SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
        mutex_unlock(&atfork_lock);

        return 0;
}
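
/*
 * Usage sketch (illustrative only, not part of this implementation):
 * a library that owns a lock would typically register handlers so the
 * lock is held across fork() and released again on both sides, leaving
 * the child's copy of the lock in a known state. The lib_* names below
 * are hypothetical and assume the usual <pthread.h> interfaces.
 *
 *    static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *    static void lib_prepare(void) { pthread_mutex_lock(&lib_lock); }
 *    static void lib_parent(void)  { pthread_mutex_unlock(&lib_lock); }
 *    static void lib_child(void)   { pthread_mutex_unlock(&lib_lock); }
 *
 *    static void
 *    lib_init(void)
 *    {
 *            pthread_atfork(lib_prepare, lib_parent, lib_child);
 *    }
 */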

pid_t
fork(void)
{
        struct atfork_callback *iter;
        pid_t ret;

        mutex_lock(&atfork_lock);
        SIMPLEQ_FOREACH(iter, &prepareq, next)
                (*iter->fn)();

        ret = __fork();

        if (ret != 0) {
                /*
                 * We are the parent. It doesn't matter here whether
                 * the fork call succeeded or failed.
                 */
                SIMPLEQ_FOREACH(iter, &parentq, next)
                        (*iter->fn)();
                mutex_unlock(&atfork_lock);
        } else {
                /* We are the child */
                SIMPLEQ_FOREACH(iter, &childq, next)
                        (*iter->fn)();
                /*
                 * Note: We are explicitly *not* unlocking
                 * atfork_lock. Unlocking atfork_lock is problematic,
                 * because if any threads in the parent blocked on it
                 * between the initial lock and the fork() syscall,
                 * unlocking in the child will try to schedule
                 * threads, and either the internal mutex interlock or
                 * the runqueue spinlock could have been held at the
                 * moment of fork(). Since the other threads do not
                 * exist in this process, the spinlock will never be
                 * unlocked, and we would wedge.
                 * Instead, we reinitialize atfork_lock, since we know
                 * that the state of the atfork lists is consistent here,
                 * and that there are no other threads to be affected by
                 * the forcible cleaning of the queue.
                 * This permits double-forking to work, although
                 * it requires knowing that it's "safe" to initialize
                 * a locked mutex in this context.
                 *
                 * The problem exists for users of this interface,
                 * too, since the intended use of pthread_atfork() is
                 * to acquire locks across the fork call to ensure
                 * that the child sees consistent state. There's not
                 * much that can usefully be done in a child handler,
                 * and conventional wisdom discourages using them, but
                 * they're part of the interface, so here we are...
                 */
                mutex_init(&atfork_lock, NULL);
        }

        return ret;
}