/*	$NetBSD: pthread_atfork.c,v 1.26 2025/03/04 16:40:46 christos Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: pthread_atfork.c,v 1.26 2025/03/04 16:40:46 christos Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/queue.h>
#include "extern.h"
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */

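/*
 * Hook called by fork() below while atfork_lock is held and after the
 * prepare handlers have run; in this file it is simply a wrapper around
 * the raw __fork() stub.
 */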
pid_t
__locked_fork(int *my_errno)
{
	return __fork();
}

struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;
	void (*fn)(void);
};

/*
 * We need to keep a cache of at least 6 entries: one each for the prepare,
 * parent, and child handlers, times two because of the two calls made from
 * the libpthread constructors (pthread_init, pthread_tsd_init), where it is
 * too early to call malloc(3).  This does not guarantee that we will have
 * enough, because other libraries can also call pthread_atfork() from their
 * own constructors, so it is not a complete solution and will need to be
 * fixed properly.  For now, keep space for 16 entries, since that is just
 * 256 bytes.
 */
static struct atfork_callback atfork_builtin[16];

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
#ifdef _REENTRANT
static mutex_t atfork_lock = MUTEX_INITIALIZER;
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);

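/*
 * Allocate a callback record.  Prefer a free slot (fn == NULL) in the
 * static atfork_builtin[] cache so that callers running before malloc(3)
 * is usable can still be served; otherwise fall back to the heap.
 * af_free() distinguishes the two cases by address range.
 */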
static struct atfork_callback *
af_alloc(void)
{

	for (size_t i = 0; i < __arraycount(atfork_builtin); i++) {
		if (atfork_builtin[i].fn == NULL)
			return &atfork_builtin[i];
	}

	return malloc(sizeof(*atfork_builtin));
}

static void
af_free(struct atfork_callback *af)
{

	if (af >= atfork_builtin
	    && af < atfork_builtin + __arraycount(atfork_builtin))
		af->fn = NULL;
	else
		free(af);
}

int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;
	sigset_t mask, omask;
	int error;

	newprepare = newparent = newchild = NULL;

	sigfillset(&mask);
	thr_sigsetmask(SIG_SETMASK, &mask, &omask);

	mutex_lock(&atfork_lock);
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			error = ENOMEM;
			goto out;
		}
		newprepare->fn = prepare;
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			error = ENOMEM;
			goto out;
		}
		newparent->fn = parent;
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			error = ENOMEM;
			goto out;
		}
		newchild->fn = child;
	}

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
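	/*
	 * For example (illustrative only; the names are hypothetical):
	 * after
	 *
	 *	pthread_atfork(pa, pb, pc);
	 *	pthread_atfork(qa, qb, qc);
	 *
	 * fork() runs the prepare handlers as qa, pa (LIFO), and the
	 * parent and child handlers as pb, qb and pc, qc (FIFO).
	 */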
	if (prepare)
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	if (parent)
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	if (child)
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	error = 0;

out:	mutex_unlock(&atfork_lock);
	thr_sigsetmask(SIG_SETMASK, &omask, NULL);
	return error;
}

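/*
 * The intended use of this interface (see the long comment in fork()
 * below) is to take locks in the prepare handler and release them in
 * both the parent and the child handlers, so that the child inherits
 * them in a consistent state.  A minimal, illustrative sketch only;
 * the names here are hypothetical and not part of this file:
 *
 *	static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void lib_prepare(void) { pthread_mutex_lock(&lib_lock); }
 *	static void lib_release(void) { pthread_mutex_unlock(&lib_lock); }
 *
 *	static void __attribute__((constructor))
 *	lib_init(void)
 *	{
 *		pthread_atfork(lib_prepare, lib_release, lib_release);
 *	}
 */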
pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent. It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock.  Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork(). Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state. There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
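
/*
 * Because the child reinitializes atfork_lock above, the child may call
 * fork() again itself, so the classic daemonization sequence keeps
 * working.  Illustrative sketch only, not part of this file:
 *
 *	if (fork() == 0) {		// first child
 *		setsid();
 *		if (fork() == 0) {	// grandchild: the daemon proper
 *			...
 *		}
 *		_exit(0);
 *	}
 */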