/*	$NetBSD: pthread_tsd.c,v 1.26 2025/03/01 18:21:49 christos Exp $	*/

/*-
 * Copyright (c) 2001, 2007, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Andrew Doran, and by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_tsd.c,v 1.26 2025/03/01 18:21:49 christos Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

/* Functions and structures dealing with thread-specific data */
#include <errno.h>
#include <sys/mman.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"
#include "tsd.h"

int pthread_keys_max;
static pthread_mutex_t tsd_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nextkey;

PTQ_HEAD(pthread__tsd_list, pt_specific) *pthread__tsd_list = NULL;
void (**pthread__tsd_destructors)(void *) = NULL;

__strong_alias(__libc_thr_keycreate,pthread_key_create)
__strong_alias(__libc_thr_keydelete,pthread_key_delete)

static void
/*ARGSUSED*/
null_destructor(void *p)
{
}

#include <err.h>
#include <stdlib.h>
#include <stdio.h>

static void
pthread_tsd_prefork(void)
{
	pthread_mutex_lock(&tsd_mutex);
}

static void
pthread_tsd_postfork(void)
{
	pthread_mutex_unlock(&tsd_mutex);
}

static void
pthread_tsd_postfork_child(void)
{
	pthread_mutex_init(&tsd_mutex, NULL);
}

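/*
 * Descriptive note on pthread_tsd_init() below: it sizes the key
 * tables from the PTHREAD_KEYS_MAX environment variable, e.g.
 *
 *	$ env PTHREAD_KEYS_MAX=1024 ./prog
 *
 * Values below _POSIX_THREAD_KEYS_MAX are clamped up to that minimum,
 * and PTHREAD_KEYS_MAX is the default when the variable is unset.  The
 * function then carves one anonymous mapping into three consecutive
 * regions:
 *
 *	pthread__tsd_list	 pthread_keys_max list heads
 *	pthread__tsd_destructors pthread_keys_max destructor pointers
 *	(returned to the caller)  *tlen bytes, sized for one
 *				  struct __pthread_st plus its per-key
 *				  pt_specific array
 */
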
void *
pthread_tsd_init(size_t *tlen)
{
	char *pkm;
	size_t alen;
	char *arena;

	/*
	 * This pthread_atfork() call will not call malloc, since it
	 * has a cache of 3 entries, specifically for this purpose.
	 */
	pthread_atfork(pthread_tsd_prefork, pthread_tsd_postfork,
	    pthread_tsd_postfork_child);

	if ((pkm = pthread__getenv("PTHREAD_KEYS_MAX")) != NULL) {
		pthread_keys_max = (int)strtol(pkm, NULL, 0);
		if (pthread_keys_max < _POSIX_THREAD_KEYS_MAX)
			pthread_keys_max = _POSIX_THREAD_KEYS_MAX;
	} else {
		pthread_keys_max = PTHREAD_KEYS_MAX;
	}

	/*
	 * Can't use malloc here yet, because malloc will use the fake
	 * libc thread functions to initialize itself, so mmap the space.
	 */
	*tlen = sizeof(struct __pthread_st)
	    + pthread_keys_max * sizeof(struct pt_specific);
	alen = *tlen
	    + sizeof(*pthread__tsd_list) * pthread_keys_max
	    + sizeof(*pthread__tsd_destructors) * pthread_keys_max;

	arena = mmap(NULL, alen, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
	if (arena == MAP_FAILED) {
		pthread_keys_max = 0;
		return NULL;
	}

	pthread__tsd_list = (void *)arena;
	arena += sizeof(*pthread__tsd_list) * pthread_keys_max;
	pthread__tsd_destructors = (void *)arena;
	arena += sizeof(*pthread__tsd_destructors) * pthread_keys_max;
	return arena;
}

int
pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
{
	int i;

	if (__predict_false(__uselibcstub))
		return __libc_thr_keycreate_stub(key, destructor);

	/* Get a lock on the allocation list */
	pthread_mutex_lock(&tsd_mutex);

	/* Find an available slot:
	 * A slot is available when its destructor entry is NULL; a slot
	 * in use always has a non-NULL destructor.  If the caller passes
	 * a NULL destructor we store our own internal no-op destructor
	 * to preserve that invariant.
	 */
	/* 1. Search from "nextkey" to the end of the list. */
	for (i = nextkey; i < pthread_keys_max; i++)
		if (pthread__tsd_destructors[i] == NULL)
			break;

	if (i == pthread_keys_max) {
		/* 2. If that didn't work, search from the start
		 * of the list back to "nextkey".
		 */
		for (i = 0; i < nextkey; i++)
			if (pthread__tsd_destructors[i] == NULL)
				break;

		if (i == nextkey) {
			/* If we didn't find one here, there isn't one
			 * to be found.
			 */
			pthread_mutex_unlock(&tsd_mutex);
			return EAGAIN;
		}
	}

	/* Got one. */
	pthread__assert(PTQ_EMPTY(&pthread__tsd_list[i]));
	pthread__tsd_destructors[i] = destructor ? destructor : null_destructor;

	nextkey = (i + 1) % pthread_keys_max;
	pthread_mutex_unlock(&tsd_mutex);
	*key = i;

	return 0;
}

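/*
 * An illustrative sketch (application code, not part of libpthread) of
 * the key life cycle the routines in this file implement; the key and
 * helper names are hypothetical.
 *
 *	#include <pthread.h>
 *	#include <stdlib.h>
 *
 *	static pthread_key_t my_key;
 *
 *	static void
 *	my_dtor(void *p)
 *	{
 *		free(p);	// run at thread exit for non-NULL values
 *	}
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		pthread_setspecific(my_key, malloc(32));
 *		// pthread_getspecific(my_key) now returns this thread's block
 *		return NULL;
 *	}
 *
 *	// In main(): pthread_key_create(&my_key, my_dtor), create and
 *	// join the workers, then pthread_key_delete(my_key) once no
 *	// thread still uses the key.
 */
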
/*
 * Each thread holds an array of pthread_keys_max pt_specific list
 * elements.  When an element is used it is inserted into the
 * appropriate key bucket of pthread__tsd_list.  Thus ptqe_prev == NULL
 * means that the element is not threaded onto any list, and
 * ptqe_prev != NULL means that it is already part of one.  If a key is
 * set to a non-NULL value for the first time, its element is added to
 * the list.
 *
 * For each key we keep this global list of the threads that have
 * called pthread_setspecific() with a non-NULL value, so that we don't
 * have to check every thread for non-NULL values in
 * pthread_key_delete().
 *
 * The assumption here is that a concurrent pthread_key_delete() is
 * already undefined behavior.  The mutex is taken only once per
 * thread/key combination.
 *
 * We could keep a count of the used specific entries per thread, so
 * that we could clear pt_havespecific when the last one is deleted,
 * but we don't bother for now.
 */
int
pthread__add_specific(pthread_t self, pthread_key_t key, const void *value)
{
	struct pt_specific *pt;

	pthread__assert(key >= 0 && key < pthread_keys_max);

	pthread__assert(pthread__tsd_destructors[key] != NULL);
	pt = &self->pt_specific[key];
	self->pt_havespecific = 1;
	if (value && !pt->pts_next.ptqe_prev) {
		pthread_mutex_lock(&tsd_mutex);
		PTQ_INSERT_HEAD(&pthread__tsd_list[key], pt, pts_next);
		pthread_mutex_unlock(&tsd_mutex);
	}
	pt->pts_value = __UNCONST(value);

	return 0;
}

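/*
 * An illustrative sketch (standalone, not part of libpthread) of the
 * membership trick described above, using the <sys/queue.h> TAILQ
 * macros, which the PTQ_* wrappers are assumed to mirror: an entry
 * whose prev pointer has been cleared is known not to be on any list,
 * so membership can be tested without taking the lock.
 *
 *	#include <sys/queue.h>
 *	#include <stdio.h>
 *
 *	struct node {
 *		int value;
 *		TAILQ_ENTRY(node) link;
 *	};
 *	TAILQ_HEAD(nodelist, node);
 *
 *	int
 *	main(void)
 *	{
 *		struct nodelist head = TAILQ_HEAD_INITIALIZER(head);
 *		struct node n = { .value = 42 };	// link zero-initialized
 *
 *		printf("%d\n", n.link.tqe_prev != NULL);	// 0: not on a list
 *		TAILQ_INSERT_HEAD(&head, &n, link);
 *		printf("%d\n", n.link.tqe_prev != NULL);	// 1: threaded
 *		TAILQ_REMOVE(&head, &n, link);
 *		n.link.tqe_prev = NULL;				// restore the marker
 *		printf("%d\n", n.link.tqe_prev != NULL);	// 0 again
 *		return 0;
 *	}
 */
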
int
pthread_key_delete(pthread_key_t key)
{
	/*
	 * This is tricky.  The standard says of pthread_key_create()
	 * that new keys have the value NULL associated with them in
	 * all threads.  According to people who were present at the
	 * standardization meeting, that requirement was written
	 * before pthread_key_delete() was introduced, and not
	 * reconsidered when it was.
	 *
	 * See David Butenhof's article in comp.programming.threads:
	 * Subject: Re: TSD key reusing issue
	 * Message-ID: <u97d8.29$fL6.200@news.cpqcorp.net>
	 * Date: Thu, 21 Feb 2002 09:06:17 -0500
	 * http://groups.google.com/groups?\
	 * hl=en&selm=u97d8.29%24fL6.200%40news.cpqcorp.net
	 *
	 * Given:
	 *
	 * 1: Applications are not required to clear keys in all
	 * threads before calling pthread_key_delete().
	 * 2: Clearing pointers without running destructors is a
	 * memory leak.
	 * 3: The pthread_key_delete() function is expressly forbidden
	 * to run any destructors.
	 *
	 * Option 1: Make this function effectively a no-op and
	 * prohibit key reuse.  This is a possible resource-exhaustion
	 * problem given that we have a static storage area for keys,
	 * but having a non-static storage area would make
	 * pthread_setspecific() expensive (might need to realloc the
	 * TSD array).
	 *
	 * Option 2: Ignore the specified behavior of
	 * pthread_key_create() and leave the old values.  If an
	 * application deletes a key that still has non-NULL values in
	 * some threads... it's probably a memory leak and hence
	 * incorrect anyway, and we're within our rights to let the
	 * application lose.  However, it's possible (if unlikely) that
	 * the application is storing pointers to non-heap data, or
	 * non-pointers that have been wedged into a void pointer, so
	 * we can't entirely write off such applications as incorrect.
	 * This could also lead to running (new) destructors on old
	 * data that was never supposed to be associated with that
	 * destructor.
	 *
	 * Option 3: Follow the specified behavior of
	 * pthread_key_create().  Either pthread_key_create() or
	 * pthread_key_delete() would then have to clear the values in
	 * every thread's slot for that key.  In order to guarantee the
	 * visibility of the NULL value in other threads, there would
	 * have to be synchronization operations in both the clearer
	 * and pthread_getspecific().  Putting synchronization in
	 * pthread_getspecific() is a big performance loss.  But in
	 * reality, only (buggy) reuse of an old key would require
	 * this synchronization; for a new key, there has to be a
	 * memory-visibility propagating event between the call to
	 * pthread_key_create() and pthread_getspecific() with that
	 * key, so setting the entries to NULL without synchronization
	 * will work, subject to problem (2) above.  However, it's kind
	 * of slow.
	 *
	 * Note that the argument in option 3 only applies because we
	 * keep TSD in ordinary memory which follows the pthreads
	 * visibility rules.  The visibility rules are not required by
	 * the standard to apply to TSD, so the argument doesn't
	 * apply in general, just to this implementation.
	 */
	/*
	 * We do option 3; we find the list of all pt_specific structures
	 * threaded on the key we are deleting, unthread them, and set
	 * their pointers to NULL.  Finally we unthread the key's entry
	 * itself, freeing it for further use.
	 *
	 * We don't call the destructor here; it is the responsibility
	 * of the application to clean up the storage:
	 * http://pubs.opengroup.org/onlinepubs/9699919799/functions/\
	 * pthread_key_delete.html
	 */
	struct pt_specific *pt;

	if (__predict_false(__uselibcstub))
		return __libc_thr_keydelete_stub(key);

	pthread__assert(key >= 0 && key < pthread_keys_max);

	pthread_mutex_lock(&tsd_mutex);

	pthread__assert(pthread__tsd_destructors[key] != NULL);

	while ((pt = PTQ_FIRST(&pthread__tsd_list[key])) != NULL) {
		PTQ_REMOVE(&pthread__tsd_list[key], pt, pts_next);
		pt->pts_value = NULL;
		pt->pts_next.ptqe_prev = NULL;
	}

	pthread__tsd_destructors[key] = NULL;
	pthread_mutex_unlock(&tsd_mutex);

	return 0;
}

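/*
 * An illustrative sketch (application-level, hypothetical names) of the
 * reuse hazard that option 3 guards against.  Without the clearing loop
 * above, a slot recycled by pthread_key_create() could hand a new key
 * another key's stale value in threads that never touched the new key.
 *
 *	pthread_key_t k1, k2;
 *
 *	pthread_key_create(&k1, NULL);
 *	pthread_setspecific(k1, old_ptr);	// in thread A
 *	pthread_key_delete(k1);			// clears A's slot above
 *	pthread_key_create(&k2, free);		// may reuse the same slot
 *	// pthread_getspecific(k2) in thread A must now be NULL, and
 *	// thread A's exit must not pass old_ptr to free().
 */
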
/* Perform thread-exit-time destruction of thread-specific data. */
void
pthread__destroy_tsd(pthread_t self)
{
	int i, done, iterations;
	void *val;
	void (*destructor)(void *);

	if (!self->pt_havespecific)
		return;

	/* Butenhof, section 5.4.2 (page 167):
	 *
	 * ``Also, Pthreads sets the thread-specific data value for a
	 * key to NULL before calling that key's destructor (passing
	 * the previous value of the key) when a thread terminates [*].
	 * ...
	 * [*] That is, unfortunately, not what the standard
	 * says. This is one of the problems with formal standards -
	 * they say what they say, not what they were intended to
	 * say. Somehow, an error crept in, and the sentence
	 * specifying that "the implementation clears the
	 * thread-specific data value before calling the destructor"
	 * was deleted. Nobody noticed, and the standard was approved
	 * with the error. So the standard says (by omission) that if
	 * you want to write a portable application using
	 * thread-specific data, that will not hang on thread
	 * termination, you must call pthread_setspecific within your
	 * destructor function to change the value to NULL. This would
	 * be silly, and any serious implementation of Pthreads will
	 * violate the standard in this respect. Of course, the
	 * standard will be fixed, probably by the 1003.1n amendment
	 * (assorted corrections to 1003.1c-1995), but that will take
	 * a while.''
	 */

	/* We're not required to try very hard */
	iterations = PTHREAD_DESTRUCTOR_ITERATIONS;
	do {
		done = 1;
		for (i = 0; i < pthread_keys_max; i++) {
			struct pt_specific *pt = &self->pt_specific[i];
			if (pt->pts_next.ptqe_prev == NULL)
				continue;
			pthread_mutex_lock(&tsd_mutex);

			if (pt->pts_next.ptqe_prev != NULL) {
				PTQ_REMOVE(&pthread__tsd_list[i], pt, pts_next);
				val = pt->pts_value;
				pt->pts_value = NULL;
				pt->pts_next.ptqe_prev = NULL;
				destructor = pthread__tsd_destructors[i];
			} else
				destructor = NULL;

			pthread_mutex_unlock(&tsd_mutex);
			if (destructor != NULL && val != NULL) {
				done = 0;
				(*destructor)(val);
			}
		}
	} while (!done && --iterations);

	self->pt_havespecific = 0;
}

void
pthread__copy_tsd(pthread_t self)
{
	for (size_t key = 0; key < TSD_KEYS_MAX; key++) {

		if (__libc_tsd[key].tsd_inuse == 0)
			continue;

		pthread__assert(pthread__tsd_destructors[key] == NULL);
		pthread__tsd_destructors[key] = __libc_tsd[key].tsd_dtor ?
		    __libc_tsd[key].tsd_dtor : null_destructor;
		nextkey = (key + 1) % pthread_keys_max;

		self->pt_havespecific = 1;
		struct pt_specific *pt = &self->pt_specific[key];
		pt->pts_value = __libc_tsd[key].tsd_val;
		__libc_tsd[key].tsd_inuse = 0;
	}
}
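
/*
 * An illustrative sketch (application-level, hypothetical names) of why
 * pthread__destroy_tsd() above loops: a destructor may legally store a
 * fresh value, which forces another pass, and the implementation gives
 * up after PTHREAD_DESTRUCTOR_ITERATIONS passes rather than spinning
 * forever.
 *
 *	static pthread_key_t chatty_key;
 *
 *	static void
 *	chatty_dtor(void *p)
 *	{
 *		free(p);
 *		// Re-arming the key makes the exiting thread eligible
 *		// for one more destructor pass, up to the limit.
 *		pthread_setspecific(chatty_key, malloc(1));
 *	}
 */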