/*	$NetBSD: tls.c,v 1.20 2024/07/22 23:14:25 riastradh Exp $	*/
2 /*-
3 * Copyright (c) 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Joerg Sonnenberger.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __RCSID("$NetBSD: tls.c,v 1.20 2024/07/22 23:14:25 riastradh Exp $");
33
34 /*
35 * Thread-local storage
36 *
37 * Reference:
38 *
39 * [ELFTLS] Ulrich Drepper, `ELF Handling For Thread-Local
40 * Storage', Version 0.21, 2023-08-22.
41 * https://akkadia.org/drepper/tls.pdf
42 * https://web.archive.org/web/20240718081934/https://akkadia.org/drepper/tls.pdf
43 */
44
45 #include <sys/param.h>
46 #include <sys/ucontext.h>
47 #include <lwp.h>
48 #include <stdalign.h>
49 #include <stddef.h>
50 #include <string.h>
51 #include "debug.h"
52 #include "rtld.h"
53
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)

/* Forward declarations for the internal allocation helpers below. */
static struct tls_tcb *_rtld_tls_allocate_locked(void);
static void *_rtld_tls_module_allocate(struct tls_tcb *, size_t);

/*
 * DTV offset
 *
 * On some architectures (m68k, mips, or1k, powerpc, and riscv),
 * the DTV offsets passed to __tls_get_addr have a bias relative
 * to the start of the DTV, in order to maximize the range of TLS
 * offsets that can be used by instruction encodings with signed
 * displacements.
 *
 * Architectures that need a bias define TLS_DTV_OFFSET in their
 * MD headers; everyone else gets 0 (no bias).
 */
#ifndef TLS_DTV_OFFSET
#define TLS_DTV_OFFSET 0
#endif

static size_t _rtld_tls_static_space;	/* Static TLS space allocated */
static size_t _rtld_tls_static_offset;	/* Next offset for static TLS to use */
size_t _rtld_tls_dtv_generation = 1;	/* Bumped on each load of obj w/ TLS */
size_t _rtld_tls_max_index = 1;		/* Max index into up-to-date DTV */
76
77 /*
78 * DTV -- Dynamic Thread Vector
79 *
80 * The DTV is a per-thread array that maps each module with
81 * thread-local storage to a pointer into part of the thread's TCB
82 * (thread control block), or dynamically loaded TLS blocks,
83 * reserved for that module's storage.
84 *
85 * The TCB itself, struct tls_tcb, has a pointer to the DTV at
86 * tcb->tcb_dtv.
87 *
88 * The layout is:
89 *
90 * +---------------+
91 * | max index | -1 max index i for which dtv[i] is alloced
92 * +---------------+
93 * | generation | 0 void **dtv points here
94 * +---------------+
95 * | obj 1 tls ptr | 1 TLS pointer for obj w/ obj->tlsindex 1
96 * +---------------+
97 * | obj 2 tls ptr | 2 TLS pointer for obj w/ obj->tlsindex 2
98 * +---------------+
99 * .
100 * .
101 * .
102 *
103 * The values of obj->tlsindex start at 1; this way,
104 * dtv[obj->tlsindex] works, when dtv[0] is the generation. The
105 * TLS pointers go either into the static thread-local storage,
106 * for the initial objects (i.e., those loaded at startup), or
107 * into TLS blocks dynamically allocated for objects that
108 * dynamically loaded by dlopen.
109 *
110 * The generation field is a cache of the global generation number
111 * _rtld_tls_dtv_generation, which is bumped every time an object
112 * with TLS is loaded in _rtld_map_object, and cached by
113 * __tls_get_addr (via _rtld_tls_get_addr) when a newly loaded
114 * module lies outside the bounds of the current DTV.
115 *
116 * XXX Why do we keep max index and generation separately? They
117 * appear to be initialized the same, always incremented together,
118 * and always stored together.
119 *
120 * XXX Why is this not a struct?
121 *
122 * struct dtv {
123 * size_t dtv_gen;
124 * void *dtv_module[];
125 * };
126 */
/*
 * Accessors for the two DTV header words: the generation number is
 * stored at dtv[0] and the allocated max index at dtv[-1] (see the
 * layout diagram above).  Values are stashed as void * since the DTV
 * is an array of pointers; the casts round-trip them through size_t.
 */
#define DTV_GENERATION(dtv) ((size_t)((dtv)[0]))
#define DTV_MAX_INDEX(dtv) ((size_t)((dtv)[-1]))
#define SET_DTV_GENERATION(dtv, val) (dtv)[0] = (void *)(size_t)(val)
#define SET_DTV_MAX_INDEX(dtv, val) (dtv)[-1] = (void *)(size_t)(val)
131
132 /*
133 * _rtld_tls_get_addr(tcb, idx, offset)
134 *
135 * Slow path for __tls_get_addr (see below), called to allocate
136 * TLS space if needed for the object obj with obj->tlsindex idx,
137 * at offset, which must be below obj->tlssize.
138 *
139 * This may allocate a DTV if the current one is too old, and it
140 * may allocate a dynamically loaded TLS block if there isn't one
141 * already allocated for it.
142 *
143 * XXX Why is the first argument passed as `void *tls' instead of
144 * just `struct tls_tcb *tcb'?
145 */
void *
_rtld_tls_get_addr(void *tls, size_t idx, size_t offset)
{
	struct tls_tcb *tcb = tls;
	void **dtv, **new_dtv;
	sigset_t mask;

	/* Serialize against dlopen/dlclose and other threads' TLS faults. */
	_rtld_exclusive_enter(&mask);

	dtv = tcb->tcb_dtv;

	/*
	 * If the generation number has changed, we have to allocate a
	 * new DTV.
	 *
	 * XXX Do we really?  Isn't it enough to check whether idx <=
	 * DTV_MAX_INDEX(dtv)?
	 */
	if (__predict_false(DTV_GENERATION(dtv) != _rtld_tls_dtv_generation)) {
		size_t to_copy = DTV_MAX_INDEX(dtv);

		/* "2 +": room for the max-index and generation words. */
		new_dtv = xcalloc((2 + _rtld_tls_max_index) * sizeof(*dtv));
		++new_dtv; /* advance past DTV_MAX_INDEX */
		if (to_copy > _rtld_tls_max_index) /* XXX How? */
			to_copy = _rtld_tls_max_index;
		/* Carry over the existing module pointers, dtv[1..to_copy]. */
		memcpy(new_dtv + 1, dtv + 1, to_copy * sizeof(*dtv));
		xfree(dtv - 1); /* retreat back to DTV_MAX_INDEX */
		dtv = tcb->tcb_dtv = new_dtv;
		SET_DTV_MAX_INDEX(dtv, _rtld_tls_max_index);
		SET_DTV_GENERATION(dtv, _rtld_tls_dtv_generation);
	}

	/* First access to this module's TLS in this thread: allocate it. */
	if (__predict_false(dtv[idx] == NULL))
		dtv[idx] = _rtld_tls_module_allocate(tcb, idx);

	_rtld_exclusive_exit(&mask);

	return (uint8_t *)dtv[idx] + offset;
}
185
186 /*
187 * _rtld_tls_initial_allocation()
188 *
189 * Allocate the TCB (thread control block) for the initial thread,
190 * once the static TLS space usage has been determined (plus some
191 * slop to allow certain special cases like Mesa to be dlopened).
192 *
193 * This must be done _after_ all initial objects (i.e., those
194 * loaded at startup, as opposed to objects dynamically loaded by
195 * dlopen) have had TLS offsets allocated if need be by
196 * _rtld_tls_offset_allocate, and have had relocations processed.
197 */
198 void
199 _rtld_tls_initial_allocation(void)
200 {
201 struct tls_tcb *tcb;
202
203 _rtld_tls_static_space = _rtld_tls_static_offset +
204 RTLD_STATIC_TLS_RESERVATION;
205
206 #ifndef __HAVE_TLS_VARIANT_I
207 _rtld_tls_static_space = roundup2(_rtld_tls_static_space,
208 alignof(max_align_t));
209 #endif
210 dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));
211
212 tcb = _rtld_tls_allocate_locked();
213 #ifdef __HAVE___LWP_SETTCB
214 __lwp_settcb(tcb);
215 #else
216 _lwp_setprivate(tcb);
217 #endif
218 }
219
220 /*
221 * _rtld_tls_allocate_locked()
222 *
223 * Internal subroutine to allocate a TCB (thread control block)
224 * for the current thread.
225 *
226 * This allocates a DTV and a TCB that points to it, including
227 * static space in the TCB for the TLS of the initial objects.
228 * TLS blocks for dynamically loaded objects are allocated lazily.
229 *
230 * Caller must either be single-threaded (at startup via
231 * _rtld_tls_initial_allocation) or hold the rtld exclusive lock
232 * (via _rtld_tls_allocate).
233 */
static struct tls_tcb *
_rtld_tls_allocate_locked(void)
{
	Obj_Entry *obj;
	struct tls_tcb *tcb;
	uint8_t *p, *q;

	/* One zeroed allocation holds both the static TLS area and the TCB. */
	p = xcalloc(_rtld_tls_static_space + sizeof(struct tls_tcb));
#ifdef __HAVE_TLS_VARIANT_I
	/* Variant I: TCB first; static TLS follows it, offsets grow up. */
	tcb = (struct tls_tcb *)p;
	p += sizeof(struct tls_tcb);
#else
	/* Variant II: static TLS sits below the TCB; offsets grow down. */
	p += _rtld_tls_static_space;
	tcb = (struct tls_tcb *)p;
	tcb->tcb_self = tcb;
#endif
	dbg(("lwp %d tls tcb %p", _lwp_self(), tcb));
	/* "2 +": room for the DTV max-index and generation header words. */
	tcb->tcb_dtv = xcalloc(sizeof(*tcb->tcb_dtv) * (2 + _rtld_tls_max_index));
	++tcb->tcb_dtv; /* advance past DTV_MAX_INDEX */
	SET_DTV_MAX_INDEX(tcb->tcb_dtv, _rtld_tls_max_index);
	SET_DTV_GENERATION(tcb->tcb_dtv, _rtld_tls_dtv_generation);

	/*
	 * Populate the static TLS area: for each initial object with a
	 * static reservation, copy in its initialization image (the
	 * rest is already zero from xcalloc) and record the pointer in
	 * the DTV.
	 */
	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
		if (obj->tls_static) {
#ifdef __HAVE_TLS_VARIANT_I
			q = p + obj->tlsoffset;
#else
			q = p - obj->tlsoffset;
#endif
			dbg(("%s: [lwp %d] tls dtv %p index %zu offset %zu",
			    obj->path, _lwp_self(),
			    q, obj->tlsindex, obj->tlsoffset));
			if (obj->tlsinitsize)
				memcpy(q, obj->tlsinit, obj->tlsinitsize);
			tcb->tcb_dtv[obj->tlsindex] = q;
		}
	}

	return tcb;
}
274
275 /*
276 * _rtld_tls_allocate()
277 *
278 * Allocate a TCB (thread control block) for the current thread.
279 *
280 * Called by pthread_create for non-initial threads. (The initial
281 * thread's TCB is allocated by _rtld_tls_initial_allocation.)
282 */
283 struct tls_tcb *
284 _rtld_tls_allocate(void)
285 {
286 struct tls_tcb *tcb;
287 sigset_t mask;
288
289 _rtld_exclusive_enter(&mask);
290 tcb = _rtld_tls_allocate_locked();
291 _rtld_exclusive_exit(&mask);
292
293 return tcb;
294 }
295
296 /*
297 * _rtld_tls_free(tcb)
298 *
299 * Free a TCB allocated with _rtld_tls_allocate.
300 *
301 * Frees any TLS blocks for dynamically loaded objects that tcb's
302 * DTV points to, and frees tcb's DTV, and frees tcb.
303 */
304 void
305 _rtld_tls_free(struct tls_tcb *tcb)
306 {
307 size_t i, max_index;
308 uint8_t *p, *p_end;
309 sigset_t mask;
310
311 _rtld_exclusive_enter(&mask);
312
313 #ifdef __HAVE_TLS_VARIANT_I
314 p = (uint8_t *)tcb;
315 #else
316 p = (uint8_t *)tcb - _rtld_tls_static_space;
317 #endif
318 p_end = p + _rtld_tls_static_space;
319
320 max_index = DTV_MAX_INDEX(tcb->tcb_dtv);
321 for (i = 1; i <= max_index; ++i) {
322 if ((uint8_t *)tcb->tcb_dtv[i] < p ||
323 (uint8_t *)tcb->tcb_dtv[i] >= p_end)
324 xfree(tcb->tcb_dtv[i]);
325 }
326 xfree(tcb->tcb_dtv - 1); /* retreat back to DTV_MAX_INDEX */
327 xfree(p);
328
329 _rtld_exclusive_exit(&mask);
330 }
331
332 /*
333 * _rtld_tls_module_allocate(tcb, idx)
334 *
335 * Allocate thread-local storage in the thread with the given TCB
336 * (thread control block) for the object obj whose obj->tlsindex
337 * is idx.
338 *
339 * If obj has had space in static TLS reserved (obj->tls_static),
340 * return a pointer into that. Otherwise, allocate a TLS block,
341 * mark obj as having a TLS block allocated (obj->tls_dynamic),
342 * and return it.
343 *
344 * Called by _rtld_tls_get_addr to get the thread-local storage
345 * for an object the first time around.
346 */
347 static void *
348 _rtld_tls_module_allocate(struct tls_tcb *tcb, size_t idx)
349 {
350 Obj_Entry *obj;
351 uint8_t *p;
352
353 for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
354 if (obj->tlsindex == idx)
355 break;
356 }
357 if (obj == NULL) {
358 _rtld_error("Module for TLS index %zu missing", idx);
359 _rtld_die();
360 }
361 if (obj->tls_static) {
362 #ifdef __HAVE_TLS_VARIANT_I
363 p = (uint8_t *)tcb + obj->tlsoffset + sizeof(struct tls_tcb);
364 #else
365 p = (uint8_t *)tcb - obj->tlsoffset;
366 #endif
367 return p;
368 }
369
370 p = xmalloc(obj->tlssize);
371 memcpy(p, obj->tlsinit, obj->tlsinitsize);
372 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
373
374 obj->tls_dynamic = 1;
375
376 return p;
377 }
378
379 /*
380 * _rtld_tls_offset_allocate(obj)
381 *
382 * Allocate a static thread-local storage offset for obj.
383 *
384 * Called by _rtld at startup for all initial objects. Called
385 * also by MD relocation logic, which is allowed (for Mesa) to
386 * allocate an additional 64 bytes (RTLD_STATIC_TLS_RESERVATION)
387 * of static thread-local storage in dlopened objects.
388 */
int
_rtld_tls_offset_allocate(Obj_Entry *obj)
{
	size_t offset, next_offset;

	/* Already using a dynamic TLS block: cannot also be static. */
	if (obj->tls_dynamic)
		return -1;

	/* Already assigned a static offset: nothing to do. */
	if (obj->tls_static)
		return 0;
	/* No TLS segment at all: trivially "allocated" at offset 0. */
	if (obj->tlssize == 0) {
		obj->tlsoffset = 0;
		obj->tls_static = 1;
		return 0;
	}

#ifdef __HAVE_TLS_VARIANT_I
	/* Variant I: offsets grow upward; align the start of the block. */
	offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
	next_offset = offset + obj->tlssize;
#else
	/*
	 * Variant II: offsets are measured downward from the TCB, so
	 * the block's offset is to its (aligned) far end.
	 */
	offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
	    obj->tlsalign);
	next_offset = offset;
#endif

	/*
	 * Check if the static allocation was already done.
	 * This happens if dynamically loaded modules want to use
	 * static TLS space.
	 *
	 * XXX Keep an actual free list and callbacks for initialisation.
	 */
	if (_rtld_tls_static_space) {
		if (obj->tlsinitsize) {
			_rtld_error("%s: Use of initialized "
			    "Thread Local Storage with model initial-exec "
			    "and dlopen is not supported",
			    obj->path);
			return -1;
		}
		if (next_offset > _rtld_tls_static_space) {
			_rtld_error("%s: No space available "
			    "for static Thread Local Storage",
			    obj->path);
			return -1;
		}
	}
	obj->tlsoffset = offset;
	dbg(("%s: static tls offset 0x%zx size %zu\n",
	    obj->path, obj->tlsoffset, obj->tlssize));
	_rtld_tls_static_offset = next_offset;
	obj->tls_static = 1;

	return 0;
}
444
445 /*
446 * _rtld_tls_offset_free(obj)
447 *
448 * Free a static thread-local storage offset for obj.
449 *
450 * Called by dlclose (via _rtld_unload_object -> _rtld_obj_free).
451 *
452 * Since static thread-local storage is normally not used by
453 * dlopened objects (with the exception of Mesa), this doesn't do
454 * anything to recycle the space right now.
455 */
456 void
457 _rtld_tls_offset_free(Obj_Entry *obj)
458 {
459
460 /*
461 * XXX See above.
462 */
463 obj->tls_static = 0;
464 return;
465 }
466
467 #if defined(__HAVE_COMMON___TLS_GET_ADDR) && defined(RTLD_LOADER)
468 /*
469 * __tls_get_addr(tlsindex)
470 *
471 * Symbol directly called by code generated by the compiler for
472 * references thread-local storage in the general-dynamic or
473 * local-dynamic TLS models (but not initial-exec or local-exec).
474 *
475 * The argument is a pointer to
476 *
477 * struct {
478 * unsigned long int ti_module;
479 * unsigned long int ti_offset;
480 * };
481 *
482 * as in, e.g., [ELFTLS] Sec. 3.4.3. This coincides with the
483 * type size_t[2] on all architectures that use this common
484 * __tls_get_addr definition (XXX but why do we write it as
485 * size_t[2]?).
486 *
487 * ti_module, i.e., arg[0], is the obj->tlsindex assigned at
488 * load-time by _rtld_map_object, and ti_offset, i.e., arg[1], is
489 * assigned at link-time by ld(1), possibly adjusted by
490 * TLS_DTV_OFFSET.
491 *
492 * Some architectures -- specifically IA-64 -- use a different
493 * calling convention. Some architectures -- specifically i386
494 * -- also use another entry point ___tls_get_addr (that's three
495 * leading underscores) with a different calling convention.
496 */
void *
__tls_get_addr(void *arg_)
{
	size_t *arg = (size_t *)arg_;
	void **dtv;
	/* Fetch this thread's TCB via the fastest MD mechanism available. */
#ifdef __HAVE___LWP_GETTCB_FAST
	struct tls_tcb * const tcb = __lwp_gettcb_fast();
#else
	struct tls_tcb * const tcb = __lwp_getprivate_fast();
#endif
	/* arg[0] is ti_module (obj->tlsindex); arg[1] is ti_offset. */
	size_t idx = arg[0], offset = arg[1] + TLS_DTV_OFFSET;

	dtv = tcb->tcb_dtv;

	/*
	 * Fast path: access to an already allocated DTV entry.  This
	 * checks the current limit and the entry without needing any
	 * locking.  Entries are only freed on dlclose() and it is an
	 * application bug if code of the module is still running at
	 * that point.
	 *
	 * (Note the strict `<': idx == DTV_MAX_INDEX(dtv) is still a
	 * valid entry, but it is conservatively sent to the slow path.)
	 */
	if (__predict_true(idx < DTV_MAX_INDEX(dtv) && dtv[idx] != NULL))
		return (uint8_t *)dtv[idx] + offset;

	/* Slow path: take the rtld lock, grow the DTV and/or allocate. */
	return _rtld_tls_get_addr(tcb, idx, offset);
}
523 #endif
524
525 #endif /* __HAVE_TLS_VARIANT_I || __HAVE_TLS_VARIANT_II */
526