/*	$NetBSD: tls.c,v 1.22 2024/07/23 22:00:00 riastradh Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Joerg Sonnenberger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31 #include <sys/cdefs.h>
32 __RCSID("$NetBSD: tls.c,v 1.22 2024/07/23 22:00:00 riastradh Exp $");
33
/*
 * Thread-local storage
 *
 * Reference:
 *
 *	[ELFTLS] Ulrich Drepper, `ELF Handling For Thread-Local
 *	Storage', Version 0.21, 2023-08-22.
 *	https://akkadia.org/drepper/tls.pdf
 *	https://web.archive.org/web/20240718081934/https://akkadia.org/drepper/tls.pdf
 */
44
45 #include <sys/param.h>
46 #include <sys/ucontext.h>
47 #include <lwp.h>
48 #include <stdalign.h>
49 #include <stddef.h>
50 #include <string.h>
51 #include "debug.h"
52 #include "rtld.h"
53
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)

/* Internal helpers, defined below. */
static struct tls_tcb *_rtld_tls_allocate_locked(void);
static void *_rtld_tls_module_allocate(struct tls_tcb *, size_t);

/*
 * DTV offset
 *
 * On some architectures (m68k, mips, or1k, powerpc, and riscv),
 * the DTV offsets passed to __tls_get_addr have a bias relative
 * to the start of the DTV, in order to maximize the range of TLS
 * offsets that can be used by instruction encodings with signed
 * displacements.
 */
#ifndef TLS_DTV_OFFSET
#define	TLS_DTV_OFFSET	0
#endif

static size_t _rtld_tls_static_space;	/* Static TLS space allocated */
static size_t _rtld_tls_static_offset;	/* Next offset for static TLS to use */
size_t _rtld_tls_dtv_generation = 1;	/* Bumped on each load of obj w/ TLS */
size_t _rtld_tls_max_index = 1;		/* Max index into up-to-date DTV */

/*
 * DTV -- Dynamic Thread Vector
 *
 * The DTV is a per-thread array that maps each module with
 * thread-local storage to a pointer into part of the thread's TCB
 * (thread control block), or dynamically loaded TLS blocks,
 * reserved for that module's storage.
 *
 * The TCB itself, struct tls_tcb, has a pointer to the DTV at
 * tcb->tcb_dtv.
 *
 * The layout is:
 *
 *	+---------------+
 *	|   max index   |	-1	max index i for which dtv[i] is alloced
 *	+---------------+
 *	|  generation   |	0	void **dtv points here
 *	+---------------+
 *	| obj 1 tls ptr |	1	TLS pointer for obj w/ obj->tlsindex 1
 *	+---------------+
 *	| obj 2 tls ptr |	2	TLS pointer for obj w/ obj->tlsindex 2
 *	+---------------+
 *		.
 *		.
 *		.
 *
 * The values of obj->tlsindex start at 1; this way,
 * dtv[obj->tlsindex] works, when dtv[0] is the generation.  The
 * TLS pointers go either into the static thread-local storage,
 * for the initial objects (i.e., those loaded at startup), or
 * into TLS blocks dynamically allocated for objects that
 * dynamically loaded by dlopen.
 *
 * The generation field is a cache of the global generation number
 * _rtld_tls_dtv_generation, which is bumped every time an object
 * with TLS is loaded in _rtld_map_object, and cached by
 * __tls_get_addr (via _rtld_tls_get_addr) when a newly loaded
 * module lies outside the bounds of the current DTV.
 *
 * XXX Why do we keep max index and generation separately?  They
 * appear to be initialized the same, always incremented together,
 * and always stored together.
 *
 * XXX Why is this not a struct?
 *
 *	struct dtv {
 *		size_t dtv_gen;
 *		void *dtv_module[];
 *	};
 */
/* Accessors; note that the max index lives at dtv[-1], before dtv[0]. */
#define	DTV_GENERATION(dtv)		((size_t)((dtv)[0]))
#define	DTV_MAX_INDEX(dtv)		((size_t)((dtv)[-1]))
#define	SET_DTV_GENERATION(dtv, val)	(dtv)[0] = (void *)(size_t)(val)
#define	SET_DTV_MAX_INDEX(dtv, val)	(dtv)[-1] = (void *)(size_t)(val)
131
/*
 * _rtld_tls_get_addr(tls, idx, offset)
 *
 *	Slow path for __tls_get_addr (see below), called to allocate
 *	TLS space if needed for the object obj with obj->tlsindex idx,
 *	at offset, which must be below obj->tlssize.
 *
 *	This may allocate a DTV if the current one is too old, and it
 *	may allocate a dynamically loaded TLS block if there isn't one
 *	already allocated for it.  It runs under the rtld exclusive
 *	lock, so it cannot race with object loading or with another
 *	thread's slow path.
 *
 *	XXX Why is the first argument passed as `void *tls' instead of
 *	just `struct tls_tcb *tcb'?
 */
void *
_rtld_tls_get_addr(void *tls, size_t idx, size_t offset)
{
	struct tls_tcb *tcb = tls;
	void **dtv, **new_dtv;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);

	dtv = tcb->tcb_dtv;

	/*
	 * If the generation number has changed, we have to allocate a
	 * new DTV.
	 *
	 * XXX Do we really?  Isn't it enough to check whether idx <=
	 * DTV_MAX_INDEX(dtv)?
	 */
	if (__predict_false(DTV_GENERATION(dtv) != _rtld_tls_dtv_generation)) {
		size_t to_copy = DTV_MAX_INDEX(dtv);

		/*
		 * "2 +" because the first element is the generation and
		 * the second one is the maximum index.  xcalloc
		 * zero-fills, so any entries beyond the copied range
		 * start out NULL and are allocated lazily below.
		 */
		new_dtv = xcalloc((2 + _rtld_tls_max_index) * sizeof(*dtv));
		++new_dtv;		/* advance past DTV_MAX_INDEX */
		if (to_copy > _rtld_tls_max_index)	/* XXX How? */
			to_copy = _rtld_tls_max_index;
		/* Copy the per-module pointers, dtv[1..to_copy]. */
		memcpy(new_dtv + 1, dtv + 1, to_copy * sizeof(*dtv));
		xfree(dtv - 1);		/* retreat back to DTV_MAX_INDEX */
		dtv = tcb->tcb_dtv = new_dtv;
		SET_DTV_MAX_INDEX(dtv, _rtld_tls_max_index);
		SET_DTV_GENERATION(dtv, _rtld_tls_dtv_generation);
	}

	/* First access to this module from this thread: allocate. */
	if (__predict_false(dtv[idx] == NULL))
		dtv[idx] = _rtld_tls_module_allocate(tcb, idx);

	_rtld_exclusive_exit(&mask);

	return (uint8_t *)dtv[idx] + offset;
}
189
/*
 * _rtld_tls_initial_allocation()
 *
 *	Allocate the TCB (thread control block) for the initial thread,
 *	once the static TLS space usage has been determined (plus some
 *	slop to allow certain special cases like Mesa to be dlopened).
 *
 *	This must be done _after_ all initial objects (i.e., those
 *	loaded at startup, as opposed to objects dynamically loaded by
 *	dlopen) have had TLS offsets allocated if need be by
 *	_rtld_tls_offset_allocate, and have had relocations processed.
 */
void
_rtld_tls_initial_allocation(void)
{
	struct tls_tcb *tcb;

	/* Reserve extra space for dlopened users of static TLS (Mesa). */
	_rtld_tls_static_space = _rtld_tls_static_offset +
	    RTLD_STATIC_TLS_RESERVATION;

#ifndef __HAVE_TLS_VARIANT_I
	/*
	 * Variant II: static TLS offsets are subtracted from the TCB
	 * address, so round the whole area up to the most demanding
	 * alignment.
	 */
	_rtld_tls_static_space = roundup2(_rtld_tls_static_space,
	    alignof(max_align_t));
#endif
	dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));

	tcb = _rtld_tls_allocate_locked();
	/* Install the TCB for the current lwp. */
#ifdef __HAVE___LWP_SETTCB
	__lwp_settcb(tcb);
#else
	_lwp_setprivate(tcb);
#endif
}
223
/*
 * _rtld_tls_allocate_locked()
 *
 *	Internal subroutine to allocate a TCB (thread control block)
 *	for the current thread.
 *
 *	This allocates a DTV and a TCB that points to it, including
 *	static space in the TCB for the TLS of the initial objects.
 *	TLS blocks for dynamically loaded objects are allocated lazily.
 *
 *	Caller must either be single-threaded (at startup via
 *	_rtld_tls_initial_allocation) or hold the rtld exclusive lock
 *	(via _rtld_tls_allocate).
 */
static struct tls_tcb *
_rtld_tls_allocate_locked(void)
{
	Obj_Entry *obj;
	struct tls_tcb *tcb;
	uint8_t *p, *q;

	/* One zeroed allocation covers the static TLS area and the TCB. */
	p = xcalloc(_rtld_tls_static_space + sizeof(struct tls_tcb));
#ifdef __HAVE_TLS_VARIANT_I
	/* Variant I: TCB at the bottom, static TLS just above it. */
	tcb = (struct tls_tcb *)p;
	p += sizeof(struct tls_tcb);
#else
	/* Variant II: static TLS at the bottom, TCB at the top. */
	p += _rtld_tls_static_space;
	tcb = (struct tls_tcb *)p;
	tcb->tcb_self = tcb;
#endif
	dbg(("lwp %d tls tcb %p", _lwp_self(), tcb));
	/*
	 * "2 +" because the first element is the generation and the second
	 * one is the maximum index.
	 */
	tcb->tcb_dtv = xcalloc(sizeof(*tcb->tcb_dtv) *
	    (2 + _rtld_tls_max_index));
	++tcb->tcb_dtv;		/* advance past DTV_MAX_INDEX */
	SET_DTV_MAX_INDEX(tcb->tcb_dtv, _rtld_tls_max_index);
	SET_DTV_GENERATION(tcb->tcb_dtv, _rtld_tls_dtv_generation);

	/*
	 * Point the DTV entries of all initial objects with static TLS
	 * into the static area, and copy in their TLS initialization
	 * images.  (The area is already zeroed, so the tbss part needs
	 * no memset.)
	 */
	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
		if (obj->tls_static) {
#ifdef __HAVE_TLS_VARIANT_I
			q = p + obj->tlsoffset;
#else
			q = p - obj->tlsoffset;
#endif
			dbg(("%s: [lwp %d] tls dtv %p index %zu offset %zu",
			    obj->path, _lwp_self(),
			    q, obj->tlsindex, obj->tlsoffset));
			if (obj->tlsinitsize)
				memcpy(q, obj->tlsinit, obj->tlsinitsize);
			tcb->tcb_dtv[obj->tlsindex] = q;
		}
	}

	return tcb;
}
282
283 /*
284 * _rtld_tls_allocate()
285 *
286 * Allocate a TCB (thread control block) for the current thread.
287 *
288 * Called by pthread_create for non-initial threads. (The initial
289 * thread's TCB is allocated by _rtld_tls_initial_allocation.)
290 */
291 struct tls_tcb *
292 _rtld_tls_allocate(void)
293 {
294 struct tls_tcb *tcb;
295 sigset_t mask;
296
297 _rtld_exclusive_enter(&mask);
298 tcb = _rtld_tls_allocate_locked();
299 _rtld_exclusive_exit(&mask);
300
301 return tcb;
302 }
303
/*
 * _rtld_tls_free(tcb)
 *
 *	Free a TCB allocated with _rtld_tls_allocate.
 *
 *	Frees any TLS blocks for dynamically loaded objects that tcb's
 *	DTV points to, and frees tcb's DTV, and frees tcb.
 */
void
_rtld_tls_free(struct tls_tcb *tcb)
{
	size_t i, max_index;
	uint8_t *p, *p_end;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);

	/* Recover the bounds [p, p_end) of the static TLS area. */
#ifdef __HAVE_TLS_VARIANT_I
	p = (uint8_t *)tcb;
#else
	p = (uint8_t *)tcb - _rtld_tls_static_space;
#endif
	p_end = p + _rtld_tls_static_space;

	/*
	 * DTV entries pointing inside [p, p_end) share the single
	 * static allocation freed below; only entries outside it are
	 * separately allocated dynamic TLS blocks that must be freed
	 * one by one.  NULL entries (never-accessed modules) also
	 * reach xfree here -- presumably xfree(NULL) is a no-op;
	 * confirm against xmalloc.c.
	 */
	max_index = DTV_MAX_INDEX(tcb->tcb_dtv);
	for (i = 1; i <= max_index; ++i) {
		if ((uint8_t *)tcb->tcb_dtv[i] < p ||
		    (uint8_t *)tcb->tcb_dtv[i] >= p_end)
			xfree(tcb->tcb_dtv[i]);
	}
	xfree(tcb->tcb_dtv - 1);	/* retreat back to DTV_MAX_INDEX */
	xfree(p);

	_rtld_exclusive_exit(&mask);
}
339
340 /*
341 * _rtld_tls_module_allocate(tcb, idx)
342 *
343 * Allocate thread-local storage in the thread with the given TCB
344 * (thread control block) for the object obj whose obj->tlsindex
345 * is idx.
346 *
347 * If obj has had space in static TLS reserved (obj->tls_static),
348 * return a pointer into that. Otherwise, allocate a TLS block,
349 * mark obj as having a TLS block allocated (obj->tls_dynamic),
350 * and return it.
351 *
352 * Called by _rtld_tls_get_addr to get the thread-local storage
353 * for an object the first time around.
354 */
355 static void *
356 _rtld_tls_module_allocate(struct tls_tcb *tcb, size_t idx)
357 {
358 Obj_Entry *obj;
359 uint8_t *p;
360
361 for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
362 if (obj->tlsindex == idx)
363 break;
364 }
365 if (obj == NULL) {
366 _rtld_error("Module for TLS index %zu missing", idx);
367 _rtld_die();
368 }
369 if (obj->tls_static) {
370 #ifdef __HAVE_TLS_VARIANT_I
371 p = (uint8_t *)tcb + obj->tlsoffset + sizeof(struct tls_tcb);
372 #else
373 p = (uint8_t *)tcb - obj->tlsoffset;
374 #endif
375 return p;
376 }
377
378 p = xmalloc(obj->tlssize);
379 memcpy(p, obj->tlsinit, obj->tlsinitsize);
380 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
381
382 obj->tls_dynamic = 1;
383
384 return p;
385 }
386
387 /*
388 * _rtld_tls_offset_allocate(obj)
389 *
390 * Allocate a static thread-local storage offset for obj.
391 *
392 * Called by _rtld at startup for all initial objects. Called
393 * also by MD relocation logic, which is allowed (for Mesa) to
394 * allocate an additional 64 bytes (RTLD_STATIC_TLS_RESERVATION)
395 * of static thread-local storage in dlopened objects.
396 */
397 int
398 _rtld_tls_offset_allocate(Obj_Entry *obj)
399 {
400 size_t offset, next_offset;
401
402 if (obj->tls_dynamic)
403 return -1;
404
405 if (obj->tls_static)
406 return 0;
407 if (obj->tlssize == 0) {
408 obj->tlsoffset = 0;
409 obj->tls_static = 1;
410 return 0;
411 }
412
413 #ifdef __HAVE_TLS_VARIANT_I
414 offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
415 next_offset = offset + obj->tlssize;
416 #else
417 offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
418 obj->tlsalign);
419 next_offset = offset;
420 #endif
421
422 /*
423 * Check if the static allocation was already done.
424 * This happens if dynamically loaded modules want to use
425 * static TLS space.
426 *
427 * XXX Keep an actual free list and callbacks for initialisation.
428 */
429 if (_rtld_tls_static_space) {
430 if (obj->tlsinitsize) {
431 _rtld_error("%s: Use of initialized "
432 "Thread Local Storage with model initial-exec "
433 "and dlopen is not supported",
434 obj->path);
435 return -1;
436 }
437 if (next_offset > _rtld_tls_static_space) {
438 _rtld_error("%s: No space available "
439 "for static Thread Local Storage",
440 obj->path);
441 return -1;
442 }
443 }
444 obj->tlsoffset = offset;
445 dbg(("%s: static tls offset 0x%zx size %zu\n",
446 obj->path, obj->tlsoffset, obj->tlssize));
447 _rtld_tls_static_offset = next_offset;
448 obj->tls_static = 1;
449
450 return 0;
451 }
452
453 /*
454 * _rtld_tls_offset_free(obj)
455 *
456 * Free a static thread-local storage offset for obj.
457 *
458 * Called by dlclose (via _rtld_unload_object -> _rtld_obj_free).
459 *
460 * Since static thread-local storage is normally not used by
461 * dlopened objects (with the exception of Mesa), this doesn't do
462 * anything to recycle the space right now.
463 */
464 void
465 _rtld_tls_offset_free(Obj_Entry *obj)
466 {
467
468 /*
469 * XXX See above.
470 */
471 obj->tls_static = 0;
472 return;
473 }
474
475 #if defined(__HAVE_COMMON___TLS_GET_ADDR) && defined(RTLD_LOADER)
/*
 * __tls_get_addr(tlsindex)
 *
 *	Symbol directly called by code generated by the compiler for
 *	references to thread-local storage in the general-dynamic or
 *	local-dynamic TLS models (but not initial-exec or local-exec).
 *
 *	The argument is a pointer to
 *
 *		struct {
 *			unsigned long int ti_module;
 *			unsigned long int ti_offset;
 *		};
 *
 *	as in, e.g., [ELFTLS] Sec. 3.4.3.  This coincides with the
 *	type size_t[2] on all architectures that use this common
 *	__tls_get_addr definition (XXX but why do we write it as
 *	size_t[2]?).
 *
 *	ti_module, i.e., arg[0], is the obj->tlsindex assigned at
 *	load-time by _rtld_map_object, and ti_offset, i.e., arg[1], is
 *	assigned at link-time by ld(1), possibly adjusted by
 *	TLS_DTV_OFFSET.
 *
 *	Some architectures -- specifically IA-64 -- use a different
 *	calling convention.  Some architectures -- specifically i386
 *	-- also use another entry point ___tls_get_addr (that's three
 *	leading underscores) with a different calling convention.
 */
void *
__tls_get_addr(void *arg_)
{
	size_t *arg = (size_t *)arg_;
	void **dtv;
#ifdef __HAVE___LWP_GETTCB_FAST
	struct tls_tcb * const tcb = __lwp_gettcb_fast();
#else
	struct tls_tcb * const tcb = __lwp_getprivate_fast();
#endif
	size_t idx = arg[0], offset = arg[1] + TLS_DTV_OFFSET;

	dtv = tcb->tcb_dtv;

	/*
	 * Fast path: access to an already allocated DTV entry.  This
	 * checks the current limit and the entry without needing any
	 * locking.  Entries are only freed on dlclose() and it is an
	 * application bug if code of the module is still running at
	 * that point.
	 */
	if (__predict_true(idx <= DTV_MAX_INDEX(dtv) && dtv[idx] != NULL))
		return (uint8_t *)dtv[idx] + offset;

	/* Slow path: take the lock, grow the DTV and/or allocate. */
	return _rtld_tls_get_addr(tcb, idx, offset);
}
531 #endif
532
533 #endif /* __HAVE_TLS_VARIANT_I || __HAVE_TLS_VARIANT_II */
534