tls.c revision 1.22 1 1.22 riastrad /* $NetBSD: tls.c,v 1.22 2024/07/23 22:00:00 riastradh Exp $ */
2 1.1 joerg /*-
3 1.1 joerg * Copyright (c) 2011 The NetBSD Foundation, Inc.
4 1.1 joerg * All rights reserved.
5 1.1 joerg *
6 1.1 joerg * This code is derived from software contributed to The NetBSD Foundation
7 1.1 joerg * by Joerg Sonnenberger.
8 1.1 joerg *
9 1.1 joerg * Redistribution and use in source and binary forms, with or without
10 1.1 joerg * modification, are permitted provided that the following conditions
11 1.1 joerg * are met:
12 1.1 joerg * 1. Redistributions of source code must retain the above copyright
13 1.1 joerg * notice, this list of conditions and the following disclaimer.
14 1.1 joerg * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 joerg * notice, this list of conditions and the following disclaimer in the
16 1.1 joerg * documentation and/or other materials provided with the distribution.
17 1.1 joerg *
18 1.1 joerg * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 1.1 joerg * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 1.1 joerg * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 1.1 joerg * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 1.1 joerg * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 1.1 joerg * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 1.1 joerg * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 1.1 joerg * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 1.1 joerg * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 1.1 joerg * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 1.1 joerg * POSSIBILITY OF SUCH DAMAGE.
29 1.1 joerg */
30 1.1 joerg
31 1.1 joerg #include <sys/cdefs.h>
32 1.22 riastrad __RCSID("$NetBSD: tls.c,v 1.22 2024/07/23 22:00:00 riastradh Exp $");
33 1.20 riastrad
34 1.20 riastrad /*
35 1.20 riastrad * Thread-local storage
36 1.20 riastrad *
37 1.20 riastrad * Reference:
38 1.20 riastrad *
39 1.20 riastrad * [ELFTLS] Ulrich Drepper, `ELF Handling For Thread-Local
40 1.20 riastrad * Storage', Version 0.21, 2023-08-22.
41 1.20 riastrad * https://akkadia.org/drepper/tls.pdf
42 1.20 riastrad * https://web.archive.org/web/20240718081934/https://akkadia.org/drepper/tls.pdf
43 1.20 riastrad */
44 1.1 joerg
45 1.1 joerg #include <sys/param.h>
46 1.2 joerg #include <sys/ucontext.h>
47 1.1 joerg #include <lwp.h>
48 1.14 joerg #include <stdalign.h>
49 1.13 joerg #include <stddef.h>
50 1.1 joerg #include <string.h>
51 1.8 skrll #include "debug.h"
52 1.1 joerg #include "rtld.h"
53 1.1 joerg
54 1.1 joerg #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
55 1.1 joerg
56 1.4 joerg static struct tls_tcb *_rtld_tls_allocate_locked(void);
57 1.18 joerg static void *_rtld_tls_module_allocate(struct tls_tcb *, size_t);
58 1.4 joerg
59 1.20 riastrad /*
60 1.20 riastrad * DTV offset
61 1.20 riastrad *
62 1.20 riastrad * On some architectures (m68k, mips, or1k, powerpc, and riscv),
63 1.20 riastrad * the DTV offsets passed to __tls_get_addr have a bias relative
64 1.20 riastrad * to the start of the DTV, in order to maximize the range of TLS
65 1.20 riastrad * offsets that can be used by instruction encodings with signed
66 1.20 riastrad * displacements.
67 1.20 riastrad */
68 1.3 matt #ifndef TLS_DTV_OFFSET
69 1.3 matt #define TLS_DTV_OFFSET 0
70 1.3 matt #endif
71 1.3 matt
72 1.1 joerg static size_t _rtld_tls_static_space; /* Static TLS space allocated */
73 1.1 joerg static size_t _rtld_tls_static_offset; /* Next offset for static TLS to use */
74 1.20 riastrad size_t _rtld_tls_dtv_generation = 1; /* Bumped on each load of obj w/ TLS */
75 1.20 riastrad size_t _rtld_tls_max_index = 1; /* Max index into up-to-date DTV */
76 1.1 joerg
77 1.20 riastrad /*
78 1.20 riastrad * DTV -- Dynamic Thread Vector
79 1.20 riastrad *
80 1.20 riastrad * The DTV is a per-thread array that maps each module with
81 1.20 riastrad * thread-local storage to a pointer into part of the thread's TCB
82 1.20 riastrad * (thread control block), or dynamically loaded TLS blocks,
83 1.20 riastrad * reserved for that module's storage.
84 1.20 riastrad *
85 1.20 riastrad * The TCB itself, struct tls_tcb, has a pointer to the DTV at
86 1.20 riastrad * tcb->tcb_dtv.
87 1.20 riastrad *
88 1.20 riastrad * The layout is:
89 1.20 riastrad *
90 1.20 riastrad * +---------------+
91 1.20 riastrad * | max index | -1 max index i for which dtv[i] is alloced
92 1.20 riastrad * +---------------+
93 1.20 riastrad * | generation | 0 void **dtv points here
94 1.20 riastrad * +---------------+
95 1.20 riastrad * | obj 1 tls ptr | 1 TLS pointer for obj w/ obj->tlsindex 1
96 1.20 riastrad * +---------------+
97 1.20 riastrad * | obj 2 tls ptr | 2 TLS pointer for obj w/ obj->tlsindex 2
98 1.20 riastrad * +---------------+
99 1.20 riastrad * .
100 1.20 riastrad * .
101 1.20 riastrad * .
102 1.20 riastrad *
103 1.20 riastrad * The values of obj->tlsindex start at 1; this way,
104 1.20 riastrad * dtv[obj->tlsindex] works, when dtv[0] is the generation. The
105 1.20 riastrad * TLS pointers go either into the static thread-local storage,
106 1.20 riastrad * for the initial objects (i.e., those loaded at startup), or
107 1.20 riastrad  * into TLS blocks dynamically allocated for objects that are
108 1.20 riastrad  * dynamically loaded by dlopen.
109 1.20 riastrad *
110 1.20 riastrad * The generation field is a cache of the global generation number
111 1.20 riastrad * _rtld_tls_dtv_generation, which is bumped every time an object
112 1.20 riastrad * with TLS is loaded in _rtld_map_object, and cached by
113 1.20 riastrad * __tls_get_addr (via _rtld_tls_get_addr) when a newly loaded
114 1.20 riastrad * module lies outside the bounds of the current DTV.
115 1.20 riastrad *
116 1.20 riastrad * XXX Why do we keep max index and generation separately? They
117 1.20 riastrad * appear to be initialized the same, always incremented together,
118 1.20 riastrad * and always stored together.
119 1.20 riastrad *
120 1.20 riastrad * XXX Why is this not a struct?
121 1.20 riastrad *
122 1.20 riastrad * struct dtv {
123 1.20 riastrad * size_t dtv_gen;
124 1.20 riastrad * void *dtv_module[];
125 1.20 riastrad * };
126 1.20 riastrad */
127 1.15 skrll #define DTV_GENERATION(dtv) ((size_t)((dtv)[0]))
128 1.15 skrll #define DTV_MAX_INDEX(dtv) ((size_t)((dtv)[-1]))
129 1.1 joerg #define SET_DTV_GENERATION(dtv, val) (dtv)[0] = (void *)(size_t)(val)
130 1.1 joerg #define SET_DTV_MAX_INDEX(dtv, val) (dtv)[-1] = (void *)(size_t)(val)
131 1.1 joerg
132 1.20 riastrad /*
133 1.20 riastrad * _rtld_tls_get_addr(tcb, idx, offset)
134 1.20 riastrad *
135 1.20 riastrad * Slow path for __tls_get_addr (see below), called to allocate
136 1.20 riastrad * TLS space if needed for the object obj with obj->tlsindex idx,
137 1.20 riastrad * at offset, which must be below obj->tlssize.
138 1.20 riastrad *
139 1.20 riastrad * This may allocate a DTV if the current one is too old, and it
140 1.20 riastrad * may allocate a dynamically loaded TLS block if there isn't one
141 1.20 riastrad * already allocated for it.
142 1.20 riastrad *
143 1.20 riastrad * XXX Why is the first argument passed as `void *tls' instead of
144 1.20 riastrad * just `struct tls_tcb *tcb'?
145 1.20 riastrad */
146 1.1 joerg void *
147 1.1 joerg _rtld_tls_get_addr(void *tls, size_t idx, size_t offset)
148 1.1 joerg {
149 1.1 joerg 	struct tls_tcb *tcb = tls;
150 1.1 joerg 	void **dtv, **new_dtv;
151 1.5 joerg 	sigset_t mask;
152 1.1 joerg
		/*
		 * Take the rtld exclusive lock (blocking signals) while we
		 * inspect and possibly replace this thread's DTV; the global
		 * generation/max-index counters are protected by it.
		 */
153 1.5 joerg 	_rtld_exclusive_enter(&mask);
154 1.4 joerg
155 1.1 joerg 	dtv = tcb->tcb_dtv;
156 1.1 joerg
157 1.20 riastrad 	/*
158 1.20 riastrad 	 * If the generation number has changed, we have to allocate a
159 1.20 riastrad 	 * new DTV.
160 1.20 riastrad 	 *
161 1.20 riastrad 	 * XXX Do we really? Isn't it enough to check whether idx <=
162 1.20 riastrad 	 * DTV_MAX_INDEX(dtv)?
163 1.20 riastrad 	 */
164 1.1 joerg 	if (__predict_false(DTV_GENERATION(dtv) != _rtld_tls_dtv_generation)) {
165 1.1 joerg 		size_t to_copy = DTV_MAX_INDEX(dtv);
166 1.1 joerg
167 1.21 riastrad 		/*
168 1.21 riastrad 		 * "2 +" because the first element is the generation and
169 1.21 riastrad 		 * the second one is the maximum index.
170 1.21 riastrad 		 */
171 1.1 joerg 		new_dtv = xcalloc((2 + _rtld_tls_max_index) * sizeof(*dtv));
172 1.20 riastrad 		++new_dtv;	/* advance past DTV_MAX_INDEX */
173 1.20 riastrad 		if (to_copy > _rtld_tls_max_index)	/* XXX How? */
174 1.1 joerg 			to_copy = _rtld_tls_max_index;
		/* Copy the old per-module slots (dtv[1..to_copy]); new tail slots stay NULL. */
175 1.1 joerg 		memcpy(new_dtv + 1, dtv + 1, to_copy * sizeof(*dtv));
176 1.20 riastrad 		xfree(dtv - 1);	/* retreat back to DTV_MAX_INDEX */
177 1.1 joerg 		dtv = tcb->tcb_dtv = new_dtv;
178 1.1 joerg 		SET_DTV_MAX_INDEX(dtv, _rtld_tls_max_index);
179 1.1 joerg 		SET_DTV_GENERATION(dtv, _rtld_tls_dtv_generation);
180 1.1 joerg 	}
181 1.1 joerg
		/* First access to this module's TLS in this thread: allocate it lazily. */
182 1.1 joerg 	if (__predict_false(dtv[idx] == NULL))
183 1.18 joerg 		dtv[idx] = _rtld_tls_module_allocate(tcb, idx);
184 1.1 joerg
185 1.5 joerg 	_rtld_exclusive_exit(&mask);
186 1.4 joerg
187 1.1 joerg 	return (uint8_t *)dtv[idx] + offset;
188 1.1 joerg }
189 1.1 joerg
190 1.20 riastrad /*
191 1.20 riastrad * _rtld_tls_initial_allocation()
192 1.20 riastrad *
193 1.20 riastrad * Allocate the TCB (thread control block) for the initial thread,
194 1.20 riastrad * once the static TLS space usage has been determined (plus some
195 1.20 riastrad * slop to allow certain special cases like Mesa to be dlopened).
196 1.20 riastrad *
197 1.20 riastrad * This must be done _after_ all initial objects (i.e., those
198 1.20 riastrad * loaded at startup, as opposed to objects dynamically loaded by
199 1.20 riastrad * dlopen) have had TLS offsets allocated if need be by
200 1.20 riastrad * _rtld_tls_offset_allocate, and have had relocations processed.
201 1.20 riastrad */
202 1.1 joerg void
203 1.1 joerg _rtld_tls_initial_allocation(void)
204 1.1 joerg {
205 1.1 joerg 	struct tls_tcb *tcb;
206 1.1 joerg
	/* Freeze static TLS usage, plus slop for later dlopened users (Mesa). */
207 1.1 joerg 	_rtld_tls_static_space = _rtld_tls_static_offset +
208 1.1 joerg 	    RTLD_STATIC_TLS_RESERVATION;
209 1.1 joerg
210 1.1 joerg #ifndef __HAVE_TLS_VARIANT_I
	/*
	 * Variant II: the TCB sits just above the static TLS area (see
	 * _rtld_tls_allocate_locked), so round the total up to the
	 * strictest alignment to keep the TCB aligned.
	 */
211 1.1 joerg 	_rtld_tls_static_space = roundup2(_rtld_tls_static_space,
212 1.14 joerg 	    alignof(max_align_t));
213 1.1 joerg #endif
214 1.8 skrll 	dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));
215 1.1 joerg
216 1.4 joerg 	tcb = _rtld_tls_allocate_locked();
	/* Install the TCB for the initial thread; no lock needed, still single-threaded. */
217 1.3 matt #ifdef __HAVE___LWP_SETTCB
218 1.3 matt 	__lwp_settcb(tcb);
219 1.3 matt #else
220 1.1 joerg 	_lwp_setprivate(tcb);
221 1.3 matt #endif
222 1.1 joerg }
223 1.1 joerg
224 1.20 riastrad /*
225 1.20 riastrad * _rtld_tls_allocate_locked()
226 1.20 riastrad *
227 1.20 riastrad * Internal subroutine to allocate a TCB (thread control block)
228 1.20 riastrad * for the current thread.
229 1.20 riastrad *
230 1.20 riastrad * This allocates a DTV and a TCB that points to it, including
231 1.20 riastrad * static space in the TCB for the TLS of the initial objects.
232 1.20 riastrad * TLS blocks for dynamically loaded objects are allocated lazily.
233 1.20 riastrad *
234 1.20 riastrad * Caller must either be single-threaded (at startup via
235 1.20 riastrad * _rtld_tls_initial_allocation) or hold the rtld exclusive lock
236 1.20 riastrad * (via _rtld_tls_allocate).
237 1.20 riastrad */
238 1.4 joerg static struct tls_tcb *
239 1.4 joerg _rtld_tls_allocate_locked(void)
240 1.1 joerg {
241 1.1 joerg 	Obj_Entry *obj;
242 1.1 joerg 	struct tls_tcb *tcb;
243 1.1 joerg 	uint8_t *p, *q;
244 1.1 joerg
	/* One allocation holds both the TCB and the static TLS area. */
245 1.1 joerg 	p = xcalloc(_rtld_tls_static_space + sizeof(struct tls_tcb));
246 1.1 joerg #ifdef __HAVE_TLS_VARIANT_I
	/* Variant I: TCB at the bottom, static TLS area grows up after it. */
247 1.1 joerg 	tcb = (struct tls_tcb *)p;
248 1.1 joerg 	p += sizeof(struct tls_tcb);
249 1.1 joerg #else
	/* Variant II: static TLS area below, TCB on top; TCB points to itself. */
250 1.1 joerg 	p += _rtld_tls_static_space;
251 1.1 joerg 	tcb = (struct tls_tcb *)p;
252 1.1 joerg 	tcb->tcb_self = tcb;
253 1.1 joerg #endif
254 1.17 riastrad 	dbg(("lwp %d tls tcb %p", _lwp_self(), tcb));
255 1.21 riastrad 	/*
256 1.21 riastrad 	 * "2 +" because the first element is the generation and the second
257 1.21 riastrad 	 * one is the maximum index.
258 1.21 riastrad 	 */
259 1.1 joerg 	tcb->tcb_dtv = xcalloc(sizeof(*tcb->tcb_dtv) * (2 + _rtld_tls_max_index));
260 1.20 riastrad 	++tcb->tcb_dtv;	/* advance past DTV_MAX_INDEX */
261 1.1 joerg 	SET_DTV_MAX_INDEX(tcb->tcb_dtv, _rtld_tls_max_index);
262 1.1 joerg 	SET_DTV_GENERATION(tcb->tcb_dtv, _rtld_tls_dtv_generation);
263 1.1 joerg
	/*
	 * Populate the DTV and copy TLS init images for every object with
	 * reserved static TLS space; dynamic objects are filled in lazily
	 * by _rtld_tls_get_addr.
	 */
264 1.1 joerg 	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
265 1.18 joerg 		if (obj->tls_static) {
266 1.1 joerg #ifdef __HAVE_TLS_VARIANT_I
	/* Variant I: offsets grow up from the end of the TCB. */
267 1.1 joerg 			q = p + obj->tlsoffset;
268 1.1 joerg #else
	/* Variant II: offsets grow down from the TCB. */
269 1.1 joerg 			q = p - obj->tlsoffset;
270 1.1 joerg #endif
271 1.17 riastrad 			dbg(("%s: [lwp %d] tls dtv %p index %zu offset %zu",
272 1.17 riastrad 			    obj->path, _lwp_self(),
273 1.17 riastrad 			    q, obj->tlsindex, obj->tlsoffset));
274 1.11 joerg 			if (obj->tlsinitsize)
275 1.11 joerg 				memcpy(q, obj->tlsinit, obj->tlsinitsize);
276 1.1 joerg 			tcb->tcb_dtv[obj->tlsindex] = q;
277 1.1 joerg 		}
278 1.1 joerg 	}
279 1.1 joerg
280 1.1 joerg 	return tcb;
281 1.1 joerg }
282 1.1 joerg
283 1.20 riastrad /*
284 1.20 riastrad * _rtld_tls_allocate()
285 1.20 riastrad *
286 1.20 riastrad * Allocate a TCB (thread control block) for the current thread.
287 1.20 riastrad *
288 1.20 riastrad * Called by pthread_create for non-initial threads. (The initial
289 1.20 riastrad * thread's TCB is allocated by _rtld_tls_initial_allocation.)
290 1.20 riastrad */
291 1.4 joerg struct tls_tcb *
292 1.4 joerg _rtld_tls_allocate(void)
293 1.4 joerg {
294 1.4 joerg 	struct tls_tcb *tcb;
295 1.5 joerg 	sigset_t mask;
296 1.4 joerg
	/* Locked wrapper: the real work is in _rtld_tls_allocate_locked. */
297 1.5 joerg 	_rtld_exclusive_enter(&mask);
298 1.4 joerg 	tcb = _rtld_tls_allocate_locked();
299 1.5 joerg 	_rtld_exclusive_exit(&mask);
300 1.4 joerg
301 1.4 joerg 	return tcb;
302 1.4 joerg }
303 1.4 joerg
304 1.20 riastrad /*
305 1.20 riastrad * _rtld_tls_free(tcb)
306 1.20 riastrad *
307 1.20 riastrad * Free a TCB allocated with _rtld_tls_allocate.
308 1.20 riastrad *
309 1.20 riastrad * Frees any TLS blocks for dynamically loaded objects that tcb's
310 1.20 riastrad * DTV points to, and frees tcb's DTV, and frees tcb.
311 1.20 riastrad */
312 1.1 joerg void
313 1.1 joerg _rtld_tls_free(struct tls_tcb *tcb)
314 1.1 joerg {
315 1.1 joerg 	size_t i, max_index;
316 1.11 joerg 	uint8_t *p, *p_end;
317 1.5 joerg 	sigset_t mask;
318 1.1 joerg
319 1.5 joerg 	_rtld_exclusive_enter(&mask);
320 1.4 joerg
	/* Recover the start of the combined TCB + static TLS allocation. */
321 1.1 joerg #ifdef __HAVE_TLS_VARIANT_I
322 1.1 joerg 	p = (uint8_t *)tcb;
323 1.1 joerg #else
324 1.1 joerg 	p = (uint8_t *)tcb - _rtld_tls_static_space;
325 1.1 joerg #endif
326 1.11 joerg 	p_end = p + _rtld_tls_static_space;
327 1.11 joerg
	/*
	 * Free per-module TLS blocks that lie outside [p, p_end): those
	 * were dynamically allocated by _rtld_tls_module_allocate.
	 * Pointers inside the range are part of the static area and are
	 * released with the TCB allocation below.  (NULL entries fall
	 * outside the range too; xfree(NULL) is assumed harmless here.)
	 */
328 1.11 joerg 	max_index = DTV_MAX_INDEX(tcb->tcb_dtv);
329 1.11 joerg 	for (i = 1; i <= max_index; ++i) {
330 1.11 joerg 		if ((uint8_t *)tcb->tcb_dtv[i] < p ||
331 1.11 joerg 		    (uint8_t *)tcb->tcb_dtv[i] >= p_end)
332 1.11 joerg 			xfree(tcb->tcb_dtv[i]);
333 1.11 joerg 	}
334 1.20 riastrad 	xfree(tcb->tcb_dtv - 1);	/* retreat back to DTV_MAX_INDEX */
335 1.1 joerg 	xfree(p);
336 1.4 joerg
337 1.5 joerg 	_rtld_exclusive_exit(&mask);
338 1.1 joerg }
339 1.1 joerg
340 1.20 riastrad /*
341 1.20 riastrad * _rtld_tls_module_allocate(tcb, idx)
342 1.20 riastrad *
343 1.20 riastrad * Allocate thread-local storage in the thread with the given TCB
344 1.20 riastrad * (thread control block) for the object obj whose obj->tlsindex
345 1.20 riastrad * is idx.
346 1.20 riastrad *
347 1.20 riastrad * If obj has had space in static TLS reserved (obj->tls_static),
348 1.20 riastrad * return a pointer into that. Otherwise, allocate a TLS block,
349 1.20 riastrad * mark obj as having a TLS block allocated (obj->tls_dynamic),
350 1.20 riastrad * and return it.
351 1.20 riastrad *
352 1.20 riastrad * Called by _rtld_tls_get_addr to get the thread-local storage
353 1.20 riastrad * for an object the first time around.
354 1.20 riastrad */
355 1.18 joerg static void *
356 1.18 joerg _rtld_tls_module_allocate(struct tls_tcb *tcb, size_t idx)
357 1.1 joerg {
358 1.1 joerg 	Obj_Entry *obj;
359 1.1 joerg 	uint8_t *p;
360 1.1 joerg
	/* Linear search of the global object list for the module with this index. */
361 1.1 joerg 	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
362 1.1 joerg 		if (obj->tlsindex == idx)
363 1.1 joerg 			break;
364 1.1 joerg 	}
365 1.1 joerg 	if (obj == NULL) {
366 1.1 joerg 		_rtld_error("Module for TLS index %zu missing", idx);
367 1.1 joerg 		_rtld_die();
368 1.1 joerg 	}
	/* Static reservation: point into this thread's static TLS area. */
369 1.18 joerg 	if (obj->tls_static) {
370 1.18 joerg #ifdef __HAVE_TLS_VARIANT_I
371 1.19 joerg 		p = (uint8_t *)tcb + obj->tlsoffset + sizeof(struct tls_tcb);
372 1.18 joerg #else
373 1.18 joerg 		p = (uint8_t *)tcb - obj->tlsoffset;
374 1.18 joerg #endif
375 1.18 joerg 		return p;
376 1.18 joerg 	}
377 1.1 joerg
	/* Dynamic case: fresh block, init image first, zero-fill the rest. */
378 1.1 joerg 	p = xmalloc(obj->tlssize);
379 1.1 joerg 	memcpy(p, obj->tlsinit, obj->tlsinitsize);
380 1.1 joerg 	memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
381 1.1 joerg
	/* Remember that this object now owns dynamic TLS blocks; this
	 * forbids a later static offset allocation (see
	 * _rtld_tls_offset_allocate). */
382 1.18 joerg 	obj->tls_dynamic = 1;
383 1.18 joerg
384 1.1 joerg 	return p;
385 1.1 joerg }
386 1.1 joerg
387 1.20 riastrad /*
388 1.20 riastrad * _rtld_tls_offset_allocate(obj)
389 1.20 riastrad *
390 1.20 riastrad * Allocate a static thread-local storage offset for obj.
391 1.20 riastrad *
392 1.20 riastrad * Called by _rtld at startup for all initial objects. Called
393 1.20 riastrad * also by MD relocation logic, which is allowed (for Mesa) to
394 1.20 riastrad * allocate an additional 64 bytes (RTLD_STATIC_TLS_RESERVATION)
395 1.20 riastrad * of static thread-local storage in dlopened objects.
396 1.20 riastrad */
397 1.1 joerg int
398 1.1 joerg _rtld_tls_offset_allocate(Obj_Entry *obj)
399 1.1 joerg {
400 1.1 joerg 	size_t offset, next_offset;
401 1.1 joerg
	/* Threads may already hold dynamic blocks for obj; can't go static now. */
402 1.18 joerg 	if (obj->tls_dynamic)
403 1.18 joerg 		return -1;
404 1.18 joerg
	/* Already assigned a static offset: nothing to do. */
405 1.18 joerg 	if (obj->tls_static)
406 1.1 joerg 		return 0;
407 1.1 joerg 	if (obj->tlssize == 0) {
408 1.1 joerg 		obj->tlsoffset = 0;
409 1.18 joerg 		obj->tls_static = 1;
410 1.1 joerg 		return 0;
411 1.1 joerg 	}
412 1.1 joerg
413 1.1 joerg #ifdef __HAVE_TLS_VARIANT_I
	/* Variant I: offsets grow up from the TCB; offset names the block start. */
414 1.1 joerg 	offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
415 1.1 joerg 	next_offset = offset + obj->tlssize;
416 1.1 joerg #else
	/* Variant II: offsets grow down from the TCB; offset names the block end,
	 * so the size is added before aligning. */
417 1.1 joerg 	offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
418 1.1 joerg 	    obj->tlsalign);
419 1.1 joerg 	next_offset = offset;
420 1.1 joerg #endif
421 1.1 joerg
422 1.1 joerg 	/*
423 1.1 joerg 	 * Check if the static allocation was already done.
424 1.1 joerg 	 * This happens if dynamically loaded modules want to use
425 1.1 joerg 	 * static TLS space.
426 1.1 joerg 	 *
427 1.1 joerg 	 * XXX Keep an actual free list and callbacks for initialisation.
428 1.1 joerg 	 */
429 1.1 joerg 	if (_rtld_tls_static_space) {
430 1.1 joerg 		if (obj->tlsinitsize) {
431 1.1 joerg 			_rtld_error("%s: Use of initialized "
432 1.7 joerg 			    "Thread Local Storage with model initial-exec "
433 1.1 joerg 			    "and dlopen is not supported",
434 1.1 joerg 			    obj->path);
435 1.1 joerg 			return -1;
436 1.1 joerg 		}
437 1.1 joerg 		if (next_offset > _rtld_tls_static_space) {
438 1.1 joerg 			_rtld_error("%s: No space available "
439 1.1 joerg 			    "for static Thread Local Storage",
440 1.1 joerg 			    obj->path);
441 1.1 joerg 			return -1;
442 1.1 joerg 		}
443 1.1 joerg 	}
444 1.1 joerg 	obj->tlsoffset = offset;
445 1.16 riastrad 	dbg(("%s: static tls offset 0x%zx size %zu\n",
446 1.16 riastrad 	    obj->path, obj->tlsoffset, obj->tlssize));
447 1.1 joerg 	_rtld_tls_static_offset = next_offset;
448 1.18 joerg 	obj->tls_static = 1;
449 1.1 joerg
450 1.1 joerg 	return 0;
451 1.1 joerg }
452 1.1 joerg
453 1.20 riastrad /*
454 1.20 riastrad * _rtld_tls_offset_free(obj)
455 1.20 riastrad *
456 1.20 riastrad * Free a static thread-local storage offset for obj.
457 1.20 riastrad *
458 1.20 riastrad * Called by dlclose (via _rtld_unload_object -> _rtld_obj_free).
459 1.20 riastrad *
460 1.20 riastrad * Since static thread-local storage is normally not used by
461 1.20 riastrad * dlopened objects (with the exception of Mesa), this doesn't do
462 1.20 riastrad * anything to recycle the space right now.
463 1.20 riastrad */
464 1.1 joerg void
465 1.1 joerg _rtld_tls_offset_free(Obj_Entry *obj)
466 1.1 joerg {
467 1.1 joerg
468 1.1 joerg 	/*
469 1.1 joerg 	 * XXX See above.
470 1.1 joerg 	 */
	/* Only the flag is cleared; the offset itself is not recycled. */
471 1.18 joerg 	obj->tls_static = 0;
472 1.1 joerg 	return;
473 1.1 joerg }
474 1.1 joerg
475 1.12 rin #if defined(__HAVE_COMMON___TLS_GET_ADDR) && defined(RTLD_LOADER)
476 1.2 joerg /*
477 1.20 riastrad * __tls_get_addr(tlsindex)
478 1.20 riastrad *
479 1.20 riastrad * Symbol directly called by code generated by the compiler for
480 1.20 riastrad  * references to thread-local storage in the general-dynamic or
481 1.20 riastrad * local-dynamic TLS models (but not initial-exec or local-exec).
482 1.20 riastrad *
483 1.20 riastrad * The argument is a pointer to
484 1.20 riastrad *
485 1.20 riastrad * struct {
486 1.20 riastrad * unsigned long int ti_module;
487 1.20 riastrad * unsigned long int ti_offset;
488 1.20 riastrad * };
489 1.20 riastrad *
490 1.20 riastrad * as in, e.g., [ELFTLS] Sec. 3.4.3. This coincides with the
491 1.20 riastrad * type size_t[2] on all architectures that use this common
492 1.20 riastrad * __tls_get_addr definition (XXX but why do we write it as
493 1.20 riastrad * size_t[2]?).
494 1.20 riastrad *
495 1.20 riastrad * ti_module, i.e., arg[0], is the obj->tlsindex assigned at
496 1.20 riastrad * load-time by _rtld_map_object, and ti_offset, i.e., arg[1], is
497 1.20 riastrad * assigned at link-time by ld(1), possibly adjusted by
498 1.20 riastrad * TLS_DTV_OFFSET.
499 1.20 riastrad *
500 1.20 riastrad * Some architectures -- specifically IA-64 -- use a different
501 1.20 riastrad * calling convention. Some architectures -- specifically i386
502 1.20 riastrad * -- also use another entry point ___tls_get_addr (that's three
503 1.20 riastrad * leading underscores) with a different calling convention.
504 1.2 joerg */
505 1.2 joerg void *
506 1.2 joerg __tls_get_addr(void *arg_)
507 1.2 joerg {
508 1.2 joerg 	size_t *arg = (size_t *)arg_;
509 1.2 joerg 	void **dtv;
510 1.3 matt #ifdef __HAVE___LWP_GETTCB_FAST
511 1.3 matt 	struct tls_tcb * const tcb = __lwp_gettcb_fast();
512 1.3 matt #else
513 1.3 matt 	struct tls_tcb * const tcb = __lwp_getprivate_fast();
514 1.3 matt #endif
	/* arg[1] carries the per-port DTV bias; undo it here (see TLS_DTV_OFFSET). */
515 1.3 matt 	size_t idx = arg[0], offset = arg[1] + TLS_DTV_OFFSET;
516 1.2 joerg
517 1.2 joerg 	dtv = tcb->tcb_dtv;
518 1.2 joerg
519 1.20 riastrad 	/*
520 1.20 riastrad 	 * Fast path: access to an already allocated DTV entry. This
521 1.20 riastrad 	 * checks the current limit and the entry without needing any
522 1.20 riastrad 	 * locking. Entries are only freed on dlclose() and it is an
523 1.20 riastrad 	 * application bug if code of the module is still running at
524 1.20 riastrad 	 * that point.
525 1.20 riastrad 	 */
526 1.22 riastrad 	if (__predict_true(idx <= DTV_MAX_INDEX(dtv) && dtv[idx] != NULL))
527 1.2 joerg 		return (uint8_t *)dtv[idx] + offset;
528 1.2 joerg
	/* Slow path: take the lock, maybe grow the DTV and allocate the block. */
529 1.2 joerg 	return _rtld_tls_get_addr(tcb, idx, offset);
530 1.2 joerg }
531 1.2 joerg #endif
532 1.2 joerg
533 1.1 joerg #endif /* __HAVE_TLS_VARIANT_I || __HAVE_TLS_VARIANT_II */
534