/*	$NetBSD: tls.c,v 1.28 2026/01/17 10:47:45 skrll Exp $	*/
/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Joerg Sonnenberger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: tls.c,v 1.28 2026/01/17 10:47:45 skrll Exp $");

/*
 * Thread-local storage
 *
 * Reference:
 *
 *	[ELFTLS] Ulrich Drepper, `ELF Handling For Thread-Local
 *	Storage', Version 0.21, 2023-08-22.
 *	https://akkadia.org/drepper/tls.pdf
 *	https://web.archive.org/web/20240718081934/https://akkadia.org/drepper/tls.pdf
 */

#include <sys/param.h>
#include <sys/ucontext.h>
#include <lwp.h>
#include <stdalign.h>
#include <stddef.h>
#include <string.h>
#include "debug.h"
#include "rtld.h"

#include <machine/lwp_private.h>

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)

static struct tls_tcb *_rtld_tls_allocate_locked(void);
static void *_rtld_tls_module_allocate(struct tls_tcb *, size_t);

/* A macro to test correct alignment of a pointer. */
#define ALIGNED_P(ptr, algnmt)	((algnmt) == 0 || ((uintptr_t)(ptr) & ((algnmt) - 1)) == 0)
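
/*
 * For example, ALIGNED_P(p, 16) is true iff the low four bits of p
 * are zero; ALIGNED_P(p, 0) and ALIGNED_P(p, 1) are trivially true.
 * The alignment is assumed to be zero or a power of two.
 */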

/*
 * DTV offset
 *
 *	On some architectures (m68k, mips, or1k, powerpc, and riscv),
 *	the DTV offsets passed to __tls_get_addr have a bias relative
 *	to the start of the DTV, in order to maximize the range of TLS
 *	offsets that can be used by instruction encodings with signed
 *	displacements.
 */
#ifndef TLS_DTV_OFFSET
#define	TLS_DTV_OFFSET	0
#endif
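
/*
 * For example, mips and powerpc bias the offsets by 0x8000 (see
 * [ELFTLS]), so a 16-bit signed displacement can reach the first
 * 64 KB of a module's TLS block instead of only the first 32 KB.
 */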

#if defined(__HAVE_TLS_VARIANT_I)
#define _RTLD_TLS_INITIAL_OFFSET	sizeof(struct tls_tcb)
#endif
#if defined(__HAVE_TLS_VARIANT_II)
#define _RTLD_TLS_INITIAL_OFFSET	0
#endif
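
/*
 * The initial offset reserves room for the TCB itself under Variant I,
 * where the TCB sits at the start of the static TLS area and module
 * offsets grow upward from it; under Variant II the TCB sits at the
 * end and module offsets are subtracted from the TCB address, so no
 * initial offset is needed:
 *
 *	Variant I:	[ TCB | static TLS blocks ... ]
 *	Variant II:	[ ... static TLS blocks | TCB ]
 */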

static size_t _rtld_tls_static_space;	/* Static TLS space allocated */
static size_t _rtld_tls_static_offset =
	_RTLD_TLS_INITIAL_OFFSET;	/* Next offset for static TLS to use */
static size_t _rtld_tls_static_max_align;

size_t _rtld_tls_dtv_generation = 1;	/* Bumped on each load of obj w/ TLS */
size_t _rtld_tls_max_index = 1;		/* Max index into up-to-date DTV */

/*
 * DTV -- Dynamic Thread Vector
 *
 *	The DTV is a per-thread array that maps each module with
 *	thread-local storage to a pointer into part of the thread's TCB
 *	(thread control block), or dynamically loaded TLS blocks,
 *	reserved for that module's storage.
 *
 *	The TCB itself, struct tls_tcb, has a pointer to the DTV at
 *	tcb->tcb_dtv.
 *
 *	The layout is:
 *
 *		+---------------+
 *		| max index     | -1    max index i for which dtv[i] is alloced
 *		+---------------+
 *		| generation    |  0    void **dtv points here
 *		+---------------+
 *		| obj 1 tls ptr |  1    TLS pointer for obj w/ obj->tlsindex 1
 *		+---------------+
 *		| obj 2 tls ptr |  2    TLS pointer for obj w/ obj->tlsindex 2
 *		+---------------+
 *		  .
 *		  .
 *		  .
 *
 *	The values of obj->tlsindex start at 1; this way,
 *	dtv[obj->tlsindex] works while dtv[0] holds the generation.
 *	The TLS pointers go either into the static thread-local
 *	storage, for the initial objects (i.e., those loaded at
 *	startup), or into TLS blocks dynamically allocated for objects
 *	that are dynamically loaded by dlopen.
 *
 *	The generation field is a cache of the global generation number
 *	_rtld_tls_dtv_generation, which is bumped every time an object
 *	with TLS is loaded in _rtld_map_object, and cached by
 *	__tls_get_addr (via _rtld_tls_get_addr) when a newly loaded
 *	module lies outside the bounds of the current DTV.
 *
 *	XXX Why do we keep max index and generation separately?  They
 *	appear to be initialized the same, always incremented together,
 *	and always stored together.
 *
 *	XXX Why is this not a struct?
 *
 *		struct dtv {
 *			size_t	dtv_gen;
 *			void	*dtv_module[];
 *		};
 */
#define	DTV_GENERATION(dtv)		((size_t)((dtv)[0]))
#define	DTV_MAX_INDEX(dtv)		((size_t)((dtv)[-1]))
#define	SET_DTV_GENERATION(dtv, val)	(dtv)[0] = (void *)(size_t)(val)
#define	SET_DTV_MAX_INDEX(dtv, val)	(dtv)[-1] = (void *)(size_t)(val)
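
/*
 * Illustrative sketch (not compiled): for some Obj_Entry *obj, a
 * thread's DTV is read through these macros as
 *
 *	void **dtv = tcb->tcb_dtv;
 *	size_t gen = DTV_GENERATION(dtv);	(dtv[0])
 *	size_t max = DTV_MAX_INDEX(dtv);	(dtv[-1])
 *	void *block = dtv[obj->tlsindex];	(module's TLS pointer)
 */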

/*
 * _rtld_tls_get_addr(tcb, idx, offset)
 *
 *	Slow path for __tls_get_addr (see below), called to allocate
 *	TLS space if needed for the object obj with obj->tlsindex idx,
 *	at offset, which must be below obj->tlssize.
 *
 *	This may allocate a DTV if the current one is too old, and it
 *	may allocate a dynamically loaded TLS block if there isn't one
 *	already allocated for it.
 *
 *	XXX Why is the first argument passed as `void *tls' instead of
 *	just `struct tls_tcb *tcb'?
 */
void *
_rtld_tls_get_addr(void *tls, size_t idx, size_t offset)
{
	struct tls_tcb *tcb = tls;
	void **dtv, **new_dtv;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);

	dtv = tcb->tcb_dtv;

	/*
	 * If the generation number has changed, we have to allocate a
	 * new DTV.
	 *
	 * XXX Do we really?  Isn't it enough to check whether idx <=
	 * DTV_MAX_INDEX(dtv)?
	 */
	if (__predict_false(DTV_GENERATION(dtv) != _rtld_tls_dtv_generation)) {
		size_t to_copy = DTV_MAX_INDEX(dtv);

		/*
		 * "2 +" because there are two leading elements: the
		 * maximum index (dtv[-1]) followed by the generation
		 * (dtv[0]).
		 */
		new_dtv = xcalloc((2 + _rtld_tls_max_index) * sizeof(*dtv));
		++new_dtv;		/* advance past DTV_MAX_INDEX */
		if (to_copy > _rtld_tls_max_index)	/* XXX How? */
			to_copy = _rtld_tls_max_index;
		memcpy(new_dtv + 1, dtv + 1, to_copy * sizeof(*dtv));
		xfree(dtv - 1);		/* retreat back to DTV_MAX_INDEX */
		dtv = tcb->tcb_dtv = new_dtv;
		SET_DTV_MAX_INDEX(dtv, _rtld_tls_max_index);
		SET_DTV_GENERATION(dtv, _rtld_tls_dtv_generation);
	}

	if (__predict_false(dtv[idx] == NULL))
		dtv[idx] = _rtld_tls_module_allocate(tcb, idx);

	_rtld_exclusive_exit(&mask);

	return (uint8_t *)dtv[idx] + offset;
}
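
/*
 * For example, if a thread's DTV was built when _rtld_tls_max_index
 * was 3 and a fourth module with TLS has since been dlopened, the
 * first __tls_get_addr call for that module lands here: it allocates
 * a new DTV of 2 + 4 pointers, copies entries 1..3, and installs the
 * new module's block at dtv[4].
 */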

/*
 * _rtld_tls_initial_allocation()
 *
 *	Allocate the TCB (thread control block) for the initial thread,
 *	once the static TLS space usage has been determined (plus some
 *	slop to allow certain special cases like Mesa to be dlopened).
 *
 *	This must be done _after_ all initial objects (i.e., those
 *	loaded at startup, as opposed to objects dynamically loaded by
 *	dlopen) have had TLS offsets allocated if need be by
 *	_rtld_tls_offset_allocate, and have had relocations processed.
 */
void
_rtld_tls_initial_allocation(void)
{
	struct tls_tcb *tcb;

	_rtld_tls_static_space = _rtld_tls_static_offset +
	    RTLD_STATIC_TLS_RESERVATION;

#ifndef __HAVE_TLS_VARIANT_I
	_rtld_tls_static_space = roundup2(_rtld_tls_static_space,
	    alignof(max_align_t));
#endif
	dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));

	tcb = _rtld_tls_allocate_locked();
#ifdef __HAVE___LWP_SETTCB
	__lwp_settcb(tcb);
#else
	_lwp_setprivate(tcb);
#endif
}

/*
 * _rtld_tls_allocate_locked()
 *
 *	Internal subroutine to allocate a TCB (thread control block)
 *	for the current thread.
 *
 *	This allocates a DTV and a TCB that points to it, including
 *	static space in the TCB for the TLS of the initial objects.
 *	TLS blocks for dynamically loaded objects are allocated lazily.
 *
 *	Caller must either be single-threaded (at startup via
 *	_rtld_tls_initial_allocation) or hold the rtld exclusive lock
 *	(via _rtld_tls_allocate).
 */
static struct tls_tcb *
_rtld_tls_allocate_locked(void)
{
	Obj_Entry *obj;
	struct tls_tcb *tcb;
	uint8_t *p, *q;

	p = xmalloc_aligned(_rtld_tls_static_space + sizeof(struct tls_tcb),
	    _rtld_tls_static_max_align, 0);

	memset(p, 0, _rtld_tls_static_space + sizeof(struct tls_tcb));
#ifdef __HAVE_TLS_VARIANT_I
	tcb = (struct tls_tcb *)p;
	p += sizeof(struct tls_tcb);
#else
	p += _rtld_tls_static_space;
	tcb = (struct tls_tcb *)p;
	tcb->tcb_self = tcb;
#endif
	dbg(("lwp %d tls tcb %p", _lwp_self(), tcb));
	/*
	 * "2 +" because there are two leading elements: the maximum
	 * index (dtv[-1]) followed by the generation (dtv[0]).
	 */
	tcb->tcb_dtv = xcalloc(sizeof(*tcb->tcb_dtv) * (2 + _rtld_tls_max_index));
	++tcb->tcb_dtv;		/* advance past DTV_MAX_INDEX */
	SET_DTV_MAX_INDEX(tcb->tcb_dtv, _rtld_tls_max_index);
	SET_DTV_GENERATION(tcb->tcb_dtv, _rtld_tls_dtv_generation);

	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
		if (obj->tls_static) {
#ifdef __HAVE_TLS_VARIANT_I
			q = p + obj->tlsoffset;
#else
			q = p - obj->tlsoffset;
#endif
			dbg(("%s: [lwp %d] tls dtv %p-%p index %zu "
			    "offset %zx alignment %zx tlsinit %p%s",
			    obj->path, _lwp_self(),
			    q, q + obj->tlsinitsize, obj->tlsindex,
			    obj->tlsoffset, obj->tlsalign, obj->tlsinit,
			    ALIGNED_P(q, obj->tlsalign) ? "" :
				 " BAD ALIGNMENT"));
			if (obj->tlsinitsize)
				memcpy(q, obj->tlsinit, obj->tlsinitsize);
			tcb->tcb_dtv[obj->tlsindex] = q;
		}
	}

	return tcb;
}
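
/*
 * Worked example (hypothetical sizes): under Variant II with
 * _rtld_tls_static_space 48, xmalloc_aligned returns some base b; the
 * TCB lives at b + 48, and an object with tlsoffset 32 gets its block
 * at (b + 48) - 32 = b + 16.  Under Variant I the blocks instead grow
 * upward from b + sizeof(struct tls_tcb).
 */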

/*
 * _rtld_tls_allocate()
 *
 *	Allocate a TCB (thread control block) for the current thread.
 *
 *	Called by pthread_create for non-initial threads.  (The initial
 *	thread's TCB is allocated by _rtld_tls_initial_allocation.)
 */
struct tls_tcb *
_rtld_tls_allocate(void)
{
	struct tls_tcb *tcb;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);
	tcb = _rtld_tls_allocate_locked();
	_rtld_exclusive_exit(&mask);

	return tcb;
}

/*
 * _rtld_tls_free(tcb)
 *
 *	Free a TCB allocated with _rtld_tls_allocate.
 *
 *	Frees any TLS blocks for dynamically loaded objects that tcb's
 *	DTV points to, and frees tcb's DTV, and frees tcb.
 */
void
_rtld_tls_free(struct tls_tcb *tcb)
{
	size_t i, max_index;
	uint8_t *p, *p_end;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);

#ifdef __HAVE_TLS_VARIANT_I
	p = (uint8_t *)tcb;
#else
	p = (uint8_t *)tcb - _rtld_tls_static_space;
#endif
	p_end = p + _rtld_tls_static_space;

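	/*
	 * Free only the dynamically allocated TLS blocks; pointers
	 * into [p, p_end) lie in the static TLS area, which is freed
	 * below as part of the same allocation as the TCB.
	 */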
	max_index = DTV_MAX_INDEX(tcb->tcb_dtv);
	for (i = 1; i <= max_index; ++i) {
		if ((uint8_t *)tcb->tcb_dtv[i] < p ||
		    (uint8_t *)tcb->tcb_dtv[i] >= p_end)
			xfree(tcb->tcb_dtv[i]);
	}
	xfree(tcb->tcb_dtv - 1);	/* retreat back to DTV_MAX_INDEX */
	xfree(p);

	_rtld_exclusive_exit(&mask);
}

/*
 * _rtld_tls_module_allocate(tcb, idx)
 *
 *	Allocate thread-local storage in the thread with the given TCB
 *	(thread control block) for the object obj whose obj->tlsindex
 *	is idx.
 *
 *	If obj has had space in static TLS reserved (obj->tls_static),
 *	return a pointer into that.  Otherwise, allocate a TLS block,
 *	mark obj as having a TLS block allocated (obj->tls_dynamic),
 *	and return it.
 *
 *	Called by _rtld_tls_get_addr to get the thread-local storage
 *	for an object the first time around.
 */
static void *
_rtld_tls_module_allocate(struct tls_tcb *tcb, size_t idx)
{
	Obj_Entry *obj;
	uint8_t *p;

	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
		if (obj->tlsindex == idx)
			break;
	}
	if (obj == NULL) {
		_rtld_error("Module for TLS index %zu missing", idx);
		_rtld_die();
	}
	if (obj->tls_static) {
#ifdef __HAVE_TLS_VARIANT_I
		p = (uint8_t *)tcb + obj->tlsoffset + sizeof(struct tls_tcb);
#else
		p = (uint8_t *)tcb - obj->tlsoffset;
#endif
		return p;
	}

	p = xmalloc_aligned(obj->tlssize, obj->tlsalign, 0);
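	/*
	 * Initialize the new block from the ELF TLS image: the first
	 * obj->tlsinitsize bytes come from the PT_TLS segment (.tdata)
	 * and the remainder is zero-filled (.tbss).
	 */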
	memcpy(p, obj->tlsinit, obj->tlsinitsize);
	memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);

	obj->tls_dynamic = 1;

	return p;
}

/*
 * _rtld_tls_offset_allocate(obj)
 *
 *	Allocate a static thread-local storage offset for obj.
 *
 *	Called by _rtld at startup for all initial objects.  Called
 *	also by MD relocation logic, which is allowed (for Mesa) to
 *	allocate an additional 64 bytes (RTLD_STATIC_TLS_RESERVATION)
 *	of static thread-local storage in dlopened objects.
 */
int
_rtld_tls_offset_allocate(Obj_Entry *obj)
{
	size_t offset, next_offset;

	if (obj->tls_dynamic)
		return -1;

	if (obj->tls_static)
		return 0;

	if (obj->tlssize == 0) {
		obj->tlsoffset = 0;
		obj->tls_static = 1;
		return 0;
	}

#ifdef __HAVE_TLS_VARIANT_I
	offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
	next_offset = offset + obj->tlssize;
	offset -= sizeof(struct tls_tcb);
#else
	offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
	    obj->tlsalign);
	next_offset = offset;
#endif

	/*
	 * Check if the static allocation was already done.
	 * This happens if dynamically loaded modules want to use
	 * static TLS space.
	 *
	 * XXX Keep an actual free list and callbacks for initialisation.
	 */
	if (_rtld_tls_static_space) {
		if (obj->tlsinitsize) {
			_rtld_error("%s: Use of initialized "
			    "Thread Local Storage with model initial-exec "
			    "and dlopen is not supported",
			    obj->path);
			return -1;
		}
		if (next_offset > _rtld_tls_static_space) {
			_rtld_error("%s: No space available "
			    "for static Thread Local Storage",
			    obj->path);
			return -1;
		}
	}
	if (obj->tlsalign > _rtld_tls_static_max_align) {
		_rtld_tls_static_max_align = obj->tlsalign;
	}
	obj->tlsoffset = offset;
	dbg(("%s: static tls offset 0x%zx size %zu align %zu (%zx/%zx)",
	    obj->path, obj->tlsoffset, obj->tlssize, obj->tlsalign,
	    _rtld_tls_static_offset, next_offset));
	_rtld_tls_static_offset = next_offset;
	obj->tls_static = 1;

	return 0;
}
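
/*
 * Worked example (hypothetical sizes): under Variant II, with
 * _rtld_tls_static_offset 8 and an object with tlssize 16 and
 * tlsalign 16, offset = roundup2(8 + 16, 16) = 32, so the object's
 * block occupies [tcb - 32, tcb - 16).  Under Variant I with a
 * 16-byte TCB (so the initial static offset is 16), offset =
 * roundup2(16, 16) - 16 = 0, and the block starts right after the
 * TCB.
 */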

/*
 * _rtld_tls_offset_free(obj)
 *
 *	Free a static thread-local storage offset for obj.
 *
 *	Called by dlclose (via _rtld_unload_object -> _rtld_obj_free).
 *
 *	Since static thread-local storage is normally not used by
 *	dlopened objects (with the exception of Mesa), this doesn't do
 *	anything to recycle the space right now.
 */
void
_rtld_tls_offset_free(Obj_Entry *obj)
{

	/*
	 * XXX See above.
	 */
	obj->tls_static = 0;
	return;
}

#if defined(__HAVE_COMMON___TLS_GET_ADDR) && defined(RTLD_LOADER)
/*
 * __tls_get_addr(tlsindex)
 *
 *	Symbol directly called by code generated by the compiler for
 *	references to thread-local storage in the general-dynamic or
 *	local-dynamic TLS models (but not initial-exec or local-exec).
 *
 *	The argument is a pointer to
 *
 *		struct {
 *			unsigned long int ti_module;
 *			unsigned long int ti_offset;
 *		};
 *
 *	as in, e.g., [ELFTLS] Sec. 3.4.3.  This coincides with the
 *	type size_t[2] on all architectures that use this common
 *	__tls_get_addr definition (XXX but why do we write it as
 *	size_t[2]?).
 *
 *	ti_module, i.e., arg[0], is the obj->tlsindex assigned at
 *	load-time by _rtld_map_object, and ti_offset, i.e., arg[1], is
 *	assigned at link-time by ld(1), possibly adjusted by
 *	TLS_DTV_OFFSET.
 *
 *	Some architectures -- specifically IA-64 -- use a different
 *	calling convention.  Some architectures -- specifically i386
 *	-- also use another entry point ___tls_get_addr (that's three
 *	leading underscores) with a different calling convention.
 */
void *
__tls_get_addr(void *arg_)
{
	size_t *arg = (size_t *)arg_;
	void **dtv;
#ifdef __HAVE___LWP_GETTCB_FAST
	struct tls_tcb * const tcb = __lwp_gettcb_fast();
#else
	struct tls_tcb * const tcb = __lwp_getprivate_fast();
#endif
	size_t idx = arg[0], offset = arg[1] + TLS_DTV_OFFSET;

	dtv = tcb->tcb_dtv;

	/*
	 * Fast path: access to an already allocated DTV entry.  This
	 * checks the current limit and the entry without needing any
	 * locking.  Entries are only freed on dlclose() and it is an
	 * application bug if code of the module is still running at
	 * that point.
	 */
	if (__predict_true(idx <= DTV_MAX_INDEX(dtv) && dtv[idx] != NULL))
		return (uint8_t *)dtv[idx] + offset;

	return _rtld_tls_get_addr(tcb, idx, offset);
}
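
/*
 * Illustrative sketch: for a general-dynamic access to a thread-local
 * variable x, the compiler materializes the address of x's
 * {ti_module, ti_offset} pair from the GOT and calls this function,
 * e.g. on x86-64 (padding for linker relaxation omitted):
 *
 *	leaq	x@tlsgd(%rip), %rdi
 *	call	__tls_get_addr@plt
 *
 * The return value is the address of x in the calling thread.
 */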
#endif

#endif /* __HAVE_TLS_VARIANT_I || __HAVE_TLS_VARIANT_II */