elf64-x86-64.c revision 1.5 1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh (at) suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
45 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
46 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
47 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
48 since they are the same. */
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
/* The relocation "howto" table.  Order of fields:
   type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
   special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset.

   The "size" field is a BFD size code, not a byte count: 0 = 1 byte,
   1 = 2 bytes, 2 = 4 bytes, 3 = no data, 4 = 8 bytes.

   NOTE(review): entries must stay in R_X86_64_* numerical order up to
   R_X86_64_standard; elf_x86_64_rtype_to_howto indexes this table
   directly by relocation number and asserts that the types match.  */
static reloc_howto_type x86_64_elf_howto_table[] =
{
  HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_NONE",	FALSE, 0x00000000, 0x00000000,
	FALSE),
  HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
	FALSE),
  HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
  HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
  HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
  HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
  HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
	0xffffffff, FALSE),
  HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
	0xffffffff, FALSE),
  HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
	TRUE),
  HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
	FALSE, MINUS_ONE, MINUS_ONE, FALSE),
  HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
	FALSE, 0xffffffff, 0xffffffff, TRUE),
  HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
	FALSE),
  HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
	MINUS_ONE, TRUE),
  HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
	FALSE, MINUS_ONE, MINUS_ONE, TRUE),
  HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
	FALSE),
  HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
	complain_overflow_bitfield, bfd_elf_generic_reloc,
	"R_X86_64_GOTPC32_TLSDESC",
	FALSE, 0xffffffff, 0xffffffff, TRUE),
  HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
	complain_overflow_dont, bfd_elf_generic_reloc,
	"R_X86_64_TLSDESC_CALL",
	FALSE, 0, 0, FALSE),
  HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
	complain_overflow_bitfield, bfd_elf_generic_reloc,
	"R_X86_64_TLSDESC",
	FALSE, MINUS_ONE, MINUS_ONE, FALSE),
  HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
	0xffffffff, TRUE),

  /* We have a gap in the reloc numbers here.
     R_X86_64_standard counts the number up to this point, and
     R_X86_64_vt_offset is the value to subtract from a reloc type of
     R_X86_64_GNU_VT* to form an index into this table.  */
#define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
#define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)

  /* GNU extension to record C++ vtable hierarchy.  */
  HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
	 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),

  /* GNU extension to record C++ vtable member usage.  */
  HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
	 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
	 FALSE),

  /* Use complain_overflow_bitfield on R_X86_64_32 for x32.  This entry
     lives at the very end of the table so elf_x86_64_rtype_to_howto and
     elf_x86_64_reloc_name_lookup can find it with ARRAY_SIZE - 1.  */
  HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
	FALSE)
};
208
/* TRUE for the PC-relative data relocations (not the PLT/GOT PC-relative
   forms, which have their own handling).  */
#define IS_X86_64_PCREL_TYPE(TYPE)	\
  (   ((TYPE) == R_X86_64_PC8)		\
   || ((TYPE) == R_X86_64_PC16)		\
   || ((TYPE) == R_X86_64_PC32)		\
   || ((TYPE) == R_X86_64_PC32_BND)	\
   || ((TYPE) == R_X86_64_PC64))

/* Map BFD relocs to the x86_64 elf relocs.  One entry per supported
   BFD_RELOC_* code; see x86_64_reloc_map below.  */
struct elf_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;   /* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;              /* Corresponding R_X86_64_*.  */
};
222
/* Translation table from BFD_RELOC_* codes to R_X86_64_* numbers,
   searched linearly by elf_x86_64_reloc_type_lookup.  Order is not
   significant for correctness.  */
static const struct elf_reloc_map x86_64_reloc_map[] =
{
  { BFD_RELOC_NONE,		R_X86_64_NONE, },
  { BFD_RELOC_64,		R_X86_64_64,   },
  { BFD_RELOC_32_PCREL,		R_X86_64_PC32, },
  { BFD_RELOC_X86_64_GOT32,	R_X86_64_GOT32,},
  { BFD_RELOC_X86_64_PLT32,	R_X86_64_PLT32,},
  { BFD_RELOC_X86_64_COPY,	R_X86_64_COPY, },
  { BFD_RELOC_X86_64_GLOB_DAT,	R_X86_64_GLOB_DAT, },
  { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
  { BFD_RELOC_X86_64_RELATIVE,	R_X86_64_RELATIVE, },
  { BFD_RELOC_X86_64_GOTPCREL,	R_X86_64_GOTPCREL, },
  { BFD_RELOC_32,		R_X86_64_32, },
  { BFD_RELOC_X86_64_32S,	R_X86_64_32S, },
  { BFD_RELOC_16,		R_X86_64_16, },
  { BFD_RELOC_16_PCREL,		R_X86_64_PC16, },
  { BFD_RELOC_8,		R_X86_64_8, },
  { BFD_RELOC_8_PCREL,		R_X86_64_PC8, },
  { BFD_RELOC_X86_64_DTPMOD64,	R_X86_64_DTPMOD64, },
  { BFD_RELOC_X86_64_DTPOFF64,	R_X86_64_DTPOFF64, },
  { BFD_RELOC_X86_64_TPOFF64,	R_X86_64_TPOFF64, },
  { BFD_RELOC_X86_64_TLSGD,	R_X86_64_TLSGD, },
  { BFD_RELOC_X86_64_TLSLD,	R_X86_64_TLSLD, },
  { BFD_RELOC_X86_64_DTPOFF32,	R_X86_64_DTPOFF32, },
  { BFD_RELOC_X86_64_GOTTPOFF,	R_X86_64_GOTTPOFF, },
  { BFD_RELOC_X86_64_TPOFF32,	R_X86_64_TPOFF32, },
  { BFD_RELOC_64_PCREL,		R_X86_64_PC64, },
  { BFD_RELOC_X86_64_GOTOFF64,	R_X86_64_GOTOFF64, },
  { BFD_RELOC_X86_64_GOTPC32,	R_X86_64_GOTPC32, },
  { BFD_RELOC_X86_64_GOT64,	R_X86_64_GOT64, },
  { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
  { BFD_RELOC_X86_64_GOTPC64,	R_X86_64_GOTPC64, },
  { BFD_RELOC_X86_64_GOTPLT64,	R_X86_64_GOTPLT64, },
  { BFD_RELOC_X86_64_PLTOFF64,	R_X86_64_PLTOFF64, },
  { BFD_RELOC_SIZE32,		R_X86_64_SIZE32, },
  { BFD_RELOC_SIZE64,		R_X86_64_SIZE64, },
  { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
  { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
  { BFD_RELOC_X86_64_TLSDESC,	R_X86_64_TLSDESC, },
  { BFD_RELOC_X86_64_IRELATIVE,	R_X86_64_IRELATIVE, },
  { BFD_RELOC_X86_64_PC32_BND,	R_X86_64_PC32_BND, },
  { BFD_RELOC_X86_64_PLT32_BND,	R_X86_64_PLT32_BND, },
  { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
  { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
  { BFD_RELOC_VTABLE_INHERIT,	R_X86_64_GNU_VTINHERIT, },
  { BFD_RELOC_VTABLE_ENTRY,	R_X86_64_GNU_VTENTRY, },
};
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
/* Given an x86_64 ELF reloc type, fill in an arelent structure.  */

static void
elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
			  Elf_Internal_Rela *dst)
{
  unsigned r_type;

  /* 32-bit and 64-bit x86-64 encode the relocation type identically,
     so ELF32_R_TYPE works for both ABIs (see comment near ABI_64_P).  */
  r_type = ELF32_R_TYPE (dst->r_info);
  cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
  /* Sanity-check that the table entry really describes R_TYPE.  */
  BFD_ASSERT (r_type == cache_ptr->howto->type);
}
352
/* Support for core dump NOTE sections.  */

/* Parse an NT_PRSTATUS note: record the signal and LWP id in the core
   tdata and expose the register block as a ".reg/nnn" pseudosection.
   Returns FALSE for an unrecognized note size.  */
static bfd_boolean
elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  /* The note size identifies which ABI's elf_prstatus layout we have;
     the field offsets below are hard-coded from those layouts.  */
  switch (note->descsz)
    {
    default:
      return FALSE;

    case 296:		/* sizeof(struct elf_prstatus) on Linux/x32 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg */
      offset = 72;
      size = 216;

      break;

    case 336:		/* sizeof(struct elf_prstatus) on Linux/x86_64 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
	= bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
	= bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 216;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
#ifdef CORE_HEADER
/* Write an NT_PRPSINFO or NT_PRSTATUS core note into BUF (of size
   *BUFSIZ), using the prpsinfo/prstatus layout selected by ABFD's ELF
   class and machine.  Variadic arguments depend on NOTE_TYPE:
     NT_PRPSINFO: const char *fname, const char *psargs.
     NT_PRSTATUS: long pid, int cursig, const void *gregs.
   Returns the updated buffer from elfcore_write_note, or NULL for an
   unsupported NOTE_TYPE.  Only compiled when CORE_HEADER supplies the
   prpsinfo*/prstatus* types.  */
static char *
elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
			    int note_type, ...)
{
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  va_list ap;
  const char *fname, *psargs;
  long pid;
  int cursig;
  const void *gregs;

  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      va_start (ap, note_type);
      fname = va_arg (ap, const char *);
      psargs = va_arg (ap, const char *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  prpsinfo32_t data;
	  memset (&data, 0, sizeof (data));
	  /* pr_fname/pr_psargs are fixed-size fields that need not be
	     NUL-terminated, matching the kernel's layout, so strncpy
	     is the intended operation here.  */
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      else
	{
	  prpsinfo64_t data;
	  memset (&data, 0, sizeof (data));
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      /* NOTREACHED */

    case NT_PRSTATUS:
      va_start (ap, note_type);
      pid = va_arg (ap, long);
      cursig = va_arg (ap, int);
      gregs = va_arg (ap, const void *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  /* ELFCLASS32 covers both i386 and x32; x32 (EM_X86_64) uses
	     the 64-bit register set despite the 32-bit class.  */
	  if (bed->elf_machine_code == EM_X86_64)
	    {
	      prstatusx32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	  else
	    {
	      prstatus32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	}
      else
	{
	  prstatus64_t prstat;
	  memset (&prstat, 0, sizeof (prstat));
	  prstat.pr_pid = pid;
	  prstat.pr_cursig = cursig;
	  memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &prstat, sizeof (prstat));
	}
    }
  /* NOTREACHED */
}
#endif
528
/* Functions for the x86-64 ELF linker.	 */

/* The name of the dynamic interpreter.	 This is put in the .interp
   section.  The 64-bit and x32 ABIs use different loaders.  */

#define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
#define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"

/* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
   copying dynamic variables from a shared lib into an app's dynbss
   section, and instead use a dynamic relocation to point into the
   shared lib.	*/
#define ELIMINATE_COPY_RELOCS 1

/* The size in bytes of an entry in the global offset table.  */

#define GOT_ENTRY_SIZE 8

/* The size in bytes of an entry in the procedure linkage table.  */

#define PLT_ENTRY_SIZE 16
551
/* The first entry in a procedure linkage table looks like this.  See the
   SVR4 ABI i386 supplement and the x86-64 ABI to see how this works.
   The 32-bit immediates (8 and 16 below) are GOT-relative displacements
   patched at final-link time via the plt0_got*_offset backend fields.  */

static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)  */
  0xff, 0x25, 16, 0, 0, 0,	/* jmpq *GOT+16(%rip) */
  0x0f, 0x1f, 0x40, 0x00	/* nopl 0(%rax)       */
};

/* Subsequent entries in a procedure linkage table look like this.  */

static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x68,		/* pushq immediate */
  0, 0, 0, 0,	/* replaced with index into relocation table.  */
  0xe9,		/* jmp relative */
  0, 0, 0, 0	/* replaced with offset to start of .plt0.  */
};

/* The first entry in a procedure linkage table with BND relocations
   like this.  */

static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	  /* pushq GOT+8(%rip)        */
  0xf2, 0xff, 0x25, 16, 0, 0, 0,  /* bnd jmpq *GOT+16(%rip)   */
  0x0f, 0x1f, 0			  /* nopl (%rax)              */
};

/* Subsequent entries for legacy branches in a procedure linkage table
   with BND relocations look like this.  */

static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
{
  0x68, 0, 0, 0, 0,		/* pushq immediate            */
  0xe9, 0, 0, 0, 0,		/* jmpq relative              */
  0x66, 0x0f, 0x1f, 0x44, 0, 0	/* nopw (%rax,%rax,1)         */
};

/* Subsequent entries for branches with BND prefix in a procedure linkage
   table with BND relocations look like this.  */

static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
{
  0x68, 0, 0, 0, 0,		/* pushq immediate            */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative          */
  0x0f, 0x1f, 0x44, 0, 0	/* nopl 0(%rax,%rax,1)        */
};

/* Entries for legacy branches in the second procedure linkage table
   look like this.  */

static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x66, 0x90	/* xchg %ax,%ax */
};

/* Entries for branches with BND prefix in the second procedure linkage
   table look like this.  */

static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
{
  0xf2, 0xff, 0x25,	/* bnd jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,		/* replaced with offset to this symbol in .got.  */
  0x90			/* nop */
};
623
/* .eh_frame covering the .plt section.  A hand-built CIE + FDE pair;
   the FDE's initial-location and range words are filled in later (see
   the "goes here" markers below).  */

static const bfd_byte elf_x86_64_eh_frame_plt[] =
{
#define PLT_CIE_LENGTH		20
#define PLT_FDE_LENGTH		36
#define PLT_FDE_START_OFFSET	4 + PLT_CIE_LENGTH + 8
#define PLT_FDE_LEN_OFFSET	4 + PLT_CIE_LENGTH + 12
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor (-8, SLEB128) */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
662
/* Architecture-specific backend data for x86-64.  Describes the PLT
   layout (templates plus patch offsets) so that the generic code can
   build both the standard and the BND-prefixed PLT variants.  */

struct elf_x86_64_backend_data
{
  /* Templates for the initial PLT entry and for subsequent entries.  */
  const bfd_byte *plt0_entry;
  const bfd_byte *plt_entry;
  unsigned int plt_entry_size;          /* Size of each PLT entry.  */

  /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2].  */
  unsigned int plt0_got1_offset;
  unsigned int plt0_got2_offset;

  /* Offset of the end of the PC-relative instruction containing
     plt0_got2_offset.  */
  unsigned int plt0_got2_insn_end;

  /* Offsets into plt_entry that are to be replaced with...  */
  unsigned int plt_got_offset;    /* ... address of this symbol in .got.  */
  unsigned int plt_reloc_offset;  /* ... offset into relocation table.  */
  unsigned int plt_plt_offset;    /* ... offset to start of .plt.  */

  /* Length of the PC-relative instruction containing plt_got_offset.  */
  unsigned int plt_got_insn_size;

  /* Offset of the end of the PC-relative jump to plt0_entry.  */
  unsigned int plt_plt_insn_end;

  /* Offset into plt_entry where the initial value of the GOT entry points.  */
  unsigned int plt_lazy_offset;

  /* .eh_frame covering the .plt section.  */
  const bfd_byte *eh_frame_plt;
  unsigned int eh_frame_plt_size;
};

#define get_elf_x86_64_arch_data(bed) \
  ((const struct elf_x86_64_backend_data *) (bed)->arch_data)

#define get_elf_x86_64_backend_data(abfd) \
  get_elf_x86_64_arch_data (get_elf_backend_data (abfd))

#define GET_PLT_ENTRY_SIZE(abfd) \
  get_elf_x86_64_backend_data (abfd)->plt_entry_size

/* These are the standard parameters.  The offsets index into
   elf_x86_64_plt0_entry/elf_x86_64_plt_entry above.  */
static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
  {
    elf_x86_64_plt0_entry,              /* plt0_entry */
    elf_x86_64_plt_entry,               /* plt_entry */
    sizeof (elf_x86_64_plt_entry),      /* plt_entry_size */
    2,                                  /* plt0_got1_offset */
    8,                                  /* plt0_got2_offset */
    12,                                 /* plt0_got2_insn_end */
    2,                                  /* plt_got_offset */
    7,                                  /* plt_reloc_offset */
    12,                                 /* plt_plt_offset */
    6,                                  /* plt_got_insn_size */
    PLT_ENTRY_SIZE,                     /* plt_plt_insn_end */
    6,                                  /* plt_lazy_offset */
    elf_x86_64_eh_frame_plt,            /* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_plt),   /* eh_frame_plt_size */
  };
726
/* Parameters for the MPX (BND-prefixed) PLT layout.  Offsets that
   differ from elf_x86_64_arch_bed are shifted by the one-byte 0xf2
   BND prefix in the templates.  */
static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
  {
    elf_x86_64_bnd_plt0_entry,          /* plt0_entry */
    elf_x86_64_bnd_plt_entry,           /* plt_entry */
    sizeof (elf_x86_64_bnd_plt_entry),  /* plt_entry_size */
    2,                                  /* plt0_got1_offset */
    1+8,                                /* plt0_got2_offset */
    1+12,                               /* plt0_got2_insn_end */
    1+2,                                /* plt_got_offset */
    1,                                  /* plt_reloc_offset */
    7,                                  /* plt_plt_offset */
    1+6,                                /* plt_got_insn_size */
    11,                                 /* plt_plt_insn_end */
    0,                                  /* plt_lazy_offset */
    elf_x86_64_eh_frame_plt,            /* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_plt),   /* eh_frame_plt_size */
  };

#define	elf_backend_arch_data	&elf_x86_64_arch_bed
746
/* TRUE if an undefined weak symbol should be resolved to 0.  Reference
   to an undefined weak symbol is resolved to 0 when building an
   executable if it isn't dynamic and
   1. Has non-GOT/non-PLT relocations in text section.  Or
   2. Has no GOT/PLT relocation.
 */
#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
  ((EH)->elf.root.type == bfd_link_hash_undefweak		\
   && bfd_link_executable (INFO)				\
   && (elf_x86_64_hash_table (INFO)->interp == NULL		\
       || !(GOT_RELOC)						\
       || (EH)->has_non_got_reloc				\
       || !(INFO)->dynamic_undefined_weak))
760
/* x86-64 ELF linker hash entry.  Extends the generic ELF hash entry
   with per-symbol GOT/PLT/TLS bookkeeping.  */

struct elf_x86_64_link_hash_entry
{
  struct elf_link_hash_entry elf;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* Values for tls_type below; GOT_TLS_GD and GOT_TLS_GDESC may be
     OR'ed together when a symbol has both kinds of reference.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 3
#define GOT_TLS_GDESC 4
#define GOT_TLS_GD_BOTH_P(type) \
  ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
#define GOT_TLS_GD_P(type) \
  ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
#define GOT_TLS_GDESC_P(type) \
  ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
#define GOT_TLS_GD_ANY_P(type) \
  (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
  unsigned char tls_type;

  /* TRUE if a weak symbol with a real definition needs a copy reloc.
     When there is a weak symbol with a real definition, the processor
     independent code will have arranged for us to see the real
     definition first.  We need to copy the needs_copy bit from the
     real definition and check it when allowing copy reloc in PIE.  */
  unsigned int needs_copy : 1;

  /* TRUE if symbol has at least one BND relocation.  */
  unsigned int has_bnd_reloc : 1;

  /* TRUE if symbol has GOT or PLT relocations.  */
  unsigned int has_got_reloc : 1;

  /* TRUE if symbol has non-GOT/non-PLT relocations in text sections.  */
  unsigned int has_non_got_reloc : 1;

  /* 0: symbol isn't __tls_get_addr.
     1: symbol is __tls_get_addr.
     2: symbol is unknown.  */
  unsigned int tls_get_addr : 2;

  /* Reference count of C/C++ function pointer relocations in read-write
     section which can be resolved at run-time.  */
  bfd_signed_vma func_pointer_refcount;

  /* Information about the GOT PLT entry.  Filled when there are both
     GOT and PLT relocations against the same function.  */
  union gotplt_union plt_got;

  /* Information about the second PLT entry.  Filled when has_bnd_reloc is
     set.  */
  union gotplt_union plt_bnd;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;
};

#define elf_x86_64_hash_entry(ent) \
  ((struct elf_x86_64_link_hash_entry *)(ent))
825
/* x86-64 specific per-BFD data, hung off the generic ELF tdata.  */
struct elf_x86_64_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry (GOT_* values above), indexed in
     parallel with the local GOT refcount/offset arrays.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;
};

#define elf_x86_64_tdata(abfd) \
  ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)

#define elf_x86_64_local_got_tls_type(abfd) \
  (elf_x86_64_tdata (abfd)->local_got_tls_type)

#define elf_x86_64_local_tlsdesc_gotent(abfd) \
  (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)

/* TRUE if ABFD is an ELF object carrying this backend's tdata.  */
#define is_x86_64_elf(bfd)				\
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
   && elf_tdata (bfd) != NULL				\
   && elf_object_id (bfd) == X86_64_ELF_DATA)
850
/* Allocate this backend's per-BFD data (struct elf_x86_64_obj_tdata)
   and tag ABFD with X86_64_ELF_DATA so is_x86_64_elf recognizes it.  */
static bfd_boolean
elf_x86_64_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
				  X86_64_ELF_DATA);
}
/* x86-64 ELF linker hash table.  */

struct elf_x86_64_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *interp;
  asection *sdynbss;
  asection *srelbss;
  asection *plt_eh_frame;
  asection *plt_bnd;
  asection *plt_got;

  /* Presumably the shared GOT entry for R_X86_64_TLSLD relocations:
     a reference count or, once laid out, the entry's offset — confirm
     at its uses (outside this excerpt).  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ld_got;

  /* The amount of space used by the jump slots in the GOT.  */
  bfd_vma sgotplt_jump_table_size;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* r_info encode/decode helpers; set to the elf64_* or elf32_*
     variants depending on ABI_64_P when the table is created.  */
  bfd_vma (*r_info) (bfd_vma, bfd_vma);
  bfd_vma (*r_sym) (bfd_vma);
  /* R_X86_64_64 for LP64, R_X86_64_32 for x32.  */
  unsigned int pointer_r_type;
  /* Dynamic linker path and its size for this ABI.  */
  const char *dynamic_interpreter;
  int dynamic_interpreter_size;

  /* _TLS_MODULE_BASE_ symbol.  */
  struct bfd_link_hash_entry *tls_module_base;

  /* Used by local STT_GNU_IFUNC symbols.  */
  htab_t loc_hash_table;
  void * loc_hash_memory;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma tlsdesc_plt;
  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma tlsdesc_got;

  /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt.  */
  bfd_vma next_jump_slot_index;
  /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt.  */
  bfd_vma next_irelative_index;

  /* TRUE if there are dynamic relocs against IFUNC symbols that apply
     to read-only sections.  */
  bfd_boolean readonly_dynrelocs_against_ifunc;
};
915
/* Get the x86-64 ELF linker hash table from a link_info structure,
   or NULL if the table was not created by this backend.  */

#define elf_x86_64_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)

/* Size in bytes of the GOT jump table: one GOT_ENTRY_SIZE slot per
   .rela.plt relocation.  */
#define elf_x86_64_compute_jump_table_size(htab) \
  ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
924
925 /* Create an entry in an x86-64 ELF linker hash table. */
926
927 static struct bfd_hash_entry *
928 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
929 struct bfd_hash_table *table,
930 const char *string)
931 {
932 /* Allocate the structure if it has not already been allocated by a
933 subclass. */
934 if (entry == NULL)
935 {
936 entry = (struct bfd_hash_entry *)
937 bfd_hash_allocate (table,
938 sizeof (struct elf_x86_64_link_hash_entry));
939 if (entry == NULL)
940 return entry;
941 }
942
943 /* Call the allocation method of the superclass. */
944 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
945 if (entry != NULL)
946 {
947 struct elf_x86_64_link_hash_entry *eh;
948
949 eh = (struct elf_x86_64_link_hash_entry *) entry;
950 eh->dyn_relocs = NULL;
951 eh->tls_type = GOT_UNKNOWN;
952 eh->needs_copy = 0;
953 eh->has_bnd_reloc = 0;
954 eh->has_got_reloc = 0;
955 eh->has_non_got_reloc = 0;
956 eh->tls_get_addr = 2;
957 eh->func_pointer_refcount = 0;
958 eh->plt_bnd.offset = (bfd_vma) -1;
959 eh->plt_got.offset = (bfd_vma) -1;
960 eh->tlsdesc_got = (bfd_vma) -1;
961 }
962
963 return entry;
964 }
965
966 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
967 for local symbol so that we can handle local STT_GNU_IFUNC symbols
968 as global symbol. We reuse indx and dynstr_index for local symbol
969 hash since they aren't used by global symbols in this backend. */
970
971 static hashval_t
972 elf_x86_64_local_htab_hash (const void *ptr)
973 {
974 struct elf_link_hash_entry *h
975 = (struct elf_link_hash_entry *) ptr;
976 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
977 }
978
979 /* Compare local hash entries. */
980
981 static int
982 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
983 {
984 struct elf_link_hash_entry *h1
985 = (struct elf_link_hash_entry *) ptr1;
986 struct elf_link_hash_entry *h2
987 = (struct elf_link_hash_entry *) ptr2;
988
989 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
990 }
991
992 /* Find and/or create a hash entry for local symbol. */
993
994 static struct elf_link_hash_entry *
995 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
996 bfd *abfd, const Elf_Internal_Rela *rel,
997 bfd_boolean create)
998 {
999 struct elf_x86_64_link_hash_entry e, *ret;
1000 asection *sec = abfd->sections;
1001 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
1002 htab->r_sym (rel->r_info));
1003 void **slot;
1004
1005 e.elf.indx = sec->id;
1006 e.elf.dynstr_index = htab->r_sym (rel->r_info);
1007 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
1008 create ? INSERT : NO_INSERT);
1009
1010 if (!slot)
1011 return NULL;
1012
1013 if (*slot)
1014 {
1015 ret = (struct elf_x86_64_link_hash_entry *) *slot;
1016 return &ret->elf;
1017 }
1018
1019 ret = (struct elf_x86_64_link_hash_entry *)
1020 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
1021 sizeof (struct elf_x86_64_link_hash_entry));
1022 if (ret)
1023 {
1024 memset (ret, 0, sizeof (*ret));
1025 ret->elf.indx = sec->id;
1026 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
1027 ret->elf.dynindx = -1;
1028 ret->func_pointer_refcount = 0;
1029 ret->plt_got.offset = (bfd_vma) -1;
1030 *slot = ret;
1031 }
1032 return &ret->elf;
1033 }
1034
1035 /* Destroy an X86-64 ELF linker hash table. */
1036
1037 static void
1038 elf_x86_64_link_hash_table_free (bfd *obfd)
1039 {
1040 struct elf_x86_64_link_hash_table *htab
1041 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1042
1043 if (htab->loc_hash_table)
1044 htab_delete (htab->loc_hash_table);
1045 if (htab->loc_hash_memory)
1046 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1047 _bfd_elf_link_hash_table_free (obfd);
1048 }
1049
1050 /* Create an X86-64 ELF linker hash table. */
1051
1052 static struct bfd_link_hash_table *
1053 elf_x86_64_link_hash_table_create (bfd *abfd)
1054 {
1055 struct elf_x86_64_link_hash_table *ret;
1056 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1057
1058 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1059 if (ret == NULL)
1060 return NULL;
1061
1062 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1063 elf_x86_64_link_hash_newfunc,
1064 sizeof (struct elf_x86_64_link_hash_entry),
1065 X86_64_ELF_DATA))
1066 {
1067 free (ret);
1068 return NULL;
1069 }
1070
1071 if (ABI_64_P (abfd))
1072 {
1073 ret->r_info = elf64_r_info;
1074 ret->r_sym = elf64_r_sym;
1075 ret->pointer_r_type = R_X86_64_64;
1076 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1077 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1078 }
1079 else
1080 {
1081 ret->r_info = elf32_r_info;
1082 ret->r_sym = elf32_r_sym;
1083 ret->pointer_r_type = R_X86_64_32;
1084 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1085 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1086 }
1087
1088 ret->loc_hash_table = htab_try_create (1024,
1089 elf_x86_64_local_htab_hash,
1090 elf_x86_64_local_htab_eq,
1091 NULL);
1092 ret->loc_hash_memory = objalloc_create ();
1093 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1094 {
1095 elf_x86_64_link_hash_table_free (abfd);
1096 return NULL;
1097 }
1098 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1099
1100 return &ret->elf.root;
1101 }
1102
1103 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1104 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1105 hash table. */
1106
1107 static bfd_boolean
1108 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1109 struct bfd_link_info *info)
1110 {
1111 struct elf_x86_64_link_hash_table *htab;
1112
1113 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1114 return FALSE;
1115
1116 htab = elf_x86_64_hash_table (info);
1117 if (htab == NULL)
1118 return FALSE;
1119
1120 /* Set the contents of the .interp section to the interpreter. */
1121 if (bfd_link_executable (info) && !info->nointerp)
1122 {
1123 asection *s = bfd_get_linker_section (dynobj, ".interp");
1124 if (s == NULL)
1125 abort ();
1126 s->size = htab->dynamic_interpreter_size;
1127 s->contents = (unsigned char *) htab->dynamic_interpreter;
1128 htab->interp = s;
1129 }
1130
1131 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1132 if (!htab->sdynbss)
1133 abort ();
1134
1135 if (bfd_link_executable (info))
1136 {
1137 /* Always allow copy relocs for building executables. */
1138 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1139 if (s == NULL)
1140 {
1141 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1142 s = bfd_make_section_anyway_with_flags (dynobj,
1143 ".rela.bss",
1144 (bed->dynamic_sec_flags
1145 | SEC_READONLY));
1146 if (s == NULL
1147 || ! bfd_set_section_alignment (dynobj, s,
1148 bed->s->log_file_align))
1149 return FALSE;
1150 }
1151 htab->srelbss = s;
1152 }
1153
1154 if (!info->no_ld_generated_unwind_info
1155 && htab->plt_eh_frame == NULL
1156 && htab->elf.splt != NULL)
1157 {
1158 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1159 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1160 | SEC_LINKER_CREATED);
1161 htab->plt_eh_frame
1162 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1163 if (htab->plt_eh_frame == NULL
1164 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1165 return FALSE;
1166 }
1167 return TRUE;
1168 }
1169
/* Copy the extra info we tack onto an elf_link_hash_entry from the
   indirect symbol IND to the direct symbol DIR when the two are
   merged (e.g. for versioned or weak-aliased symbols).  */

static void
elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
				 struct elf_link_hash_entry *dir,
				 struct elf_link_hash_entry *ind)
{
  struct elf_x86_64_link_hash_entry *edir, *eind;

  edir = (struct elf_x86_64_link_hash_entry *) dir;
  eind = (struct elf_x86_64_link_hash_entry *) ind;

  /* Propagate the x86-64 specific flags; never clear a flag already
     set on the direct symbol.  */
  if (!edir->has_bnd_reloc)
    edir->has_bnd_reloc = eind->has_bnd_reloc;

  if (!edir->has_got_reloc)
    edir->has_got_reloc = eind->has_got_reloc;

  if (!edir->has_non_got_reloc)
    edir->has_non_got_reloc = eind->has_non_got_reloc;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* If the direct symbol already has an entry for this
		 section, fold P's counts into it and unlink P;
		 otherwise keep P and advance.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append the direct symbol's remaining list to the tail of
	     the (now-deduplicated) indirect list.  */
	  *pp = edir->dyn_relocs;
	}

      /* The combined list now hangs off the direct symbol.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  /* Transfer the recorded TLS access model if the direct symbol has
     no GOT references of its own.  */
  if (ind->root.type == bfd_link_hash_indirect
      && dir->got.refcount <= 0)
    {
      edir->tls_type = eind->tls_type;
      eind->tls_type = GOT_UNKNOWN;
    }

  if (ELIMINATE_COPY_RELOCS
      && ind->root.type != bfd_link_hash_indirect
      && dir->dynamic_adjusted)
    {
      /* If called to transfer flags for a weakdef during processing
	 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
	 We clear it ourselves for ELIMINATE_COPY_RELOCS.  */
      dir->ref_dynamic |= ind->ref_dynamic;
      dir->ref_regular |= ind->ref_regular;
      dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
      dir->needs_plt |= ind->needs_plt;
      dir->pointer_equality_needed |= ind->pointer_equality_needed;
    }
  else
    {
      if (eind->func_pointer_refcount > 0)
	{
	  edir->func_pointer_refcount += eind->func_pointer_refcount;
	  eind->func_pointer_refcount = 0;
	}

      _bfd_elf_link_hash_copy_indirect (info, dir, ind);
    }
}
1253
1254 static bfd_boolean
1255 elf64_x86_64_elf_object_p (bfd *abfd)
1256 {
1257 /* Set the right machine number for an x86-64 elf64 file. */
1258 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1259 return TRUE;
1260 }
1261
1262 static bfd_boolean
1263 elf32_x86_64_elf_object_p (bfd *abfd)
1264 {
1265 /* Set the right machine number for an x86-64 elf32 file. */
1266 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1267 return TRUE;
1268 }
1269
/* Return TRUE if the TLS access code sequence containing the
   relocation R_TYPE at REL->r_offset in SEC supports a transition to
   another access model.  CONTENTS holds SEC's bytes; the instruction
   bytes around the relocation are matched against the known code
   sequences, and for GD/LD the following relocation (rel[1]) must
   reference __tls_get_addr with a matching relocation type.  */

static bfd_boolean
elf_x86_64_check_tls_transition (bfd *abfd,
				 struct bfd_link_info *info,
				 asection *sec,
				 bfd_byte *contents,
				 Elf_Internal_Shdr *symtab_hdr,
				 struct elf_link_hash_entry **sym_hashes,
				 unsigned int r_type,
				 const Elf_Internal_Rela *rel,
				 const Elf_Internal_Rela *relend)
{
  unsigned int val;
  unsigned long r_symndx;
  bfd_boolean largepic = FALSE;
  struct elf_link_hash_entry *h;
  bfd_vma offset;
  struct elf_x86_64_link_hash_table *htab;
  bfd_byte *call;
  bfd_boolean indirect_call, tls_get_addr;

  htab = elf_x86_64_hash_table (info);
  offset = rel->r_offset;
  switch (r_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_TLSLD:
      /* GD/LD sequences need a second relocation on the call.  */
      if ((rel + 1) >= relend)
	return FALSE;

      if (r_type == R_X86_64_TLSGD)
	{
	  /* Check transition from GD access model.  For 64bit, only
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For 32bit, only
		leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic,
	     we also support:
	        leaq foo@tlsgd(%rip), %rdi
	        movabsq $__tls_get_addr@pltoff, %rax
	        addq $r15, %rax
	        call *%rax
	     or
	        leaq foo@tlsgd(%rip), %rdi
	        movabsq $__tls_get_addr@pltoff, %rax
	        addq $rbx, %rax
	        call *%rax  */

	  static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };

	  if ((offset + 12) > sec->size)
	    return FALSE;

	  /* CALL points at the instruction after the leaq.  */
	  call = contents + offset + 4;
	  if (call[0] != 0x66
	      || !((call[1] == 0x48
		    && call[2] == 0xff
		    && call[3] == 0x15)
		   || (call[1] == 0x48
		       && call[2] == 0x67
		       && call[3] == 0xe8)
		   || (call[1] == 0x66
		       && call[2] == 0x48
		       && call[3] == 0xe8)))
	    {
	      /* Not one of the small-model forms; try the largepic
		 movabsq/addq/call sequence.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || offset < 3
		  || memcmp (call - 7, leaq + 1, 3) != 0
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return FALSE;
	      largepic = TRUE;
	    }
	  else if (ABI_64_P (abfd))
	    {
	      if (offset < 4
		  || memcmp (contents + offset - 4, leaq, 4) != 0)
		return FALSE;
	    }
	  else
	    {
	      if (offset < 3
		  || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
		return FALSE;
	    }
	  indirect_call = call[2] == 0xff;
	}
      else
	{
	  /* Check transition from LD access model.  Only
		leaq foo@tlsld(%rip), %rdi;
		call __tls_get_addr@PLT
             or
		leaq foo@tlsld(%rip), %rdi;
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic
	     we also support:
	        leaq foo@tlsld(%rip), %rdi
	        movabsq $__tls_get_addr@pltoff, %rax
	        addq $r15, %rax
	        call *%rax
	     or
	        leaq foo@tlsld(%rip), %rdi
	        movabsq $__tls_get_addr@pltoff, %rax
	        addq $rbx, %rax
	        call *%rax  */

	  static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };

	  if (offset < 3 || (offset + 9) > sec->size)
	    return FALSE;

	  if (memcmp (contents + offset - 3, lea, 3) != 0)
	    return FALSE;

	  call = contents + offset + 4;
	  if (!(call[0] == 0xe8
		|| (call[0] == 0xff && call[1] == 0x15)
		|| (call[0] == 0x67 && call[1] == 0xe8)))
	    {
	      /* Fall back to the largepic sequence.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return FALSE;
	      largepic = TRUE;
	    }
	  indirect_call = call[0] == 0xff;
	}

      /* The second relocation must be against a global symbol.  */
      r_symndx = htab->r_sym (rel[1].r_info);
      if (r_symndx < symtab_hdr->sh_info)
	return FALSE;

      tls_get_addr = FALSE;
      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      if (h != NULL && h->root.root.string != NULL)
	{
	  struct elf_x86_64_link_hash_entry *eh
	    = (struct elf_x86_64_link_hash_entry *) h;
	  tls_get_addr = eh->tls_get_addr == 1;
	  /* tls_get_addr == 2 means "not checked yet"; cache the
	     result of the name comparison in the hash entry.  */
	  if (eh->tls_get_addr > 1)
	    {
	      /* Use strncmp to check __tls_get_addr since
		 __tls_get_addr may be versioned.  */
	      if (strncmp (h->root.root.string, "__tls_get_addr", 14)
		  == 0)
		{
		  eh->tls_get_addr = 1;
		  tls_get_addr = TRUE;
		}
	      else
		eh->tls_get_addr = 0;
	    }
	}

      if (!tls_get_addr)
	return FALSE;
      else if (largepic)
	return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
      else if (indirect_call)
	return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX;
      else
	return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
		|| ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);

    case R_X86_64_GOTTPOFF:
      /* Check transition from IE access model:
		mov foo@gottpoff(%rip), %reg
		add foo@gottpoff(%rip), %reg
       */

      /* Check REX prefix first.  */
      if (offset >= 3 && (offset + 4) <= sec->size)
	{
	  val = bfd_get_8 (abfd, contents + offset - 3);
	  if (val != 0x48 && val != 0x4c)
	    {
	      /* X32 may have 0x44 REX prefix or no REX prefix.  */
	      if (ABI_64_P (abfd))
		return FALSE;
	    }
	}
      else
	{
	  /* X32 may not have any REX prefix.  */
	  if (ABI_64_P (abfd))
	    return FALSE;
	  if (offset < 2 || (offset + 3) > sec->size)
	    return FALSE;
	}

      /* Opcode must be mov (0x8b) or add (0x03).  */
      val = bfd_get_8 (abfd, contents + offset - 2);
      if (val != 0x8b && val != 0x03)
	return FALSE;

      /* ModRM must select RIP-relative addressing (mod=00, rm=101).  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return (val & 0xc7) == 5;

    case R_X86_64_GOTPC32_TLSDESC:
      /* Check transition from GDesc access model:
		leaq x@tlsdesc(%rip), %rax

	 Make sure it's a leaq adding rip to a 32-bit offset
	 into any register, although it's probably almost always
	 going to be rax.  */

      if (offset < 3 || (offset + 4) > sec->size)
	return FALSE;

      /* REX.W prefix, REX.R don't care.  */
      val = bfd_get_8 (abfd, contents + offset - 3);
      if ((val & 0xfb) != 0x48)
	return FALSE;

      if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
	return FALSE;

      /* ModRM must select RIP-relative addressing.  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return (val & 0xc7) == 0x05;

    case R_X86_64_TLSDESC_CALL:
      /* Check transition from GDesc access model:
		call *x@tlsdesc(%rax)
       */
      if (offset + 2 <= sec->size)
	{
	  /* Make sure that it's a call *x@tlsdesc(%rax).  */
	  call = contents + offset;
	  return call[0] == 0xff && call[1] == 0x10;
	}

      return FALSE;

    default:
      abort ();
    }
}
1534
/* Return TRUE if the TLS access transition is OK or no transition
   will be performed.  Update R_TYPE if there is a transition.
   FROM_RELOCATE_SECTION is TRUE when called during relocation (as
   opposed to check_relocs), in which case TLS_TYPE may force a
   further GD->IE or IE->LE transition.  On an invalid transition an
   error is reported and FALSE is returned.  */

static bfd_boolean
elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
			   asection *sec, bfd_byte *contents,
			   Elf_Internal_Shdr *symtab_hdr,
			   struct elf_link_hash_entry **sym_hashes,
			   unsigned int *r_type, int tls_type,
			   const Elf_Internal_Rela *rel,
			   const Elf_Internal_Rela *relend,
			   struct elf_link_hash_entry *h,
			   unsigned long r_symndx,
			   bfd_boolean from_relocate_section)
{
  unsigned int from_type = *r_type;
  unsigned int to_type = from_type;
  bfd_boolean check = TRUE;

  /* Skip TLS transition for functions.  */
  if (h != NULL
      && (h->type == STT_FUNC
	  || h->type == STT_GNU_IFUNC))
    return TRUE;

  switch (from_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_GOTPC32_TLSDESC:
    case R_X86_64_TLSDESC_CALL:
    case R_X86_64_GOTTPOFF:
      if (bfd_link_executable (info))
	{
	  /* Local symbols can go straight to LE (TPOFF32); globals
	     go to IE (GOTTPOFF) first.  */
	  if (h == NULL)
	    to_type = R_X86_64_TPOFF32;
	  else
	    to_type = R_X86_64_GOTTPOFF;
	}

      /* When we are called from elf_x86_64_relocate_section, there may
	 be additional transitions based on TLS_TYPE.  */
      if (from_relocate_section)
	{
	  unsigned int new_to_type = to_type;

	  if (bfd_link_executable (info)
	      && h != NULL
	      && h->dynindx == -1
	      && tls_type == GOT_TLS_IE)
	    new_to_type = R_X86_64_TPOFF32;

	  if (to_type == R_X86_64_TLSGD
	      || to_type == R_X86_64_GOTPC32_TLSDESC
	      || to_type == R_X86_64_TLSDESC_CALL)
	    {
	      if (tls_type == GOT_TLS_IE)
		new_to_type = R_X86_64_GOTTPOFF;
	    }

	  /* We checked the transition before when we were called from
	     elf_x86_64_check_relocs.  We only want to check the new
	     transition which hasn't been checked before.  */
	  check = new_to_type != to_type && from_type == to_type;
	  to_type = new_to_type;
	}

      break;

    case R_X86_64_TLSLD:
      if (bfd_link_executable (info))
	to_type = R_X86_64_TPOFF32;
      break;

    default:
      return TRUE;
    }

  /* Return TRUE if there is no transition.  */
  if (from_type == to_type)
    return TRUE;

  /* Check if the transition can be performed.  */
  if (check
      && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
					    symtab_hdr, sym_hashes,
					    from_type, rel, relend))
    {
      reloc_howto_type *from, *to;
      const char *name;

      from = elf_x86_64_rtype_to_howto (abfd, from_type);
      to = elf_x86_64_rtype_to_howto (abfd, to_type);

      /* Find a symbol name for the error message.  */
      if (h)
	name = h->root.root.string;
      else
	{
	  struct elf_x86_64_link_hash_table *htab;

	  htab = elf_x86_64_hash_table (info);
	  if (htab == NULL)
	    name = "*unknown*";
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
	    }
	}

      (*_bfd_error_handler)
	(_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
	   "in section `%A' failed"),
	 abfd, sec, from->name, to->name, name,
	 (unsigned long) rel->r_offset);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  *r_type = to_type;
  return TRUE;
}
1659
/* Rename some of the generic section flags to better document how they
   are used here.  */
/* Presumably marks sections with GOTPCREL loads that may be converted
   by elf_x86_64_convert_load_reloc — confirm at the setter.  */
#define need_convert_load sec_flg0
/* Set (e.g. by elf_x86_64_need_pic) when relocation checking has
   failed for the section.  */
#define check_relocs_failed sec_flg1
1664
1665 static bfd_boolean
1666 elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
1667 struct elf_link_hash_entry *h,
1668 Elf_Internal_Shdr *symtab_hdr,
1669 Elf_Internal_Sym *isym,
1670 reloc_howto_type *howto)
1671 {
1672 const char *v = "";
1673 const char *und = "";
1674 const char *pic = "";
1675
1676 const char *name;
1677 if (h)
1678 {
1679 name = h->root.root.string;
1680 switch (ELF_ST_VISIBILITY (h->other))
1681 {
1682 case STV_HIDDEN:
1683 v = _("hidden symbol ");
1684 break;
1685 case STV_INTERNAL:
1686 v = _("internal symbol ");
1687 break;
1688 case STV_PROTECTED:
1689 v = _("protected symbol ");
1690 break;
1691 default:
1692 v = _("symbol ");
1693 pic = _("; recompile with -fPIC");
1694 break;
1695 }
1696
1697 if (!h->def_regular && !h->def_dynamic)
1698 und = _("undefined ");
1699 }
1700 else
1701 {
1702 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1703 pic = _("; recompile with -fPIC");
1704 }
1705
1706 (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can "
1707 "not be used when making a shared object%s"),
1708 input_bfd, howto->name, und, v, name, pic);
1709 bfd_set_error (bfd_error_bad_value);
1710 sec->check_relocs_failed = 1;
1711 return FALSE;
1712 }
1713
1714 /* With the local symbol, foo, we convert
1715 mov foo@GOTPCREL(%rip), %reg
1716 to
1717 lea foo(%rip), %reg
1718 and convert
1719 call/jmp *foo@GOTPCREL(%rip)
1720 to
1721 nop call foo/jmp foo nop
1722 When PIC is false, convert
1723 test %reg, foo@GOTPCREL(%rip)
1724 to
1725 test $foo, %reg
1726 and convert
1727 binop foo@GOTPCREL(%rip), %reg
1728 to
1729 binop $foo, %reg
1730 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1731 instructions. */
1732
1733 static bfd_boolean
1734 elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec,
1735 bfd_byte *contents,
1736 Elf_Internal_Rela *irel,
1737 struct elf_link_hash_entry *h,
1738 bfd_boolean *converted,
1739 struct bfd_link_info *link_info)
1740 {
1741 struct elf_x86_64_link_hash_table *htab;
1742 bfd_boolean is_pic;
1743 bfd_boolean require_reloc_pc32;
1744 bfd_boolean relocx;
1745 bfd_boolean to_reloc_pc32;
1746 asection *tsec;
1747 char symtype;
1748 bfd_signed_vma raddend;
1749 unsigned int opcode;
1750 unsigned int modrm;
1751 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
1752 unsigned int r_symndx;
1753 bfd_vma toff;
1754 bfd_vma roff = irel->r_offset;
1755
1756 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1757 return TRUE;
1758
1759 raddend = irel->r_addend;
1760 /* Addend for 32-bit PC-relative relocation must be -4. */
1761 if (raddend != -4)
1762 return TRUE;
1763
1764 htab = elf_x86_64_hash_table (link_info);
1765 is_pic = bfd_link_pic (link_info);
1766
1767 relocx = (r_type == R_X86_64_GOTPCRELX
1768 || r_type == R_X86_64_REX_GOTPCRELX);
1769
1770 /* TRUE if we can convert only to R_X86_64_PC32. Enable it for
1771 --no-relax. */
1772 require_reloc_pc32
1773 = link_info->disable_target_specific_optimizations > 1;
1774
1775 r_symndx = htab->r_sym (irel->r_info);
1776
1777 opcode = bfd_get_8 (abfd, contents + roff - 2);
1778
1779 /* Convert mov to lea since it has been done for a while. */
1780 if (opcode != 0x8b)
1781 {
1782 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1783 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1784 test, xor instructions. */
1785 if (!relocx)
1786 return TRUE;
1787 }
1788
1789 /* We convert only to R_X86_64_PC32:
1790 1. Branch.
1791 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1792 3. require_reloc_pc32 is true.
1793 4. PIC.
1794 */
1795 to_reloc_pc32 = (opcode == 0xff
1796 || !relocx
1797 || require_reloc_pc32
1798 || is_pic);
1799
1800 /* Get the symbol referred to by the reloc. */
1801 if (h == NULL)
1802 {
1803 Elf_Internal_Sym *isym
1804 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1805
1806 /* Skip relocation against undefined symbols. */
1807 if (isym->st_shndx == SHN_UNDEF)
1808 return TRUE;
1809
1810 symtype = ELF_ST_TYPE (isym->st_info);
1811
1812 if (isym->st_shndx == SHN_ABS)
1813 tsec = bfd_abs_section_ptr;
1814 else if (isym->st_shndx == SHN_COMMON)
1815 tsec = bfd_com_section_ptr;
1816 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1817 tsec = &_bfd_elf_large_com_section;
1818 else
1819 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1820
1821 toff = isym->st_value;
1822 }
1823 else
1824 {
1825 /* Undefined weak symbol is only bound locally in executable
1826 and its reference is resolved as 0 without relocation
1827 overflow. We can only perform this optimization for
1828 GOTPCRELX relocations since we need to modify REX byte.
1829 It is OK convert mov with R_X86_64_GOTPCREL to
1830 R_X86_64_PC32. */
1831 if ((relocx || opcode == 0x8b)
1832 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
1833 TRUE,
1834 elf_x86_64_hash_entry (h)))
1835 {
1836 if (opcode == 0xff)
1837 {
1838 /* Skip for branch instructions since R_X86_64_PC32
1839 may overflow. */
1840 if (require_reloc_pc32)
1841 return TRUE;
1842 }
1843 else if (relocx)
1844 {
1845 /* For non-branch instructions, we can convert to
1846 R_X86_64_32/R_X86_64_32S since we know if there
1847 is a REX byte. */
1848 to_reloc_pc32 = FALSE;
1849 }
1850
1851 /* Since we don't know the current PC when PIC is true,
1852 we can't convert to R_X86_64_PC32. */
1853 if (to_reloc_pc32 && is_pic)
1854 return TRUE;
1855
1856 goto convert;
1857 }
1858 /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
1859 ld.so may use its link-time address. */
1860 else if ((h->def_regular
1861 || h->root.type == bfd_link_hash_defined
1862 || h->root.type == bfd_link_hash_defweak)
1863 && h != htab->elf.hdynamic
1864 && SYMBOL_REFERENCES_LOCAL (link_info, h))
1865 {
1866 /* bfd_link_hash_new or bfd_link_hash_undefined is
1867 set by an assignment in a linker script in
1868 bfd_elf_record_link_assignment. */
1869 if (h->def_regular
1870 && (h->root.type == bfd_link_hash_new
1871 || h->root.type == bfd_link_hash_undefined))
1872 {
1873 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1874 if (require_reloc_pc32)
1875 return TRUE;
1876 goto convert;
1877 }
1878 tsec = h->root.u.def.section;
1879 toff = h->root.u.def.value;
1880 symtype = h->type;
1881 }
1882 else
1883 return TRUE;
1884 }
1885
1886 /* Don't convert GOTPCREL relocation against large section. */
1887 if (elf_section_data (tsec) != NULL
1888 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1889 return TRUE;
1890
1891 /* We can only estimate relocation overflow for R_X86_64_PC32. */
1892 if (!to_reloc_pc32)
1893 goto convert;
1894
1895 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
1896 {
1897 /* At this stage in linking, no SEC_MERGE symbol has been
1898 adjusted, so all references to such symbols need to be
1899 passed through _bfd_merged_section_offset. (Later, in
1900 relocate_section, all SEC_MERGE symbols *except* for
1901 section symbols have been adjusted.)
1902
1903 gas may reduce relocations against symbols in SEC_MERGE
1904 sections to a relocation against the section symbol when
1905 the original addend was zero. When the reloc is against
1906 a section symbol we should include the addend in the
1907 offset passed to _bfd_merged_section_offset, since the
1908 location of interest is the original symbol. On the
1909 other hand, an access to "sym+addend" where "sym" is not
1910 a section symbol should not include the addend; Such an
1911 access is presumed to be an offset from "sym"; The
1912 location of interest is just "sym". */
1913 if (symtype == STT_SECTION)
1914 toff += raddend;
1915
1916 toff = _bfd_merged_section_offset (abfd, &tsec,
1917 elf_section_data (tsec)->sec_info,
1918 toff);
1919
1920 if (symtype != STT_SECTION)
1921 toff += raddend;
1922 }
1923 else
1924 toff += raddend;
1925
1926 /* Don't convert if R_X86_64_PC32 relocation overflows. */
1927 if (tsec->output_section == sec->output_section)
1928 {
1929 if ((toff - roff + 0x80000000) > 0xffffffff)
1930 return TRUE;
1931 }
1932 else
1933 {
1934 bfd_signed_vma distance;
1935
1936 /* At this point, we don't know the load addresses of TSEC
1937 section nor SEC section. We estimate the distrance between
1938 SEC and TSEC. We store the estimated distances in the
1939 compressed_size field of the output section, which is only
1940 used to decompress the compressed input section. */
1941 if (sec->output_section->compressed_size == 0)
1942 {
1943 asection *asect;
1944 bfd_size_type size = 0;
1945 for (asect = link_info->output_bfd->sections;
1946 asect != NULL;
1947 asect = asect->next)
1948 /* Skip debug sections since compressed_size is used to
1949 compress debug sections. */
1950 if ((asect->flags & SEC_DEBUGGING) == 0)
1951 {
1952 asection *i;
1953 for (i = asect->map_head.s;
1954 i != NULL;
1955 i = i->map_head.s)
1956 {
1957 size = align_power (size, i->alignment_power);
1958 size += i->size;
1959 }
1960 asect->compressed_size = size;
1961 }
1962 }
1963
1964 /* Don't convert GOTPCREL relocations if TSEC isn't placed
1965 after SEC. */
1966 distance = (tsec->output_section->compressed_size
1967 - sec->output_section->compressed_size);
1968 if (distance < 0)
1969 return TRUE;
1970
1971 /* Take PT_GNU_RELRO segment into account by adding
1972 maxpagesize. */
1973 if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
1974 - roff + 0x80000000) > 0xffffffff)
1975 return TRUE;
1976 }
1977
1978 convert:
1979 if (opcode == 0xff)
1980 {
1981 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1982 unsigned int nop;
1983 unsigned int disp;
1984 bfd_vma nop_offset;
1985
1986 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1987 R_X86_64_PC32. */
1988 modrm = bfd_get_8 (abfd, contents + roff - 1);
1989 if (modrm == 0x25)
1990 {
1991 /* Convert to "jmp foo nop". */
1992 modrm = 0xe9;
1993 nop = NOP_OPCODE;
1994 nop_offset = irel->r_offset + 3;
1995 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1996 irel->r_offset -= 1;
1997 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1998 }
1999 else
2000 {
2001 struct elf_x86_64_link_hash_entry *eh
2002 = (struct elf_x86_64_link_hash_entry *) h;
2003
2004 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
2005 is a nop prefix. */
2006 modrm = 0xe8;
2007 /* To support TLS optimization, always use addr32 prefix for
2008 "call *__tls_get_addr@GOTPCREL(%rip)". */
2009 if (eh && eh->tls_get_addr == 1)
2010 {
2011 nop = 0x67;
2012 nop_offset = irel->r_offset - 2;
2013 }
2014 else
2015 {
2016 nop = link_info->call_nop_byte;
2017 if (link_info->call_nop_as_suffix)
2018 {
2019 nop_offset = irel->r_offset + 3;
2020 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2021 irel->r_offset -= 1;
2022 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2023 }
2024 else
2025 nop_offset = irel->r_offset - 2;
2026 }
2027 }
2028 bfd_put_8 (abfd, nop, contents + nop_offset);
2029 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
2030 r_type = R_X86_64_PC32;
2031 }
2032 else
2033 {
2034 unsigned int rex;
2035 unsigned int rex_mask = REX_R;
2036
2037 if (r_type == R_X86_64_REX_GOTPCRELX)
2038 rex = bfd_get_8 (abfd, contents + roff - 3);
2039 else
2040 rex = 0;
2041
2042 if (opcode == 0x8b)
2043 {
2044 if (to_reloc_pc32)
2045 {
2046 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2047 "lea foo(%rip), %reg". */
2048 opcode = 0x8d;
2049 r_type = R_X86_64_PC32;
2050 }
2051 else
2052 {
2053 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2054 "mov $foo, %reg". */
2055 opcode = 0xc7;
2056 modrm = bfd_get_8 (abfd, contents + roff - 1);
2057 modrm = 0xc0 | (modrm & 0x38) >> 3;
2058 if ((rex & REX_W) != 0
2059 && ABI_64_P (link_info->output_bfd))
2060 {
2061 /* Keep the REX_W bit in REX byte for LP64. */
2062 r_type = R_X86_64_32S;
2063 goto rewrite_modrm_rex;
2064 }
2065 else
2066 {
2067 /* If the REX_W bit in REX byte isn't needed,
2068 use R_X86_64_32 and clear the W bit to avoid
2069 sign-extend imm32 to imm64. */
2070 r_type = R_X86_64_32;
2071 /* Clear the W bit in REX byte. */
2072 rex_mask |= REX_W;
2073 goto rewrite_modrm_rex;
2074 }
2075 }
2076 }
2077 else
2078 {
2079 /* R_X86_64_PC32 isn't supported. */
2080 if (to_reloc_pc32)
2081 return TRUE;
2082
2083 modrm = bfd_get_8 (abfd, contents + roff - 1);
2084 if (opcode == 0x85)
2085 {
2086 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
2087 "test $foo, %reg". */
2088 modrm = 0xc0 | (modrm & 0x38) >> 3;
2089 opcode = 0xf7;
2090 }
2091 else
2092 {
2093 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
2094 "binop $foo, %reg". */
2095 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
2096 opcode = 0x81;
2097 }
2098
2099 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
2100 overflow when sign-extending imm32 to imm64. */
2101 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
2102
2103 rewrite_modrm_rex:
2104 bfd_put_8 (abfd, modrm, contents + roff - 1);
2105
2106 if (rex)
2107 {
2108 /* Move the R bit to the B bit in REX byte. */
2109 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
2110 bfd_put_8 (abfd, rex, contents + roff - 3);
2111 }
2112
2113 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
2114 irel->r_addend = 0;
2115 }
2116
2117 bfd_put_8 (abfd, opcode, contents + roff - 2);
2118 }
2119
2120 irel->r_info = htab->r_info (r_symndx, r_type);
2121
2122 *converted = TRUE;
2123
2124 return TRUE;
2125 }
2126
2127 /* Look through the relocs for a section during the first phase, and
2128 calculate needed space in the global offset table, procedure
2129 linkage table, and dynamic reloc sections. */
2130
static bfd_boolean
elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
			 asection *sec,
			 const Elf_Internal_Rela *relocs)
{
  struct elf_x86_64_link_hash_table *htab;
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  asection *sreloc;
  bfd_byte *contents;
  bfd_boolean use_plt_got;

  /* Nothing to count for relocatable links; relocs are copied as-is.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  /* Don't do anything special with non-loaded, non-alloced sections.
     In particular, any relocs in such sections should not affect GOT
     and PLT reference counting (ie. we don't allow them to create GOT
     or PLT entries), there's no possibility or desire to optimize TLS
     relocs, and there's not much point in propagating relocs to shared
     libs that the dynamic linker won't relocate.  */
  if ((sec->flags & SEC_ALLOC) == 0)
    return TRUE;

  BFD_ASSERT (is_x86_64_elf (abfd));

  htab = elf_x86_64_hash_table (info);
  if (htab == NULL)
    {
      sec->check_relocs_failed = 1;
      return FALSE;
    }

  /* Get the section contents.  Needed because TLS transition and the
     GOTPCREL conversion below must inspect the actual instruction
     bytes around each relocation.  */
  if (elf_section_data (sec)->this_hdr.contents != NULL)
    contents = elf_section_data (sec)->this_hdr.contents;
  else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
    {
      sec->check_relocs_failed = 1;
      return FALSE;
    }

  /* The .plt.got scheme is only supported by the elf_x86_64_arch_bed
     backend (plain 64-bit, not NaCl or other variants).  */
  use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;

  symtab_hdr = &elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);

  sreloc = NULL;

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      unsigned int r_type;
      unsigned long r_symndx;
      struct elf_link_hash_entry *h;
      struct elf_x86_64_link_hash_entry *eh;
      Elf_Internal_Sym *isym;
      const char *name;
      bfd_boolean size_reloc;

      r_symndx = htab->r_sym (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);

      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
				 abfd, r_symndx);
	  goto error_return;
	}

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					abfd, r_symndx);
	  if (isym == NULL)
	    goto error_return;

	  /* Check relocation against local STT_GNU_IFUNC symbol.  */
	  if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
	    {
	      h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
						 TRUE);
	      if (h == NULL)
		goto error_return;

	      /* Fake a STT_GNU_IFUNC symbol.  Local ifunc symbols get
		 a synthetic hash entry so they can be given PLT/GOT
		 entries like global ifuncs.  */
	      h->type = STT_GNU_IFUNC;
	      h->def_regular = 1;
	      h->ref_regular = 1;
	      h->forced_local = 1;
	      h->root.type = bfd_link_hash_defined;
	    }
	  else
	    h = NULL;
	}
      else
	{
	  /* A global symbol: follow indirect and warning links to the
	     real hash entry.  */
	  isym = NULL;
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      /* Check invalid x32 relocations.  The 64-bit reloc forms below
	 cannot be expressed with x32's 32-bit addresses.  */
      if (!ABI_64_P (abfd))
	switch (r_type)
	  {
	  default:
	    break;

	  case R_X86_64_DTPOFF64:
	  case R_X86_64_TPOFF64:
	  case R_X86_64_PC64:
	  case R_X86_64_GOTOFF64:
	  case R_X86_64_GOT64:
	  case R_X86_64_GOTPCREL64:
	  case R_X86_64_GOTPC64:
	  case R_X86_64_GOTPLT64:
	  case R_X86_64_PLTOFF64:
	    {
	      if (h)
		name = h->root.root.string;
	      else
		name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
					 NULL);
	      (*_bfd_error_handler)
		(_("%B: relocation %s against symbol `%s' isn't "
		   "supported in x32 mode"), abfd,
		 x86_64_elf_howto_table[r_type].name, name);
	      bfd_set_error (bfd_error_bad_value);
	      goto error_return;
	    }
	    break;
	  }

      if (h != NULL)
	{
	  switch (r_type)
	    {
	    default:
	      break;

	    case R_X86_64_PC32_BND:
	    case R_X86_64_PLT32_BND:
	    case R_X86_64_PC32:
	    case R_X86_64_PLT32:
	    case R_X86_64_32:
	    case R_X86_64_64:
	      /* MPX PLT is supported only if elf_x86_64_arch_bed
		 is used in 64-bit mode.  */
	      if (ABI_64_P (abfd)
		  && info->bndplt
		  && (get_elf_x86_64_backend_data (abfd)
		      == &elf_x86_64_arch_bed))
		{
		  elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;

		  /* Create the second PLT for Intel MPX support.  */
		  if (htab->plt_bnd == NULL)
		    {
		      unsigned int plt_bnd_align;
		      const struct elf_backend_data *bed;

		      bed = get_elf_backend_data (info->output_bfd);
		      BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
				  && (sizeof (elf_x86_64_bnd_plt2_entry)
				      == sizeof (elf_x86_64_legacy_plt2_entry)));
		      plt_bnd_align = 3;

		      if (htab->elf.dynobj == NULL)
			htab->elf.dynobj = abfd;
		      htab->plt_bnd
			= bfd_make_section_anyway_with_flags (htab->elf.dynobj,
							      ".plt.bnd",
							      (bed->dynamic_sec_flags
							       | SEC_ALLOC
							       | SEC_CODE
							       | SEC_LOAD
							       | SEC_READONLY));
		      if (htab->plt_bnd == NULL
			  || !bfd_set_section_alignment (htab->elf.dynobj,
							 htab->plt_bnd,
							 plt_bnd_align))
			goto error_return;
		    }
		}
	      /* Fall through.  */

	    case R_X86_64_32S:
	    case R_X86_64_PC64:
	    case R_X86_64_GOTPCREL:
	    case R_X86_64_GOTPCRELX:
	    case R_X86_64_REX_GOTPCRELX:
	    case R_X86_64_GOTPCREL64:
	      if (htab->elf.dynobj == NULL)
		htab->elf.dynobj = abfd;
	      /* Create the ifunc sections for static executables.  */
	      if (h->type == STT_GNU_IFUNC
		  && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
						      info))
		goto error_return;
	      break;
	    }

	  /* It is referenced by a non-shared object.  */
	  h->ref_regular = 1;
	  h->root.non_ir_ref = 1;

	  if (h->type == STT_GNU_IFUNC)
	    elf_tdata (info->output_bfd)->has_gnu_symbols
	      |= elf_gnu_symbol_ifunc;
	}

      /* May rewrite R_TYPE in place when a TLS access model can be
	 relaxed (e.g. GD -> IE/LE); all counting below uses the
	 transitioned type.  */
      if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
				       symtab_hdr, sym_hashes,
				       &r_type, GOT_UNKNOWN,
				       rel, rel_end, h, r_symndx, FALSE))
	goto error_return;

      eh = (struct elf_x86_64_link_hash_entry *) h;
      switch (r_type)
	{
	case R_X86_64_TLSLD:
	  htab->tls_ld_got.refcount += 1;
	  goto create_got;

	case R_X86_64_TPOFF32:
	  if (!bfd_link_executable (info) && ABI_64_P (abfd))
	    /* NOTE(review): this returns without the error_return
	       cleanup, so a malloc'ed CONTENTS buffer is not freed on
	       this path — confirm intended.  */
	    return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
					&x86_64_elf_howto_table[r_type]);
	  if (eh != NULL)
	    eh->has_got_reloc = 1;
	  break;

	case R_X86_64_GOTTPOFF:
	  if (!bfd_link_executable (info))
	    info->flags |= DF_STATIC_TLS;
	  /* Fall through */

	case R_X86_64_GOT32:
	case R_X86_64_GOTPCREL:
	case R_X86_64_GOTPCRELX:
	case R_X86_64_REX_GOTPCRELX:
	case R_X86_64_TLSGD:
	case R_X86_64_GOT64:
	case R_X86_64_GOTPCREL64:
	case R_X86_64_GOTPLT64:
	case R_X86_64_GOTPC32_TLSDESC:
	case R_X86_64_TLSDESC_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      default: tls_type = GOT_NORMAL; break;
	      case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
	      case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
	      case R_X86_64_GOTPC32_TLSDESC:
	      case R_X86_64_TLSDESC_CALL:
		tls_type = GOT_TLS_GDESC; break;
	      }

	    if (h != NULL)
	      {
		h->got.refcount += 1;
		old_tls_type = eh->tls_type;
	      }
	    else
	      {
		bfd_signed_vma *local_got_refcounts;

		/* This is a global offset table entry for a local symbol.
		   One allocation holds three parallel per-symbol arrays:
		   refcounts (bfd_signed_vma), tlsdesc GOT offsets
		   (bfd_vma) and TLS types (char).  */
		local_got_refcounts = elf_local_got_refcounts (abfd);
		if (local_got_refcounts == NULL)
		  {
		    bfd_size_type size;

		    size = symtab_hdr->sh_info;
		    size *= sizeof (bfd_signed_vma)
		      + sizeof (bfd_vma) + sizeof (char);
		    local_got_refcounts = ((bfd_signed_vma *)
					   bfd_zalloc (abfd, size));
		    if (local_got_refcounts == NULL)
		      goto error_return;
		    elf_local_got_refcounts (abfd) = local_got_refcounts;
		    elf_x86_64_local_tlsdesc_gotent (abfd)
		      = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
		    elf_x86_64_local_got_tls_type (abfd)
		      = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
		  }
		local_got_refcounts[r_symndx] += 1;
		old_tls_type
		  = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a TLS symbol is accessed using IE at least once,
	       there is no point to use dynamic model for it.  */
	    if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
		&& (! GOT_TLS_GD_ANY_P (old_tls_type)
		    || tls_type != GOT_TLS_IE))
	      {
		if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
		  tls_type = old_tls_type;
		else if (GOT_TLS_GD_ANY_P (old_tls_type)
			 && GOT_TLS_GD_ANY_P (tls_type))
		  /* A symbol may be accessed with both GD and GDESC;
		     record both models.  */
		  tls_type |= old_tls_type;
		else
		  {
		    if (h)
		      name = h->root.root.string;
		    else
		      name = bfd_elf_sym_name (abfd, symtab_hdr,
					       isym, NULL);
		    (*_bfd_error_handler)
		      (_("%B: '%s' accessed both as normal and thread local symbol"),
		       abfd, name);
		    bfd_set_error (bfd_error_bad_value);
		    goto error_return;
		  }
	      }

	    if (old_tls_type != tls_type)
	      {
		if (eh != NULL)
		  eh->tls_type = tls_type;
		else
		  elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through */

	case R_X86_64_GOTOFF64:
	case R_X86_64_GOTPC32:
	case R_X86_64_GOTPC64:
	create_got:
	  if (eh != NULL)
	    eh->has_got_reloc = 1;
	  if (htab->elf.sgot == NULL)
	    {
	      if (htab->elf.dynobj == NULL)
		htab->elf.dynobj = abfd;
	      if (!_bfd_elf_create_got_section (htab->elf.dynobj,
						info))
		goto error_return;
	    }
	  break;

	case R_X86_64_PLT32:
	case R_X86_64_PLT32_BND:
	  /* This symbol requires a procedure linkage table entry.  We
	     actually build the entry in adjust_dynamic_symbol,
	     because this might be a case of linking PIC code which is
	     never referenced by a dynamic object, in which case we
	     don't need to generate a procedure linkage table entry
	     after all.  */

	  /* If this is a local symbol, we resolve it directly without
	     creating a procedure linkage table entry.  */
	  if (h == NULL)
	    continue;

	  eh->has_got_reloc = 1;
	  h->needs_plt = 1;
	  h->plt.refcount += 1;
	  break;

	case R_X86_64_PLTOFF64:
	  /* This tries to form the 'address' of a function relative
	     to GOT.  For global symbols we need a PLT entry.  */
	  if (h != NULL)
	    {
	      h->needs_plt = 1;
	      h->plt.refcount += 1;
	    }
	  goto create_got;

	case R_X86_64_SIZE32:
	case R_X86_64_SIZE64:
	  size_reloc = TRUE;
	  goto do_size;

	case R_X86_64_32:
	  if (!ABI_64_P (abfd))
	    goto pointer;
	  /* Fall through.  */
	case R_X86_64_8:
	case R_X86_64_16:
	case R_X86_64_32S:
	  /* Check relocation overflow as these relocs may lead to
	     run-time relocation overflow.  Don't error out for
	     sections we don't care about, such as debug sections or
	     when relocation overflow check is disabled.  */
	  if (!info->no_reloc_overflow_check
	      && (bfd_link_pic (info)
		  || (bfd_link_executable (info)
		      && h != NULL
		      && !h->def_regular
		      && h->def_dynamic
		      && (sec->flags & SEC_READONLY) == 0)))
	    /* NOTE(review): like TPOFF32 above, this early return
	       bypasses the error_return cleanup for CONTENTS.  */
	    return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
					&x86_64_elf_howto_table[r_type]);
	  /* Fall through.  */

	case R_X86_64_PC8:
	case R_X86_64_PC16:
	case R_X86_64_PC32:
	case R_X86_64_PC32_BND:
	case R_X86_64_PC64:
	case R_X86_64_64:
	pointer:
	  if (eh != NULL && (sec->flags & SEC_CODE) != 0)
	    eh->has_non_got_reloc = 1;
	  /* We are called after all symbols have been resolved.  Only
	     relocation against STT_GNU_IFUNC symbol must go through
	     PLT.  */
	  if (h != NULL
	      && (bfd_link_executable (info)
		  || h->type == STT_GNU_IFUNC))
	    {
	      /* If this reloc is in a read-only section, we might
		 need a copy reloc.  We can't check reliably at this
		 stage whether the section is read-only, as input
		 sections have not yet been mapped to output sections.
		 Tentatively set the flag for now, and correct in
		 adjust_dynamic_symbol.  */
	      h->non_got_ref = 1;

	      /* We may need a .plt entry if the symbol is a function
		 defined in a shared lib or is a STT_GNU_IFUNC function
		 referenced from the code or read-only section.  */
	      if (!h->def_regular
		  || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
		h->plt.refcount += 1;

	      if (r_type == R_X86_64_PC32)
		{
		  /* Since something like ".long foo - ." may be used
		     as pointer, make sure that PLT is used if foo is
		     a function defined in a shared library.  */
		  if ((sec->flags & SEC_CODE) == 0)
		    h->pointer_equality_needed = 1;
		}
	      else if (r_type != R_X86_64_PC32_BND
		       && r_type != R_X86_64_PC64)
		{
		  h->pointer_equality_needed = 1;
		  /* At run-time, R_X86_64_64 can be resolved for both
		     x86-64 and x32.  But R_X86_64_32 and R_X86_64_32S
		     can only be resolved for x32.  */
		  if ((sec->flags & SEC_READONLY) == 0
		      && (r_type == R_X86_64_64
			  || (!ABI_64_P (abfd)
			      && (r_type == R_X86_64_32
				  || r_type == R_X86_64_32S))))
		    eh->func_pointer_refcount += 1;
		}
	    }

	  size_reloc = FALSE;
do_size:
	  /* If we are creating a shared library, and this is a reloc
	     against a global symbol, or a non PC relative reloc
	     against a local symbol, then we need to copy the reloc
	     into the shared library.  However, if we are linking with
	     -Bsymbolic, we do not need to copy a reloc against a
	     global symbol which is defined in an object we are
	     including in the link (i.e., DEF_REGULAR is set).  At
	     this point we have not seen all the input files, so it is
	     possible that DEF_REGULAR is not set now but will be set
	     later (it is never cleared).  In case of a weak definition,
	     DEF_REGULAR may be cleared later by a strong definition in
	     a shared library.  We account for that possibility below by
	     storing information in the relocs_copied field of the hash
	     table entry.  A similar situation occurs when creating
	     shared libraries and symbol visibility changes render the
	     symbol local.

	     If on the other hand, we are creating an executable, we
	     may need to keep relocations for symbols satisfied by a
	     dynamic library if we manage to avoid copy relocs for the
	     symbol.

	     Generate dynamic pointer relocation against STT_GNU_IFUNC
	     symbol in the non-code section.  */
	  if ((bfd_link_pic (info)
	       && (! IS_X86_64_PCREL_TYPE (r_type)
		   || (h != NULL
		       && (! (bfd_link_pie (info)
			      || SYMBOLIC_BIND (info, h))
			   || h->root.type == bfd_link_hash_defweak
			   || !h->def_regular))))
	      || (h != NULL
		  && h->type == STT_GNU_IFUNC
		  && r_type == htab->pointer_r_type
		  && (sec->flags & SEC_CODE) == 0)
	      || (ELIMINATE_COPY_RELOCS
		  && !bfd_link_pic (info)
		  && h != NULL
		  && (h->root.type == bfd_link_hash_defweak
		      || !h->def_regular)))
	    {
	      struct elf_dyn_relocs *p;
	      struct elf_dyn_relocs **head;

	      /* We must copy these reloc types into the output file.
		 Create a reloc section in dynobj and make room for
		 this reloc.  */
	      if (sreloc == NULL)
		{
		  if (htab->elf.dynobj == NULL)
		    htab->elf.dynobj = abfd;

		  sreloc = _bfd_elf_make_dynamic_reloc_section
		    (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
		     abfd, /*rela?*/ TRUE);

		  if (sreloc == NULL)
		    goto error_return;
		}

	      /* If this is a global symbol, we count the number of
		 relocations we need for this symbol.  */
	      if (h != NULL)
		head = &eh->dyn_relocs;
	      else
		{
		  /* Track dynamic relocs needed for local syms too.
		     We really need local syms available to do this
		     easily.  Oh well.  */
		  asection *s;
		  void **vpp;

		  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
						abfd, r_symndx);
		  if (isym == NULL)
		    goto error_return;

		  s = bfd_section_from_elf_index (abfd, isym->st_shndx);
		  if (s == NULL)
		    s = sec;

		  /* Beware of type punned pointers vs strict aliasing
		     rules.  */
		  vpp = &(elf_section_data (s)->local_dynrel);
		  head = (struct elf_dyn_relocs **)vpp;
		}

	      p = *head;
	      if (p == NULL || p->sec != sec)
		{
		  bfd_size_type amt = sizeof *p;

		  p = ((struct elf_dyn_relocs *)
		       bfd_alloc (htab->elf.dynobj, amt));
		  if (p == NULL)
		    goto error_return;
		  p->next = *head;
		  *head = p;
		  p->sec = sec;
		  p->count = 0;
		  p->pc_count = 0;
		}

	      p->count += 1;
	      /* Count size relocation as PC-relative relocation.  */
	      if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
		p->pc_count += 1;
	    }
	  break;

	  /* This relocation describes the C++ object vtable hierarchy.
	     Reconstruct it for later use during GC.  */
	case R_X86_64_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    goto error_return;
	  break;

	  /* This relocation describes which C++ vtable entries are actually
	     used.  Record for later use during GC.  */
	case R_X86_64_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
	    goto error_return;
	  break;

	default:
	  break;
	}

      /* Lazily create .plt.got the first time a symbol that can use a
	 GOT PLT entry is seen (bind-now without pointer equality, or a
	 symbol with both PLT and GOT references).  */
      if (use_plt_got
	  && h != NULL
	  && h->plt.refcount > 0
	  && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
	      || h->got.refcount > 0)
	  && htab->plt_got == NULL)
	{
	  /* Create the GOT procedure linkage table.  */
	  unsigned int plt_got_align;
	  const struct elf_backend_data *bed;

	  bed = get_elf_backend_data (info->output_bfd);
	  BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
		      && (sizeof (elf_x86_64_bnd_plt2_entry)
			  == sizeof (elf_x86_64_legacy_plt2_entry)));
	  plt_got_align = 3;

	  if (htab->elf.dynobj == NULL)
	    htab->elf.dynobj = abfd;
	  htab->plt_got
	    = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
						  ".plt.got",
						  (bed->dynamic_sec_flags
						   | SEC_ALLOC
						   | SEC_CODE
						   | SEC_LOAD
						   | SEC_READONLY));
	  if (htab->plt_got == NULL
	      || !bfd_set_section_alignment (htab->elf.dynobj,
					     htab->plt_got,
					     plt_got_align))
	    goto error_return;
	}

      /* Remember that convert_load should later try rewriting
	 GOTPCREL loads into direct references (not for ifuncs, whose
	 GOT slots are resolved at run time).  */
      if ((r_type == R_X86_64_GOTPCREL
	   || r_type == R_X86_64_GOTPCRELX
	   || r_type == R_X86_64_REX_GOTPCRELX)
	  && (h == NULL || h->type != STT_GNU_IFUNC))
	sec->need_convert_load = 1;
    }

  if (elf_section_data (sec)->this_hdr.contents != contents)
    {
      if (!info->keep_memory)
	free (contents);
      else
	{
	  /* Cache the section contents for elf_link_input_bfd.  */
	  elf_section_data (sec)->this_hdr.contents = contents;
	}
    }

  return TRUE;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  sec->check_relocs_failed = 1;
  return FALSE;
}
2784
2785 /* Return the section that should be marked against GC for a given
2786 relocation. */
2787
2788 static asection *
2789 elf_x86_64_gc_mark_hook (asection *sec,
2790 struct bfd_link_info *info,
2791 Elf_Internal_Rela *rel,
2792 struct elf_link_hash_entry *h,
2793 Elf_Internal_Sym *sym)
2794 {
2795 if (h != NULL)
2796 switch (ELF32_R_TYPE (rel->r_info))
2797 {
2798 case R_X86_64_GNU_VTINHERIT:
2799 case R_X86_64_GNU_VTENTRY:
2800 return NULL;
2801 }
2802
2803 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2804 }
2805
2806 /* Remove undefined weak symbol from the dynamic symbol table if it
2807 is resolved to 0. */
2808
2809 static bfd_boolean
2810 elf_x86_64_fixup_symbol (struct bfd_link_info *info,
2811 struct elf_link_hash_entry *h)
2812 {
2813 if (h->dynindx != -1
2814 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2815 elf_x86_64_hash_entry (h)->has_got_reloc,
2816 elf_x86_64_hash_entry (h)))
2817 {
2818 h->dynindx = -1;
2819 _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
2820 h->dynstr_index);
2821 }
2822 return TRUE;
2823 }
2824
2825 /* Adjust a symbol defined by a dynamic object and referenced by a
2826 regular object. The current definition is in some section of the
2827 dynamic object, but we're not including those sections. We have to
2828 change the definition to something the rest of the link can
2829 understand. */
2830
static bfd_boolean
elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
				  struct elf_link_hash_entry *h)
{
  struct elf_x86_64_link_hash_table *htab;
  asection *s;
  struct elf_x86_64_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* STT_GNU_IFUNC symbol must go through PLT.  */
  if (h->type == STT_GNU_IFUNC)
    {
      /* All local STT_GNU_IFUNC references must be treated as local
	 calls via local PLT.  */
      if (h->ref_regular
	  && SYMBOL_CALLS_LOCAL (info, h))
	{
	  bfd_size_type pc_count = 0, count = 0;
	  struct elf_dyn_relocs **pp;

	  /* Convert recorded PC-relative dynamic relocs into PLT
	     references, unlinking list entries that end up with a
	     zero count.  */
	  eh = (struct elf_x86_64_link_hash_entry *) h;
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      pc_count += p->pc_count;
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      count += p->count;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }

	  if (pc_count || count)
	    {
	      h->non_got_ref = 1;
	      if (pc_count)
		{
		  /* Increment PLT reference count only for PC-relative
		     references.  */
		  h->needs_plt = 1;
		  if (h->plt.refcount <= 0)
		    h->plt.refcount = 1;
		  else
		    h->plt.refcount += 1;
		}
	    }
	}

      if (h->plt.refcount <= 0)
	{
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
      return TRUE;
    }

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC
      || h->needs_plt)
    {
      if (h->plt.refcount <= 0
	  || SYMBOL_CALLS_LOCAL (info, h)
	  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC32 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    /* It's possible that we incorrectly decided a .plt reloc was
       needed for an R_X86_64_PC32 reloc to a non-function sym in
       check_relocs.  We can't decide accurately between function and
       non-function syms in check-relocs; Objects loaded later in
       the link may change h->type.  So fix it now.  */
    h->plt.offset = (bfd_vma) -1;

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
	{
	  /* Inherit the copy-reloc decision from the strong
	     definition the weak symbol follows.  */
	  eh = (struct elf_x86_64_link_hash_entry *) h;
	  h->non_got_ref = h->u.weakdef->non_got_ref;
	  eh->needs_copy = h->u.weakdef->needs_copy;
	}
      return TRUE;
    }

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (!bfd_link_executable (info))
    return TRUE;

  /* If there are no references to this symbol that do not use the
     GOT, we don't need to generate a copy reloc.  */
  if (!h->non_got_ref)
    return TRUE;

  /* If -z nocopyreloc was given, we won't generate them either.  */
  if (info->nocopyreloc)
    {
      h->non_got_ref = 0;
      return TRUE;
    }

  if (ELIMINATE_COPY_RELOCS)
    {
      /* Look for a dynamic reloc against this symbol in a read-only
	 section; only those force a copy reloc.  */
      eh = (struct elf_x86_64_link_hash_entry *) h;
      for (p = eh->dyn_relocs; p != NULL; p = p->next)
	{
	  s = p->sec->output_section;
	  if (s != NULL && (s->flags & SEC_READONLY) != 0)
	    break;
	}

      /* If we didn't find any dynamic relocs in read-only sections, then
	 we'll be keeping the dynamic relocs and avoiding the copy reloc.  */
      if (p == NULL)
	{
	  h->non_got_ref = 0;
	  return TRUE;
	}
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  htab = elf_x86_64_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
     to copy the initial value out of the dynamic object and into the
     runtime process image.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      const struct elf_backend_data *bed;
      bed = get_elf_backend_data (info->output_bfd);
      htab->srelbss->size += bed->s->sizeof_rela;
      h->needs_copy = 1;
    }

  /* Reserve room for the copied variable in .dynbss.  */
  s = htab->sdynbss;

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
3006
3007 /* Allocate space in .plt, .got and associated reloc sections for
3008 dynamic relocs. */
3009
3010 static bfd_boolean
3011 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
3012 {
3013 struct bfd_link_info *info;
3014 struct elf_x86_64_link_hash_table *htab;
3015 struct elf_x86_64_link_hash_entry *eh;
3016 struct elf_dyn_relocs *p;
3017 const struct elf_backend_data *bed;
3018 unsigned int plt_entry_size;
3019 bfd_boolean resolved_to_zero;
3020
3021 if (h->root.type == bfd_link_hash_indirect)
3022 return TRUE;
3023
3024 eh = (struct elf_x86_64_link_hash_entry *) h;
3025
3026 info = (struct bfd_link_info *) inf;
3027 htab = elf_x86_64_hash_table (info);
3028 if (htab == NULL)
3029 return FALSE;
3030 bed = get_elf_backend_data (info->output_bfd);
3031 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3032
3033 resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3034 eh->has_got_reloc,
3035 eh);
3036
3037 /* We can't use the GOT PLT if pointer equality is needed since
3038 finish_dynamic_symbol won't clear symbol value and the dynamic
3039 linker won't update the GOT slot. We will get into an infinite
3040 loop at run-time. */
3041 if (htab->plt_got != NULL
3042 && h->type != STT_GNU_IFUNC
3043 && !h->pointer_equality_needed
3044 && h->plt.refcount > 0
3045 && h->got.refcount > 0)
3046 {
3047 /* Don't use the regular PLT if there are both GOT and GOTPLT
	 relocations.  */
3049 h->plt.offset = (bfd_vma) -1;
3050
3051 /* Use the GOT PLT. */
3052 eh->plt_got.refcount = 1;
3053 }
3054
3055 /* Clear the reference count of function pointer relocations if
3056 symbol isn't a normal function. */
3057 if (h->type != STT_FUNC)
3058 eh->func_pointer_refcount = 0;
3059
3060 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
3061 here if it is defined and referenced in a non-shared object. */
3062 if (h->type == STT_GNU_IFUNC
3063 && h->def_regular)
3064 {
3065 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
3066 &eh->dyn_relocs,
3067 &htab->readonly_dynrelocs_against_ifunc,
3068 plt_entry_size,
3069 plt_entry_size,
3070 GOT_ENTRY_SIZE, TRUE))
3071 {
3072 asection *s = htab->plt_bnd;
3073 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
3074 {
3075 /* Use the .plt.bnd section if it is created. */
3076 eh->plt_bnd.offset = s->size;
3077
3078 /* Make room for this entry in the .plt.bnd section. */
3079 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3080 }
3081
3082 return TRUE;
3083 }
3084 else
3085 return FALSE;
3086 }
3087 /* Don't create the PLT entry if there are only function pointer
3088 relocations which can be resolved at run-time. */
3089 else if (htab->elf.dynamic_sections_created
3090 && (h->plt.refcount > eh->func_pointer_refcount
3091 || eh->plt_got.refcount > 0))
3092 {
3093 bfd_boolean use_plt_got;
3094
3095 /* Clear the reference count of function pointer relocations
3096 if PLT is used. */
3097 eh->func_pointer_refcount = 0;
3098
3099 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
3100 {
3101 /* Don't use the regular PLT for DF_BIND_NOW. */
3102 h->plt.offset = (bfd_vma) -1;
3103
3104 /* Use the GOT PLT. */
3105 h->got.refcount = 1;
3106 eh->plt_got.refcount = 1;
3107 }
3108
3109 use_plt_got = eh->plt_got.refcount > 0;
3110
3111 /* Make sure this symbol is output as a dynamic symbol.
3112 Undefined weak syms won't yet be marked as dynamic. */
3113 if (h->dynindx == -1
3114 && !h->forced_local
3115 && !resolved_to_zero)
3116 {
3117 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3118 return FALSE;
3119 }
3120
3121 if (bfd_link_pic (info)
3122 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3123 {
3124 asection *s = htab->elf.splt;
3125 asection *bnd_s = htab->plt_bnd;
3126 asection *got_s = htab->plt_got;
3127
3128 /* If this is the first .plt entry, make room for the special
3129 first entry. The .plt section is used by prelink to undo
3130 prelinking for dynamic relocations. */
3131 if (s->size == 0)
3132 s->size = plt_entry_size;
3133
3134 if (use_plt_got)
3135 eh->plt_got.offset = got_s->size;
3136 else
3137 {
3138 h->plt.offset = s->size;
3139 if (bnd_s)
3140 eh->plt_bnd.offset = bnd_s->size;
3141 }
3142
3143 /* If this symbol is not defined in a regular file, and we are
3144 not generating a shared library, then set the symbol to this
3145 location in the .plt. This is required to make function
3146 pointers compare as equal between the normal executable and
3147 the shared library. */
3148 if (! bfd_link_pic (info)
3149 && !h->def_regular)
3150 {
3151 if (use_plt_got)
3152 {
3153 /* We need to make a call to the entry of the GOT PLT
3154 instead of regular PLT entry. */
3155 h->root.u.def.section = got_s;
3156 h->root.u.def.value = eh->plt_got.offset;
3157 }
3158 else
3159 {
3160 if (bnd_s)
3161 {
3162 /* We need to make a call to the entry of the second
3163 PLT instead of regular PLT entry. */
3164 h->root.u.def.section = bnd_s;
3165 h->root.u.def.value = eh->plt_bnd.offset;
3166 }
3167 else
3168 {
3169 h->root.u.def.section = s;
3170 h->root.u.def.value = h->plt.offset;
3171 }
3172 }
3173 }
3174
3175 /* Make room for this entry. */
3176 if (use_plt_got)
3177 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3178 else
3179 {
3180 s->size += plt_entry_size;
3181 if (bnd_s)
3182 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3183
3184 /* We also need to make an entry in the .got.plt section,
3185 which will be placed in the .got section by the linker
3186 script. */
3187 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
3188
3189 /* There should be no PLT relocation against resolved
3190 undefined weak symbol in executable. */
3191 if (!resolved_to_zero)
3192 {
3193 /* We also need to make an entry in the .rela.plt
3194 section. */
3195 htab->elf.srelplt->size += bed->s->sizeof_rela;
3196 htab->elf.srelplt->reloc_count++;
3197 }
3198 }
3199 }
3200 else
3201 {
3202 eh->plt_got.offset = (bfd_vma) -1;
3203 h->plt.offset = (bfd_vma) -1;
3204 h->needs_plt = 0;
3205 }
3206 }
3207 else
3208 {
3209 eh->plt_got.offset = (bfd_vma) -1;
3210 h->plt.offset = (bfd_vma) -1;
3211 h->needs_plt = 0;
3212 }
3213
3214 eh->tlsdesc_got = (bfd_vma) -1;
3215
3216 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
3217 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
3218 if (h->got.refcount > 0
3219 && bfd_link_executable (info)
3220 && h->dynindx == -1
3221 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
3222 {
3223 h->got.offset = (bfd_vma) -1;
3224 }
3225 else if (h->got.refcount > 0)
3226 {
3227 asection *s;
3228 bfd_boolean dyn;
3229 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
3230
3231 /* Make sure this symbol is output as a dynamic symbol.
3232 Undefined weak syms won't yet be marked as dynamic. */
3233 if (h->dynindx == -1
3234 && !h->forced_local
3235 && !resolved_to_zero)
3236 {
3237 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3238 return FALSE;
3239 }
3240
3241 if (GOT_TLS_GDESC_P (tls_type))
3242 {
3243 eh->tlsdesc_got = htab->elf.sgotplt->size
3244 - elf_x86_64_compute_jump_table_size (htab);
3245 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3246 h->got.offset = (bfd_vma) -2;
3247 }
3248 if (! GOT_TLS_GDESC_P (tls_type)
3249 || GOT_TLS_GD_P (tls_type))
3250 {
3251 s = htab->elf.sgot;
3252 h->got.offset = s->size;
3253 s->size += GOT_ENTRY_SIZE;
3254 if (GOT_TLS_GD_P (tls_type))
3255 s->size += GOT_ENTRY_SIZE;
3256 }
3257 dyn = htab->elf.dynamic_sections_created;
3258 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
3259 and two if global. R_X86_64_GOTTPOFF needs one dynamic
3260 relocation. No dynamic relocation against resolved undefined
3261 weak symbol in executable. */
3262 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
3263 || tls_type == GOT_TLS_IE)
3264 htab->elf.srelgot->size += bed->s->sizeof_rela;
3265 else if (GOT_TLS_GD_P (tls_type))
3266 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
3267 else if (! GOT_TLS_GDESC_P (tls_type)
3268 && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3269 && !resolved_to_zero)
3270 || h->root.type != bfd_link_hash_undefweak)
3271 && (bfd_link_pic (info)
3272 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3273 htab->elf.srelgot->size += bed->s->sizeof_rela;
3274 if (GOT_TLS_GDESC_P (tls_type))
3275 {
3276 htab->elf.srelplt->size += bed->s->sizeof_rela;
3277 htab->tlsdesc_plt = (bfd_vma) -1;
3278 }
3279 }
3280 else
3281 h->got.offset = (bfd_vma) -1;
3282
3283 if (eh->dyn_relocs == NULL)
3284 return TRUE;
3285
3286 /* In the shared -Bsymbolic case, discard space allocated for
3287 dynamic pc-relative relocs against symbols which turn out to be
3288 defined in regular objects. For the normal shared case, discard
3289 space for pc-relative relocs that have become local due to symbol
3290 visibility changes. */
3291
3292 if (bfd_link_pic (info))
3293 {
3294 /* Relocs that use pc_count are those that appear on a call
3295 insn, or certain REL relocs that can generated via assembly.
3296 We want calls to protected symbols to resolve directly to the
3297 function rather than going via the plt. If people want
3298 function pointer comparisons to work as expected then they
3299 should avoid writing weird assembly. */
3300 if (SYMBOL_CALLS_LOCAL (info, h))
3301 {
3302 struct elf_dyn_relocs **pp;
3303
3304 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3305 {
3306 p->count -= p->pc_count;
3307 p->pc_count = 0;
3308 if (p->count == 0)
3309 *pp = p->next;
3310 else
3311 pp = &p->next;
3312 }
3313 }
3314
3315 /* Also discard relocs on undefined weak syms with non-default
3316 visibility or in PIE. */
3317 if (eh->dyn_relocs != NULL)
3318 {
3319 if (h->root.type == bfd_link_hash_undefweak)
3320 {
3321 /* Undefined weak symbol is never bound locally in shared
3322 library. */
3323 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3324 || resolved_to_zero)
3325 eh->dyn_relocs = NULL;
3326 else if (h->dynindx == -1
3327 && ! h->forced_local
3328 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3329 return FALSE;
3330 }
3331 /* For PIE, discard space for pc-relative relocs against
3332 symbols which turn out to need copy relocs. */
3333 else if (bfd_link_executable (info)
3334 && (h->needs_copy || eh->needs_copy)
3335 && h->def_dynamic
3336 && !h->def_regular)
3337 {
3338 struct elf_dyn_relocs **pp;
3339
3340 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3341 {
3342 if (p->pc_count != 0)
3343 *pp = p->next;
3344 else
3345 pp = &p->next;
3346 }
3347 }
3348 }
3349 }
3350 else if (ELIMINATE_COPY_RELOCS)
3351 {
3352 /* For the non-shared case, discard space for relocs against
3353 symbols which turn out to need copy relocs or are not
3354 dynamic. Keep dynamic relocations for run-time function
3355 pointer initialization. */
3356
3357 if ((!h->non_got_ref
3358 || eh->func_pointer_refcount > 0
3359 || (h->root.type == bfd_link_hash_undefweak
3360 && !resolved_to_zero))
3361 && ((h->def_dynamic
3362 && !h->def_regular)
3363 || (htab->elf.dynamic_sections_created
3364 && (h->root.type == bfd_link_hash_undefweak
3365 || h->root.type == bfd_link_hash_undefined))))
3366 {
3367 /* Make sure this symbol is output as a dynamic symbol.
3368 Undefined weak syms won't yet be marked as dynamic. */
3369 if (h->dynindx == -1
3370 && ! h->forced_local
3371 && ! resolved_to_zero
3372 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3373 return FALSE;
3374
3375 /* If that succeeded, we know we'll be keeping all the
3376 relocs. */
3377 if (h->dynindx != -1)
3378 goto keep;
3379 }
3380
3381 eh->dyn_relocs = NULL;
3382 eh->func_pointer_refcount = 0;
3383
3384 keep: ;
3385 }
3386
3387 /* Finally, allocate space. */
3388 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3389 {
3390 asection * sreloc;
3391
3392 sreloc = elf_section_data (p->sec)->sreloc;
3393
3394 BFD_ASSERT (sreloc != NULL);
3395
3396 sreloc->size += p->count * bed->s->sizeof_rela;
3397 }
3398
3399 return TRUE;
3400 }
3401
3402 /* Allocate space in .plt, .got and associated reloc sections for
3403 local dynamic relocs. */
3404
3405 static bfd_boolean
3406 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
3407 {
3408 struct elf_link_hash_entry *h
3409 = (struct elf_link_hash_entry *) *slot;
3410
3411 if (h->type != STT_GNU_IFUNC
3412 || !h->def_regular
3413 || !h->ref_regular
3414 || !h->forced_local
3415 || h->root.type != bfd_link_hash_defined)
3416 abort ();
3417
3418 return elf_x86_64_allocate_dynrelocs (h, inf);
3419 }
3420
3421 /* Find any dynamic relocs that apply to read-only sections. */
3422
3423 static bfd_boolean
3424 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
3425 void * inf)
3426 {
3427 struct elf_x86_64_link_hash_entry *eh;
3428 struct elf_dyn_relocs *p;
3429
3430 /* Skip local IFUNC symbols. */
3431 if (h->forced_local && h->type == STT_GNU_IFUNC)
3432 return TRUE;
3433
3434 eh = (struct elf_x86_64_link_hash_entry *) h;
3435 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3436 {
3437 asection *s = p->sec->output_section;
3438
3439 if (s != NULL && (s->flags & SEC_READONLY) != 0)
3440 {
3441 struct bfd_link_info *info = (struct bfd_link_info *) inf;
3442
3443 info->flags |= DF_TEXTREL;
3444
3445 if ((info->warn_shared_textrel && bfd_link_pic (info))
3446 || info->error_textrel)
3447 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
3448 p->sec->owner, h->root.root.string,
3449 p->sec);
3450
3451 /* Not an error, just cut short the traversal. */
3452 return FALSE;
3453 }
3454 }
3455 return TRUE;
3456 }
3457
/* Convert load via the GOT slot to load immediate.  Scans SEC's
   GOTPCREL-family relocations and rewrites eligible instructions in
   place, decrementing the corresponding GOT reference counts so the
   GOT entries can later be omitted.  Returns FALSE on error.  */

static bfd_boolean
elf_x86_64_convert_load (bfd *abfd, asection *sec,
			 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents;
  struct elf_x86_64_link_hash_table *htab;
  bfd_boolean changed;
  bfd_signed_vma *local_got_refcounts;

  /* Don't even try to convert non-ELF outputs.  */
  if (!is_elf_hash_table (link_info->hash))
    return FALSE;

  /* Nothing to do if there is no need or no output.  */
  if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
      || sec->need_convert_load == 0
      || bfd_is_abs_section (sec->output_section))
    return TRUE;

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;

  /* Load the relocations for this section.  */
  internal_relocs = (_bfd_elf_link_read_relocs
		     (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
		      link_info->keep_memory));
  if (internal_relocs == NULL)
    return FALSE;

  changed = FALSE;
  htab = elf_x86_64_hash_table (link_info);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  /* Get the section contents.  */
  if (elf_section_data (sec)->this_hdr.contents != NULL)
    contents = elf_section_data (sec)->this_hdr.contents;
  else
    {
      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;
    }

  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      unsigned int r_type = ELF32_R_TYPE (irel->r_info);
      unsigned int r_symndx;
      struct elf_link_hash_entry *h;
      bfd_boolean converted;

      /* Only GOTPCREL-family relocations are candidates.  */
      if (r_type != R_X86_64_GOTPCRELX
	  && r_type != R_X86_64_REX_GOTPCRELX
	  && r_type != R_X86_64_GOTPCREL)
	continue;

      /* Resolve the symbol: local symbols go through the local hash
	 table; for globals, chase indirect/warning links.  */
      r_symndx = htab->r_sym (irel->r_info);
      if (r_symndx < symtab_hdr->sh_info)
	h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
					   (const Elf_Internal_Rela *) irel,
					   FALSE);
      else
	{
	  h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      /* STT_GNU_IFUNC must keep GOTPCREL relocations.  */
      if (h != NULL && h->type == STT_GNU_IFUNC)
	continue;

      converted = FALSE;
      if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h,
					  &converted, link_info))
	goto error_return;

      if (converted)
	{
	  changed = converted;
	  /* The instruction no longer references its GOT slot, so
	     drop the appropriate GOT reference count.  */
	  if (h)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else
	    {
	      if (local_got_refcounts != NULL
		  && local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	}
    }

  /* Only free/cache contents we allocated ourselves, never a buffer
     that was already cached in the section header.  */
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    {
      if (!changed && !link_info->keep_memory)
	free (contents);
      else
	{
	  /* Cache the section contents for elf_link_input_bfd.  */
	  elf_section_data (sec)->this_hdr.contents = contents;
	}
    }

  /* Same ownership rule for the relocation buffer: keep it cached if
     we modified anything, otherwise release it.  */
  if (elf_section_data (sec)->relocs != internal_relocs)
    {
      if (!changed)
	free (internal_relocs);
      else
	elf_section_data (sec)->relocs = internal_relocs;
    }

  return TRUE;

 error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);
  return FALSE;
}
3587
/* Set the sizes of the dynamic sections.  This is the backend's
   size_dynamic_sections hook: it lays out GOT/PLT/TLS entries for
   local and global symbols, allocates section contents, and adds the
   required .dynamic entries.  Returns FALSE on error.  */

static bfd_boolean
elf_x86_64_size_dynamic_sections (bfd *output_bfd,
				  struct bfd_link_info *info)
{
  struct elf_x86_64_link_hash_table *htab;
  bfd *dynobj;
  asection *s;
  bfd_boolean relocs;
  bfd *ibfd;
  const struct elf_backend_data *bed;

  htab = elf_x86_64_hash_table (info);
  if (htab == NULL)
    return FALSE;
  bed = get_elf_backend_data (output_bfd);

  dynobj = htab->elf.dynobj;
  if (dynobj == NULL)
    abort ();

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;

      if (! is_x86_64_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  /* Rewrite GOT loads into immediates where possible before
	     GOT sizes are fixed.  */
	  if (!elf_x86_64_convert_load (ibfd, s, info))
	    return FALSE;

	  for (p = (struct elf_dyn_relocs *)
		    (elf_section_data (s)->local_dynrel);
	       p != NULL;
	       p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  srel->size += p->count * bed->s->sizeof_rela;
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0
		      && (info->flags & DF_TEXTREL) == 0)
		    {
		      info->flags |= DF_TEXTREL;
		      if ((info->warn_shared_textrel && bfd_link_pic (info))
			  || info->error_textrel)
			info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
						p->sec->owner, p->sec);
		    }
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = &elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
      s = htab->elf.sgot;
      srel = htab->elf.srelgot;
      /* Walk the per-symbol refcount, TLS-type and TLSDESC arrays in
	 lockstep; refcounts are converted to offsets in place.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  if (*local_got > 0)
	    {
	      if (GOT_TLS_GDESC_P (*local_tls_type))
		{
		  *local_tlsdesc_gotent = htab->elf.sgotplt->size
		    - elf_x86_64_compute_jump_table_size (htab);
		  htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
		  /* -2 marks "entry lives in .got.plt".  */
		  *local_got = (bfd_vma) -2;
		}
	      if (! GOT_TLS_GDESC_P (*local_tls_type)
		  || GOT_TLS_GD_P (*local_tls_type))
		{
		  *local_got = s->size;
		  s->size += GOT_ENTRY_SIZE;
		  if (GOT_TLS_GD_P (*local_tls_type))
		    s->size += GOT_ENTRY_SIZE;
		}
	      if (bfd_link_pic (info)
		  || GOT_TLS_GD_ANY_P (*local_tls_type)
		  || *local_tls_type == GOT_TLS_IE)
		{
		  if (GOT_TLS_GDESC_P (*local_tls_type))
		    {
		      htab->elf.srelplt->size
			+= bed->s->sizeof_rela;
		      htab->tlsdesc_plt = (bfd_vma) -1;
		    }
		  if (! GOT_TLS_GDESC_P (*local_tls_type)
		      || GOT_TLS_GD_P (*local_tls_type))
		    srel->size += bed->s->sizeof_rela;
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ld_got.refcount > 0)
    {
      /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
	 relocs.  */
      htab->tls_ld_got.offset = htab->elf.sgot->size;
      htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
      htab->elf.srelgot->size += bed->s->sizeof_rela;
    }
  else
    htab->tls_ld_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
			  info);

  /* Allocate .plt and .got entries, and space for local symbols.  */
  htab_traverse (htab->loc_hash_table,
		 elf_x86_64_allocate_local_dynrelocs,
		 info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.

     PR ld/13302: We start next_irelative_index at the end of .rela.plt
     so that R_X86_64_IRELATIVE entries come last.  */
  if (htab->elf.srelplt)
    {
      htab->sgotplt_jump_table_size
	= elf_x86_64_compute_jump_table_size (htab);
      htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
    }
  else if (htab->elf.irelplt)
    htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;

  if (htab->tlsdesc_plt)
    {
      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if ((info->flags & DF_BIND_NOW))
	htab->tlsdesc_plt = 0;
      else
	{
	  htab->tlsdesc_got = htab->elf.sgot->size;
	  htab->elf.sgot->size += GOT_ENTRY_SIZE;
	  /* Reserve room for the initial entry.
	     FIXME: we could probably do away with it in this case.  */
	  if (htab->elf.splt->size == 0)
	    htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
	  htab->tlsdesc_plt = htab->elf.splt->size;
	  htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
	}
    }

  if (htab->elf.sgotplt)
    {
      /* Don't allocate .got.plt section if there are no GOT nor PLT
	 entries and there is no reference to _GLOBAL_OFFSET_TABLE_.  */
      if ((htab->elf.hgot == NULL
	   || !htab->elf.hgot->ref_regular_nonweak)
	  && (htab->elf.sgotplt->size
	      == get_elf_backend_data (output_bfd)->got_header_size)
	  && (htab->elf.splt == NULL
	      || htab->elf.splt->size == 0)
	  && (htab->elf.sgot == NULL
	      || htab->elf.sgot->size == 0)
	  && (htab->elf.iplt == NULL
	      || htab->elf.iplt->size == 0)
	  && (htab->elf.igotplt == NULL
	      || htab->elf.igotplt->size == 0))
	htab->elf.sgotplt->size = 0;
    }

  /* Size the .eh_frame fragment covering the PLT, if one exists.  */
  if (htab->plt_eh_frame != NULL
      && htab->elf.splt != NULL
      && htab->elf.splt->size != 0
      && !bfd_is_abs_section (htab->elf.splt->output_section)
      && _bfd_elf_eh_frame_present (info))
    {
      const struct elf_x86_64_backend_data *arch_data
	= get_elf_x86_64_arch_data (bed);
      htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
    }

  /* We now have determined the sizes of the various dynamic sections.
     Allocate memory for them.  */
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      if (s == htab->elf.splt
	  || s == htab->elf.sgot
	  || s == htab->elf.sgotplt
	  || s == htab->elf.iplt
	  || s == htab->elf.igotplt
	  || s == htab->plt_bnd
	  || s == htab->plt_got
	  || s == htab->plt_eh_frame
	  || s == htab->sdynbss)
	{
	  /* Strip this section if we don't need it; see the
	     comment below.  */
	}
      else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
	{
	  if (s->size != 0 && s != htab->elf.srelplt)
	    relocs = TRUE;

	  /* We use the reloc_count field as a counter if we need
	     to copy relocs into the output file.  */
	  if (s != htab->elf.srelplt)
	    s->reloc_count = 0;
	}
      else
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rela.bss and
	     .rela.plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */

	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  We use bfd_zalloc
	 here in case unused entries are not reclaimed before the
	 section's contents are written out.  This should not happen,
	 but this way if it does, we get a R_X86_64_NONE reloc instead
	 of garbage.  */
      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  /* Fill in the PLT .eh_frame template and patch its FDE length.  */
  if (htab->plt_eh_frame != NULL
      && htab->plt_eh_frame->contents != NULL)
    {
      const struct elf_x86_64_backend_data *arch_data
	= get_elf_x86_64_arch_data (bed);

      memcpy (htab->plt_eh_frame->contents,
	      arch_data->eh_frame_plt, htab->plt_eh_frame->size);
      bfd_put_32 (dynobj, htab->elf.splt->size,
		  htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
    }

  if (htab->elf.dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf_x86_64_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (htab->elf.splt->size != 0)
	{
	  /* DT_PLTGOT is used by prelink even if there is no PLT
	     relocation.  */
	  if (!add_dynamic_entry (DT_PLTGOT, 0))
	    return FALSE;

	  if (htab->elf.srelplt->size != 0)
	    {
	      if (!add_dynamic_entry (DT_PLTRELSZ, 0)
		  || !add_dynamic_entry (DT_PLTREL, DT_RELA)
		  || !add_dynamic_entry (DT_JMPREL, 0))
		return FALSE;
	    }

	  if (htab->tlsdesc_plt
	      && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
		  || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (!add_dynamic_entry (DT_RELA, 0)
	      || !add_dynamic_entry (DT_RELASZ, 0)
	      || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
	    return FALSE;

	  /* If any dynamic relocs apply to a read-only section,
	     then we need a DT_TEXTREL entry.  */
	  if ((info->flags & DF_TEXTREL) == 0)
	    elf_link_hash_traverse (&htab->elf,
				    elf_x86_64_readonly_dynrelocs,
				    info);

	  if ((info->flags & DF_TEXTREL) != 0)
	    {
	      if (htab->readonly_dynrelocs_against_ifunc)
		{
		  info->callbacks->einfo
		    (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}

	      if (!add_dynamic_entry (DT_TEXTREL, 0))
		return FALSE;
	    }
	}
    }
#undef add_dynamic_entry

  return TRUE;
}
3949
3950 static bfd_boolean
3951 elf_x86_64_always_size_sections (bfd *output_bfd,
3952 struct bfd_link_info *info)
3953 {
3954 asection *tls_sec = elf_hash_table (info)->tls_sec;
3955
3956 if (tls_sec)
3957 {
3958 struct elf_link_hash_entry *tlsbase;
3959
3960 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3961 "_TLS_MODULE_BASE_",
3962 FALSE, FALSE, FALSE);
3963
3964 if (tlsbase && tlsbase->type == STT_TLS)
3965 {
3966 struct elf_x86_64_link_hash_table *htab;
3967 struct bfd_link_hash_entry *bh = NULL;
3968 const struct elf_backend_data *bed
3969 = get_elf_backend_data (output_bfd);
3970
3971 htab = elf_x86_64_hash_table (info);
3972 if (htab == NULL)
3973 return FALSE;
3974
3975 if (!(_bfd_generic_link_add_one_symbol
3976 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3977 tls_sec, 0, NULL, FALSE,
3978 bed->collect, &bh)))
3979 return FALSE;
3980
3981 htab->tls_module_base = bh;
3982
3983 tlsbase = (struct elf_link_hash_entry *)bh;
3984 tlsbase->def_regular = 1;
3985 tlsbase->other = STV_HIDDEN;
3986 tlsbase->root.linker_def = 1;
3987 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3988 }
3989 }
3990
3991 return TRUE;
3992 }
3993
3994 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3995 executables. Rather than setting it to the beginning of the TLS
3996 section, we have to set it to the end. This function may be called
3997 multiple times, it is idempotent. */
3998
3999 static void
4000 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
4001 {
4002 struct elf_x86_64_link_hash_table *htab;
4003 struct bfd_link_hash_entry *base;
4004
4005 if (!bfd_link_executable (info))
4006 return;
4007
4008 htab = elf_x86_64_hash_table (info);
4009 if (htab == NULL)
4010 return;
4011
4012 base = htab->tls_module_base;
4013 if (base == NULL)
4014 return;
4015
4016 base->u.def.value = htab->elf.tls_size;
4017 }
4018
4019 /* Return the base VMA address which should be subtracted from real addresses
4020 when resolving @dtpoff relocation.
4021 This is PT_TLS segment p_vaddr. */
4022
4023 static bfd_vma
4024 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
4025 {
4026 /* If tls_sec is NULL, we should have signalled an error already. */
4027 if (elf_hash_table (info)->tls_sec == NULL)
4028 return 0;
4029 return elf_hash_table (info)->tls_sec->vma;
4030 }
4031
4032 /* Return the relocation value for @tpoff relocation
4033 if STT_TLS virtual address is ADDRESS. */
4034
4035 static bfd_vma
4036 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
4037 {
4038 struct elf_link_hash_table *htab = elf_hash_table (info);
4039 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
4040 bfd_vma static_tls_size;
4041
4042 /* If tls_segment is NULL, we should have signalled an error already. */
4043 if (htab->tls_sec == NULL)
4044 return 0;
4045
4046 /* Consider special static TLS alignment requirements. */
4047 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
4048 return address - static_tls_size - htab->tls_sec->vma;
4049 }
4050
4051 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
4052 branch? */
4053
4054 static bfd_boolean
4055 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
4056 {
4057 /* Opcode Instruction
4058 0xe8 call
4059 0xe9 jump
4060 0x0f 0x8x conditional jump */
4061 return ((offset > 0
4062 && (contents [offset - 1] == 0xe8
4063 || contents [offset - 1] == 0xe9))
4064 || (offset > 1
4065 && contents [offset - 2] == 0x0f
4066 && (contents [offset - 1] & 0xf0) == 0x80));
4067 }
4068
4069 /* Relocate an x86_64 ELF section. */
4070
4071 static bfd_boolean
4072 elf_x86_64_relocate_section (bfd *output_bfd,
4073 struct bfd_link_info *info,
4074 bfd *input_bfd,
4075 asection *input_section,
4076 bfd_byte *contents,
4077 Elf_Internal_Rela *relocs,
4078 Elf_Internal_Sym *local_syms,
4079 asection **local_sections)
4080 {
4081 struct elf_x86_64_link_hash_table *htab;
4082 Elf_Internal_Shdr *symtab_hdr;
4083 struct elf_link_hash_entry **sym_hashes;
4084 bfd_vma *local_got_offsets;
4085 bfd_vma *local_tlsdesc_gotents;
4086 Elf_Internal_Rela *rel;
4087 Elf_Internal_Rela *wrel;
4088 Elf_Internal_Rela *relend;
4089 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
4090
4091 BFD_ASSERT (is_x86_64_elf (input_bfd));
4092
4093 /* Skip if check_relocs failed. */
4094 if (input_section->check_relocs_failed)
4095 return FALSE;
4096
4097 htab = elf_x86_64_hash_table (info);
4098 if (htab == NULL)
4099 return FALSE;
4100 symtab_hdr = &elf_symtab_hdr (input_bfd);
4101 sym_hashes = elf_sym_hashes (input_bfd);
4102 local_got_offsets = elf_local_got_offsets (input_bfd);
4103 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
4104
4105 elf_x86_64_set_tls_module_base (info);
4106
4107 rel = wrel = relocs;
4108 relend = relocs + input_section->reloc_count;
4109 for (; rel < relend; wrel++, rel++)
4110 {
4111 unsigned int r_type;
4112 reloc_howto_type *howto;
4113 unsigned long r_symndx;
4114 struct elf_link_hash_entry *h;
4115 struct elf_x86_64_link_hash_entry *eh;
4116 Elf_Internal_Sym *sym;
4117 asection *sec;
4118 bfd_vma off, offplt, plt_offset;
4119 bfd_vma relocation;
4120 bfd_boolean unresolved_reloc;
4121 bfd_reloc_status_type r;
4122 int tls_type;
4123 asection *base_got, *resolved_plt;
4124 bfd_vma st_size;
4125 bfd_boolean resolved_to_zero;
4126
4127 r_type = ELF32_R_TYPE (rel->r_info);
4128 if (r_type == (int) R_X86_64_GNU_VTINHERIT
4129 || r_type == (int) R_X86_64_GNU_VTENTRY)
4130 {
4131 if (wrel != rel)
4132 *wrel = *rel;
4133 continue;
4134 }
4135
4136 if (r_type >= (int) R_X86_64_standard)
4137 {
4138 (*_bfd_error_handler)
4139 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4140 input_bfd, input_section, r_type);
4141 bfd_set_error (bfd_error_bad_value);
4142 return FALSE;
4143 }
4144
4145 if (r_type != (int) R_X86_64_32
4146 || ABI_64_P (output_bfd))
4147 howto = x86_64_elf_howto_table + r_type;
4148 else
4149 howto = (x86_64_elf_howto_table
4150 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
4151 r_symndx = htab->r_sym (rel->r_info);
4152 h = NULL;
4153 sym = NULL;
4154 sec = NULL;
4155 unresolved_reloc = FALSE;
4156 if (r_symndx < symtab_hdr->sh_info)
4157 {
4158 sym = local_syms + r_symndx;
4159 sec = local_sections[r_symndx];
4160
4161 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
4162 &sec, rel);
4163 st_size = sym->st_size;
4164
4165 /* Relocate against local STT_GNU_IFUNC symbol. */
4166 if (!bfd_link_relocatable (info)
4167 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4168 {
4169 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
4170 rel, FALSE);
4171 if (h == NULL)
4172 abort ();
4173
4174 /* Set STT_GNU_IFUNC symbol value. */
4175 h->root.u.def.value = sym->st_value;
4176 h->root.u.def.section = sec;
4177 }
4178 }
4179 else
4180 {
4181 bfd_boolean warned ATTRIBUTE_UNUSED;
4182 bfd_boolean ignored ATTRIBUTE_UNUSED;
4183
4184 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4185 r_symndx, symtab_hdr, sym_hashes,
4186 h, sec, relocation,
4187 unresolved_reloc, warned, ignored);
4188 st_size = h->size;
4189 }
4190
4191 if (sec != NULL && discarded_section (sec))
4192 {
4193 _bfd_clear_contents (howto, input_bfd, input_section,
4194 contents + rel->r_offset);
4195 wrel->r_offset = rel->r_offset;
4196 wrel->r_info = 0;
4197 wrel->r_addend = 0;
4198
 4199 /* For ld -r, remove relocations in debug sections against
 4200 sections defined in discarded sections. Not done for
 4201 eh_frame, as the editing code expects it to be present. */
4202 if (bfd_link_relocatable (info)
4203 && (input_section->flags & SEC_DEBUGGING))
4204 wrel--;
4205
4206 continue;
4207 }
4208
4209 if (bfd_link_relocatable (info))
4210 {
4211 if (wrel != rel)
4212 *wrel = *rel;
4213 continue;
4214 }
4215
4216 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
4217 {
4218 if (r_type == R_X86_64_64)
4219 {
4220 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
4221 zero-extend it to 64bit if addend is zero. */
4222 r_type = R_X86_64_32;
4223 memset (contents + rel->r_offset + 4, 0, 4);
4224 }
4225 else if (r_type == R_X86_64_SIZE64)
4226 {
4227 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4228 zero-extend it to 64bit if addend is zero. */
4229 r_type = R_X86_64_SIZE32;
4230 memset (contents + rel->r_offset + 4, 0, 4);
4231 }
4232 }
4233
4234 eh = (struct elf_x86_64_link_hash_entry *) h;
4235
4236 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4237 it here if it is defined in a non-shared object. */
4238 if (h != NULL
4239 && h->type == STT_GNU_IFUNC
4240 && h->def_regular)
4241 {
4242 bfd_vma plt_index;
4243 const char *name;
4244
4245 if ((input_section->flags & SEC_ALLOC) == 0)
4246 {
4247 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4248 sections because such sections are not SEC_ALLOC and
4249 thus ld.so will not process them. */
4250 if ((input_section->flags & SEC_DEBUGGING) != 0)
4251 continue;
4252 abort ();
4253 }
4254
4255 switch (r_type)
4256 {
4257 default:
4258 break;
4259
4260 case R_X86_64_GOTPCREL:
4261 case R_X86_64_GOTPCRELX:
4262 case R_X86_64_REX_GOTPCRELX:
4263 case R_X86_64_GOTPCREL64:
4264 base_got = htab->elf.sgot;
4265 off = h->got.offset;
4266
4267 if (base_got == NULL)
4268 abort ();
4269
4270 if (off == (bfd_vma) -1)
4271 {
4272 /* We can't use h->got.offset here to save state, or
4273 even just remember the offset, as finish_dynamic_symbol
4274 would use that as offset into .got. */
4275
4276 if (h->plt.offset == (bfd_vma) -1)
4277 abort ();
4278
4279 if (htab->elf.splt != NULL)
4280 {
4281 plt_index = h->plt.offset / plt_entry_size - 1;
4282 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4283 base_got = htab->elf.sgotplt;
4284 }
4285 else
4286 {
4287 plt_index = h->plt.offset / plt_entry_size;
4288 off = plt_index * GOT_ENTRY_SIZE;
4289 base_got = htab->elf.igotplt;
4290 }
4291
4292 if (h->dynindx == -1
4293 || h->forced_local
4294 || info->symbolic)
4295 {
 4296 /* This references the local definition. We must
4297 initialize this entry in the global offset table.
4298 Since the offset must always be a multiple of 8,
4299 we use the least significant bit to record
4300 whether we have initialized it already.
4301
4302 When doing a dynamic link, we create a .rela.got
4303 relocation entry to initialize the value. This
4304 is done in the finish_dynamic_symbol routine. */
4305 if ((off & 1) != 0)
4306 off &= ~1;
4307 else
4308 {
4309 bfd_put_64 (output_bfd, relocation,
4310 base_got->contents + off);
4311 /* Note that this is harmless for the GOTPLT64
4312 case, as -1 | 1 still is -1. */
4313 h->got.offset |= 1;
4314 }
4315 }
4316 }
4317
4318 relocation = (base_got->output_section->vma
4319 + base_got->output_offset + off);
4320
4321 goto do_relocation;
4322 }
4323
4324 if (h->plt.offset == (bfd_vma) -1)
4325 {
4326 /* Handle static pointers of STT_GNU_IFUNC symbols. */
4327 if (r_type == htab->pointer_r_type
4328 && (input_section->flags & SEC_CODE) == 0)
4329 goto do_ifunc_pointer;
4330 goto bad_ifunc_reloc;
4331 }
4332
4333 /* STT_GNU_IFUNC symbol must go through PLT. */
4334 if (htab->elf.splt != NULL)
4335 {
4336 if (htab->plt_bnd != NULL)
4337 {
4338 resolved_plt = htab->plt_bnd;
4339 plt_offset = eh->plt_bnd.offset;
4340 }
4341 else
4342 {
4343 resolved_plt = htab->elf.splt;
4344 plt_offset = h->plt.offset;
4345 }
4346 }
4347 else
4348 {
4349 resolved_plt = htab->elf.iplt;
4350 plt_offset = h->plt.offset;
4351 }
4352
4353 relocation = (resolved_plt->output_section->vma
4354 + resolved_plt->output_offset + plt_offset);
4355
4356 switch (r_type)
4357 {
4358 default:
4359 bad_ifunc_reloc:
4360 if (h->root.root.string)
4361 name = h->root.root.string;
4362 else
4363 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4364 NULL);
4365 (*_bfd_error_handler)
4366 (_("%B: relocation %s against STT_GNU_IFUNC "
4367 "symbol `%s' isn't supported"), input_bfd,
4368 howto->name, name);
4369 bfd_set_error (bfd_error_bad_value);
4370 return FALSE;
4371
4372 case R_X86_64_32S:
4373 if (bfd_link_pic (info))
4374 abort ();
4375 goto do_relocation;
4376
4377 case R_X86_64_32:
4378 if (ABI_64_P (output_bfd))
4379 goto do_relocation;
4380 /* FALLTHROUGH */
4381 case R_X86_64_64:
4382 do_ifunc_pointer:
4383 if (rel->r_addend != 0)
4384 {
4385 if (h->root.root.string)
4386 name = h->root.root.string;
4387 else
4388 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4389 sym, NULL);
4390 (*_bfd_error_handler)
4391 (_("%B: relocation %s against STT_GNU_IFUNC "
4392 "symbol `%s' has non-zero addend: %d"),
4393 input_bfd, howto->name, name, rel->r_addend);
4394 bfd_set_error (bfd_error_bad_value);
4395 return FALSE;
4396 }
4397
 4398 /* Generate dynamic relocation only when there is a
4399 non-GOT reference in a shared object or there is no
4400 PLT. */
4401 if ((bfd_link_pic (info) && h->non_got_ref)
4402 || h->plt.offset == (bfd_vma) -1)
4403 {
4404 Elf_Internal_Rela outrel;
4405 asection *sreloc;
4406
4407 /* Need a dynamic relocation to get the real function
4408 address. */
4409 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4410 info,
4411 input_section,
4412 rel->r_offset);
4413 if (outrel.r_offset == (bfd_vma) -1
4414 || outrel.r_offset == (bfd_vma) -2)
4415 abort ();
4416
4417 outrel.r_offset += (input_section->output_section->vma
4418 + input_section->output_offset);
4419
4420 if (h->dynindx == -1
4421 || h->forced_local
4422 || bfd_link_executable (info))
4423 {
4424 /* This symbol is resolved locally. */
4425 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4426 outrel.r_addend = (h->root.u.def.value
4427 + h->root.u.def.section->output_section->vma
4428 + h->root.u.def.section->output_offset);
4429 }
4430 else
4431 {
4432 outrel.r_info = htab->r_info (h->dynindx, r_type);
4433 outrel.r_addend = 0;
4434 }
4435
4436 /* Dynamic relocations are stored in
4437 1. .rela.ifunc section in PIC object.
4438 2. .rela.got section in dynamic executable.
4439 3. .rela.iplt section in static executable. */
4440 if (bfd_link_pic (info))
4441 sreloc = htab->elf.irelifunc;
4442 else if (htab->elf.splt != NULL)
4443 sreloc = htab->elf.srelgot;
4444 else
4445 sreloc = htab->elf.irelplt;
4446 elf_append_rela (output_bfd, sreloc, &outrel);
4447
4448 /* If this reloc is against an external symbol, we
4449 do not want to fiddle with the addend. Otherwise,
4450 we need to include the symbol value so that it
4451 becomes an addend for the dynamic reloc. For an
4452 internal symbol, we have updated addend. */
4453 continue;
4454 }
4455 /* FALLTHROUGH */
4456 case R_X86_64_PC32:
4457 case R_X86_64_PC32_BND:
4458 case R_X86_64_PC64:
4459 case R_X86_64_PLT32:
4460 case R_X86_64_PLT32_BND:
4461 goto do_relocation;
4462 }
4463 }
4464
4465 resolved_to_zero = (eh != NULL
4466 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
4467 eh->has_got_reloc,
4468 eh));
4469
4470 /* When generating a shared object, the relocations handled here are
4471 copied into the output file to be resolved at run time. */
4472 switch (r_type)
4473 {
4474 case R_X86_64_GOT32:
4475 case R_X86_64_GOT64:
4476 /* Relocation is to the entry for this symbol in the global
4477 offset table. */
4478 case R_X86_64_GOTPCREL:
4479 case R_X86_64_GOTPCRELX:
4480 case R_X86_64_REX_GOTPCRELX:
4481 case R_X86_64_GOTPCREL64:
4482 /* Use global offset table entry as symbol value. */
4483 case R_X86_64_GOTPLT64:
 4484 /* This is obsolete and treated the same as GOT64. */
4485 base_got = htab->elf.sgot;
4486
4487 if (htab->elf.sgot == NULL)
4488 abort ();
4489
4490 if (h != NULL)
4491 {
4492 bfd_boolean dyn;
4493
4494 off = h->got.offset;
4495 if (h->needs_plt
4496 && h->plt.offset != (bfd_vma)-1
4497 && off == (bfd_vma)-1)
4498 {
4499 /* We can't use h->got.offset here to save
4500 state, or even just remember the offset, as
4501 finish_dynamic_symbol would use that as offset into
4502 .got. */
4503 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4504 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4505 base_got = htab->elf.sgotplt;
4506 }
4507
4508 dyn = htab->elf.dynamic_sections_created;
4509
4510 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4511 || (bfd_link_pic (info)
4512 && SYMBOL_REFERENCES_LOCAL (info, h))
4513 || (ELF_ST_VISIBILITY (h->other)
4514 && h->root.type == bfd_link_hash_undefweak))
4515 {
4516 /* This is actually a static link, or it is a -Bsymbolic
4517 link and the symbol is defined locally, or the symbol
4518 was forced to be local because of a version file. We
4519 must initialize this entry in the global offset table.
4520 Since the offset must always be a multiple of 8, we
4521 use the least significant bit to record whether we
4522 have initialized it already.
4523
4524 When doing a dynamic link, we create a .rela.got
4525 relocation entry to initialize the value. This is
4526 done in the finish_dynamic_symbol routine. */
4527 if ((off & 1) != 0)
4528 off &= ~1;
4529 else
4530 {
4531 bfd_put_64 (output_bfd, relocation,
4532 base_got->contents + off);
4533 /* Note that this is harmless for the GOTPLT64 case,
4534 as -1 | 1 still is -1. */
4535 h->got.offset |= 1;
4536 }
4537 }
4538 else
4539 unresolved_reloc = FALSE;
4540 }
4541 else
4542 {
4543 if (local_got_offsets == NULL)
4544 abort ();
4545
4546 off = local_got_offsets[r_symndx];
4547
4548 /* The offset must always be a multiple of 8. We use
4549 the least significant bit to record whether we have
4550 already generated the necessary reloc. */
4551 if ((off & 1) != 0)
4552 off &= ~1;
4553 else
4554 {
4555 bfd_put_64 (output_bfd, relocation,
4556 base_got->contents + off);
4557
4558 if (bfd_link_pic (info))
4559 {
4560 asection *s;
4561 Elf_Internal_Rela outrel;
4562
4563 /* We need to generate a R_X86_64_RELATIVE reloc
4564 for the dynamic linker. */
4565 s = htab->elf.srelgot;
4566 if (s == NULL)
4567 abort ();
4568
4569 outrel.r_offset = (base_got->output_section->vma
4570 + base_got->output_offset
4571 + off);
4572 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4573 outrel.r_addend = relocation;
4574 elf_append_rela (output_bfd, s, &outrel);
4575 }
4576
4577 local_got_offsets[r_symndx] |= 1;
4578 }
4579 }
4580
4581 if (off >= (bfd_vma) -2)
4582 abort ();
4583
4584 relocation = base_got->output_section->vma
4585 + base_got->output_offset + off;
4586 if (r_type != R_X86_64_GOTPCREL
4587 && r_type != R_X86_64_GOTPCRELX
4588 && r_type != R_X86_64_REX_GOTPCRELX
4589 && r_type != R_X86_64_GOTPCREL64)
4590 relocation -= htab->elf.sgotplt->output_section->vma
4591 - htab->elf.sgotplt->output_offset;
4592
4593 break;
4594
4595 case R_X86_64_GOTOFF64:
4596 /* Relocation is relative to the start of the global offset
4597 table. */
4598
4599 /* Check to make sure it isn't a protected function or data
4600 symbol for shared library since it may not be local when
4601 used as function address or with copy relocation. We also
4602 need to make sure that a symbol is referenced locally. */
4603 if (bfd_link_pic (info) && h)
4604 {
4605 if (!h->def_regular)
4606 {
4607 const char *v;
4608
4609 switch (ELF_ST_VISIBILITY (h->other))
4610 {
4611 case STV_HIDDEN:
4612 v = _("hidden symbol");
4613 break;
4614 case STV_INTERNAL:
4615 v = _("internal symbol");
4616 break;
4617 case STV_PROTECTED:
4618 v = _("protected symbol");
4619 break;
4620 default:
4621 v = _("symbol");
4622 break;
4623 }
4624
4625 (*_bfd_error_handler)
4626 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4627 input_bfd, v, h->root.root.string);
4628 bfd_set_error (bfd_error_bad_value);
4629 return FALSE;
4630 }
4631 else if (!bfd_link_executable (info)
4632 && !SYMBOL_REFERENCES_LOCAL (info, h)
4633 && (h->type == STT_FUNC
4634 || h->type == STT_OBJECT)
4635 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4636 {
4637 (*_bfd_error_handler)
4638 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4639 input_bfd,
4640 h->type == STT_FUNC ? "function" : "data",
4641 h->root.root.string);
4642 bfd_set_error (bfd_error_bad_value);
4643 return FALSE;
4644 }
4645 }
4646
4647 /* Note that sgot is not involved in this
4648 calculation. We always want the start of .got.plt. If we
4649 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4650 permitted by the ABI, we might have to change this
4651 calculation. */
4652 relocation -= htab->elf.sgotplt->output_section->vma
4653 + htab->elf.sgotplt->output_offset;
4654 break;
4655
4656 case R_X86_64_GOTPC32:
4657 case R_X86_64_GOTPC64:
4658 /* Use global offset table as symbol value. */
4659 relocation = htab->elf.sgotplt->output_section->vma
4660 + htab->elf.sgotplt->output_offset;
4661 unresolved_reloc = FALSE;
4662 break;
4663
4664 case R_X86_64_PLTOFF64:
4665 /* Relocation is PLT entry relative to GOT. For local
4666 symbols it's the symbol itself relative to GOT. */
4667 if (h != NULL
4668 /* See PLT32 handling. */
4669 && h->plt.offset != (bfd_vma) -1
4670 && htab->elf.splt != NULL)
4671 {
4672 if (htab->plt_bnd != NULL)
4673 {
4674 resolved_plt = htab->plt_bnd;
4675 plt_offset = eh->plt_bnd.offset;
4676 }
4677 else
4678 {
4679 resolved_plt = htab->elf.splt;
4680 plt_offset = h->plt.offset;
4681 }
4682
4683 relocation = (resolved_plt->output_section->vma
4684 + resolved_plt->output_offset
4685 + plt_offset);
4686 unresolved_reloc = FALSE;
4687 }
4688
4689 relocation -= htab->elf.sgotplt->output_section->vma
4690 + htab->elf.sgotplt->output_offset;
4691 break;
4692
4693 case R_X86_64_PLT32:
4694 case R_X86_64_PLT32_BND:
4695 /* Relocation is to the entry for this symbol in the
4696 procedure linkage table. */
4697
4698 /* Resolve a PLT32 reloc against a local symbol directly,
4699 without using the procedure linkage table. */
4700 if (h == NULL)
4701 break;
4702
4703 if ((h->plt.offset == (bfd_vma) -1
4704 && eh->plt_got.offset == (bfd_vma) -1)
4705 || htab->elf.splt == NULL)
4706 {
4707 /* We didn't make a PLT entry for this symbol. This
4708 happens when statically linking PIC code, or when
4709 using -Bsymbolic. */
4710 break;
4711 }
4712
4713 if (h->plt.offset != (bfd_vma) -1)
4714 {
4715 if (htab->plt_bnd != NULL)
4716 {
4717 resolved_plt = htab->plt_bnd;
4718 plt_offset = eh->plt_bnd.offset;
4719 }
4720 else
4721 {
4722 resolved_plt = htab->elf.splt;
4723 plt_offset = h->plt.offset;
4724 }
4725 }
4726 else
4727 {
4728 /* Use the GOT PLT. */
4729 resolved_plt = htab->plt_got;
4730 plt_offset = eh->plt_got.offset;
4731 }
4732
4733 relocation = (resolved_plt->output_section->vma
4734 + resolved_plt->output_offset
4735 + plt_offset);
4736 unresolved_reloc = FALSE;
4737 break;
4738
4739 case R_X86_64_SIZE32:
4740 case R_X86_64_SIZE64:
4741 /* Set to symbol size. */
4742 relocation = st_size;
4743 goto direct;
4744
4745 case R_X86_64_PC8:
4746 case R_X86_64_PC16:
4747 case R_X86_64_PC32:
4748 case R_X86_64_PC32_BND:
4749 /* Don't complain about -fPIC if the symbol is undefined when
4750 building executable unless it is unresolved weak symbol. */
4751 if ((input_section->flags & SEC_ALLOC) != 0
4752 && (input_section->flags & SEC_READONLY) != 0
4753 && h != NULL
4754 && ((bfd_link_executable (info)
4755 && h->root.type == bfd_link_hash_undefweak
4756 && !resolved_to_zero)
4757 || (bfd_link_pic (info)
4758 && !(bfd_link_pie (info)
4759 && h->root.type == bfd_link_hash_undefined))))
4760 {
4761 bfd_boolean fail = FALSE;
4762 bfd_boolean branch
4763 = ((r_type == R_X86_64_PC32
4764 || r_type == R_X86_64_PC32_BND)
4765 && is_32bit_relative_branch (contents, rel->r_offset));
4766
4767 if (SYMBOL_REFERENCES_LOCAL (info, h))
4768 {
4769 /* Symbol is referenced locally. Make sure it is
4770 defined locally or for a branch. */
4771 fail = !h->def_regular && !branch;
4772 }
4773 else if (!(bfd_link_pie (info)
4774 && (h->needs_copy || eh->needs_copy)))
4775 {
4776 /* Symbol doesn't need copy reloc and isn't referenced
4777 locally. We only allow branch to symbol with
4778 non-default visibility. */
4779 fail = (!branch
4780 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4781 }
4782
4783 if (fail)
4784 return elf_x86_64_need_pic (input_bfd, input_section,
4785 h, NULL, NULL, howto);
4786 }
4787 /* Fall through. */
4788
4789 case R_X86_64_8:
4790 case R_X86_64_16:
4791 case R_X86_64_32:
4792 case R_X86_64_PC64:
4793 case R_X86_64_64:
4794 /* FIXME: The ABI says the linker should make sure the value is
4795 the same when it's zeroextended to 64 bit. */
4796
4797 direct:
4798 if ((input_section->flags & SEC_ALLOC) == 0)
4799 break;
4800
4801 /* Don't copy a pc-relative relocation into the output file
4802 if the symbol needs copy reloc or the symbol is undefined
4803 when building executable. Copy dynamic function pointer
4804 relocations. Don't generate dynamic relocations against
4805 resolved undefined weak symbols in PIE. */
4806 if ((bfd_link_pic (info)
4807 && !(bfd_link_pie (info)
4808 && h != NULL
4809 && (h->needs_copy
4810 || eh->needs_copy
4811 || h->root.type == bfd_link_hash_undefined)
4812 && IS_X86_64_PCREL_TYPE (r_type))
4813 && (h == NULL
4814 || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4815 && !resolved_to_zero)
4816 || h->root.type != bfd_link_hash_undefweak))
4817 && ((! IS_X86_64_PCREL_TYPE (r_type)
4818 && r_type != R_X86_64_SIZE32
4819 && r_type != R_X86_64_SIZE64)
4820 || ! SYMBOL_CALLS_LOCAL (info, h)))
4821 || (ELIMINATE_COPY_RELOCS
4822 && !bfd_link_pic (info)
4823 && h != NULL
4824 && h->dynindx != -1
4825 && (!h->non_got_ref
4826 || eh->func_pointer_refcount > 0
4827 || (h->root.type == bfd_link_hash_undefweak
4828 && !resolved_to_zero))
4829 && ((h->def_dynamic && !h->def_regular)
4830 /* Undefined weak symbol is bound locally when
4831 PIC is false. */
4832 || h->root.type == bfd_link_hash_undefined)))
4833 {
4834 Elf_Internal_Rela outrel;
4835 bfd_boolean skip, relocate;
4836 asection *sreloc;
4837
4838 /* When generating a shared object, these relocations
4839 are copied into the output file to be resolved at run
4840 time. */
4841 skip = FALSE;
4842 relocate = FALSE;
4843
4844 outrel.r_offset =
4845 _bfd_elf_section_offset (output_bfd, info, input_section,
4846 rel->r_offset);
4847 if (outrel.r_offset == (bfd_vma) -1)
4848 skip = TRUE;
4849 else if (outrel.r_offset == (bfd_vma) -2)
4850 skip = TRUE, relocate = TRUE;
4851
4852 outrel.r_offset += (input_section->output_section->vma
4853 + input_section->output_offset);
4854
4855 if (skip)
4856 memset (&outrel, 0, sizeof outrel);
4857
4858 /* h->dynindx may be -1 if this symbol was marked to
4859 become local. */
4860 else if (h != NULL
4861 && h->dynindx != -1
4862 && (IS_X86_64_PCREL_TYPE (r_type)
4863 || !(bfd_link_executable (info)
4864 || SYMBOLIC_BIND (info, h))
4865 || ! h->def_regular))
4866 {
4867 outrel.r_info = htab->r_info (h->dynindx, r_type);
4868 outrel.r_addend = rel->r_addend;
4869 }
4870 else
4871 {
4872 /* This symbol is local, or marked to become local.
4873 When relocation overflow check is disabled, we
4874 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4875 if (r_type == htab->pointer_r_type
4876 || (r_type == R_X86_64_32
4877 && info->no_reloc_overflow_check))
4878 {
4879 relocate = TRUE;
4880 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4881 outrel.r_addend = relocation + rel->r_addend;
4882 }
4883 else if (r_type == R_X86_64_64
4884 && !ABI_64_P (output_bfd))
4885 {
4886 relocate = TRUE;
4887 outrel.r_info = htab->r_info (0,
4888 R_X86_64_RELATIVE64);
4889 outrel.r_addend = relocation + rel->r_addend;
4890 /* Check addend overflow. */
4891 if ((outrel.r_addend & 0x80000000)
4892 != (rel->r_addend & 0x80000000))
4893 {
4894 const char *name;
4895 int addend = rel->r_addend;
4896 if (h && h->root.root.string)
4897 name = h->root.root.string;
4898 else
4899 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4900 sym, NULL);
4901 if (addend < 0)
4902 (*_bfd_error_handler)
4903 (_("%B: addend -0x%x in relocation %s against "
4904 "symbol `%s' at 0x%lx in section `%A' is "
4905 "out of range"),
4906 input_bfd, input_section, addend,
4907 howto->name, name,
4908 (unsigned long) rel->r_offset);
4909 else
4910 (*_bfd_error_handler)
4911 (_("%B: addend 0x%x in relocation %s against "
4912 "symbol `%s' at 0x%lx in section `%A' is "
4913 "out of range"),
4914 input_bfd, input_section, addend,
4915 howto->name, name,
4916 (unsigned long) rel->r_offset);
4917 bfd_set_error (bfd_error_bad_value);
4918 return FALSE;
4919 }
4920 }
4921 else
4922 {
4923 long sindx;
4924
4925 if (bfd_is_abs_section (sec))
4926 sindx = 0;
4927 else if (sec == NULL || sec->owner == NULL)
4928 {
4929 bfd_set_error (bfd_error_bad_value);
4930 return FALSE;
4931 }
4932 else
4933 {
4934 asection *osec;
4935
4936 /* We are turning this relocation into one
4937 against a section symbol. It would be
4938 proper to subtract the symbol's value,
4939 osec->vma, from the emitted reloc addend,
4940 but ld.so expects buggy relocs. */
4941 osec = sec->output_section;
4942 sindx = elf_section_data (osec)->dynindx;
4943 if (sindx == 0)
4944 {
4945 asection *oi = htab->elf.text_index_section;
4946 sindx = elf_section_data (oi)->dynindx;
4947 }
4948 BFD_ASSERT (sindx != 0);
4949 }
4950
4951 outrel.r_info = htab->r_info (sindx, r_type);
4952 outrel.r_addend = relocation + rel->r_addend;
4953 }
4954 }
4955
4956 sreloc = elf_section_data (input_section)->sreloc;
4957
4958 if (sreloc == NULL || sreloc->contents == NULL)
4959 {
4960 r = bfd_reloc_notsupported;
4961 goto check_relocation_error;
4962 }
4963
4964 elf_append_rela (output_bfd, sreloc, &outrel);
4965
4966 /* If this reloc is against an external symbol, we do
4967 not want to fiddle with the addend. Otherwise, we
4968 need to include the symbol value so that it becomes
4969 an addend for the dynamic reloc. */
4970 if (! relocate)
4971 continue;
4972 }
4973
4974 break;
4975
4976 case R_X86_64_TLSGD:
4977 case R_X86_64_GOTPC32_TLSDESC:
4978 case R_X86_64_TLSDESC_CALL:
4979 case R_X86_64_GOTTPOFF:
4980 tls_type = GOT_UNKNOWN;
4981 if (h == NULL && local_got_offsets)
4982 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4983 else if (h != NULL)
4984 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4985
4986 if (! elf_x86_64_tls_transition (info, input_bfd,
4987 input_section, contents,
4988 symtab_hdr, sym_hashes,
4989 &r_type, tls_type, rel,
4990 relend, h, r_symndx, TRUE))
4991 return FALSE;
4992
4993 if (r_type == R_X86_64_TPOFF32)
4994 {
4995 bfd_vma roff = rel->r_offset;
4996
4997 BFD_ASSERT (! unresolved_reloc);
4998
4999 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5000 {
5001 /* GD->LE transition. For 64bit, change
5002 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5003 .word 0x6666; rex64; call __tls_get_addr@PLT
5004 or
5005 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5006 .byte 0x66; rex64
5007 call *__tls_get_addr@GOTPCREL(%rip)
5008 which may be converted to
5009 addr32 call __tls_get_addr
5010 into:
5011 movq %fs:0, %rax
5012 leaq foo@tpoff(%rax), %rax
5013 For 32bit, change
5014 leaq foo@tlsgd(%rip), %rdi
5015 .word 0x6666; rex64; call __tls_get_addr@PLT
5016 or
5017 leaq foo@tlsgd(%rip), %rdi
5018 .byte 0x66; rex64
5019 call *__tls_get_addr@GOTPCREL(%rip)
5020 which may be converted to
5021 addr32 call __tls_get_addr
5022 into:
5023 movl %fs:0, %eax
5024 leaq foo@tpoff(%rax), %rax
5025 For largepic, change:
5026 leaq foo@tlsgd(%rip), %rdi
5027 movabsq $__tls_get_addr@pltoff, %rax
5028 addq %r15, %rax
5029 call *%rax
5030 into:
5031 movq %fs:0, %rax
5032 leaq foo@tpoff(%rax), %rax
5033 nopw 0x0(%rax,%rax,1) */
5034 int largepic = 0;
5035 if (ABI_64_P (output_bfd))
5036 {
5037 if (contents[roff + 5] == 0xb8)
5038 {
5039 memcpy (contents + roff - 3,
5040 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
5041 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5042 largepic = 1;
5043 }
5044 else
5045 memcpy (contents + roff - 4,
5046 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5047 16);
5048 }
5049 else
5050 memcpy (contents + roff - 3,
5051 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5052 15);
5053 bfd_put_32 (output_bfd,
5054 elf_x86_64_tpoff (info, relocation),
5055 contents + roff + 8 + largepic);
5056 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
5057 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
5058 rel++;
5059 wrel++;
5060 continue;
5061 }
5062 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5063 {
5064 /* GDesc -> LE transition.
5065 It's originally something like:
5066 leaq x@tlsdesc(%rip), %rax
5067
5068 Change it to:
5069 movl $x@tpoff, %rax. */
5070
5071 unsigned int val, type;
5072
5073 type = bfd_get_8 (input_bfd, contents + roff - 3);
5074 val = bfd_get_8 (input_bfd, contents + roff - 1);
5075 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
5076 contents + roff - 3);
5077 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
5078 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
5079 contents + roff - 1);
5080 bfd_put_32 (output_bfd,
5081 elf_x86_64_tpoff (info, relocation),
5082 contents + roff);
5083 continue;
5084 }
5085 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5086 {
5087 /* GDesc -> LE transition.
5088 It's originally:
5089 call *(%rax)
5090 Turn it into:
5091 xchg %ax,%ax. */
5092 bfd_put_8 (output_bfd, 0x66, contents + roff);
5093 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5094 continue;
5095 }
5096 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
5097 {
5098 /* IE->LE transition:
5099 For 64bit, originally it can be one of:
5100 movq foo@gottpoff(%rip), %reg
5101 addq foo@gottpoff(%rip), %reg
5102 We change it into:
5103 movq $foo, %reg
5104 leaq foo(%reg), %reg
5105 addq $foo, %reg.
5106 For 32bit, originally it can be one of:
5107 movq foo@gottpoff(%rip), %reg
5108 addl foo@gottpoff(%rip), %reg
5109 We change it into:
5110 movq $foo, %reg
5111 leal foo(%reg), %reg
5112 addl $foo, %reg. */
5113
5114 unsigned int val, type, reg;
5115
5116 if (roff >= 3)
5117 val = bfd_get_8 (input_bfd, contents + roff - 3);
5118 else
5119 val = 0;
5120 type = bfd_get_8 (input_bfd, contents + roff - 2);
5121 reg = bfd_get_8 (input_bfd, contents + roff - 1);
5122 reg >>= 3;
5123 if (type == 0x8b)
5124 {
5125 /* movq */
5126 if (val == 0x4c)
5127 bfd_put_8 (output_bfd, 0x49,
5128 contents + roff - 3);
5129 else if (!ABI_64_P (output_bfd) && val == 0x44)
5130 bfd_put_8 (output_bfd, 0x41,
5131 contents + roff - 3);
5132 bfd_put_8 (output_bfd, 0xc7,
5133 contents + roff - 2);
5134 bfd_put_8 (output_bfd, 0xc0 | reg,
5135 contents + roff - 1);
5136 }
5137 else if (reg == 4)
5138 {
5139 /* addq/addl -> addq/addl - addressing with %rsp/%r12
5140 is special */
5141 if (val == 0x4c)
5142 bfd_put_8 (output_bfd, 0x49,
5143 contents + roff - 3);
5144 else if (!ABI_64_P (output_bfd) && val == 0x44)
5145 bfd_put_8 (output_bfd, 0x41,
5146 contents + roff - 3);
5147 bfd_put_8 (output_bfd, 0x81,
5148 contents + roff - 2);
5149 bfd_put_8 (output_bfd, 0xc0 | reg,
5150 contents + roff - 1);
5151 }
5152 else
5153 {
5154 /* addq/addl -> leaq/leal */
5155 if (val == 0x4c)
5156 bfd_put_8 (output_bfd, 0x4d,
5157 contents + roff - 3);
5158 else if (!ABI_64_P (output_bfd) && val == 0x44)
5159 bfd_put_8 (output_bfd, 0x45,
5160 contents + roff - 3);
5161 bfd_put_8 (output_bfd, 0x8d,
5162 contents + roff - 2);
5163 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
5164 contents + roff - 1);
5165 }
5166 bfd_put_32 (output_bfd,
5167 elf_x86_64_tpoff (info, relocation),
5168 contents + roff);
5169 continue;
5170 }
5171 else
5172 BFD_ASSERT (FALSE);
5173 }
5174
5175 if (htab->elf.sgot == NULL)
5176 abort ();
5177
5178 if (h != NULL)
5179 {
5180 off = h->got.offset;
5181 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
5182 }
5183 else
5184 {
5185 if (local_got_offsets == NULL)
5186 abort ();
5187
5188 off = local_got_offsets[r_symndx];
5189 offplt = local_tlsdesc_gotents[r_symndx];
5190 }
5191
5192 if ((off & 1) != 0)
5193 off &= ~1;
5194 else
5195 {
5196 Elf_Internal_Rela outrel;
5197 int dr_type, indx;
5198 asection *sreloc;
5199
5200 if (htab->elf.srelgot == NULL)
5201 abort ();
5202
5203 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5204
5205 if (GOT_TLS_GDESC_P (tls_type))
5206 {
5207 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
5208 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
5209 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
5210 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
5211 + htab->elf.sgotplt->output_offset
5212 + offplt
5213 + htab->sgotplt_jump_table_size);
5214 sreloc = htab->elf.srelplt;
5215 if (indx == 0)
5216 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5217 else
5218 outrel.r_addend = 0;
5219 elf_append_rela (output_bfd, sreloc, &outrel);
5220 }
5221
5222 sreloc = htab->elf.srelgot;
5223
5224 outrel.r_offset = (htab->elf.sgot->output_section->vma
5225 + htab->elf.sgot->output_offset + off);
5226
5227 if (GOT_TLS_GD_P (tls_type))
5228 dr_type = R_X86_64_DTPMOD64;
5229 else if (GOT_TLS_GDESC_P (tls_type))
5230 goto dr_done;
5231 else
5232 dr_type = R_X86_64_TPOFF64;
5233
5234 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
5235 outrel.r_addend = 0;
5236 if ((dr_type == R_X86_64_TPOFF64
5237 || dr_type == R_X86_64_TLSDESC) && indx == 0)
5238 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5239 outrel.r_info = htab->r_info (indx, dr_type);
5240
5241 elf_append_rela (output_bfd, sreloc, &outrel);
5242
5243 if (GOT_TLS_GD_P (tls_type))
5244 {
5245 if (indx == 0)
5246 {
5247 BFD_ASSERT (! unresolved_reloc);
5248 bfd_put_64 (output_bfd,
5249 relocation - elf_x86_64_dtpoff_base (info),
5250 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5251 }
5252 else
5253 {
5254 bfd_put_64 (output_bfd, 0,
5255 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5256 outrel.r_info = htab->r_info (indx,
5257 R_X86_64_DTPOFF64);
5258 outrel.r_offset += GOT_ENTRY_SIZE;
5259 elf_append_rela (output_bfd, sreloc,
5260 &outrel);
5261 }
5262 }
5263
5264 dr_done:
5265 if (h != NULL)
5266 h->got.offset |= 1;
5267 else
5268 local_got_offsets[r_symndx] |= 1;
5269 }
5270
5271 if (off >= (bfd_vma) -2
5272 && ! GOT_TLS_GDESC_P (tls_type))
5273 abort ();
5274 if (r_type == ELF32_R_TYPE (rel->r_info))
5275 {
5276 if (r_type == R_X86_64_GOTPC32_TLSDESC
5277 || r_type == R_X86_64_TLSDESC_CALL)
5278 relocation = htab->elf.sgotplt->output_section->vma
5279 + htab->elf.sgotplt->output_offset
5280 + offplt + htab->sgotplt_jump_table_size;
5281 else
5282 relocation = htab->elf.sgot->output_section->vma
5283 + htab->elf.sgot->output_offset + off;
5284 unresolved_reloc = FALSE;
5285 }
5286 else
5287 {
5288 bfd_vma roff = rel->r_offset;
5289
5290 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5291 {
5292 /* GD->IE transition. For 64bit, change
5293 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5294 .word 0x6666; rex64; call __tls_get_addr@PLT
5295 or
5296 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5297 .byte 0x66; rex64
5298 call *__tls_get_addr@GOTPCREL(%rip
5299 which may be converted to
5300 addr32 call __tls_get_addr
5301 into:
5302 movq %fs:0, %rax
5303 addq foo@gottpoff(%rip), %rax
5304 For 32bit, change
5305 leaq foo@tlsgd(%rip), %rdi
5306 .word 0x6666; rex64; call __tls_get_addr@PLT
5307 or
5308 leaq foo@tlsgd(%rip), %rdi
5309 .byte 0x66; rex64;
5310 call *__tls_get_addr@GOTPCREL(%rip)
5311 which may be converted to
5312 addr32 call __tls_get_addr
5313 into:
5314 movl %fs:0, %eax
5315 addq foo@gottpoff(%rip), %rax
5316 For largepic, change:
5317 leaq foo@tlsgd(%rip), %rdi
5318 movabsq $__tls_get_addr@pltoff, %rax
5319 addq %r15, %rax
5320 call *%rax
5321 into:
5322 movq %fs:0, %rax
5323 addq foo@gottpoff(%rax), %rax
5324 nopw 0x0(%rax,%rax,1) */
5325 int largepic = 0;
5326 if (ABI_64_P (output_bfd))
5327 {
5328 if (contents[roff + 5] == 0xb8)
5329 {
5330 memcpy (contents + roff - 3,
5331 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5332 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5333 largepic = 1;
5334 }
5335 else
5336 memcpy (contents + roff - 4,
5337 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5338 16);
5339 }
5340 else
5341 memcpy (contents + roff - 3,
5342 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5343 15);
5344
5345 relocation = (htab->elf.sgot->output_section->vma
5346 + htab->elf.sgot->output_offset + off
5347 - roff
5348 - largepic
5349 - input_section->output_section->vma
5350 - input_section->output_offset
5351 - 12);
5352 bfd_put_32 (output_bfd, relocation,
5353 contents + roff + 8 + largepic);
5354 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5355 rel++;
5356 wrel++;
5357 continue;
5358 }
5359 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5360 {
5361 /* GDesc -> IE transition.
5362 It's originally something like:
5363 leaq x@tlsdesc(%rip), %rax
5364
5365 Change it to:
5366 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5367
5368 /* Now modify the instruction as appropriate. To
5369 turn a leaq into a movq in the form we use it, it
5370 suffices to change the second byte from 0x8d to
5371 0x8b. */
5372 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5373
5374 bfd_put_32 (output_bfd,
5375 htab->elf.sgot->output_section->vma
5376 + htab->elf.sgot->output_offset + off
5377 - rel->r_offset
5378 - input_section->output_section->vma
5379 - input_section->output_offset
5380 - 4,
5381 contents + roff);
5382 continue;
5383 }
5384 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5385 {
5386 /* GDesc -> IE transition.
5387 It's originally:
5388 call *(%rax)
5389
5390 Change it to:
5391 xchg %ax, %ax. */
5392
5393 bfd_put_8 (output_bfd, 0x66, contents + roff);
5394 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5395 continue;
5396 }
5397 else
5398 BFD_ASSERT (FALSE);
5399 }
5400 break;
5401
5402 case R_X86_64_TLSLD:
5403 if (! elf_x86_64_tls_transition (info, input_bfd,
5404 input_section, contents,
5405 symtab_hdr, sym_hashes,
5406 &r_type, GOT_UNKNOWN, rel,
5407 relend, h, r_symndx, TRUE))
5408 return FALSE;
5409
5410 if (r_type != R_X86_64_TLSLD)
5411 {
5412 /* LD->LE transition:
5413 leaq foo@tlsld(%rip), %rdi
5414 call __tls_get_addr@PLT
5415 For 64bit, we change it into:
5416 .word 0x6666; .byte 0x66; movq %fs:0, %rax
5417 For 32bit, we change it into:
5418 nopl 0x0(%rax); movl %fs:0, %eax
5419 Or
5420 leaq foo@tlsld(%rip), %rdi;
5421 call *__tls_get_addr@GOTPCREL(%rip)
5422 which may be converted to
5423 addr32 call __tls_get_addr
5424 For 64bit, we change it into:
5425 .word 0x6666; .word 0x6666; movq %fs:0, %rax
5426 For 32bit, we change it into:
5427 nopw 0x0(%rax); movl %fs:0, %eax
5428 For largepic, change:
5429 leaq foo@tlsgd(%rip), %rdi
5430 movabsq $__tls_get_addr@pltoff, %rax
5431 addq %rbx, %rax
5432 call *%rax
5433 into
5434 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
5435 movq %fs:0, %eax */
5436
5437 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5438 if (ABI_64_P (output_bfd))
5439 {
5440 if (contents[rel->r_offset + 5] == 0xb8)
5441 memcpy (contents + rel->r_offset - 3,
5442 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5443 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5444 else if (contents[rel->r_offset + 4] == 0xff
5445 || contents[rel->r_offset + 4] == 0x67)
5446 memcpy (contents + rel->r_offset - 3,
5447 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
5448 13);
5449 else
5450 memcpy (contents + rel->r_offset - 3,
5451 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5452 }
5453 else
5454 {
5455 if (contents[rel->r_offset + 4] == 0xff)
5456 memcpy (contents + rel->r_offset - 3,
5457 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
5458 13);
5459 else
5460 memcpy (contents + rel->r_offset - 3,
5461 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5462 }
5463 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
5464 and R_X86_64_PLTOFF64. */
5465 rel++;
5466 wrel++;
5467 continue;
5468 }
5469
5470 if (htab->elf.sgot == NULL)
5471 abort ();
5472
5473 off = htab->tls_ld_got.offset;
5474 if (off & 1)
5475 off &= ~1;
5476 else
5477 {
5478 Elf_Internal_Rela outrel;
5479
5480 if (htab->elf.srelgot == NULL)
5481 abort ();
5482
5483 outrel.r_offset = (htab->elf.sgot->output_section->vma
5484 + htab->elf.sgot->output_offset + off);
5485
5486 bfd_put_64 (output_bfd, 0,
5487 htab->elf.sgot->contents + off);
5488 bfd_put_64 (output_bfd, 0,
5489 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5490 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5491 outrel.r_addend = 0;
5492 elf_append_rela (output_bfd, htab->elf.srelgot,
5493 &outrel);
5494 htab->tls_ld_got.offset |= 1;
5495 }
5496 relocation = htab->elf.sgot->output_section->vma
5497 + htab->elf.sgot->output_offset + off;
5498 unresolved_reloc = FALSE;
5499 break;
5500
5501 case R_X86_64_DTPOFF32:
5502 if (!bfd_link_executable (info)
5503 || (input_section->flags & SEC_CODE) == 0)
5504 relocation -= elf_x86_64_dtpoff_base (info);
5505 else
5506 relocation = elf_x86_64_tpoff (info, relocation);
5507 break;
5508
5509 case R_X86_64_TPOFF32:
5510 case R_X86_64_TPOFF64:
5511 BFD_ASSERT (bfd_link_executable (info));
5512 relocation = elf_x86_64_tpoff (info, relocation);
5513 break;
5514
5515 case R_X86_64_DTPOFF64:
5516 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5517 relocation -= elf_x86_64_dtpoff_base (info);
5518 break;
5519
5520 default:
5521 break;
5522 }
5523
5524 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5525 because such sections are not SEC_ALLOC and thus ld.so will
5526 not process them. */
5527 if (unresolved_reloc
5528 && !((input_section->flags & SEC_DEBUGGING) != 0
5529 && h->def_dynamic)
5530 && _bfd_elf_section_offset (output_bfd, info, input_section,
5531 rel->r_offset) != (bfd_vma) -1)
5532 {
5533 (*_bfd_error_handler)
5534 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5535 input_bfd,
5536 input_section,
5537 (long) rel->r_offset,
5538 howto->name,
5539 h->root.root.string);
5540 return FALSE;
5541 }
5542
5543 do_relocation:
5544 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5545 contents, rel->r_offset,
5546 relocation, rel->r_addend);
5547
5548 check_relocation_error:
5549 if (r != bfd_reloc_ok)
5550 {
5551 const char *name;
5552
5553 if (h != NULL)
5554 name = h->root.root.string;
5555 else
5556 {
5557 name = bfd_elf_string_from_elf_section (input_bfd,
5558 symtab_hdr->sh_link,
5559 sym->st_name);
5560 if (name == NULL)
5561 return FALSE;
5562 if (*name == '\0')
5563 name = bfd_section_name (input_bfd, sec);
5564 }
5565
5566 if (r == bfd_reloc_overflow)
5567 (*info->callbacks->reloc_overflow)
5568 (info, (h ? &h->root : NULL), name, howto->name,
5569 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5570 else
5571 {
5572 (*_bfd_error_handler)
5573 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5574 input_bfd, input_section,
5575 (long) rel->r_offset, name, (int) r);
5576 return FALSE;
5577 }
5578 }
5579
5580 if (wrel != rel)
5581 *wrel = *rel;
5582 }
5583
5584 if (wrel != rel)
5585 {
5586 Elf_Internal_Shdr *rel_hdr;
5587 size_t deleted = rel - wrel;
5588
5589 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5590 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5591 if (rel_hdr->sh_size == 0)
5592 {
5593 /* It is too late to remove an empty reloc section. Leave
5594 one NONE reloc.
5595 ??? What is wrong with an empty section??? */
5596 rel_hdr->sh_size = rel_hdr->sh_entsize;
5597 deleted -= 1;
5598 }
5599 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5600 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5601 input_section->reloc_count -= deleted;
5602 }
5603
5604 return TRUE;
5605 }
5606
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.

   OUTPUT_BFD is the BFD being written, INFO the link, H the hash
   entry being finalized.  SYM points at the output symbol; it may be
   NULL when this is invoked for local/forced-local symbols via
   elf_x86_64_finish_local_dynamic_symbol, and is only written in the
   "mark as undefined" branch below (which requires !h->def_regular).
   Returns FALSE on failure, TRUE otherwise.  */

static bfd_boolean
elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
				  struct bfd_link_info *info,
				  struct elf_link_hash_entry *h,
				  Elf_Internal_Sym *sym)
{
  struct elf_x86_64_link_hash_table *htab;
  const struct elf_x86_64_backend_data *abed;
  bfd_boolean use_plt_bnd;
  struct elf_x86_64_link_hash_entry *eh;
  bfd_boolean local_undefweak;

  htab = elf_x86_64_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Use MPX backend data in case of BND relocation.  Use .plt_bnd
     section only if there is .plt section.  */
  use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
  abed = (use_plt_bnd
	  ? &elf_x86_64_bnd_arch_bed
	  : get_elf_x86_64_backend_data (output_bfd));

  eh = (struct elf_x86_64_link_hash_entry *) h;

  /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
     resolved undefined weak symbols in executable so that their
     references have value 0 at run-time.  */
  local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
						     eh->has_got_reloc,
						     eh);

  if (h->plt.offset != (bfd_vma) -1)
    {
      bfd_vma plt_index;
      bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
      bfd_vma plt_plt_insn_end, plt_got_insn_size;
      Elf_Internal_Rela rela;
      bfd_byte *loc;
      asection *plt, *gotplt, *relplt, *resolved_plt;
      const struct elf_backend_data *bed;
      bfd_vma plt_got_pcrel_offset;

      /* When building a static executable, use .iplt, .igot.plt and
	 .rela.iplt sections for STT_GNU_IFUNC symbols.  */
      if (htab->elf.splt != NULL)
	{
	  plt = htab->elf.splt;
	  gotplt = htab->elf.sgotplt;
	  relplt = htab->elf.srelplt;
	}
      else
	{
	  plt = htab->elf.iplt;
	  gotplt = htab->elf.igotplt;
	  relplt = htab->elf.irelplt;
	}

      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  A symbol without a dynamic index is only acceptable
	 here if it is a locally-resolved undefweak or a locally
	 defined IFUNC.  */
      if ((h->dynindx == -1
	   && !local_undefweak
	   && !((h->forced_local || bfd_link_executable (info))
		&& h->def_regular
		&& h->type == STT_GNU_IFUNC))
	  || plt == NULL
	  || gotplt == NULL
	  || relplt == NULL)
	abort ();

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.

	 Get the offset into the .got table of the entry that
	 corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
	 bytes.  The first three are reserved for the dynamic linker.

	 For static executables, we don't reserve anything.  */

      if (plt == htab->elf.splt)
	{
	  got_offset = h->plt.offset / abed->plt_entry_size - 1;
	  got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
	}
      else
	{
	  got_offset = h->plt.offset / abed->plt_entry_size;
	  got_offset = got_offset * GOT_ENTRY_SIZE;
	}

      plt_plt_insn_end = abed->plt_plt_insn_end;
      plt_plt_offset = abed->plt_plt_offset;
      plt_got_insn_size = abed->plt_got_insn_size;
      plt_got_offset = abed->plt_got_offset;
      if (use_plt_bnd)
	{
	  /* Use the second PLT with BND relocations.  */
	  const bfd_byte *plt_entry, *plt2_entry;

	  if (eh->has_bnd_reloc)
	    {
	      plt_entry = elf_x86_64_bnd_plt_entry;
	      plt2_entry = elf_x86_64_bnd_plt2_entry;
	    }
	  else
	    {
	      plt_entry = elf_x86_64_legacy_plt_entry;
	      plt2_entry = elf_x86_64_legacy_plt2_entry;

	      /* Subtract 1 since there is no BND prefix.  */
	      plt_plt_insn_end -= 1;
	      plt_plt_offset -= 1;
	      plt_got_insn_size -= 1;
	      plt_got_offset -= 1;
	    }

	  BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
		      == sizeof (elf_x86_64_legacy_plt_entry));

	  /* Fill in the entry in the procedure linkage table.  */
	  memcpy (plt->contents + h->plt.offset,
		  plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
	  /* Fill in the entry in the second PLT.  */
	  memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
		  plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));

	  /* The GOT-referencing instruction lives in the second PLT.  */
	  resolved_plt = htab->plt_bnd;
	  plt_offset = eh->plt_bnd.offset;
	}
      else
	{
	  /* Fill in the entry in the procedure linkage table.  */
	  memcpy (plt->contents + h->plt.offset, abed->plt_entry,
		  abed->plt_entry_size);

	  resolved_plt = plt;
	  plt_offset = h->plt.offset;
	}

      /* Insert the relocation positions of the plt section.  */

      /* Put offset the PC-relative instruction referring to the GOT entry,
	 subtracting the size of that instruction.  */
      plt_got_pcrel_offset = (gotplt->output_section->vma
			      + gotplt->output_offset
			      + got_offset
			      - resolved_plt->output_section->vma
			      - resolved_plt->output_offset
			      - plt_offset
			      - plt_got_insn_size);

      /* Check PC-relative offset overflow in PLT entry: the
	 displacement must fit in a signed 32-bit field.  */
      if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
	info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
				output_bfd, h->root.root.string);

      bfd_put_32 (output_bfd, plt_got_pcrel_offset,
		  resolved_plt->contents + plt_offset + plt_got_offset);

      /* Fill in the entry in the global offset table, initially this
	 points to the second part of the PLT entry.  Leave the entry
	 as zero for undefined weak symbol in PIE.  No PLT relocation
	 against undefined weak symbol in PIE.  */
      if (!local_undefweak)
	{
	  bfd_put_64 (output_bfd, (plt->output_section->vma
				   + plt->output_offset
				   + h->plt.offset
				   + abed->plt_lazy_offset),
		      gotplt->contents + got_offset);

	  /* Fill in the entry in the .rela.plt section.  */
	  rela.r_offset = (gotplt->output_section->vma
			   + gotplt->output_offset
			   + got_offset);
	  if (h->dynindx == -1
	      || ((bfd_link_executable (info)
		   || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
		  && h->def_regular
		  && h->type == STT_GNU_IFUNC))
	    {
	      /* If an STT_GNU_IFUNC symbol is locally defined, generate
		 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT.  */
	      rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
	      rela.r_addend = (h->root.u.def.value
			       + h->root.u.def.section->output_section->vma
			       + h->root.u.def.section->output_offset);
	      /* R_X86_64_IRELATIVE comes last.  */
	      plt_index = htab->next_irelative_index--;
	    }
	  else
	    {
	      rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
	      rela.r_addend = 0;
	      plt_index = htab->next_jump_slot_index++;
	    }

	  /* Don't fill PLT entry for static executables.  */
	  if (plt == htab->elf.splt)
	    {
	      bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;

	      /* Put relocation index.  */
	      bfd_put_32 (output_bfd, plt_index,
			  (plt->contents + h->plt.offset
			   + abed->plt_reloc_offset));

	      /* Put offset for jmp .PLT0 and check for overflow.  We don't
		 check relocation index for overflow since branch displacement
		 will overflow first.  */
	      if (plt0_offset > 0x80000000)
		info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
					output_bfd, h->root.root.string);
	      bfd_put_32 (output_bfd, - plt0_offset,
			  plt->contents + h->plt.offset + plt_plt_offset);
	    }

	  bed = get_elf_backend_data (output_bfd);
	  loc = relplt->contents + plt_index * bed->s->sizeof_rela;
	  bed->s->swap_reloca_out (output_bfd, &rela, loc);
	}
    }
  else if (eh->plt_got.offset != (bfd_vma) -1)
    {
      bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
      asection *plt, *got;
      bfd_boolean got_after_plt;
      int32_t got_pcrel_offset;
      const bfd_byte *got_plt_entry;

      /* Set the entry in the GOT procedure linkage table.  */
      plt = htab->plt_got;
      got = htab->elf.sgot;
      got_offset = h->got.offset;

      if (got_offset == (bfd_vma) -1
	  || h->type == STT_GNU_IFUNC
	  || plt == NULL
	  || got == NULL)
	abort ();

      /* Use the second PLT entry template for the GOT PLT since they
	 are identical.  */
      plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
      plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
      if (eh->has_bnd_reloc)
	got_plt_entry = elf_x86_64_bnd_plt2_entry;
      else
	{
	  got_plt_entry = elf_x86_64_legacy_plt2_entry;

	  /* Subtract 1 since there is no BND prefix.  */
	  plt_got_insn_size -= 1;
	  plt_got_offset -= 1;
	}

      /* Fill in the entry in the GOT procedure linkage table.  */
      plt_offset = eh->plt_got.offset;
      memcpy (plt->contents + plt_offset,
	      got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));

      /* Put offset the PC-relative instruction referring to the GOT
	 entry, subtracting the size of that instruction.  */
      got_pcrel_offset = (got->output_section->vma
			  + got->output_offset
			  + got_offset
			  - plt->output_section->vma
			  - plt->output_offset
			  - plt_offset
			  - plt_got_insn_size);

      /* Check PC-relative offset overflow in GOT PLT entry: the
	 displacement must have the sign implied by the relative
	 placement of the two output sections.  */
      got_after_plt = got->output_section->vma > plt->output_section->vma;
      if ((got_after_plt && got_pcrel_offset < 0)
	  || (!got_after_plt && got_pcrel_offset > 0))
	info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
				output_bfd, h->root.root.string);

      bfd_put_32 (output_bfd, got_pcrel_offset,
		  plt->contents + plt_offset + plt_got_offset);
    }

  if (!local_undefweak
      && !h->def_regular
      && (h->plt.offset != (bfd_vma) -1
	  || eh->plt_got.offset != (bfd_vma) -1))
    {
      /* Mark the symbol as undefined, rather than as defined in
	 the .plt section.  Leave the value if there were any
	 relocations where pointer equality matters (this is a clue
	 for the dynamic linker, to make function pointer
	 comparisons work between an application and shared
	 library), otherwise set it to zero.  If a function is only
	 called from a binary, there is no need to slow down
	 shared libraries because of that.  */
      sym->st_shndx = SHN_UNDEF;
      if (!h->pointer_equality_needed)
	sym->st_value = 0;
    }

  /* Don't generate dynamic GOT relocation against undefined weak
     symbol in executable.  TLS GD/IE GOT entries were already handled
     by relocate_section.  */
  if (h->got.offset != (bfd_vma) -1
      && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
      && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
      && !local_undefweak)
    {
      Elf_Internal_Rela rela;
      asection *relgot = htab->elf.srelgot;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  */
      if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
	abort ();

      /* Bit 0 of got.offset is a "processed" marker; strip it to get
	 the real offset.  */
      rela.r_offset = (htab->elf.sgot->output_section->vma
		       + htab->elf.sgot->output_offset
		       + (h->got.offset &~ (bfd_vma) 1));

      /* If this is a static link, or it is a -Bsymbolic link and the
	 symbol is defined locally or was forced to be local because
	 of a version file, we just want to emit a RELATIVE reloc.
	 The entry in the global offset table will already have been
	 initialized in the relocate_section function.  */
      if (h->def_regular
	  && h->type == STT_GNU_IFUNC)
	{
	  if (h->plt.offset == (bfd_vma) -1)
	    {
	      /* STT_GNU_IFUNC is referenced without PLT.  */
	      if (htab->elf.splt == NULL)
		{
		  /* use .rel[a].iplt section to store .got relocations
		     in static executable.  */
		  relgot = htab->elf.irelplt;
		}
	      if (SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  rela.r_info = htab->r_info (0,
					      R_X86_64_IRELATIVE);
		  rela.r_addend = (h->root.u.def.value
				   + h->root.u.def.section->output_section->vma
				   + h->root.u.def.section->output_offset);
		}
	      else
		goto do_glob_dat;
	    }
	  else if (bfd_link_pic (info))
	    {
	      /* Generate R_X86_64_GLOB_DAT.  */
	      goto do_glob_dat;
	    }
	  else
	    {
	      asection *plt;

	      if (!h->pointer_equality_needed)
		abort ();

	      /* For non-shared object, we can't use .got.plt, which
		 contains the real function address if we need pointer
		 equality.  We load the GOT entry with the PLT entry.  */
	      plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
	      bfd_put_64 (output_bfd, (plt->output_section->vma
				       + plt->output_offset
				       + h->plt.offset),
			  htab->elf.sgot->contents + h->got.offset);
	      return TRUE;
	    }
	}
      else if (bfd_link_pic (info)
	       && SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (!h->def_regular)
	    return FALSE;
	  BFD_ASSERT((h->got.offset & 1) != 0);
	  rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
	  rela.r_addend = (h->root.u.def.value
			   + h->root.u.def.section->output_section->vma
			   + h->root.u.def.section->output_offset);
	}
      else
	{
	  BFD_ASSERT((h->got.offset & 1) == 0);
do_glob_dat:
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->elf.sgot->contents + h->got.offset);
	  rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
	  rela.r_addend = 0;
	}

      elf_append_rela (output_bfd, relgot, &rela);
    }

  if (h->needs_copy)
    {
      Elf_Internal_Rela rela;

      /* This symbol needs a copy reloc.  Set it up.  */

      if (h->dynindx == -1
	  || (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	  || htab->srelbss == NULL)
	abort ();

      rela.r_offset = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
      rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
      rela.r_addend = 0;
      elf_append_rela (output_bfd, htab->srelbss, &rela);
    }

  return TRUE;
}
6028
6029 /* Finish up local dynamic symbol handling. We set the contents of
6030 various dynamic sections here. */
6031
6032 static bfd_boolean
6033 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
6034 {
6035 struct elf_link_hash_entry *h
6036 = (struct elf_link_hash_entry *) *slot;
6037 struct bfd_link_info *info
6038 = (struct bfd_link_info *) inf;
6039
6040 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6041 info, h, NULL);
6042 }
6043
6044 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
6045 here since undefined weak symbol may not be dynamic and may not be
6046 called for elf_x86_64_finish_dynamic_symbol. */
6047
6048 static bfd_boolean
6049 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
6050 void *inf)
6051 {
6052 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
6053 struct bfd_link_info *info = (struct bfd_link_info *) inf;
6054
6055 if (h->root.type != bfd_link_hash_undefweak
6056 || h->dynindx != -1)
6057 return TRUE;
6058
6059 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6060 info, h, NULL);
6061 }
6062
6063 /* Used to decide how to sort relocs in an optimal manner for the
6064 dynamic linker, before writing them out. */
6065
6066 static enum elf_reloc_type_class
6067 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
6068 const asection *rel_sec ATTRIBUTE_UNUSED,
6069 const Elf_Internal_Rela *rela)
6070 {
6071 bfd *abfd = info->output_bfd;
6072 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6073 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6074
6075 if (htab->elf.dynsym != NULL
6076 && htab->elf.dynsym->contents != NULL)
6077 {
6078 /* Check relocation against STT_GNU_IFUNC symbol if there are
6079 dynamic symbols. */
6080 unsigned long r_symndx = htab->r_sym (rela->r_info);
6081 if (r_symndx != STN_UNDEF)
6082 {
6083 Elf_Internal_Sym sym;
6084 if (!bed->s->swap_symbol_in (abfd,
6085 (htab->elf.dynsym->contents
6086 + r_symndx * bed->s->sizeof_sym),
6087 0, &sym))
6088 abort ();
6089
6090 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
6091 return reloc_class_ifunc;
6092 }
6093 }
6094
6095 switch ((int) ELF32_R_TYPE (rela->r_info))
6096 {
6097 case R_X86_64_IRELATIVE:
6098 return reloc_class_ifunc;
6099 case R_X86_64_RELATIVE:
6100 case R_X86_64_RELATIVE64:
6101 return reloc_class_relative;
6102 case R_X86_64_JUMP_SLOT:
6103 return reloc_class_plt;
6104 case R_X86_64_COPY:
6105 return reloc_class_copy;
6106 default:
6107 return reloc_class_normal;
6108 }
6109 }
6110
6111 /* Finish up the dynamic sections. */
6112
6113 static bfd_boolean
6114 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
6115 struct bfd_link_info *info)
6116 {
6117 struct elf_x86_64_link_hash_table *htab;
6118 bfd *dynobj;
6119 asection *sdyn;
6120 const struct elf_x86_64_backend_data *abed;
6121
6122 htab = elf_x86_64_hash_table (info);
6123 if (htab == NULL)
6124 return FALSE;
6125
6126 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
6127 section only if there is .plt section. */
6128 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
6129 ? &elf_x86_64_bnd_arch_bed
6130 : get_elf_x86_64_backend_data (output_bfd));
6131
6132 dynobj = htab->elf.dynobj;
6133 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6134
6135 if (htab->elf.dynamic_sections_created)
6136 {
6137 bfd_byte *dyncon, *dynconend;
6138 const struct elf_backend_data *bed;
6139 bfd_size_type sizeof_dyn;
6140
6141 if (sdyn == NULL || htab->elf.sgot == NULL)
6142 abort ();
6143
6144 bed = get_elf_backend_data (dynobj);
6145 sizeof_dyn = bed->s->sizeof_dyn;
6146 dyncon = sdyn->contents;
6147 dynconend = sdyn->contents + sdyn->size;
6148 for (; dyncon < dynconend; dyncon += sizeof_dyn)
6149 {
6150 Elf_Internal_Dyn dyn;
6151 asection *s;
6152
6153 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
6154
6155 switch (dyn.d_tag)
6156 {
6157 default:
6158 continue;
6159
6160 case DT_PLTGOT:
6161 s = htab->elf.sgotplt;
6162 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6163 break;
6164
6165 case DT_JMPREL:
6166 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
6167 break;
6168
6169 case DT_PLTRELSZ:
6170 s = htab->elf.srelplt->output_section;
6171 dyn.d_un.d_val = s->size;
6172 break;
6173
6174 case DT_RELASZ:
6175 /* The procedure linkage table relocs (DT_JMPREL) should
6176 not be included in the overall relocs (DT_RELA).
6177 Therefore, we override the DT_RELASZ entry here to
6178 make it not include the JMPREL relocs. Since the
6179 linker script arranges for .rela.plt to follow all
6180 other relocation sections, we don't have to worry
6181 about changing the DT_RELA entry. */
6182 if (htab->elf.srelplt != NULL)
6183 {
6184 s = htab->elf.srelplt->output_section;
6185 dyn.d_un.d_val -= s->size;
6186 }
6187 break;
6188
6189 case DT_TLSDESC_PLT:
6190 s = htab->elf.splt;
6191 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6192 + htab->tlsdesc_plt;
6193 break;
6194
6195 case DT_TLSDESC_GOT:
6196 s = htab->elf.sgot;
6197 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6198 + htab->tlsdesc_got;
6199 break;
6200 }
6201
6202 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
6203 }
6204
6205 /* Fill in the special first entry in the procedure linkage table. */
6206 if (htab->elf.splt && htab->elf.splt->size > 0)
6207 {
6208 /* Fill in the first entry in the procedure linkage table. */
6209 memcpy (htab->elf.splt->contents,
6210 abed->plt0_entry, abed->plt_entry_size);
6211 /* Add offset for pushq GOT+8(%rip), since the instruction
6212 uses 6 bytes subtract this value. */
6213 bfd_put_32 (output_bfd,
6214 (htab->elf.sgotplt->output_section->vma
6215 + htab->elf.sgotplt->output_offset
6216 + 8
6217 - htab->elf.splt->output_section->vma
6218 - htab->elf.splt->output_offset
6219 - 6),
6220 htab->elf.splt->contents + abed->plt0_got1_offset);
6221 /* Add offset for the PC-relative instruction accessing GOT+16,
6222 subtracting the offset to the end of that instruction. */
6223 bfd_put_32 (output_bfd,
6224 (htab->elf.sgotplt->output_section->vma
6225 + htab->elf.sgotplt->output_offset
6226 + 16
6227 - htab->elf.splt->output_section->vma
6228 - htab->elf.splt->output_offset
6229 - abed->plt0_got2_insn_end),
6230 htab->elf.splt->contents + abed->plt0_got2_offset);
6231
6232 elf_section_data (htab->elf.splt->output_section)
6233 ->this_hdr.sh_entsize = abed->plt_entry_size;
6234
6235 if (htab->tlsdesc_plt)
6236 {
6237 bfd_put_64 (output_bfd, (bfd_vma) 0,
6238 htab->elf.sgot->contents + htab->tlsdesc_got);
6239
6240 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
6241 abed->plt0_entry, abed->plt_entry_size);
6242
6243 /* Add offset for pushq GOT+8(%rip), since the
6244 instruction uses 6 bytes subtract this value. */
6245 bfd_put_32 (output_bfd,
6246 (htab->elf.sgotplt->output_section->vma
6247 + htab->elf.sgotplt->output_offset
6248 + 8
6249 - htab->elf.splt->output_section->vma
6250 - htab->elf.splt->output_offset
6251 - htab->tlsdesc_plt
6252 - 6),
6253 htab->elf.splt->contents
6254 + htab->tlsdesc_plt + abed->plt0_got1_offset);
6255 /* Add offset for the PC-relative instruction accessing GOT+TDG,
6256 where TGD stands for htab->tlsdesc_got, subtracting the offset
6257 to the end of that instruction. */
6258 bfd_put_32 (output_bfd,
6259 (htab->elf.sgot->output_section->vma
6260 + htab->elf.sgot->output_offset
6261 + htab->tlsdesc_got
6262 - htab->elf.splt->output_section->vma
6263 - htab->elf.splt->output_offset
6264 - htab->tlsdesc_plt
6265 - abed->plt0_got2_insn_end),
6266 htab->elf.splt->contents
6267 + htab->tlsdesc_plt + abed->plt0_got2_offset);
6268 }
6269 }
6270 }
6271
6272 if (htab->plt_bnd != NULL)
6273 elf_section_data (htab->plt_bnd->output_section)
6274 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
6275
6276 if (htab->elf.sgotplt)
6277 {
6278 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
6279 {
6280 (*_bfd_error_handler)
6281 (_("discarded output section: `%A'"), htab->elf.sgotplt);
6282 return FALSE;
6283 }
6284
6285 /* Fill in the first three entries in the global offset table. */
6286 if (htab->elf.sgotplt->size > 0)
6287 {
6288 /* Set the first entry in the global offset table to the address of
6289 the dynamic section. */
6290 if (sdyn == NULL)
6291 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
6292 else
6293 bfd_put_64 (output_bfd,
6294 sdyn->output_section->vma + sdyn->output_offset,
6295 htab->elf.sgotplt->contents);
6296 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6297 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
6298 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
6299 }
6300
6301 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
6302 GOT_ENTRY_SIZE;
6303 }
6304
6305 /* Adjust .eh_frame for .plt section. */
6306 if (htab->plt_eh_frame != NULL
6307 && htab->plt_eh_frame->contents != NULL)
6308 {
6309 if (htab->elf.splt != NULL
6310 && htab->elf.splt->size != 0
6311 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
6312 && htab->elf.splt->output_section != NULL
6313 && htab->plt_eh_frame->output_section != NULL)
6314 {
6315 bfd_vma plt_start = htab->elf.splt->output_section->vma;
6316 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
6317 + htab->plt_eh_frame->output_offset
6318 + PLT_FDE_START_OFFSET;
6319 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
6320 htab->plt_eh_frame->contents
6321 + PLT_FDE_START_OFFSET);
6322 }
6323 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
6324 {
6325 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
6326 htab->plt_eh_frame,
6327 htab->plt_eh_frame->contents))
6328 return FALSE;
6329 }
6330 }
6331
6332 if (htab->elf.sgot && htab->elf.sgot->size > 0)
6333 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
6334 = GOT_ENTRY_SIZE;
6335
6336 /* Fill PLT entries for undefined weak symbols in PIE. */
6337 if (bfd_link_pie (info))
6338 bfd_hash_traverse (&info->hash->table,
6339 elf_x86_64_pie_finish_undefweak_symbol,
6340 info);
6341
6342 return TRUE;
6343 }
6344
6345 /* Fill PLT/GOT entries and allocate dynamic relocations for local
6346 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
6347 It has to be done before elf_link_sort_relocs is called so that
6348 dynamic relocations are properly sorted. */
6349
6350 static bfd_boolean
6351 elf_x86_64_output_arch_local_syms
6352 (bfd *output_bfd ATTRIBUTE_UNUSED,
6353 struct bfd_link_info *info,
6354 void *flaginfo ATTRIBUTE_UNUSED,
6355 int (*func) (void *, const char *,
6356 Elf_Internal_Sym *,
6357 asection *,
6358 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
6359 {
6360 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6361 if (htab == NULL)
6362 return FALSE;
6363
6364 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
6365 htab_traverse (htab->loc_hash_table,
6366 elf_x86_64_finish_local_dynamic_symbol,
6367 info);
6368
6369 return TRUE;
6370 }
6371
6372 /* Return an array of PLT entry symbol values. */
6373
/* Build an array mapping dynamic-relocation index to the VMA of the
   corresponding PLT entry, for synthetic "foo@plt" symbols.  Entries
   with no usable PLT slot are left as (bfd_vma) -1.  Returns a
   malloc'd array of RELPLT-count elements, or NULL on failure.  */

static bfd_vma *
elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
			    asection *relplt)
{
  bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
  arelent *p;
  long count, i;
  bfd_vma *plt_sym_val;
  bfd_vma plt_offset;
  bfd_byte *plt_contents;
  const struct elf_x86_64_backend_data *bed;
  Elf_Internal_Shdr *hdr;
  asection *plt_bnd;

  /* Get the .plt section contents.  PLT passed down may point to the
     .plt.bnd section.  Make sure that PLT always points to the .plt
     section.  */
  plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
  if (plt_bnd)
    {
      /* When .plt.bnd exists the caller must have handed us that
	 section; switch back to the real .plt and use the MPX (BND)
	 backend layout to decode it.  */
      if (plt != plt_bnd)
	abort ();
      plt = bfd_get_section_by_name (abfd, ".plt");
      if (plt == NULL)
	abort ();
      bed = &elf_x86_64_bnd_arch_bed;
    }
  else
    bed = get_elf_x86_64_backend_data (abfd);

  plt_contents = (bfd_byte *) bfd_malloc (plt->size);
  if (plt_contents == NULL)
    return NULL;
  if (!bfd_get_section_contents (abfd, (asection *) plt,
				 plt_contents, 0, plt->size))
    {
bad_return:
      free (plt_contents);
      return NULL;
    }

  /* Read the dynamic relocations for the PLT.  */
  slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
  if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
    goto bad_return;

  hdr = &elf_section_data (relplt)->this_hdr;
  count = relplt->size / hdr->sh_entsize;

  plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
  if (plt_sym_val == NULL)
    goto bad_return;

  /* -1 marks "no PLT entry found for this relocation index".  */
  for (i = 0; i < count; i++)
    plt_sym_val[i] = -1;

  /* Skip the reserved first (PLT0) entry, then decode the relocation
     index stored inside each subsequent PLT entry.  */
  plt_offset = bed->plt_entry_size;
  p = relplt->relocation;
  for (i = 0; i < count; i++, p++)
    {
      long reloc_index;

      /* Skip unknown relocation.  */
      if (p->howto == NULL)
	continue;

      if (p->howto->type != R_X86_64_JUMP_SLOT
	  && p->howto->type != R_X86_64_IRELATIVE)
	continue;

      /* The pushq immediate inside the PLT entry is the index into
	 .rela.plt for this slot.  */
      reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
				     + bed->plt_reloc_offset));
      if (reloc_index < count)
	{
	  if (plt_bnd)
	    {
	      /* This is the index in .plt section.  */
	      long plt_index = plt_offset / bed->plt_entry_size;
	      /* Store VMA + the offset in .plt.bnd section.  */
	      plt_sym_val[reloc_index] =
		(plt_bnd->vma
		 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
	    }
	  else
	    plt_sym_val[reloc_index] = plt->vma + plt_offset;
	}
      plt_offset += bed->plt_entry_size;

      /* PR binutils/18437: Skip extra relocations in the .rela.plt
	 section.  */
      if (plt_offset >= plt->size)
	break;
    }

  free (plt_contents);

  return plt_sym_val;
}
6471
6472 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6473 support. */
6474
6475 static long
6476 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6477 long symcount,
6478 asymbol **syms,
6479 long dynsymcount,
6480 asymbol **dynsyms,
6481 asymbol **ret)
6482 {
6483 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6484 as PLT if it exists. */
6485 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6486 if (plt == NULL)
6487 plt = bfd_get_section_by_name (abfd, ".plt");
6488 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6489 dynsymcount, dynsyms, ret,
6490 plt,
6491 elf_x86_64_get_plt_sym_val);
6492 }
6493
6494 /* Handle an x86-64 specific section when reading an object file. This
6495 is called when elfcode.h finds a section with an unknown type. */
6496
6497 static bfd_boolean
6498 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6499 const char *name, int shindex)
6500 {
6501 if (hdr->sh_type != SHT_X86_64_UNWIND)
6502 return FALSE;
6503
6504 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6505 return FALSE;
6506
6507 return TRUE;
6508 }
6509
6510 /* Hook called by the linker routine which adds symbols from an object
6511 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6512 of .bss. */
6513
6514 static bfd_boolean
6515 elf_x86_64_add_symbol_hook (bfd *abfd,
6516 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6517 Elf_Internal_Sym *sym,
6518 const char **namep ATTRIBUTE_UNUSED,
6519 flagword *flagsp ATTRIBUTE_UNUSED,
6520 asection **secp,
6521 bfd_vma *valp)
6522 {
6523 asection *lcomm;
6524
6525 switch (sym->st_shndx)
6526 {
6527 case SHN_X86_64_LCOMMON:
6528 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6529 if (lcomm == NULL)
6530 {
6531 lcomm = bfd_make_section_with_flags (abfd,
6532 "LARGE_COMMON",
6533 (SEC_ALLOC
6534 | SEC_IS_COMMON
6535 | SEC_LINKER_CREATED));
6536 if (lcomm == NULL)
6537 return FALSE;
6538 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6539 }
6540 *secp = lcomm;
6541 *valp = sym->st_size;
6542 return TRUE;
6543 }
6544
6545 return TRUE;
6546 }
6547
6548
6549 /* Given a BFD section, try to locate the corresponding ELF section
6550 index. */
6551
6552 static bfd_boolean
6553 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6554 asection *sec, int *index_return)
6555 {
6556 if (sec == &_bfd_elf_large_com_section)
6557 {
6558 *index_return = SHN_X86_64_LCOMMON;
6559 return TRUE;
6560 }
6561 return FALSE;
6562 }
6563
6564 /* Process a symbol. */
6565
6566 static void
6567 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6568 asymbol *asym)
6569 {
6570 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6571
6572 switch (elfsym->internal_elf_sym.st_shndx)
6573 {
6574 case SHN_X86_64_LCOMMON:
6575 asym->section = &_bfd_elf_large_com_section;
6576 asym->value = elfsym->internal_elf_sym.st_size;
6577 /* Common symbol doesn't set BSF_GLOBAL. */
6578 asym->flags &= ~BSF_GLOBAL;
6579 break;
6580 }
6581 }
6582
6583 static bfd_boolean
6584 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6585 {
6586 return (sym->st_shndx == SHN_COMMON
6587 || sym->st_shndx == SHN_X86_64_LCOMMON);
6588 }
6589
6590 static unsigned int
6591 elf_x86_64_common_section_index (asection *sec)
6592 {
6593 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6594 return SHN_COMMON;
6595 else
6596 return SHN_X86_64_LCOMMON;
6597 }
6598
6599 static asection *
6600 elf_x86_64_common_section (asection *sec)
6601 {
6602 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6603 return bfd_com_section_ptr;
6604 else
6605 return &_bfd_elf_large_com_section;
6606 }
6607
/* Hook run when a new symbol definition meets an existing one.  Its
   only job here is to resolve mixes of normal common and large common
   symbols in favor of a normal common symbol.  */

static bfd_boolean
elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
			 const Elf_Internal_Sym *sym,
			 asection **psec,
			 bfd_boolean newdef,
			 bfd_boolean olddef,
			 bfd *oldbfd,
			 const asection *oldsec)
{
  /* A normal common symbol and a large common symbol result in a
     normal common symbol.  We turn the large common symbol into a
     normal one.  */
  if (!olddef
      && h->root.type == bfd_link_hash_common
      && !newdef
      && bfd_is_com_section (*psec)
      && oldsec != *psec)
    {
      if (sym->st_shndx == SHN_COMMON
	  && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
	{
	  /* The existing symbol is large common: demote it by moving
	     it into the plain COMMON section of its owning bfd.  */
	  h->root.u.c.p->section
	    = bfd_make_section_old_way (oldbfd, "COMMON");
	  h->root.u.c.p->section->flags = SEC_ALLOC;
	}
      else if (sym->st_shndx == SHN_X86_64_LCOMMON
	       && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
	/* The new symbol is large common: fold it into the normal
	   common section instead.  */
	*psec = bfd_com_section_ptr;
    }

  return TRUE;
}
6640
6641 static int
6642 elf_x86_64_additional_program_headers (bfd *abfd,
6643 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6644 {
6645 asection *s;
6646 int count = 0;
6647
6648 /* Check to see if we need a large readonly segment. */
6649 s = bfd_get_section_by_name (abfd, ".lrodata");
6650 if (s && (s->flags & SEC_LOAD))
6651 count++;
6652
6653 /* Check to see if we need a large data segment. Since .lbss sections
6654 is placed right after the .bss section, there should be no need for
6655 a large data segment just because of .lbss. */
6656 s = bfd_get_section_by_name (abfd, ".ldata");
6657 if (s && (s->flags & SEC_LOAD))
6658 count++;
6659
6660 return count;
6661 }
6662
6663 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6664
6665 static bfd_boolean
6666 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6667 {
6668 if (h->plt.offset != (bfd_vma) -1
6669 && !h->def_regular
6670 && !h->pointer_equality_needed)
6671 return FALSE;
6672
6673 return _bfd_elf_hash_symbol (h);
6674 }
6675
6676 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6677
6678 static bfd_boolean
6679 elf_x86_64_relocs_compatible (const bfd_target *input,
6680 const bfd_target *output)
6681 {
6682 return ((xvec_get_elf_backend_data (input)->s->elfclass
6683 == xvec_get_elf_backend_data (output)->s->elfclass)
6684 && _bfd_elf_relocs_compatible (input, output));
6685 }
6686
/* Backend-specific special sections: the large-model (.l*) sections
   and their linkonce variants carry SHF_X86_64_LARGE on top of the
   usual allocation flags.  */
static const struct bfd_elf_special_section
elf_x86_64_special_sections[]=
{
  { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS,   SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".lbss"),	           -2, SHT_NOBITS,   SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".ldata"),	   -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".lrodata"),	   -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
  { NULL,	                0,          0, 0,            0 }
};
6698
6699 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6700 #define TARGET_LITTLE_NAME "elf64-x86-64"
6701 #define ELF_ARCH bfd_arch_i386
6702 #define ELF_TARGET_ID X86_64_ELF_DATA
6703 #define ELF_MACHINE_CODE EM_X86_64
6704 #define ELF_MAXPAGESIZE 0x200000
6705 #define ELF_MINPAGESIZE 0x1000
6706 #define ELF_COMMONPAGESIZE 0x1000
6707
6708 #define elf_backend_can_gc_sections 1
6709 #define elf_backend_can_refcount 1
6710 #define elf_backend_want_got_plt 1
6711 #define elf_backend_plt_readonly 1
6712 #define elf_backend_want_plt_sym 0
6713 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6714 #define elf_backend_rela_normal 1
6715 #define elf_backend_plt_alignment 4
6716 #define elf_backend_extern_protected_data 1
6717 #define elf_backend_caches_rawsize 1
6718
6719 #define elf_info_to_howto elf_x86_64_info_to_howto
6720
6721 #define bfd_elf64_bfd_link_hash_table_create \
6722 elf_x86_64_link_hash_table_create
6723 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6724 #define bfd_elf64_bfd_reloc_name_lookup \
6725 elf_x86_64_reloc_name_lookup
6726
6727 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6728 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6729 #define elf_backend_check_relocs elf_x86_64_check_relocs
6730 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6731 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6732 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6733 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6734 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
6735 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6736 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6737 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6738 #ifdef CORE_HEADER
6739 #define elf_backend_write_core_note elf_x86_64_write_core_note
6740 #endif
6741 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6742 #define elf_backend_relocate_section elf_x86_64_relocate_section
6743 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6744 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6745 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6746 #define elf_backend_object_p elf64_x86_64_elf_object_p
6747 #define bfd_elf64_mkobject elf_x86_64_mkobject
6748 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6749
6750 #define elf_backend_section_from_shdr \
6751 elf_x86_64_section_from_shdr
6752
6753 #define elf_backend_section_from_bfd_section \
6754 elf_x86_64_elf_section_from_bfd_section
6755 #define elf_backend_add_symbol_hook \
6756 elf_x86_64_add_symbol_hook
6757 #define elf_backend_symbol_processing \
6758 elf_x86_64_symbol_processing
6759 #define elf_backend_common_section_index \
6760 elf_x86_64_common_section_index
6761 #define elf_backend_common_section \
6762 elf_x86_64_common_section
6763 #define elf_backend_common_definition \
6764 elf_x86_64_common_definition
6765 #define elf_backend_merge_symbol \
6766 elf_x86_64_merge_symbol
6767 #define elf_backend_special_sections \
6768 elf_x86_64_special_sections
6769 #define elf_backend_additional_program_headers \
6770 elf_x86_64_additional_program_headers
6771 #define elf_backend_hash_symbol \
6772 elf_x86_64_hash_symbol
6773 #define elf_backend_omit_section_dynsym \
6774 ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
6775 #define elf_backend_fixup_symbol \
6776 elf_x86_64_fixup_symbol
6777
6778 #include "elf64-target.h"
6779
6780 /* CloudABI support. */
6781
6782 #undef TARGET_LITTLE_SYM
6783 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6784 #undef TARGET_LITTLE_NAME
6785 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6786
6787 #undef ELF_OSABI
6788 #define ELF_OSABI ELFOSABI_CLOUDABI
6789
6790 #undef elf64_bed
6791 #define elf64_bed elf64_x86_64_cloudabi_bed
6792
6793 #include "elf64-target.h"
6794
6795 /* FreeBSD support. */
6796
6797 #undef TARGET_LITTLE_SYM
6798 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6799 #undef TARGET_LITTLE_NAME
6800 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6801
6802 #undef ELF_OSABI
6803 #define ELF_OSABI ELFOSABI_FREEBSD
6804
6805 #undef elf64_bed
6806 #define elf64_bed elf64_x86_64_fbsd_bed
6807
6808 #include "elf64-target.h"
6809
6810 /* Solaris 2 support. */
6811
6812 #undef TARGET_LITTLE_SYM
6813 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6814 #undef TARGET_LITTLE_NAME
6815 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6816
6817 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6818 objects won't be recognized. */
6819 #undef ELF_OSABI
6820
6821 #undef elf64_bed
6822 #define elf64_bed elf64_x86_64_sol2_bed
6823
6824 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6825 boundary. */
6826 #undef elf_backend_static_tls_alignment
6827 #define elf_backend_static_tls_alignment 16
6828
6829 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6830
6831 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6832 File, p.63. */
6833 #undef elf_backend_want_plt_sym
6834 #define elf_backend_want_plt_sym 1
6835
6836 #undef elf_backend_strtab_flags
6837 #define elf_backend_strtab_flags SHF_STRINGS
6838
/* Solaris-specific hook for copying sh_info/sh_link of special section
   types when objcopy-ing; currently a stub that declines to copy.  */

static bfd_boolean
elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
						  bfd *obfd ATTRIBUTE_UNUSED,
						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
{
  /* PR 19938: FIXME: Need to add code for setting the sh_info
     and sh_link fields of Solaris specific section types.  */
  return FALSE;
}
6849
6850 #undef elf_backend_copy_special_section_fields
6851 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
6852
6853 #include "elf64-target.h"
6854
6855 /* Native Client support. */
6856
6857 static bfd_boolean
6858 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6859 {
6860 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6861 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6862 return TRUE;
6863 }
6864
6865 #undef TARGET_LITTLE_SYM
6866 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6867 #undef TARGET_LITTLE_NAME
6868 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6869 #undef elf64_bed
6870 #define elf64_bed elf64_x86_64_nacl_bed
6871
6872 #undef ELF_MAXPAGESIZE
6873 #undef ELF_MINPAGESIZE
6874 #undef ELF_COMMONPAGESIZE
6875 #define ELF_MAXPAGESIZE 0x10000
6876 #define ELF_MINPAGESIZE 0x10000
6877 #define ELF_COMMONPAGESIZE 0x10000
6878
6879 /* Restore defaults. */
6880 #undef ELF_OSABI
6881 #undef elf_backend_static_tls_alignment
6882 #undef elf_backend_want_plt_sym
6883 #define elf_backend_want_plt_sym 0
6884 #undef elf_backend_strtab_flags
6885 #undef elf_backend_copy_special_section_fields
6886
6887 /* NaCl uses substantially different PLT entries for the same effects. */
6888
6889 #undef elf_backend_plt_alignment
6890 #define elf_backend_plt_alignment 5
6891 #define NACL_PLT_ENTRY_SIZE 64
6892 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6893
/* Template for the first (PLT0) entry of a NaCl x86-64 PLT.  NaCl
   requires indirect branches to be bundle-aligned, hence the mask/add
   sequence and the nop padding to the 64-byte entry size.  */
static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
  {
    0xff, 0x35, 8, 0, 0, 0,             /* pushq GOT+8(%rip) 	    */
    0x4c, 0x8b, 0x1d, 16, 0, 0, 0,	/* mov GOT+16(%rip), %r11   */
    0x41, 0x83, 0xe3, NACLMASK,		/* and $-32, %r11d	    */
    0x4d, 0x01, 0xfb,             	/* add %r15, %r11	    */
    0x41, 0xff, 0xe3,             	/* jmpq *%r11		    */

    /* 9-byte nop sequence to pad out to the next 32-byte boundary.  */
    0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */

    /* 32 bytes of nop to pad out to the standard size.  */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,    /* excess data16 prefixes */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,    /* excess data16 prefixes */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
    0x66,                                  /* excess data16 prefix  */
    0x90                                   /* nop */
  };
6913
/* Template for a regular NaCl x86-64 PLT entry.  The first half does
   the masked indirect jump through the GOT; the second half (reached
   by lazy GOT entries) pushes the relocation index and jumps back to
   PLT0.  */
static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
  {
    0x4c, 0x8b, 0x1d, 0, 0, 0, 0,	/* mov name@GOTPCREL(%rip),%r11 */
    0x41, 0x83, 0xe3, NACLMASK,         /* and $-32, %r11d	        */
    0x4d, 0x01, 0xfb,                   /* add %r15, %r11	        */
    0x41, 0xff, 0xe3,                   /* jmpq *%r11	                */

    /* 15-byte nop sequence to pad out to the next 32-byte boundary.  */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,    /* excess data16 prefixes */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */

    /* Lazy GOT entries point here (32-byte aligned).  */
    0x68,                 /* pushq immediate */
    0, 0, 0, 0,           /* replaced with index into relocation table.  */
    0xe9,                 /* jmp relative */
    0, 0, 0, 0,           /* replaced with offset to start of .plt0.  */

    /* 22 bytes of nop to pad out to the standard size.  */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,    /* excess data16 prefixes */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
    0x0f, 0x1f, 0x80, 0, 0, 0, 0,          /* nopl 0x0(%rax)            */
  };
6936
6937 /* .eh_frame covering the .plt section. */
6938
static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
  {
#if (PLT_CIE_LENGTH != 20				\
     || PLT_FDE_LENGTH != 36				\
     || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8	\
     || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
# error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
#endif
    PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
    0, 0, 0, 0,			/* CIE ID */
    1,				/* CIE version */
    'z', 'R', 0,                /* Augmentation string */
    1,				/* Code alignment factor */
    0x78,                       /* Data alignment factor */
    16,				/* Return address column */
    1,				/* Augmentation size */
    DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
    DW_CFA_def_cfa, 7, 8,	/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
    DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
    DW_CFA_nop, DW_CFA_nop,

    PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
    PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
    0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
    0, 0, 0, 0,			/* .plt size goes here */
    0,				/* Augmentation size */
    DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
    DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
    DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
    DW_CFA_advance_loc + 58,	/* DW_CFA_advance_loc: 58 to __PLT__+64 */
    DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
    13,				/* Block length */
    DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
    DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
    /* NOTE(review): the expression below appears to pick CFA 8 vs. 16
       based on RIP's offset within the 64-byte NaCl PLT entry —
       confirm against the NaCl PLT entry layout above.  */
    DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
    DW_OP_lit3, DW_OP_shl, DW_OP_plus,
    DW_CFA_nop, DW_CFA_nop
  };
6977
/* Backend parameters for the NaCl PLT layout; the offsets index into
   the 64-byte NaCl PLT entry templates above.  */
static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
  {
    elf_x86_64_nacl_plt0_entry,              /* plt0_entry */
    elf_x86_64_nacl_plt_entry,               /* plt_entry */
    NACL_PLT_ENTRY_SIZE,                     /* plt_entry_size */
    2,                                       /* plt0_got1_offset */
    9,                                       /* plt0_got2_offset */
    13,                                      /* plt0_got2_insn_end */
    3,                                       /* plt_got_offset */
    33,                                      /* plt_reloc_offset */
    38,                                      /* plt_plt_offset */
    7,                                       /* plt_got_insn_size */
    42,                                      /* plt_plt_insn_end */
    32,                                      /* plt_lazy_offset */
    elf_x86_64_nacl_eh_frame_plt,            /* eh_frame_plt */
    sizeof (elf_x86_64_nacl_eh_frame_plt),   /* eh_frame_plt_size */
  };
6995
6996 #undef elf_backend_arch_data
6997 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6998
6999 #undef elf_backend_object_p
7000 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
7001 #undef elf_backend_modify_segment_map
7002 #define elf_backend_modify_segment_map nacl_modify_segment_map
7003 #undef elf_backend_modify_program_headers
7004 #define elf_backend_modify_program_headers nacl_modify_program_headers
7005 #undef elf_backend_final_write_processing
7006 #define elf_backend_final_write_processing nacl_final_write_processing
7007
7008 #include "elf64-target.h"
7009
7010 /* Native Client x32 support. */
7011
7012 static bfd_boolean
7013 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
7014 {
7015 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
7016 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
7017 return TRUE;
7018 }
7019
7020 #undef TARGET_LITTLE_SYM
7021 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
7022 #undef TARGET_LITTLE_NAME
7023 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
7024 #undef elf32_bed
7025 #define elf32_bed elf32_x86_64_nacl_bed
7026
7027 #define bfd_elf32_bfd_link_hash_table_create \
7028 elf_x86_64_link_hash_table_create
7029 #define bfd_elf32_bfd_reloc_type_lookup \
7030 elf_x86_64_reloc_type_lookup
7031 #define bfd_elf32_bfd_reloc_name_lookup \
7032 elf_x86_64_reloc_name_lookup
7033 #define bfd_elf32_mkobject \
7034 elf_x86_64_mkobject
7035 #define bfd_elf32_get_synthetic_symtab \
7036 elf_x86_64_get_synthetic_symtab
7037
7038 #undef elf_backend_object_p
7039 #define elf_backend_object_p \
7040 elf32_x86_64_nacl_elf_object_p
7041
7042 #undef elf_backend_bfd_from_remote_memory
7043 #define elf_backend_bfd_from_remote_memory \
7044 _bfd_elf32_bfd_from_remote_memory
7045
7046 #undef elf_backend_size_info
7047 #define elf_backend_size_info \
7048 _bfd_elf32_size_info
7049
7050 #include "elf32-target.h"
7051
7052 /* Restore defaults. */
7053 #undef elf_backend_object_p
7054 #define elf_backend_object_p elf64_x86_64_elf_object_p
7055 #undef elf_backend_bfd_from_remote_memory
7056 #undef elf_backend_size_info
7057 #undef elf_backend_modify_segment_map
7058 #undef elf_backend_modify_program_headers
7059 #undef elf_backend_final_write_processing
7060
7061 /* Intel L1OM support. */
7062
7063 static bfd_boolean
7064 elf64_l1om_elf_object_p (bfd *abfd)
7065 {
7066 /* Set the right machine number for an L1OM elf64 file. */
7067 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
7068 return TRUE;
7069 }
7070
7071 #undef TARGET_LITTLE_SYM
7072 #define TARGET_LITTLE_SYM l1om_elf64_vec
7073 #undef TARGET_LITTLE_NAME
7074 #define TARGET_LITTLE_NAME "elf64-l1om"
7075 #undef ELF_ARCH
7076 #define ELF_ARCH bfd_arch_l1om
7077
7078 #undef ELF_MACHINE_CODE
7079 #define ELF_MACHINE_CODE EM_L1OM
7080
7081 #undef ELF_OSABI
7082
7083 #undef elf64_bed
7084 #define elf64_bed elf64_l1om_bed
7085
7086 #undef elf_backend_object_p
7087 #define elf_backend_object_p elf64_l1om_elf_object_p
7088
7089 /* Restore defaults. */
7090 #undef ELF_MAXPAGESIZE
7091 #undef ELF_MINPAGESIZE
7092 #undef ELF_COMMONPAGESIZE
7093 #define ELF_MAXPAGESIZE 0x200000
7094 #define ELF_MINPAGESIZE 0x1000
7095 #define ELF_COMMONPAGESIZE 0x1000
7096 #undef elf_backend_plt_alignment
7097 #define elf_backend_plt_alignment 4
7098 #undef elf_backend_arch_data
7099 #define elf_backend_arch_data &elf_x86_64_arch_bed
7100
7101 #include "elf64-target.h"
7102
7103 /* FreeBSD L1OM support. */
7104
7105 #undef TARGET_LITTLE_SYM
7106 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
7107 #undef TARGET_LITTLE_NAME
7108 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
7109
7110 #undef ELF_OSABI
7111 #define ELF_OSABI ELFOSABI_FREEBSD
7112
7113 #undef elf64_bed
7114 #define elf64_bed elf64_l1om_fbsd_bed
7115
7116 #include "elf64-target.h"
7117
7118 /* Intel K1OM support. */
7119
7120 static bfd_boolean
7121 elf64_k1om_elf_object_p (bfd *abfd)
7122 {
7123 /* Set the right machine number for an K1OM elf64 file. */
7124 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
7125 return TRUE;
7126 }
7127
7128 #undef TARGET_LITTLE_SYM
7129 #define TARGET_LITTLE_SYM k1om_elf64_vec
7130 #undef TARGET_LITTLE_NAME
7131 #define TARGET_LITTLE_NAME "elf64-k1om"
7132 #undef ELF_ARCH
7133 #define ELF_ARCH bfd_arch_k1om
7134
7135 #undef ELF_MACHINE_CODE
7136 #define ELF_MACHINE_CODE EM_K1OM
7137
7138 #undef ELF_OSABI
7139
7140 #undef elf64_bed
7141 #define elf64_bed elf64_k1om_bed
7142
7143 #undef elf_backend_object_p
7144 #define elf_backend_object_p elf64_k1om_elf_object_p
7145
7146 #undef elf_backend_static_tls_alignment
7147
7148 #undef elf_backend_want_plt_sym
7149 #define elf_backend_want_plt_sym 0
7150
7151 #include "elf64-target.h"
7152
7153 /* FreeBSD K1OM support. */
7154
7155 #undef TARGET_LITTLE_SYM
7156 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
7157 #undef TARGET_LITTLE_NAME
7158 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
7159
7160 #undef ELF_OSABI
7161 #define ELF_OSABI ELFOSABI_FREEBSD
7162
7163 #undef elf64_bed
7164 #define elf64_bed elf64_k1om_fbsd_bed
7165
7166 #include "elf64-target.h"
7167
7168 /* 32bit x86-64 support. */
7169
7170 #undef TARGET_LITTLE_SYM
7171 #define TARGET_LITTLE_SYM x86_64_elf32_vec
7172 #undef TARGET_LITTLE_NAME
7173 #define TARGET_LITTLE_NAME "elf32-x86-64"
7174 #undef elf32_bed
7175
7176 #undef ELF_ARCH
7177 #define ELF_ARCH bfd_arch_i386
7178
7179 #undef ELF_MACHINE_CODE
7180 #define ELF_MACHINE_CODE EM_X86_64
7181
7182 #undef ELF_OSABI
7183
7184 #undef elf_backend_object_p
7185 #define elf_backend_object_p \
7186 elf32_x86_64_elf_object_p
7187
7188 #undef elf_backend_bfd_from_remote_memory
7189 #define elf_backend_bfd_from_remote_memory \
7190 _bfd_elf32_bfd_from_remote_memory
7191
7192 #undef elf_backend_size_info
7193 #define elf_backend_size_info \
7194 _bfd_elf32_size_info
7195
7196 #include "elf32-target.h"
7197