/* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh (at) suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value
   (all bits set) to use as the "no mask" / "invalid offset" marker in
   the howto table below.  */
#define MINUS_ONE (~ (bfd_vma) 0)

/* Since both 32-bit and 64-bit x86-64 encode relocation type in the
   identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
   relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
   since they are the same.  */
42
/* The relocation "howto" table.  Order of fields:
   type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
   special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset.
   Entries must stay in relocation-number order so that the number can be
   used directly as a table index (see elf_x86_64_rtype_to_howto).  */
static reloc_howto_type x86_64_elf_howto_table[] =
{
  HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_NONE",	FALSE, 0x00000000, 0x00000000,
	FALSE),
  HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
	FALSE),
  HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
  HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
  HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
  HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
  HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
	0xffffffff, FALSE),
  HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
	0xffffffff, FALSE),
  HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
	TRUE),
  HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
	FALSE, MINUS_ONE, MINUS_ONE, FALSE),
  HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
	FALSE, 0xffffffff, 0xffffffff, TRUE),
  HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
	FALSE),
  HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
	MINUS_ONE, TRUE),
  HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
	FALSE, MINUS_ONE, MINUS_ONE, TRUE),
  HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
	FALSE),
  HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
	FALSE),
  HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
	complain_overflow_bitfield, bfd_elf_generic_reloc,
	"R_X86_64_GOTPC32_TLSDESC",
	FALSE, 0xffffffff, 0xffffffff, TRUE),
  HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
	complain_overflow_dont, bfd_elf_generic_reloc,
	"R_X86_64_TLSDESC_CALL",
	FALSE, 0, 0, FALSE),
  HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
	complain_overflow_bitfield, bfd_elf_generic_reloc,
	"R_X86_64_TLSDESC",
	FALSE, MINUS_ONE, MINUS_ONE, FALSE),
  HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
	MINUS_ONE, FALSE),
  HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
	TRUE),
  HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
	0xffffffff, TRUE),
  HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
	0xffffffff, TRUE),

  /* We have a gap in the reloc numbers here.
     R_X86_64_standard counts the number up to this point, and
     R_X86_64_vt_offset is the value to subtract from a reloc type of
     R_X86_64_GNU_VT* to form an index into this table.  */
#define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
#define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)

/* GNU extension to record C++ vtable hierarchy.  */
  HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
	 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),

/* GNU extension to record C++ vtable member usage.  */
  HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
	 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
	 FALSE),

/* Use complain_overflow_bitfield on R_X86_64_32 for x32.  This extra
   entry lives at the very end of the table; elf_x86_64_rtype_to_howto
   selects it instead of the ILP32-unsigned entry above when the BFD is
   not 64-bit (x32).  */
  HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
	FALSE)
};
198
/* Set if a relocation is converted from a GOTPCREL relocation.  Stored
   in the high bit of the 8-bit relocation type, which no real x86-64
   relocation number below R_X86_64_standard uses.  */
#define R_X86_64_converted_reloc_bit (1 << 7)

/* Nonzero if TYPE is one of the PC-relative relocation types.  */
#define X86_PCREL_TYPE_P(TYPE)		\
  (   ((TYPE) == R_X86_64_PC8)		\
   || ((TYPE) == R_X86_64_PC16)		\
   || ((TYPE) == R_X86_64_PC32)		\
   || ((TYPE) == R_X86_64_PC32_BND)	\
   || ((TYPE) == R_X86_64_PC64))

/* Nonzero if TYPE is one of the symbol-size relocation types.  */
#define X86_SIZE_TYPE_P(TYPE) \
  ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
/* Map BFD relocs to the x86_64 elf relocs.  Used by
   elf_x86_64_reloc_type_lookup to translate a bfd_reloc_code_real_type
   into an index into x86_64_elf_howto_table.  */
struct elf_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;  /* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;		   /* Corresponding ELF r_type.  */
};

static const struct elf_reloc_map x86_64_reloc_map[] =
{
  { BFD_RELOC_NONE,		R_X86_64_NONE, },
  { BFD_RELOC_64,		R_X86_64_64, },
  { BFD_RELOC_32_PCREL,		R_X86_64_PC32, },
  { BFD_RELOC_X86_64_GOT32,	R_X86_64_GOT32,},
  { BFD_RELOC_X86_64_PLT32,	R_X86_64_PLT32,},
  { BFD_RELOC_X86_64_COPY,	R_X86_64_COPY, },
  { BFD_RELOC_X86_64_GLOB_DAT,	R_X86_64_GLOB_DAT, },
  { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
  { BFD_RELOC_X86_64_RELATIVE,	R_X86_64_RELATIVE, },
  { BFD_RELOC_X86_64_GOTPCREL,	R_X86_64_GOTPCREL, },
  { BFD_RELOC_32,		R_X86_64_32, },
  { BFD_RELOC_X86_64_32S,	R_X86_64_32S, },
  { BFD_RELOC_16,		R_X86_64_16, },
  { BFD_RELOC_16_PCREL,		R_X86_64_PC16, },
  { BFD_RELOC_8,		R_X86_64_8, },
  { BFD_RELOC_8_PCREL,		R_X86_64_PC8, },
  { BFD_RELOC_X86_64_DTPMOD64,	R_X86_64_DTPMOD64, },
  { BFD_RELOC_X86_64_DTPOFF64,	R_X86_64_DTPOFF64, },
  { BFD_RELOC_X86_64_TPOFF64,	R_X86_64_TPOFF64, },
  { BFD_RELOC_X86_64_TLSGD,	R_X86_64_TLSGD, },
  { BFD_RELOC_X86_64_TLSLD,	R_X86_64_TLSLD, },
  { BFD_RELOC_X86_64_DTPOFF32,	R_X86_64_DTPOFF32, },
  { BFD_RELOC_X86_64_GOTTPOFF,	R_X86_64_GOTTPOFF, },
  { BFD_RELOC_X86_64_TPOFF32,	R_X86_64_TPOFF32, },
  { BFD_RELOC_64_PCREL,		R_X86_64_PC64, },
  { BFD_RELOC_X86_64_GOTOFF64,	R_X86_64_GOTOFF64, },
  { BFD_RELOC_X86_64_GOTPC32,	R_X86_64_GOTPC32, },
  { BFD_RELOC_X86_64_GOT64,	R_X86_64_GOT64, },
  { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
  { BFD_RELOC_X86_64_GOTPC64,	R_X86_64_GOTPC64, },
  { BFD_RELOC_X86_64_GOTPLT64,	R_X86_64_GOTPLT64, },
  { BFD_RELOC_X86_64_PLTOFF64,	R_X86_64_PLTOFF64, },
  { BFD_RELOC_SIZE32,		R_X86_64_SIZE32, },
  { BFD_RELOC_SIZE64,		R_X86_64_SIZE64, },
  { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
  { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
  { BFD_RELOC_X86_64_TLSDESC,	R_X86_64_TLSDESC, },
  { BFD_RELOC_X86_64_IRELATIVE,	R_X86_64_IRELATIVE, },
  { BFD_RELOC_X86_64_PC32_BND,	R_X86_64_PC32_BND, },
  { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
  { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
  { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
  { BFD_RELOC_VTABLE_INHERIT,	R_X86_64_GNU_VTINHERIT, },
  { BFD_RELOC_VTABLE_ENTRY,	R_X86_64_GNU_VTENTRY, },
};
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353
354 /* Support for core dump NOTE sections. */
356 static bfd_boolean
357 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
358 {
359 int offset;
360 size_t size;
361
362 switch (note->descsz)
363 {
364 default:
365 return FALSE;
366
367 case 296: /* sizeof(istruct elf_prstatus) on Linux/x32 */
368 /* pr_cursig */
369 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
370
371 /* pr_pid */
372 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
373
374 /* pr_reg */
375 offset = 72;
376 size = 216;
377
378 break;
379
380 case 336: /* sizeof(istruct elf_prstatus) on Linux/x86_64 */
381 /* pr_cursig */
382 elf_tdata (abfd)->core->signal
383 = bfd_get_16 (abfd, note->descdata + 12);
384
385 /* pr_pid */
386 elf_tdata (abfd)->core->lwpid
387 = bfd_get_32 (abfd, note->descdata + 32);
388
389 /* pr_reg */
390 offset = 112;
391 size = 216;
392
393 break;
394 }
395
396 /* Make a ".reg/999" section. */
397 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
398 size, note->descpos + offset);
399 }
400
401 static bfd_boolean
402 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
403 {
404 switch (note->descsz)
405 {
406 default:
407 return FALSE;
408
409 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
410 elf_tdata (abfd)->core->pid
411 = bfd_get_32 (abfd, note->descdata + 12);
412 elf_tdata (abfd)->core->program
413 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
414 elf_tdata (abfd)->core->command
415 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
416 break;
417
418 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
419 elf_tdata (abfd)->core->pid
420 = bfd_get_32 (abfd, note->descdata + 24);
421 elf_tdata (abfd)->core->program
422 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
423 elf_tdata (abfd)->core->command
424 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
425 }
426
427 /* Note that for some reason, a spurious space is tacked
428 onto the end of the args in some (at least one anyway)
429 implementations, so strip it off if it exists. */
430
431 {
432 char *command = elf_tdata (abfd)->core->command;
433 int n = strlen (command);
434
435 if (0 < n && command[n - 1] == ' ')
436 command[n - 1] = '\0';
437 }
438
439 return TRUE;
440 }
441
#ifdef CORE_HEADER
# if GCC_VERSION >= 8000
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wstringop-truncation"
# endif
/* Write an NT_PRPSINFO or NT_PRSTATUS core note into BUF for ABFD,
   choosing the x32/32-bit or 64-bit note layout from the backend's ELF
   class (and EM_X86_64 for x32 prstatus).  The variadic arguments are:
     NT_PRPSINFO: const char *fname, const char *psargs
     NT_PRSTATUS: long pid, int cursig, const void *gregs
   Returns the updated buffer (see elfcore_write_note) or NULL for an
   unhandled NOTE_TYPE.  The strncpy calls intentionally fill the
   fixed-width prpsinfo fields without guaranteed NUL termination --
   hence the -Wstringop-truncation suppression above.  */
static char *
elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
			    int note_type, ...)
{
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  va_list ap;
  const char *fname, *psargs;
  long pid;
  int cursig;
  const void *gregs;

  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      va_start (ap, note_type);
      fname = va_arg (ap, const char *);
      psargs = va_arg (ap, const char *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  prpsinfo32_t data;
	  memset (&data, 0, sizeof (data));
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      else
	{
	  prpsinfo64_t data;
	  memset (&data, 0, sizeof (data));
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      /* NOTREACHED */

    case NT_PRSTATUS:
      va_start (ap, note_type);
      pid = va_arg (ap, long);
      cursig = va_arg (ap, int);
      gregs = va_arg (ap, const void *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  /* ELFCLASS32 with an x86-64 machine code means x32, which
	     carries the full 64-bit register set in its prstatus.  */
	  if (bed->elf_machine_code == EM_X86_64)
	    {
	      prstatusx32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	  else
	    {
	      prstatus32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	}
      else
	{
	  prstatus64_t prstat;
	  memset (&prstat, 0, sizeof (prstat));
	  prstat.pr_pid = pid;
	  prstat.pr_cursig = cursig;
	  memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &prstat, sizeof (prstat));
	}
    }
  /* NOTREACHED */
}
# if GCC_VERSION >= 8000
#  pragma GCC diagnostic pop
# endif
#endif
536
/* Functions for the x86-64 ELF linker.	 */

/* The size in bytes of an entry in the global offset table.  */

#define GOT_ENTRY_SIZE 8

/* The size in bytes of an entry in the lazy procedure linkage table.  */

#define LAZY_PLT_ENTRY_SIZE 16

/* The size in bytes of an entry in the non-lazy procedure linkage
   table.  */

#define NON_LAZY_PLT_ENTRY_SIZE 8
552
/* The first entry in a lazy procedure linkage table looks like this.
   See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
   works.  */

static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)  */
  0xff, 0x25, 16, 0, 0, 0,	/* jmpq *GOT+16(%rip) */
  0x0f, 0x1f, 0x40, 0x00	/* nopl 0(%rax)       */
};

/* Subsequent entries in a lazy procedure linkage table look like this.  */

static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x68,		/* pushq immediate */
  0, 0, 0, 0,	/* replaced with index into relocation table.  */
  0xe9,		/* jmp relative */
  0, 0, 0, 0	/* replaced with offset to start of .plt0.  */
};

/* The first entry in a lazy procedure linkage table with BND prefix
   like this.  */

static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	  /* pushq GOT+8(%rip)	      */
  0xf2, 0xff, 0x25, 16, 0, 0, 0,  /* bnd jmpq *GOT+16(%rip)   */
  0x0f, 0x1f, 0			  /* nopl (%rax)	      */
};

/* Subsequent entries for branches with BND prefix in a lazy procedure
   linkage table look like this.  */

static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative	      */
  0x0f, 0x1f, 0x44, 0, 0	/* nopl 0(%rax,%rax,1)	      */
};

/* The first entry in the IBT-enabled lazy procedure linkage table is
   the same as the lazy PLT with BND prefix so that bound registers are
   preserved when control is passed to dynamic linker.  Subsequent
   entries for a IBT-enabled lazy procedure linkage table look like
   this.  */

static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative	      */
  0x90				/* nop			      */
};

/* The first entry in the x32 IBT-enabled lazy procedure linkage table
   is the same as the normal lazy PLT.  Subsequent entries for an
   x32 IBT-enabled lazy procedure linkage table look like this.  */

static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xe9, 0, 0, 0, 0,		/* jmpq relative	      */
  0x66, 0x90			/* xchg %ax,%ax		      */
};

/* Entries in the non-lazy procedure linkage table look like this.  */

static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip)			      */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.	      */
  0x66, 0x90	/* xchg %ax,%ax					      */
};

/* Entries for branches with BND prefix in the non-lazy procedure
   linkage table look like this.  */

static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xf2, 0xff, 0x25,	/* bnd jmpq *name@GOTPC(%rip)		      */
  0, 0, 0, 0,		/* replaced with offset to this symbol in .got. */
  0x90			/* nop					      */
};

/* Entries for branches with IBT-enabled in the non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entry.  */

static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64			      */
  0xf2, 0xff, 0x25,		/* bnd jmpq *name@GOTPC(%rip)	      */
  0, 0, 0, 0,			/* replaced with offset to this symbol in .got. */
  0x0f, 0x1f, 0x44, 0x00, 0x00	/* nopl 0x0(%rax,%rax,1)	      */
};

/* Entries for branches with IBT-enabled in the x32 non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entry.  */

static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	     /* endbr64			      */
  0xff, 0x25,			     /* jmpq *name@GOTPC(%rip)	      */
  0, 0, 0, 0,			     /* replaced with offset to this symbol in .got. */
  0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1)	      */
};

/* The TLSDESC entry in a lazy procedure linkage table.  */
static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)	      */
  0xff, 0x25, 16, 0, 0, 0	/* jmpq *GOT+TDG(%rip)	      */
};
672
/* .eh_frame covering the lazy .plt section.  One CIE followed by one
   FDE whose PC-begin/range fields are patched at link time.  The CFA
   expression encodes the stack adjustment performed by PLT entries
   (lit11 is the entry-offset threshold specific to this PLT layout).  */

static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};

/* .eh_frame covering the lazy BND .plt section.  Identical to the
   plain lazy variant except for the lit5 threshold in the CFA
   expression (BND PLT entries push before branching).  */

static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};

/* .eh_frame covering the lazy .plt section with IBT-enabled (lit10
   threshold: the endbr64 shifts the push/branch offsets).  */

static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};

/* .eh_frame covering the x32 lazy .plt section with IBT-enabled
   (lit9 threshold for the x32 entry layout).  */

static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};

/* .eh_frame covering the non-lazy .plt section.  Non-lazy entries
   never touch the stack, so the FDE needs no CFA adjustments.  */

static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
{
#define PLT_GOT_FDE_LENGTH		20
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_GOT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* the start of non-lazy .plt goes here */
  0, 0, 0, 0,			/* non-lazy .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
839
840 /* These are the standard parameters. */
/* Layout parameters for the standard lazy-binding x86-64 PLT.
   The offsets below index into the entry templates referenced here
   (plt0_entry etc.) and locate the GOT/PLT displacement fields that
   the linker must patch.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
  {
    elf_x86_64_lazy_plt0_entry,		/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x86_64_lazy_plt_entry,		/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    8,					/* plt0_got2_offset */
    12,					/* plt0_got2_insn_end */
    2,					/* plt_got_offset */
    7,					/* plt_reloc_offset */
    12,					/* plt_plt_offset */
    6,					/* plt_got_insn_size */
    LAZY_PLT_ENTRY_SIZE,		/* plt_plt_insn_end */
    6,					/* plt_lazy_offset */
    elf_x86_64_lazy_plt0_entry,		/* pic_plt0_entry */
    elf_x86_64_lazy_plt_entry,		/* pic_plt_entry */
    elf_x86_64_eh_frame_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
  };
867
/* Layout parameters for the non-lazy (BIND_NOW / .plt.got) PLT;
   each entry is a single indirect jump, so only the GOT-load offsets
   are needed.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
  {
    elf_x86_64_non_lazy_plt_entry,	/* plt_entry */
    elf_x86_64_non_lazy_plt_entry,	/* pic_plt_entry */
    NON_LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    2,					/* plt_got_offset */
    6,					/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
878
/* Layout for the MPX (bnd-prefixed) lazy PLT.  The "1+" terms in the
   offsets skip a one-byte prefix in the entry templates — presumably
   the 0xf2 bnd prefix; confirm against the entry byte arrays.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
  {
    elf_x86_64_lazy_bnd_plt0_entry,	/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x86_64_lazy_bnd_plt_entry,	/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    1+8,				/* plt0_got2_offset */
    1+12,				/* plt0_got2_insn_end */
    1+2,				/* plt_got_offset */
    1,					/* plt_reloc_offset */
    7,					/* plt_plt_offset */
    1+6,				/* plt_got_insn_size */
    11,					/* plt_plt_insn_end */
    0,					/* plt_lazy_offset */
    elf_x86_64_lazy_bnd_plt0_entry,	/* pic_plt0_entry */
    elf_x86_64_lazy_bnd_plt_entry,	/* pic_plt_entry */
    elf_x86_64_eh_frame_lazy_bnd_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
  };
905
/* Layout for the MPX (bnd-prefixed) non-lazy PLT; shares the plain
   non-lazy .eh_frame since the CFI is identical.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
  {
    elf_x86_64_non_lazy_bnd_plt_entry,	/* plt_entry */
    elf_x86_64_non_lazy_bnd_plt_entry,	/* pic_plt_entry */
    NON_LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    1+2,				/* plt_got_offset */
    1+6,				/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
916
/* Layout for the IBT lazy PLT (64-bit ABI).  The "4+" terms in the
   offsets skip a four-byte leading instruction in the entry templates
   — presumably endbr64; the extra "1+" matches the bnd-style prefix
   (plt0 is shared with the bnd layout above).  Verify against the
   entry byte arrays.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
  {
    elf_x86_64_lazy_bnd_plt0_entry,	/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x86_64_lazy_ibt_plt_entry,	/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    1+8,				/* plt0_got2_offset */
    1+12,				/* plt0_got2_insn_end */
    4+1+2,				/* plt_got_offset */
    4+1,				/* plt_reloc_offset */
    4+1+6,				/* plt_plt_offset */
    4+1+6,				/* plt_got_insn_size */
    4+1+5+5,				/* plt_plt_insn_end */
    0,					/* plt_lazy_offset */
    elf_x86_64_lazy_bnd_plt0_entry,	/* pic_plt0_entry */
    elf_x86_64_lazy_ibt_plt_entry,	/* pic_plt_entry */
    elf_x86_64_eh_frame_lazy_ibt_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
  };
943
/* Layout for the IBT lazy PLT under the x32 ABI.  Same shape as the
   64-bit IBT layout but without the extra one-byte prefix term, and
   using the plain (non-bnd) plt0 template.  */
static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
  {
    elf_x86_64_lazy_plt0_entry,		/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x32_lazy_ibt_plt_entry,		/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    8,					/* plt0_got2_offset */
    12,					/* plt0_got2_insn_end */
    4+2,				/* plt_got_offset */
    4+1,				/* plt_reloc_offset */
    4+6,				/* plt_plt_offset */
    4+6,				/* plt_got_insn_size */
    4+5+5,				/* plt_plt_insn_end */
    0,					/* plt_lazy_offset */
    elf_x86_64_lazy_plt0_entry,		/* pic_plt0_entry */
    elf_x32_lazy_ibt_plt_entry,		/* pic_plt_entry */
    elf_x32_eh_frame_lazy_ibt_plt,	/* eh_frame_plt */
    sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
  };
970
/* Layout for the IBT non-lazy PLT (64-bit ABI).  Note entries are
   LAZY_PLT_ENTRY_SIZE here, unlike the plain non-lazy layout.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
  {
    elf_x86_64_non_lazy_ibt_plt_entry,	/* plt_entry */
    elf_x86_64_non_lazy_ibt_plt_entry,	/* pic_plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    4+1+2,				/* plt_got_offset */
    4+1+6,				/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
981
/* Layout for the IBT non-lazy PLT under the x32 ABI.  */
static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
  {
    elf_x32_non_lazy_ibt_plt_entry,	/* plt_entry */
    elf_x32_non_lazy_ibt_plt_entry,	/* pic_plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    4+2,				/* plt_got_offset */
    4+6,				/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
992
/* Backend data for the plain (non-NaCl) x86-64 target: only the OS
   discriminator is set.  */
static const struct elf_x86_backend_data elf_x86_64_arch_bed =
  {
    is_normal				 /* os */
  };
997
998 #define elf_backend_arch_data &elf_x86_64_arch_bed
999
/* object_p hook for 64-bit objects: record i386/x86-64 as the
   architecture/machine of ABFD.  Always returns TRUE; the result of
   bfd_default_set_arch_mach is deliberately ignored.  */
static bfd_boolean
elf64_x86_64_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an x86-64 elf64 file.  */
  bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
  return TRUE;
}
1007
/* object_p hook for x32 (ELFCLASS32 x86-64) objects: record
   i386/x64_32 as the architecture/machine of ABFD.  Always TRUE.  */
static bfd_boolean
elf32_x86_64_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an x86-64 elf32 file.  */
  bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
  return TRUE;
}
1015
/* Return TRUE if the TLS access code sequence supports a transition
   from R_TYPE.  */
1018
/* Verify that the instruction bytes around REL match one of the TLS
   code sequences the linker knows how to rewrite.  Returns FALSE when
   the bytes (or the following __tls_get_addr relocation, for GD/LD)
   do not match, in which case no transition may be performed.  */
static bfd_boolean
elf_x86_64_check_tls_transition (bfd *abfd,
				 struct bfd_link_info *info,
				 asection *sec,
				 bfd_byte *contents,
				 Elf_Internal_Shdr *symtab_hdr,
				 struct elf_link_hash_entry **sym_hashes,
				 unsigned int r_type,
				 const Elf_Internal_Rela *rel,
				 const Elf_Internal_Rela *relend)
{
  unsigned int val;
  unsigned long r_symndx;
  bfd_boolean largepic = FALSE;
  struct elf_link_hash_entry *h;
  bfd_vma offset;
  struct elf_x86_link_hash_table *htab;
  bfd_byte *call;
  bfd_boolean indirect_call;

  htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
  offset = rel->r_offset;
  switch (r_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_TLSLD:
      /* GD/LD sequences are followed by the reloc on the
	 __tls_get_addr call, so a next reloc must exist.  */
      if ((rel + 1) >= relend)
	return FALSE;

      if (r_type == R_X86_64_TLSGD)
	{
	  /* Check transition from GD access model.  For 64bit, only
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For 32bit, only
		leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic,
	     we also support:
		leaq foo@tlsgd(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $r15, %rax
		call *%rax
	     or
		leaq foo@tlsgd(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $rbx, %rax
		call *%rax  */

	  static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };

	  if ((offset + 12) > sec->size)
	    return FALSE;

	  /* CALL points at the 4 bytes following the leaq's
	     displacement, i.e. the start of the call sequence.  */
	  call = contents + offset + 4;
	  if (call[0] != 0x66
	      || !((call[1] == 0x48
		    && call[2] == 0xff
		    && call[3] == 0x15)
		   || (call[1] == 0x48
		       && call[2] == 0x67
		       && call[3] == 0xe8)
		   || (call[1] == 0x66
		       && call[2] == 0x48
		       && call[3] == 0xe8)))
	    {
	      /* Not one of the small-model sequences; accept only the
		 64-bit largepic movabsq/addq/call*%rax form.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || offset < 3
		  || memcmp (call - 7, leaq + 1, 3) != 0
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return FALSE;
	      largepic = TRUE;
	    }
	  else if (ABI_64_P (abfd))
	    {
	      /* 64-bit: need the full 0x66-prefixed leaq.  */
	      if (offset < 4
		  || memcmp (contents + offset - 4, leaq, 4) != 0)
		return FALSE;
	    }
	  else
	    {
	      /* x32: leaq without the 0x66 prefix.  */
	      if (offset < 3
		  || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
		return FALSE;
	    }
	  indirect_call = call[2] == 0xff;
	}
      else
	{
	  /* Check transition from LD access model.  Only
		leaq foo@tlsld(%rip), %rdi;
		call __tls_get_addr@PLT
	     or
		leaq foo@tlsld(%rip), %rdi;
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic
	     we also support:
		leaq foo@tlsld(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $r15, %rax
		call *%rax
	     or
		leaq foo@tlsld(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $rbx, %rax
		call *%rax  */

	  static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };

	  if (offset < 3 || (offset + 9) > sec->size)
	    return FALSE;

	  if (memcmp (contents + offset - 3, lea, 3) != 0)
	    return FALSE;

	  call = contents + offset + 4;
	  if (!(call[0] == 0xe8
		|| (call[0] == 0xff && call[1] == 0x15)
		|| (call[0] == 0x67 && call[1] == 0xe8)))
	    {
	      /* Fall back to the 64-bit largepic form, as above.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return FALSE;
	      largepic = TRUE;
	    }
	  indirect_call = call[0] == 0xff;
	}

      /* The next reloc must be against __tls_get_addr (a global whose
	 hash entry has tls_get_addr set) and of the reloc type that
	 matches the call form recognized above.  */
      r_symndx = htab->r_sym (rel[1].r_info);
      if (r_symndx < symtab_hdr->sh_info)
	return FALSE;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      if (h == NULL
	  || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
	return FALSE;
      else
	{
	  r_type = (ELF32_R_TYPE (rel[1].r_info)
		    & ~R_X86_64_converted_reloc_bit);
	  if (largepic)
	    return r_type == R_X86_64_PLTOFF64;
	  else if (indirect_call)
	    return r_type == R_X86_64_GOTPCRELX;
	  else
	    return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
	}

    case R_X86_64_GOTTPOFF:
      /* Check transition from IE access model:
		mov foo@gottpoff(%rip), %reg
		add foo@gottpoff(%rip), %reg
       */

      /* Check REX prefix first.  */
      if (offset >= 3 && (offset + 4) <= sec->size)
	{
	  val = bfd_get_8 (abfd, contents + offset - 3);
	  if (val != 0x48 && val != 0x4c)
	    {
	      /* X32 may have 0x44 REX prefix or no REX prefix.  */
	      if (ABI_64_P (abfd))
		return FALSE;
	    }
	}
      else
	{
	  /* X32 may not have any REX prefix.  */
	  if (ABI_64_P (abfd))
	    return FALSE;
	  if (offset < 2 || (offset + 3) > sec->size)
	    return FALSE;
	}

      /* Opcode must be mov (0x8b) or add (0x03) ...  */
      val = bfd_get_8 (abfd, contents + offset - 2);
      if (val != 0x8b && val != 0x03)
	return FALSE;

      /* ... with a RIP-relative ModRM byte (mod=00, rm=101).  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return (val & 0xc7) == 5;

    case R_X86_64_GOTPC32_TLSDESC:
      /* Check transition from GDesc access model:
		leaq x@tlsdesc(%rip), %rax

	 Make sure it's a leaq adding rip to a 32-bit offset
	 into any register, although it's probably almost always
	 going to be rax.  */

      if (offset < 3 || (offset + 4) > sec->size)
	return FALSE;

      /* REX.W set; REX.R may vary (0x48 or 0x4c).  */
      val = bfd_get_8 (abfd, contents + offset - 3);
      if ((val & 0xfb) != 0x48)
	return FALSE;

      if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
	return FALSE;

      val = bfd_get_8 (abfd, contents + offset - 1);
      return (val & 0xc7) == 0x05;

    case R_X86_64_TLSDESC_CALL:
      /* Check transition from GDesc access model:
		call *x@tlsdesc(%rax)
       */
      if (offset + 2 <= sec->size)
	{
	  /* Make sure that it's a call *x@tlsdesc(%rax).  */
	  call = contents + offset;
	  return call[0] == 0xff && call[1] == 0x10;
	}

      return FALSE;

    default:
      abort ();
    }
}
1264
1265 /* Return TRUE if the TLS access transition is OK or no transition
1266 will be performed. Update R_TYPE if there is a transition. */
1267
/* Decide (and validate) the TLS model transition for *R_TYPE.
   Called both from check_relocs (FROM_RELOCATE_SECTION false) and
   from relocate_section (true); in the latter case TLS_TYPE enables
   further GD->IE / IE->LE refinements.  On success *R_TYPE holds the
   target reloc type; on a failed validation an error is reported and
   FALSE is returned.  */
static bfd_boolean
elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
			   asection *sec, bfd_byte *contents,
			   Elf_Internal_Shdr *symtab_hdr,
			   struct elf_link_hash_entry **sym_hashes,
			   unsigned int *r_type, int tls_type,
			   const Elf_Internal_Rela *rel,
			   const Elf_Internal_Rela *relend,
			   struct elf_link_hash_entry *h,
			   unsigned long r_symndx,
			   bfd_boolean from_relocate_section)
{
  unsigned int from_type = *r_type;
  unsigned int to_type = from_type;
  bfd_boolean check = TRUE;

  /* Skip TLS transition for functions.  */
  if (h != NULL
      && (h->type == STT_FUNC
	  || h->type == STT_GNU_IFUNC))
    return TRUE;

  switch (from_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_GOTPC32_TLSDESC:
    case R_X86_64_TLSDESC_CALL:
    case R_X86_64_GOTTPOFF:
      if (bfd_link_executable (info))
	{
	  /* Local symbols in an executable can go straight to LE;
	     globals first go to IE.  */
	  if (h == NULL)
	    to_type = R_X86_64_TPOFF32;
	  else
	    to_type = R_X86_64_GOTTPOFF;
	}

      /* When we are called from elf_x86_64_relocate_section, there may
	 be additional transitions based on TLS_TYPE.  */
      if (from_relocate_section)
	{
	  unsigned int new_to_type = to_type;

	  if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
	    new_to_type = R_X86_64_TPOFF32;

	  if (to_type == R_X86_64_TLSGD
	      || to_type == R_X86_64_GOTPC32_TLSDESC
	      || to_type == R_X86_64_TLSDESC_CALL)
	    {
	      if (tls_type == GOT_TLS_IE)
		new_to_type = R_X86_64_GOTTPOFF;
	    }

	  /* We checked the transition before when we were called from
	     elf_x86_64_check_relocs.  We only want to check the new
	     transition which hasn't been checked before.  */
	  check = new_to_type != to_type && from_type == to_type;
	  to_type = new_to_type;
	}

      break;

    case R_X86_64_TLSLD:
      if (bfd_link_executable (info))
	to_type = R_X86_64_TPOFF32;
      break;

    default:
      return TRUE;
    }

  /* Return TRUE if there is no transition.  */
  if (from_type == to_type)
    return TRUE;

  /* Check if the transition can be performed.  */
  if (check
      && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
					    symtab_hdr, sym_hashes,
					    from_type, rel, relend))
    {
      reloc_howto_type *from, *to;
      const char *name;

      from = elf_x86_64_rtype_to_howto (abfd, from_type);
      to = elf_x86_64_rtype_to_howto (abfd, to_type);

      if (from == NULL || to == NULL)
	return FALSE;

      if (h)
	name = h->root.root.string;
      else
	{
	  struct elf_x86_link_hash_table *htab;

	  htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
	  if (htab == NULL)
	    name = "*unknown*";
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
	    }
	}

      _bfd_error_handler
	/* xgettext:c-format */
	(_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
	   " in section `%pA' failed"),
	 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  *r_type = to_type;
  return TRUE;
}
1389
1390 /* Rename some of the generic section flags to better document how they
1391 are used here. */
1392 #define check_relocs_failed sec_flg0
1393
/* Report a relocation that cannot be used when producing the current
   output type (shared object / PIE / PDE), set bfd_error_bad_value,
   mark SEC as failed, and return FALSE so callers can bail out
   directly with this function's result.  */
static bfd_boolean
elf_x86_64_need_pic (struct bfd_link_info *info,
		     bfd *input_bfd, asection *sec,
		     struct elf_link_hash_entry *h,
		     Elf_Internal_Shdr *symtab_hdr,
		     Elf_Internal_Sym *isym,
		     reloc_howto_type *howto)
{
  const char *v = "";
  const char *und = "";
  const char *pic = "";
  const char *object;

  const char *name;
  if (h)
    {
      name = h->root.root.string;
      /* Describe the symbol's visibility; only default visibility
	 (without def_protected) gets the -fPIC hint, since hidden/
	 internal/protected symbols can't be fixed by recompiling the
	 referencing object.  */
      switch (ELF_ST_VISIBILITY (h->other))
	{
	case STV_HIDDEN:
	  v = _("hidden symbol ");
	  break;
	case STV_INTERNAL:
	  v = _("internal symbol ");
	  break;
	case STV_PROTECTED:
	  v = _("protected symbol ");
	  break;
	default:
	  if (((struct elf_x86_link_hash_entry *) h)->def_protected)
	    v = _("protected symbol ");
	  else
	    v = _("symbol ");
	  pic = _("; recompile with -fPIC");
	  break;
	}

      if (!h->def_regular && !h->def_dynamic)
	und = _("undefined ");
    }
  else
    {
      /* Local symbol: look its name up from the symbol table.  */
      name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
      pic = _("; recompile with -fPIC");
    }

  if (bfd_link_dll (info))
    object = _("a shared object");
  else if (bfd_link_pie (info))
    object = _("a PIE object");
  else
    object = _("a PDE object");

  /* xgettext:c-format */
  _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
			"not be used when making %s%s"),
		      input_bfd, howto->name, und, v, name,
		      object, pic);
  bfd_set_error (bfd_error_bad_value);
  sec->check_relocs_failed = 1;
  return FALSE;
}
1456
1457 /* With the local symbol, foo, we convert
1458 mov foo@GOTPCREL(%rip), %reg
1459 to
1460 lea foo(%rip), %reg
1461 and convert
1462 call/jmp *foo@GOTPCREL(%rip)
1463 to
1464 nop call foo/jmp foo nop
1465 When PIC is false, convert
1466 test %reg, foo@GOTPCREL(%rip)
1467 to
1468 test $foo, %reg
1469 and convert
1470 binop foo@GOTPCREL(%rip), %reg
1471 to
1472 binop $foo, %reg
1473 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1474 instructions. */
1475
/* Try to rewrite a GOTPCREL/GOTPCRELX load in place (see the comment
   block above for the instruction conversions performed).  On a
   successful conversion the instruction bytes and IREL are updated,
   *R_TYPE_P holds the new reloc type (also stored in IREL with
   R_X86_64_converted_reloc_bit set) and *CONVERTED is set to TRUE.
   Returns TRUE both when a conversion happened and when it was simply
   skipped; FALSE is never returned here, so callers treat the return
   purely as an error flag.  */
static bfd_boolean
elf_x86_64_convert_load_reloc (bfd *abfd,
			       bfd_byte *contents,
			       unsigned int *r_type_p,
			       Elf_Internal_Rela *irel,
			       struct elf_link_hash_entry *h,
			       bfd_boolean *converted,
			       struct bfd_link_info *link_info)
{
  struct elf_x86_link_hash_table *htab;
  bfd_boolean is_pic;
  bfd_boolean no_overflow;
  bfd_boolean relocx;
  bfd_boolean to_reloc_pc32;
  asection *tsec;
  bfd_signed_vma raddend;
  unsigned int opcode;
  unsigned int modrm;
  unsigned int r_type = *r_type_p;
  unsigned int r_symndx;
  bfd_vma roff = irel->r_offset;

  /* Need room before the reloc for opcode+ModRM (and REX for the
     REX-prefixed form).  */
  if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
    return TRUE;

  raddend = irel->r_addend;
  /* Addend for 32-bit PC-relative relocation must be -4.  */
  if (raddend != -4)
    return TRUE;

  htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
  is_pic = bfd_link_pic (link_info);

  relocx = (r_type == R_X86_64_GOTPCRELX
	    || r_type == R_X86_64_REX_GOTPCRELX);

  /* TRUE if --no-relax is used.  */
  no_overflow = link_info->disable_target_specific_optimizations > 1;

  r_symndx = htab->r_sym (irel->r_info);

  opcode = bfd_get_8 (abfd, contents + roff - 2);

  /* Convert mov to lea since it has been done for a while.  */
  if (opcode != 0x8b)
    {
      /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
	 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
	 test, xor instructions.  */
      if (!relocx)
	return TRUE;
    }

  /* We convert only to R_X86_64_PC32:
     1. Branch.
     2. R_X86_64_GOTPCREL since we can't modify REX byte.
     3. no_overflow is true.
     4. PIC.
     */
  to_reloc_pc32 = (opcode == 0xff
		   || !relocx
		   || no_overflow
		   || is_pic);

  /* Get the symbol referred to by the reloc.  */
  if (h == NULL)
    {
      Elf_Internal_Sym *isym
	= bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);

      /* Skip relocation against undefined symbols.  */
      if (isym->st_shndx == SHN_UNDEF)
	return TRUE;

      if (isym->st_shndx == SHN_ABS)
	tsec = bfd_abs_section_ptr;
      else if (isym->st_shndx == SHN_COMMON)
	tsec = bfd_com_section_ptr;
      else if (isym->st_shndx == SHN_X86_64_LCOMMON)
	tsec = &_bfd_elf_large_com_section;
      else
	tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
    }
  else
    {
      /* Undefined weak symbol is only bound locally in executable
	 and its reference is resolved as 0 without relocation
	 overflow.  We can only perform this optimization for
	 GOTPCRELX relocations since we need to modify REX byte.
	 It is OK convert mov with R_X86_64_GOTPCREL to
	 R_X86_64_PC32.  */
      bfd_boolean local_ref;
      struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);

      /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P.  */
      local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
      if ((relocx || opcode == 0x8b)
	  && (h->root.type == bfd_link_hash_undefweak
	      && !eh->linker_def
	      && local_ref))
	{
	  if (opcode == 0xff)
	    {
	      /* Skip for branch instructions since R_X86_64_PC32
		 may overflow.  */
	      if (no_overflow)
		return TRUE;
	    }
	  else if (relocx)
	    {
	      /* For non-branch instructions, we can convert to
		 R_X86_64_32/R_X86_64_32S since we know if there
		 is a REX byte.  */
	      to_reloc_pc32 = FALSE;
	    }

	  /* Since we don't know the current PC when PIC is true,
	     we can't convert to R_X86_64_PC32.  */
	  if (to_reloc_pc32 && is_pic)
	    return TRUE;

	  goto convert;
	}
      /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
	 ld.so may use its link-time address.  */
      else if (h->start_stop
	       || eh->linker_def
	       || ((h->def_regular
		    || h->root.type == bfd_link_hash_defined
		    || h->root.type == bfd_link_hash_defweak)
		   && h != htab->elf.hdynamic
		   && local_ref))
	{
	  /* bfd_link_hash_new or bfd_link_hash_undefined is
	     set by an assignment in a linker script in
	     bfd_elf_record_link_assignment.  start_stop is set
	     on __start_SECNAME/__stop_SECNAME which mark section
	     SECNAME.  */
	  if (h->start_stop
	      || eh->linker_def
	      || (h->def_regular
		  && (h->root.type == bfd_link_hash_new
		      || h->root.type == bfd_link_hash_undefined
		      || ((h->root.type == bfd_link_hash_defined
			   || h->root.type == bfd_link_hash_defweak)
			  && h->root.u.def.section == bfd_und_section_ptr))))
	    {
	      /* Skip since R_X86_64_32/R_X86_64_32S may overflow.  */
	      if (no_overflow)
		return TRUE;
	      goto convert;
	    }
	  tsec = h->root.u.def.section;
	}
      else
	return TRUE;
    }

  /* Don't convert GOTPCREL relocation against large section.  */
  if (elf_section_data (tsec) != NULL
      && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
    return TRUE;

  /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow.  */
  if (no_overflow)
    return TRUE;

 convert:
  if (opcode == 0xff)
    {
      /* We have "call/jmp *foo@GOTPCREL(%rip)".  */
      unsigned int nop;
      unsigned int disp;
      bfd_vma nop_offset;

      /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
	 R_X86_64_PC32.  */
      modrm = bfd_get_8 (abfd, contents + roff - 1);
      if (modrm == 0x25)
	{
	  /* Convert to "jmp foo nop".  */
	  modrm = 0xe9;
	  nop = NOP_OPCODE;
	  nop_offset = irel->r_offset + 3;
	  /* Direct jmp is one byte shorter, so the displacement (and
	     the reloc) shift back one byte and a nop pads the end.  */
	  disp = bfd_get_32 (abfd, contents + irel->r_offset);
	  irel->r_offset -= 1;
	  bfd_put_32 (abfd, disp, contents + irel->r_offset);
	}
      else
	{
	  struct elf_x86_link_hash_entry *eh
	    = (struct elf_x86_link_hash_entry *) h;

	  /* Convert to "nop call foo".  ADDR_PREFIX_OPCODE
	     is a nop prefix.  */
	  modrm = 0xe8;
	  /* To support TLS optimization, always use addr32 prefix for
	     "call *__tls_get_addr@GOTPCREL(%rip)".  */
	  if (eh && eh->tls_get_addr)
	    {
	      nop = 0x67;
	      nop_offset = irel->r_offset - 2;
	    }
	  else
	    {
	      nop = link_info->call_nop_byte;
	      if (link_info->call_nop_as_suffix)
		{
		  nop_offset = irel->r_offset + 3;
		  disp = bfd_get_32 (abfd, contents + irel->r_offset);
		  irel->r_offset -= 1;
		  bfd_put_32 (abfd, disp, contents + irel->r_offset);
		}
	      else
		nop_offset = irel->r_offset - 2;
	    }
	}
      bfd_put_8 (abfd, nop, contents + nop_offset);
      bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
      r_type = R_X86_64_PC32;
    }
  else
    {
      unsigned int rex;
      unsigned int rex_mask = REX_R;

      if (r_type == R_X86_64_REX_GOTPCRELX)
	rex = bfd_get_8 (abfd, contents + roff - 3);
      else
	rex = 0;

      if (opcode == 0x8b)
	{
	  if (to_reloc_pc32)
	    {
	      /* Convert "mov foo@GOTPCREL(%rip), %reg" to
		 "lea foo(%rip), %reg".  */
	      opcode = 0x8d;
	      r_type = R_X86_64_PC32;
	    }
	  else
	    {
	      /* Convert "mov foo@GOTPCREL(%rip), %reg" to
		 "mov $foo, %reg".  */
	      opcode = 0xc7;
	      modrm = bfd_get_8 (abfd, contents + roff - 1);
	      modrm = 0xc0 | (modrm & 0x38) >> 3;
	      if ((rex & REX_W) != 0
		  && ABI_64_P (link_info->output_bfd))
		{
		  /* Keep the REX_W bit in REX byte for LP64.  */
		  r_type = R_X86_64_32S;
		  goto rewrite_modrm_rex;
		}
	      else
		{
		  /* If the REX_W bit in REX byte isn't needed,
		     use R_X86_64_32 and clear the W bit to avoid
		     sign-extend imm32 to imm64.  */
		  r_type = R_X86_64_32;
		  /* Clear the W bit in REX byte.  */
		  rex_mask |= REX_W;
		  goto rewrite_modrm_rex;
		}
	    }
	}
      else
	{
	  /* R_X86_64_PC32 isn't supported.  */
	  if (to_reloc_pc32)
	    return TRUE;

	  modrm = bfd_get_8 (abfd, contents + roff - 1);
	  if (opcode == 0x85)
	    {
	      /* Convert "test %reg, foo@GOTPCREL(%rip)" to
		 "test $foo, %reg".  */
	      modrm = 0xc0 | (modrm & 0x38) >> 3;
	      opcode = 0xf7;
	    }
	  else
	    {
	      /* Convert "binop foo@GOTPCREL(%rip), %reg" to
		 "binop $foo, %reg".  */
	      modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
	      opcode = 0x81;
	    }

	  /* Use R_X86_64_32 with 32-bit operand to avoid relocation
	     overflow when sign-extending imm32 to imm64.  */
	  r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;

	rewrite_modrm_rex:
	  bfd_put_8 (abfd, modrm, contents + roff - 1);

	  if (rex)
	    {
	      /* Move the R bit to the B bit in REX byte.  */
	      rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
	      bfd_put_8 (abfd, rex, contents + roff - 3);
	    }

	  /* No addend for R_X86_64_32/R_X86_64_32S relocations.  */
	  irel->r_addend = 0;
	}

      bfd_put_8 (abfd, opcode, contents + roff - 2);
    }

  *r_type_p = r_type;
  irel->r_info = htab->r_info (r_symndx,
			       r_type | R_X86_64_converted_reloc_bit);

  *converted = TRUE;

  return TRUE;
}
1793
1794 /* Look through the relocs for a section during the first phase, and
1795 calculate needed space in the global offset table, procedure
1796 linkage table, and dynamic reloc sections. */
1797
1798 static bfd_boolean
1799 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1800 asection *sec,
1801 const Elf_Internal_Rela *relocs)
1802 {
1803 struct elf_x86_link_hash_table *htab;
1804 Elf_Internal_Shdr *symtab_hdr;
1805 struct elf_link_hash_entry **sym_hashes;
1806 const Elf_Internal_Rela *rel;
1807 const Elf_Internal_Rela *rel_end;
1808 asection *sreloc;
1809 bfd_byte *contents;
1810 bfd_boolean converted;
1811
1812 if (bfd_link_relocatable (info))
1813 return TRUE;
1814
1815 /* Don't do anything special with non-loaded, non-alloced sections.
1816 In particular, any relocs in such sections should not affect GOT
1817 and PLT reference counting (ie. we don't allow them to create GOT
1818 or PLT entries), there's no possibility or desire to optimize TLS
1819 relocs, and there's not much point in propagating relocs to shared
1820 libs that the dynamic linker won't relocate. */
1821 if ((sec->flags & SEC_ALLOC) == 0)
1822 return TRUE;
1823
1824 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1825 if (htab == NULL)
1826 {
1827 sec->check_relocs_failed = 1;
1828 return FALSE;
1829 }
1830
1831 BFD_ASSERT (is_x86_elf (abfd, htab));
1832
1833 /* Get the section contents. */
1834 if (elf_section_data (sec)->this_hdr.contents != NULL)
1835 contents = elf_section_data (sec)->this_hdr.contents;
1836 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1837 {
1838 sec->check_relocs_failed = 1;
1839 return FALSE;
1840 }
1841
1842 symtab_hdr = &elf_symtab_hdr (abfd);
1843 sym_hashes = elf_sym_hashes (abfd);
1844
1845 converted = FALSE;
1846
1847 sreloc = NULL;
1848
1849 rel_end = relocs + sec->reloc_count;
1850 for (rel = relocs; rel < rel_end; rel++)
1851 {
1852 unsigned int r_type;
1853 unsigned int r_symndx;
1854 struct elf_link_hash_entry *h;
1855 struct elf_x86_link_hash_entry *eh;
1856 Elf_Internal_Sym *isym;
1857 const char *name;
1858 bfd_boolean size_reloc;
1859 bfd_boolean converted_reloc;
1860
1861 r_symndx = htab->r_sym (rel->r_info);
1862 r_type = ELF32_R_TYPE (rel->r_info);
1863
1864 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1865 {
1866 /* xgettext:c-format */
1867 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1868 abfd, r_symndx);
1869 goto error_return;
1870 }
1871
1872 if (r_symndx < symtab_hdr->sh_info)
1873 {
1874 /* A local symbol. */
1875 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1876 abfd, r_symndx);
1877 if (isym == NULL)
1878 goto error_return;
1879
1880 /* Check relocation against local STT_GNU_IFUNC symbol. */
1881 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1882 {
1883 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1884 TRUE);
1885 if (h == NULL)
1886 goto error_return;
1887
1888 /* Fake a STT_GNU_IFUNC symbol. */
1889 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1890 isym, NULL);
1891 h->type = STT_GNU_IFUNC;
1892 h->def_regular = 1;
1893 h->ref_regular = 1;
1894 h->forced_local = 1;
1895 h->root.type = bfd_link_hash_defined;
1896 }
1897 else
1898 h = NULL;
1899 }
1900 else
1901 {
1902 isym = NULL;
1903 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1904 while (h->root.type == bfd_link_hash_indirect
1905 || h->root.type == bfd_link_hash_warning)
1906 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1907 }
1908
1909 /* Check invalid x32 relocations. */
1910 if (!ABI_64_P (abfd))
1911 switch (r_type)
1912 {
1913 default:
1914 break;
1915
1916 case R_X86_64_DTPOFF64:
1917 case R_X86_64_TPOFF64:
1918 case R_X86_64_PC64:
1919 case R_X86_64_GOTOFF64:
1920 case R_X86_64_GOT64:
1921 case R_X86_64_GOTPCREL64:
1922 case R_X86_64_GOTPC64:
1923 case R_X86_64_GOTPLT64:
1924 case R_X86_64_PLTOFF64:
1925 {
1926 if (h)
1927 name = h->root.root.string;
1928 else
1929 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1930 NULL);
1931 _bfd_error_handler
1932 /* xgettext:c-format */
1933 (_("%pB: relocation %s against symbol `%s' isn't "
1934 "supported in x32 mode"), abfd,
1935 x86_64_elf_howto_table[r_type].name, name);
1936 bfd_set_error (bfd_error_bad_value);
1937 goto error_return;
1938 }
1939 break;
1940 }
1941
1942 if (h != NULL)
1943 {
1944 /* It is referenced by a non-shared object. */
1945 h->ref_regular = 1;
1946
1947 if (h->type == STT_GNU_IFUNC)
1948 elf_tdata (info->output_bfd)->has_gnu_symbols
1949 |= elf_gnu_symbol_ifunc;
1950 }
1951
1952 converted_reloc = FALSE;
1953 if ((r_type == R_X86_64_GOTPCREL
1954 || r_type == R_X86_64_GOTPCRELX
1955 || r_type == R_X86_64_REX_GOTPCRELX)
1956 && (h == NULL || h->type != STT_GNU_IFUNC))
1957 {
1958 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1959 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1960 irel, h, &converted_reloc,
1961 info))
1962 goto error_return;
1963
1964 if (converted_reloc)
1965 converted = TRUE;
1966 }
1967
1968 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1969 symtab_hdr, sym_hashes,
1970 &r_type, GOT_UNKNOWN,
1971 rel, rel_end, h, r_symndx, FALSE))
1972 goto error_return;
1973
1974 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1975 if (h == htab->elf.hgot)
1976 htab->got_referenced = TRUE;
1977
1978 eh = (struct elf_x86_link_hash_entry *) h;
1979 switch (r_type)
1980 {
1981 case R_X86_64_TLSLD:
1982 htab->tls_ld_or_ldm_got.refcount = 1;
1983 goto create_got;
1984
1985 case R_X86_64_TPOFF32:
1986 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1987 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1988 &x86_64_elf_howto_table[r_type]);
1989 if (eh != NULL)
1990 eh->zero_undefweak &= 0x2;
1991 break;
1992
1993 case R_X86_64_GOTTPOFF:
1994 if (!bfd_link_executable (info))
1995 info->flags |= DF_STATIC_TLS;
1996 /* Fall through */
1997
1998 case R_X86_64_GOT32:
1999 case R_X86_64_GOTPCREL:
2000 case R_X86_64_GOTPCRELX:
2001 case R_X86_64_REX_GOTPCRELX:
2002 case R_X86_64_TLSGD:
2003 case R_X86_64_GOT64:
2004 case R_X86_64_GOTPCREL64:
2005 case R_X86_64_GOTPLT64:
2006 case R_X86_64_GOTPC32_TLSDESC:
2007 case R_X86_64_TLSDESC_CALL:
2008 /* This symbol requires a global offset table entry. */
2009 {
2010 int tls_type, old_tls_type;
2011
2012 switch (r_type)
2013 {
2014 default: tls_type = GOT_NORMAL; break;
2015 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2016 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2017 case R_X86_64_GOTPC32_TLSDESC:
2018 case R_X86_64_TLSDESC_CALL:
2019 tls_type = GOT_TLS_GDESC; break;
2020 }
2021
2022 if (h != NULL)
2023 {
2024 h->got.refcount = 1;
2025 old_tls_type = eh->tls_type;
2026 }
2027 else
2028 {
2029 bfd_signed_vma *local_got_refcounts;
2030
2031 /* This is a global offset table entry for a local symbol. */
2032 local_got_refcounts = elf_local_got_refcounts (abfd);
2033 if (local_got_refcounts == NULL)
2034 {
2035 bfd_size_type size;
2036
2037 size = symtab_hdr->sh_info;
2038 size *= sizeof (bfd_signed_vma)
2039 + sizeof (bfd_vma) + sizeof (char);
2040 local_got_refcounts = ((bfd_signed_vma *)
2041 bfd_zalloc (abfd, size));
2042 if (local_got_refcounts == NULL)
2043 goto error_return;
2044 elf_local_got_refcounts (abfd) = local_got_refcounts;
2045 elf_x86_local_tlsdesc_gotent (abfd)
2046 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2047 elf_x86_local_got_tls_type (abfd)
2048 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2049 }
2050 local_got_refcounts[r_symndx] = 1;
2051 old_tls_type
2052 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2053 }
2054
2055 /* If a TLS symbol is accessed using IE at least once,
2056 there is no point to use dynamic model for it. */
2057 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2058 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2059 || tls_type != GOT_TLS_IE))
2060 {
2061 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2062 tls_type = old_tls_type;
2063 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2064 && GOT_TLS_GD_ANY_P (tls_type))
2065 tls_type |= old_tls_type;
2066 else
2067 {
2068 if (h)
2069 name = h->root.root.string;
2070 else
2071 name = bfd_elf_sym_name (abfd, symtab_hdr,
2072 isym, NULL);
2073 _bfd_error_handler
2074 /* xgettext:c-format */
2075 (_("%pB: '%s' accessed both as normal and"
2076 " thread local symbol"),
2077 abfd, name);
2078 bfd_set_error (bfd_error_bad_value);
2079 goto error_return;
2080 }
2081 }
2082
2083 if (old_tls_type != tls_type)
2084 {
2085 if (eh != NULL)
2086 eh->tls_type = tls_type;
2087 else
2088 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2089 }
2090 }
2091 /* Fall through */
2092
2093 case R_X86_64_GOTOFF64:
2094 case R_X86_64_GOTPC32:
2095 case R_X86_64_GOTPC64:
2096 create_got:
2097 if (eh != NULL)
2098 eh->zero_undefweak &= 0x2;
2099 break;
2100
2101 case R_X86_64_PLT32:
2102 case R_X86_64_PLT32_BND:
2103 /* This symbol requires a procedure linkage table entry. We
2104 actually build the entry in adjust_dynamic_symbol,
2105 because this might be a case of linking PIC code which is
2106 never referenced by a dynamic object, in which case we
2107 don't need to generate a procedure linkage table entry
2108 after all. */
2109
2110 /* If this is a local symbol, we resolve it directly without
2111 creating a procedure linkage table entry. */
2112 if (h == NULL)
2113 continue;
2114
2115 eh->zero_undefweak &= 0x2;
2116 h->needs_plt = 1;
2117 h->plt.refcount = 1;
2118 break;
2119
2120 case R_X86_64_PLTOFF64:
2121 /* This tries to form the 'address' of a function relative
2122 to GOT. For global symbols we need a PLT entry. */
2123 if (h != NULL)
2124 {
2125 h->needs_plt = 1;
2126 h->plt.refcount = 1;
2127 }
2128 goto create_got;
2129
2130 case R_X86_64_SIZE32:
2131 case R_X86_64_SIZE64:
2132 size_reloc = TRUE;
2133 goto do_size;
2134
2135 case R_X86_64_32:
2136 if (!ABI_64_P (abfd))
2137 goto pointer;
2138 /* Fall through. */
2139 case R_X86_64_8:
2140 case R_X86_64_16:
2141 case R_X86_64_32S:
2142 /* Check relocation overflow as these relocs may lead to
2143 run-time relocation overflow. Don't error out for
2144 sections we don't care about, such as debug sections or
2145 when relocation overflow check is disabled. */
2146 if (!info->no_reloc_overflow_check
2147 && !converted_reloc
2148 && (bfd_link_pic (info)
2149 || (bfd_link_executable (info)
2150 && h != NULL
2151 && !h->def_regular
2152 && h->def_dynamic
2153 && (sec->flags & SEC_READONLY) == 0)))
2154 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2155 &x86_64_elf_howto_table[r_type]);
2156 /* Fall through. */
2157
2158 case R_X86_64_PC8:
2159 case R_X86_64_PC16:
2160 case R_X86_64_PC32:
2161 case R_X86_64_PC32_BND:
2162 case R_X86_64_PC64:
2163 case R_X86_64_64:
2164 pointer:
2165 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2166 eh->zero_undefweak |= 0x2;
2167 /* We are called after all symbols have been resolved. Only
2168 relocation against STT_GNU_IFUNC symbol must go through
2169 PLT. */
2170 if (h != NULL
2171 && (bfd_link_executable (info)
2172 || h->type == STT_GNU_IFUNC))
2173 {
2174 bfd_boolean func_pointer_ref = FALSE;
2175
2176 if (r_type == R_X86_64_PC32)
2177 {
2178 /* Since something like ".long foo - ." may be used
2179 as pointer, make sure that PLT is used if foo is
2180 a function defined in a shared library. */
2181 if ((sec->flags & SEC_CODE) == 0)
2182 {
2183 h->pointer_equality_needed = 1;
2184 if (bfd_link_pie (info)
2185 && h->type == STT_FUNC
2186 && !h->def_regular
2187 && h->def_dynamic)
2188 {
2189 h->needs_plt = 1;
2190 h->plt.refcount = 1;
2191 }
2192 }
2193 }
2194 else if (r_type != R_X86_64_PC32_BND
2195 && r_type != R_X86_64_PC64)
2196 {
2197 h->pointer_equality_needed = 1;
2198 /* At run-time, R_X86_64_64 can be resolved for both
2199 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2200 can only be resolved for x32. */
2201 if ((sec->flags & SEC_READONLY) == 0
2202 && (r_type == R_X86_64_64
2203 || (!ABI_64_P (abfd)
2204 && (r_type == R_X86_64_32
2205 || r_type == R_X86_64_32S))))
2206 func_pointer_ref = TRUE;
2207 }
2208
2209 if (!func_pointer_ref)
2210 {
2211 /* If this reloc is in a read-only section, we might
2212 need a copy reloc. We can't check reliably at this
2213 stage whether the section is read-only, as input
2214 sections have not yet been mapped to output sections.
2215 Tentatively set the flag for now, and correct in
2216 adjust_dynamic_symbol. */
2217 h->non_got_ref = 1;
2218
2219 /* We may need a .plt entry if the symbol is a function
2220 defined in a shared lib or is a function referenced
2221 from the code or read-only section. */
2222 if (!h->def_regular
2223 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2224 h->plt.refcount = 1;
2225 }
2226 }
2227
2228 size_reloc = FALSE;
2229 do_size:
2230 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2231 htab->pointer_r_type))
2232 {
2233 struct elf_dyn_relocs *p;
2234 struct elf_dyn_relocs **head;
2235
2236 /* We must copy these reloc types into the output file.
2237 Create a reloc section in dynobj and make room for
2238 this reloc. */
2239 if (sreloc == NULL)
2240 {
2241 sreloc = _bfd_elf_make_dynamic_reloc_section
2242 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2243 abfd, /*rela?*/ TRUE);
2244
2245 if (sreloc == NULL)
2246 goto error_return;
2247 }
2248
2249 /* If this is a global symbol, we count the number of
2250 relocations we need for this symbol. */
2251 if (h != NULL)
2252 head = &eh->dyn_relocs;
2253 else
2254 {
2255 /* Track dynamic relocs needed for local syms too.
2256 We really need local syms available to do this
2257 easily. Oh well. */
2258 asection *s;
2259 void **vpp;
2260
2261 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2262 abfd, r_symndx);
2263 if (isym == NULL)
2264 goto error_return;
2265
2266 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2267 if (s == NULL)
2268 s = sec;
2269
2270 /* Beware of type punned pointers vs strict aliasing
2271 rules. */
2272 vpp = &(elf_section_data (s)->local_dynrel);
2273 head = (struct elf_dyn_relocs **)vpp;
2274 }
2275
2276 p = *head;
2277 if (p == NULL || p->sec != sec)
2278 {
2279 bfd_size_type amt = sizeof *p;
2280
2281 p = ((struct elf_dyn_relocs *)
2282 bfd_alloc (htab->elf.dynobj, amt));
2283 if (p == NULL)
2284 goto error_return;
2285 p->next = *head;
2286 *head = p;
2287 p->sec = sec;
2288 p->count = 0;
2289 p->pc_count = 0;
2290 }
2291
2292 p->count += 1;
2293 /* Count size relocation as PC-relative relocation. */
2294 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2295 p->pc_count += 1;
2296 }
2297 break;
2298
2299 /* This relocation describes the C++ object vtable hierarchy.
2300 Reconstruct it for later use during GC. */
2301 case R_X86_64_GNU_VTINHERIT:
2302 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2303 goto error_return;
2304 break;
2305
2306 /* This relocation describes which C++ vtable entries are actually
2307 used. Record for later use during GC. */
2308 case R_X86_64_GNU_VTENTRY:
2309 BFD_ASSERT (h != NULL);
2310 if (h != NULL
2311 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2312 goto error_return;
2313 break;
2314
2315 default:
2316 break;
2317 }
2318 }
2319
2320 if (elf_section_data (sec)->this_hdr.contents != contents)
2321 {
2322 if (!converted && !info->keep_memory)
2323 free (contents);
2324 else
2325 {
2326 /* Cache the section contents for elf_link_input_bfd if any
2327 load is converted or --no-keep-memory isn't used. */
2328 elf_section_data (sec)->this_hdr.contents = contents;
2329 }
2330 }
2331
2332 /* Cache relocations if any load is converted. */
2333 if (elf_section_data (sec)->relocs != relocs && converted)
2334 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2335
2336 return TRUE;
2337
2338 error_return:
2339 if (elf_section_data (sec)->this_hdr.contents != contents)
2340 free (contents);
2341 sec->check_relocs_failed = 1;
2342 return FALSE;
2343 }
2344
2345 /* Return the relocation value for @tpoff relocation
2346 if STT_TLS virtual address is ADDRESS. */
2347
2348 static bfd_vma
2349 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2350 {
2351 struct elf_link_hash_table *htab = elf_hash_table (info);
2352 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2353 bfd_vma static_tls_size;
2354
2355 /* If tls_segment is NULL, we should have signalled an error already. */
2356 if (htab->tls_sec == NULL)
2357 return 0;
2358
2359 /* Consider special static TLS alignment requirements. */
2360 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2361 return address - static_tls_size - htab->tls_sec->vma;
2362 }
2363
2364 /* Relocate an x86_64 ELF section. */
2365
2366 static bfd_boolean
2367 elf_x86_64_relocate_section (bfd *output_bfd,
2368 struct bfd_link_info *info,
2369 bfd *input_bfd,
2370 asection *input_section,
2371 bfd_byte *contents,
2372 Elf_Internal_Rela *relocs,
2373 Elf_Internal_Sym *local_syms,
2374 asection **local_sections)
2375 {
2376 struct elf_x86_link_hash_table *htab;
2377 Elf_Internal_Shdr *symtab_hdr;
2378 struct elf_link_hash_entry **sym_hashes;
2379 bfd_vma *local_got_offsets;
2380 bfd_vma *local_tlsdesc_gotents;
2381 Elf_Internal_Rela *rel;
2382 Elf_Internal_Rela *wrel;
2383 Elf_Internal_Rela *relend;
2384 unsigned int plt_entry_size;
2385
2386 /* Skip if check_relocs failed. */
2387 if (input_section->check_relocs_failed)
2388 return FALSE;
2389
2390 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2391 if (htab == NULL)
2392 return FALSE;
2393
2394 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2395
2396 plt_entry_size = htab->plt.plt_entry_size;
2397 symtab_hdr = &elf_symtab_hdr (input_bfd);
2398 sym_hashes = elf_sym_hashes (input_bfd);
2399 local_got_offsets = elf_local_got_offsets (input_bfd);
2400 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2401
2402 _bfd_x86_elf_set_tls_module_base (info);
2403
2404 rel = wrel = relocs;
2405 relend = relocs + input_section->reloc_count;
2406 for (; rel < relend; wrel++, rel++)
2407 {
2408 unsigned int r_type, r_type_tls;
2409 reloc_howto_type *howto;
2410 unsigned long r_symndx;
2411 struct elf_link_hash_entry *h;
2412 struct elf_x86_link_hash_entry *eh;
2413 Elf_Internal_Sym *sym;
2414 asection *sec;
2415 bfd_vma off, offplt, plt_offset;
2416 bfd_vma relocation;
2417 bfd_boolean unresolved_reloc;
2418 bfd_reloc_status_type r;
2419 int tls_type;
2420 asection *base_got, *resolved_plt;
2421 bfd_vma st_size;
2422 bfd_boolean resolved_to_zero;
2423 bfd_boolean relative_reloc;
2424 bfd_boolean converted_reloc;
2425 bfd_boolean need_copy_reloc_in_pie;
2426
2427 r_type = ELF32_R_TYPE (rel->r_info);
2428 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2429 || r_type == (int) R_X86_64_GNU_VTENTRY)
2430 {
2431 if (wrel != rel)
2432 *wrel = *rel;
2433 continue;
2434 }
2435
2436 r_symndx = htab->r_sym (rel->r_info);
2437 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2438 if (converted_reloc)
2439 {
2440 r_type &= ~R_X86_64_converted_reloc_bit;
2441 rel->r_info = htab->r_info (r_symndx, r_type);
2442 }
2443
2444 if (r_type >= (int) R_X86_64_standard)
2445 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2446
2447 if (r_type != (int) R_X86_64_32
2448 || ABI_64_P (output_bfd))
2449 howto = x86_64_elf_howto_table + r_type;
2450 else
2451 howto = (x86_64_elf_howto_table
2452 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2453 h = NULL;
2454 sym = NULL;
2455 sec = NULL;
2456 unresolved_reloc = FALSE;
2457 if (r_symndx < symtab_hdr->sh_info)
2458 {
2459 sym = local_syms + r_symndx;
2460 sec = local_sections[r_symndx];
2461
2462 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2463 &sec, rel);
2464 st_size = sym->st_size;
2465
2466 /* Relocate against local STT_GNU_IFUNC symbol. */
2467 if (!bfd_link_relocatable (info)
2468 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2469 {
2470 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2471 rel, FALSE);
2472 if (h == NULL)
2473 abort ();
2474
2475 /* Set STT_GNU_IFUNC symbol value. */
2476 h->root.u.def.value = sym->st_value;
2477 h->root.u.def.section = sec;
2478 }
2479 }
2480 else
2481 {
2482 bfd_boolean warned ATTRIBUTE_UNUSED;
2483 bfd_boolean ignored ATTRIBUTE_UNUSED;
2484
2485 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2486 r_symndx, symtab_hdr, sym_hashes,
2487 h, sec, relocation,
2488 unresolved_reloc, warned, ignored);
2489 st_size = h->size;
2490 }
2491
2492 if (sec != NULL && discarded_section (sec))
2493 {
2494 _bfd_clear_contents (howto, input_bfd, input_section,
2495 contents + rel->r_offset);
2496 wrel->r_offset = rel->r_offset;
2497 wrel->r_info = 0;
2498 wrel->r_addend = 0;
2499
2500 /* For ld -r, remove relocations in debug sections against
 2501		 sections defined in discarded sections.  Not done for
 2502		 eh_frame because the editing code expects relocations to be present.  */
2503 if (bfd_link_relocatable (info)
2504 && (input_section->flags & SEC_DEBUGGING))
2505 wrel--;
2506
2507 continue;
2508 }
2509
2510 if (bfd_link_relocatable (info))
2511 {
2512 if (wrel != rel)
2513 *wrel = *rel;
2514 continue;
2515 }
2516
2517 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2518 {
2519 if (r_type == R_X86_64_64)
2520 {
2521 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2522 zero-extend it to 64bit if addend is zero. */
2523 r_type = R_X86_64_32;
2524 memset (contents + rel->r_offset + 4, 0, 4);
2525 }
2526 else if (r_type == R_X86_64_SIZE64)
2527 {
2528 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2529 zero-extend it to 64bit if addend is zero. */
2530 r_type = R_X86_64_SIZE32;
2531 memset (contents + rel->r_offset + 4, 0, 4);
2532 }
2533 }
2534
2535 eh = (struct elf_x86_link_hash_entry *) h;
2536
2537 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2538 it here if it is defined in a non-shared object. */
2539 if (h != NULL
2540 && h->type == STT_GNU_IFUNC
2541 && h->def_regular)
2542 {
2543 bfd_vma plt_index;
2544 const char *name;
2545
2546 if ((input_section->flags & SEC_ALLOC) == 0)
2547 {
2548 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2549 STT_GNU_IFUNC symbol as STT_FUNC. */
2550 if (elf_section_type (input_section) == SHT_NOTE)
2551 goto skip_ifunc;
2552 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2553 sections because such sections are not SEC_ALLOC and
2554 thus ld.so will not process them. */
2555 if ((input_section->flags & SEC_DEBUGGING) != 0)
2556 continue;
2557 abort ();
2558 }
2559
2560 switch (r_type)
2561 {
2562 default:
2563 break;
2564
2565 case R_X86_64_GOTPCREL:
2566 case R_X86_64_GOTPCRELX:
2567 case R_X86_64_REX_GOTPCRELX:
2568 case R_X86_64_GOTPCREL64:
2569 base_got = htab->elf.sgot;
2570 off = h->got.offset;
2571
2572 if (base_got == NULL)
2573 abort ();
2574
2575 if (off == (bfd_vma) -1)
2576 {
2577 /* We can't use h->got.offset here to save state, or
2578 even just remember the offset, as finish_dynamic_symbol
2579 would use that as offset into .got. */
2580
2581 if (h->plt.offset == (bfd_vma) -1)
2582 abort ();
2583
2584 if (htab->elf.splt != NULL)
2585 {
2586 plt_index = (h->plt.offset / plt_entry_size
2587 - htab->plt.has_plt0);
2588 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2589 base_got = htab->elf.sgotplt;
2590 }
2591 else
2592 {
2593 plt_index = h->plt.offset / plt_entry_size;
2594 off = plt_index * GOT_ENTRY_SIZE;
2595 base_got = htab->elf.igotplt;
2596 }
2597
2598 if (h->dynindx == -1
2599 || h->forced_local
2600 || info->symbolic)
2601 {
 2602			  /* This references the local definition.  We must
2603 initialize this entry in the global offset table.
2604 Since the offset must always be a multiple of 8,
2605 we use the least significant bit to record
2606 whether we have initialized it already.
2607
2608 When doing a dynamic link, we create a .rela.got
2609 relocation entry to initialize the value. This
2610 is done in the finish_dynamic_symbol routine. */
2611 if ((off & 1) != 0)
2612 off &= ~1;
2613 else
2614 {
2615 bfd_put_64 (output_bfd, relocation,
2616 base_got->contents + off);
2617 /* Note that this is harmless for the GOTPLT64
2618 case, as -1 | 1 still is -1. */
2619 h->got.offset |= 1;
2620 }
2621 }
2622 }
2623
2624 relocation = (base_got->output_section->vma
2625 + base_got->output_offset + off);
2626
2627 goto do_relocation;
2628 }
2629
2630 if (h->plt.offset == (bfd_vma) -1)
2631 {
2632 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2633 if (r_type == htab->pointer_r_type
2634 && (input_section->flags & SEC_CODE) == 0)
2635 goto do_ifunc_pointer;
2636 goto bad_ifunc_reloc;
2637 }
2638
2639 /* STT_GNU_IFUNC symbol must go through PLT. */
2640 if (htab->elf.splt != NULL)
2641 {
2642 if (htab->plt_second != NULL)
2643 {
2644 resolved_plt = htab->plt_second;
2645 plt_offset = eh->plt_second.offset;
2646 }
2647 else
2648 {
2649 resolved_plt = htab->elf.splt;
2650 plt_offset = h->plt.offset;
2651 }
2652 }
2653 else
2654 {
2655 resolved_plt = htab->elf.iplt;
2656 plt_offset = h->plt.offset;
2657 }
2658
2659 relocation = (resolved_plt->output_section->vma
2660 + resolved_plt->output_offset + plt_offset);
2661
2662 switch (r_type)
2663 {
2664 default:
2665 bad_ifunc_reloc:
2666 if (h->root.root.string)
2667 name = h->root.root.string;
2668 else
2669 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2670 NULL);
2671 _bfd_error_handler
2672 /* xgettext:c-format */
2673 (_("%pB: relocation %s against STT_GNU_IFUNC "
2674 "symbol `%s' isn't supported"), input_bfd,
2675 howto->name, name);
2676 bfd_set_error (bfd_error_bad_value);
2677 return FALSE;
2678
2679 case R_X86_64_32S:
2680 if (bfd_link_pic (info))
2681 abort ();
2682 goto do_relocation;
2683
2684 case R_X86_64_32:
2685 if (ABI_64_P (output_bfd))
2686 goto do_relocation;
2687 /* FALLTHROUGH */
2688 case R_X86_64_64:
2689 do_ifunc_pointer:
2690 if (rel->r_addend != 0)
2691 {
2692 if (h->root.root.string)
2693 name = h->root.root.string;
2694 else
2695 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2696 sym, NULL);
2697 _bfd_error_handler
2698 /* xgettext:c-format */
2699 (_("%pB: relocation %s against STT_GNU_IFUNC "
2700 "symbol `%s' has non-zero addend: %" PRId64),
2701 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2702 bfd_set_error (bfd_error_bad_value);
2703 return FALSE;
2704 }
2705
 2706	      /* Generate dynamic relocation only when there is a
2707 non-GOT reference in a shared object or there is no
2708 PLT. */
2709 if ((bfd_link_pic (info) && h->non_got_ref)
2710 || h->plt.offset == (bfd_vma) -1)
2711 {
2712 Elf_Internal_Rela outrel;
2713 asection *sreloc;
2714
2715 /* Need a dynamic relocation to get the real function
2716 address. */
2717 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2718 info,
2719 input_section,
2720 rel->r_offset);
2721 if (outrel.r_offset == (bfd_vma) -1
2722 || outrel.r_offset == (bfd_vma) -2)
2723 abort ();
2724
2725 outrel.r_offset += (input_section->output_section->vma
2726 + input_section->output_offset);
2727
2728 if (POINTER_LOCAL_IFUNC_P (info, h))
2729 {
2730 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2731 h->root.root.string,
2732 h->root.u.def.section->owner);
2733
2734 /* This symbol is resolved locally. */
2735 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2736 outrel.r_addend = (h->root.u.def.value
2737 + h->root.u.def.section->output_section->vma
2738 + h->root.u.def.section->output_offset);
2739 }
2740 else
2741 {
2742 outrel.r_info = htab->r_info (h->dynindx, r_type);
2743 outrel.r_addend = 0;
2744 }
2745
2746 /* Dynamic relocations are stored in
2747 1. .rela.ifunc section in PIC object.
2748 2. .rela.got section in dynamic executable.
2749 3. .rela.iplt section in static executable. */
2750 if (bfd_link_pic (info))
2751 sreloc = htab->elf.irelifunc;
2752 else if (htab->elf.splt != NULL)
2753 sreloc = htab->elf.srelgot;
2754 else
2755 sreloc = htab->elf.irelplt;
2756 elf_append_rela (output_bfd, sreloc, &outrel);
2757
2758 /* If this reloc is against an external symbol, we
2759 do not want to fiddle with the addend. Otherwise,
2760 we need to include the symbol value so that it
2761 becomes an addend for the dynamic reloc. For an
2762 internal symbol, we have updated addend. */
2763 continue;
2764 }
2765 /* FALLTHROUGH */
2766 case R_X86_64_PC32:
2767 case R_X86_64_PC32_BND:
2768 case R_X86_64_PC64:
2769 case R_X86_64_PLT32:
2770 case R_X86_64_PLT32_BND:
2771 goto do_relocation;
2772 }
2773 }
2774
2775 skip_ifunc:
2776 resolved_to_zero = (eh != NULL
2777 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2778
2779 /* When generating a shared object, the relocations handled here are
2780 copied into the output file to be resolved at run time. */
2781 switch (r_type)
2782 {
2783 case R_X86_64_GOT32:
2784 case R_X86_64_GOT64:
2785 /* Relocation is to the entry for this symbol in the global
2786 offset table. */
2787 case R_X86_64_GOTPCREL:
2788 case R_X86_64_GOTPCRELX:
2789 case R_X86_64_REX_GOTPCRELX:
2790 case R_X86_64_GOTPCREL64:
2791 /* Use global offset table entry as symbol value. */
2792 case R_X86_64_GOTPLT64:
2793 /* This is obsolete and treated the same as GOT64. */
2794 base_got = htab->elf.sgot;
2795
2796 if (htab->elf.sgot == NULL)
2797 abort ();
2798
2799 relative_reloc = FALSE;
2800 if (h != NULL)
2801 {
2802 off = h->got.offset;
2803 if (h->needs_plt
2804 && h->plt.offset != (bfd_vma)-1
2805 && off == (bfd_vma)-1)
2806 {
2807 /* We can't use h->got.offset here to save
2808 state, or even just remember the offset, as
2809 finish_dynamic_symbol would use that as offset into
2810 .got. */
2811 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2812 - htab->plt.has_plt0);
2813 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2814 base_got = htab->elf.sgotplt;
2815 }
2816
2817 if (RESOLVED_LOCALLY_P (info, h, htab))
2818 {
2819 /* We must initialize this entry in the global offset
2820 table. Since the offset must always be a multiple
2821 of 8, we use the least significant bit to record
2822 whether we have initialized it already.
2823
2824 When doing a dynamic link, we create a .rela.got
2825 relocation entry to initialize the value. This is
2826 done in the finish_dynamic_symbol routine. */
2827 if ((off & 1) != 0)
2828 off &= ~1;
2829 else
2830 {
2831 bfd_put_64 (output_bfd, relocation,
2832 base_got->contents + off);
2833 /* Note that this is harmless for the GOTPLT64 case,
2834 as -1 | 1 still is -1. */
2835 h->got.offset |= 1;
2836
2837 if (GENERATE_RELATIVE_RELOC_P (info, h))
2838 {
2839 /* If this symbol isn't dynamic in PIC,
2840 generate R_X86_64_RELATIVE here. */
2841 eh->no_finish_dynamic_symbol = 1;
2842 relative_reloc = TRUE;
2843 }
2844 }
2845 }
2846 else
2847 unresolved_reloc = FALSE;
2848 }
2849 else
2850 {
2851 if (local_got_offsets == NULL)
2852 abort ();
2853
2854 off = local_got_offsets[r_symndx];
2855
2856 /* The offset must always be a multiple of 8. We use
2857 the least significant bit to record whether we have
2858 already generated the necessary reloc. */
2859 if ((off & 1) != 0)
2860 off &= ~1;
2861 else
2862 {
2863 bfd_put_64 (output_bfd, relocation,
2864 base_got->contents + off);
2865 local_got_offsets[r_symndx] |= 1;
2866
2867 if (bfd_link_pic (info))
2868 relative_reloc = TRUE;
2869 }
2870 }
2871
2872 if (relative_reloc)
2873 {
2874 asection *s;
2875 Elf_Internal_Rela outrel;
2876
2877 /* We need to generate a R_X86_64_RELATIVE reloc
2878 for the dynamic linker. */
2879 s = htab->elf.srelgot;
2880 if (s == NULL)
2881 abort ();
2882
2883 outrel.r_offset = (base_got->output_section->vma
2884 + base_got->output_offset
2885 + off);
2886 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2887 outrel.r_addend = relocation;
2888 elf_append_rela (output_bfd, s, &outrel);
2889 }
2890
2891 if (off >= (bfd_vma) -2)
2892 abort ();
2893
2894 relocation = base_got->output_section->vma
2895 + base_got->output_offset + off;
2896 if (r_type != R_X86_64_GOTPCREL
2897 && r_type != R_X86_64_GOTPCRELX
2898 && r_type != R_X86_64_REX_GOTPCRELX
2899 && r_type != R_X86_64_GOTPCREL64)
2900 relocation -= htab->elf.sgotplt->output_section->vma
2901 - htab->elf.sgotplt->output_offset;
2902
2903 break;
2904
2905 case R_X86_64_GOTOFF64:
2906 /* Relocation is relative to the start of the global offset
2907 table. */
2908
2909 /* Check to make sure it isn't a protected function or data
2910 symbol for shared library since it may not be local when
2911 used as function address or with copy relocation. We also
2912 need to make sure that a symbol is referenced locally. */
2913 if (bfd_link_pic (info) && h)
2914 {
2915 if (!h->def_regular)
2916 {
2917 const char *v;
2918
2919 switch (ELF_ST_VISIBILITY (h->other))
2920 {
2921 case STV_HIDDEN:
2922 v = _("hidden symbol");
2923 break;
2924 case STV_INTERNAL:
2925 v = _("internal symbol");
2926 break;
2927 case STV_PROTECTED:
2928 v = _("protected symbol");
2929 break;
2930 default:
2931 v = _("symbol");
2932 break;
2933 }
2934
2935 _bfd_error_handler
2936 /* xgettext:c-format */
2937 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2938 " `%s' can not be used when making a shared object"),
2939 input_bfd, v, h->root.root.string);
2940 bfd_set_error (bfd_error_bad_value);
2941 return FALSE;
2942 }
2943 else if (!bfd_link_executable (info)
2944 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2945 && (h->type == STT_FUNC
2946 || h->type == STT_OBJECT)
2947 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2948 {
2949 _bfd_error_handler
2950 /* xgettext:c-format */
2951 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2952 " `%s' can not be used when making a shared object"),
2953 input_bfd,
2954 h->type == STT_FUNC ? "function" : "data",
2955 h->root.root.string);
2956 bfd_set_error (bfd_error_bad_value);
2957 return FALSE;
2958 }
2959 }
2960
2961 /* Note that sgot is not involved in this
2962 calculation. We always want the start of .got.plt. If we
2963 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2964 permitted by the ABI, we might have to change this
2965 calculation. */
2966 relocation -= htab->elf.sgotplt->output_section->vma
2967 + htab->elf.sgotplt->output_offset;
2968 break;
2969
2970 case R_X86_64_GOTPC32:
2971 case R_X86_64_GOTPC64:
2972 /* Use global offset table as symbol value. */
2973 relocation = htab->elf.sgotplt->output_section->vma
2974 + htab->elf.sgotplt->output_offset;
2975 unresolved_reloc = FALSE;
2976 break;
2977
2978 case R_X86_64_PLTOFF64:
2979 /* Relocation is PLT entry relative to GOT. For local
2980 symbols it's the symbol itself relative to GOT. */
2981 if (h != NULL
2982 /* See PLT32 handling. */
2983 && (h->plt.offset != (bfd_vma) -1
2984 || eh->plt_got.offset != (bfd_vma) -1)
2985 && htab->elf.splt != NULL)
2986 {
2987 if (eh->plt_got.offset != (bfd_vma) -1)
2988 {
2989 /* Use the GOT PLT. */
2990 resolved_plt = htab->plt_got;
2991 plt_offset = eh->plt_got.offset;
2992 }
2993 else if (htab->plt_second != NULL)
2994 {
2995 resolved_plt = htab->plt_second;
2996 plt_offset = eh->plt_second.offset;
2997 }
2998 else
2999 {
3000 resolved_plt = htab->elf.splt;
3001 plt_offset = h->plt.offset;
3002 }
3003
3004 relocation = (resolved_plt->output_section->vma
3005 + resolved_plt->output_offset
3006 + plt_offset);
3007 unresolved_reloc = FALSE;
3008 }
3009
3010 relocation -= htab->elf.sgotplt->output_section->vma
3011 + htab->elf.sgotplt->output_offset;
3012 break;
3013
3014 case R_X86_64_PLT32:
3015 case R_X86_64_PLT32_BND:
3016 /* Relocation is to the entry for this symbol in the
3017 procedure linkage table. */
3018
3019 /* Resolve a PLT32 reloc against a local symbol directly,
3020 without using the procedure linkage table. */
3021 if (h == NULL)
3022 break;
3023
3024 if ((h->plt.offset == (bfd_vma) -1
3025 && eh->plt_got.offset == (bfd_vma) -1)
3026 || htab->elf.splt == NULL)
3027 {
3028 /* We didn't make a PLT entry for this symbol. This
3029 happens when statically linking PIC code, or when
3030 using -Bsymbolic. */
3031 break;
3032 }
3033
3034 use_plt:
3035 if (h->plt.offset != (bfd_vma) -1)
3036 {
3037 if (htab->plt_second != NULL)
3038 {
3039 resolved_plt = htab->plt_second;
3040 plt_offset = eh->plt_second.offset;
3041 }
3042 else
3043 {
3044 resolved_plt = htab->elf.splt;
3045 plt_offset = h->plt.offset;
3046 }
3047 }
3048 else
3049 {
3050 /* Use the GOT PLT. */
3051 resolved_plt = htab->plt_got;
3052 plt_offset = eh->plt_got.offset;
3053 }
3054
3055 relocation = (resolved_plt->output_section->vma
3056 + resolved_plt->output_offset
3057 + plt_offset);
3058 unresolved_reloc = FALSE;
3059 break;
3060
3061 case R_X86_64_SIZE32:
3062 case R_X86_64_SIZE64:
3063 /* Set to symbol size. */
3064 relocation = st_size;
3065 goto direct;
3066
3067 case R_X86_64_PC8:
3068 case R_X86_64_PC16:
3069 case R_X86_64_PC32:
3070 case R_X86_64_PC32_BND:
3071 /* Don't complain about -fPIC if the symbol is undefined when
3072 building executable unless it is unresolved weak symbol,
3073 references a dynamic definition in PIE or -z nocopyreloc
3074 is used. */
3075 if ((input_section->flags & SEC_ALLOC) != 0
3076 && (input_section->flags & SEC_READONLY) != 0
3077 && h != NULL
3078 && ((bfd_link_executable (info)
3079 && ((h->root.type == bfd_link_hash_undefweak
3080 && !resolved_to_zero)
3081 || (bfd_link_pie (info)
3082 && !h->def_regular
3083 && h->def_dynamic)
3084 || ((info->nocopyreloc
3085 || (eh->def_protected
3086 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3087 && h->def_dynamic
3088 && !(h->root.u.def.section->flags & SEC_CODE))))
3089 || bfd_link_dll (info)))
3090 {
3091 bfd_boolean fail = FALSE;
3092 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3093 {
3094 /* Symbol is referenced locally. Make sure it is
3095 defined locally. */
3096 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3097 }
3098 else if (!(bfd_link_pie (info)
3099 && (h->needs_copy || eh->needs_copy)))
3100 {
3101 /* Symbol doesn't need copy reloc and isn't referenced
3102 locally. Address of protected function may not be
3103 reachable at run-time. */
3104 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3105 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3106 && h->type == STT_FUNC));
3107 }
3108
3109 if (fail)
3110 return elf_x86_64_need_pic (info, input_bfd, input_section,
3111 h, NULL, NULL, howto);
3112 }
3113 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3114 as function address. */
3115 else if (h != NULL
3116 && (input_section->flags & SEC_CODE) == 0
3117 && bfd_link_pie (info)
3118 && h->type == STT_FUNC
3119 && !h->def_regular
3120 && h->def_dynamic)
3121 goto use_plt;
3122 /* Fall through. */
3123
3124 case R_X86_64_8:
3125 case R_X86_64_16:
3126 case R_X86_64_32:
3127 case R_X86_64_PC64:
3128 case R_X86_64_64:
3129 /* FIXME: The ABI says the linker should make sure the value is
3130 the same when it's zeroextended to 64 bit. */
3131
3132 direct:
3133 if ((input_section->flags & SEC_ALLOC) == 0)
3134 break;
3135
3136 need_copy_reloc_in_pie = (bfd_link_pie (info)
3137 && h != NULL
3138 && (h->needs_copy
3139 || eh->needs_copy
3140 || (h->root.type
3141 == bfd_link_hash_undefined))
3142 && (X86_PCREL_TYPE_P (r_type)
3143 || X86_SIZE_TYPE_P (r_type)));
3144
3145 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3146 need_copy_reloc_in_pie,
3147 resolved_to_zero, FALSE))
3148 {
3149 Elf_Internal_Rela outrel;
3150 bfd_boolean skip, relocate;
3151 asection *sreloc;
3152
3153 /* When generating a shared object, these relocations
3154 are copied into the output file to be resolved at run
3155 time. */
3156 skip = FALSE;
3157 relocate = FALSE;
3158
3159 outrel.r_offset =
3160 _bfd_elf_section_offset (output_bfd, info, input_section,
3161 rel->r_offset);
3162 if (outrel.r_offset == (bfd_vma) -1)
3163 skip = TRUE;
3164 else if (outrel.r_offset == (bfd_vma) -2)
3165 skip = TRUE, relocate = TRUE;
3166
3167 outrel.r_offset += (input_section->output_section->vma
3168 + input_section->output_offset);
3169
3170 if (skip)
3171 memset (&outrel, 0, sizeof outrel);
3172
3173 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3174 {
3175 outrel.r_info = htab->r_info (h->dynindx, r_type);
3176 outrel.r_addend = rel->r_addend;
3177 }
3178 else
3179 {
3180 /* This symbol is local, or marked to become local.
3181 When relocation overflow check is disabled, we
3182 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3183 if (r_type == htab->pointer_r_type
3184 || (r_type == R_X86_64_32
3185 && info->no_reloc_overflow_check))
3186 {
3187 relocate = TRUE;
3188 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3189 outrel.r_addend = relocation + rel->r_addend;
3190 }
3191 else if (r_type == R_X86_64_64
3192 && !ABI_64_P (output_bfd))
3193 {
3194 relocate = TRUE;
3195 outrel.r_info = htab->r_info (0,
3196 R_X86_64_RELATIVE64);
3197 outrel.r_addend = relocation + rel->r_addend;
3198 /* Check addend overflow. */
3199 if ((outrel.r_addend & 0x80000000)
3200 != (rel->r_addend & 0x80000000))
3201 {
3202 const char *name;
3203 int addend = rel->r_addend;
3204 if (h && h->root.root.string)
3205 name = h->root.root.string;
3206 else
3207 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3208 sym, NULL);
3209 _bfd_error_handler
3210 /* xgettext:c-format */
3211 (_("%pB: addend %s%#x in relocation %s against "
3212 "symbol `%s' at %#" PRIx64
3213 " in section `%pA' is out of range"),
3214 input_bfd, addend < 0 ? "-" : "", addend,
3215 howto->name, name, (uint64_t) rel->r_offset,
3216 input_section);
3217 bfd_set_error (bfd_error_bad_value);
3218 return FALSE;
3219 }
3220 }
3221 else
3222 {
3223 long sindx;
3224
3225 if (bfd_is_abs_section (sec))
3226 sindx = 0;
3227 else if (sec == NULL || sec->owner == NULL)
3228 {
3229 bfd_set_error (bfd_error_bad_value);
3230 return FALSE;
3231 }
3232 else
3233 {
3234 asection *osec;
3235
3236 /* We are turning this relocation into one
3237 against a section symbol. It would be
3238 proper to subtract the symbol's value,
3239 osec->vma, from the emitted reloc addend,
3240 but ld.so expects buggy relocs. */
3241 osec = sec->output_section;
3242 sindx = elf_section_data (osec)->dynindx;
3243 if (sindx == 0)
3244 {
3245 asection *oi = htab->elf.text_index_section;
3246 sindx = elf_section_data (oi)->dynindx;
3247 }
3248 BFD_ASSERT (sindx != 0);
3249 }
3250
3251 outrel.r_info = htab->r_info (sindx, r_type);
3252 outrel.r_addend = relocation + rel->r_addend;
3253 }
3254 }
3255
3256 sreloc = elf_section_data (input_section)->sreloc;
3257
3258 if (sreloc == NULL || sreloc->contents == NULL)
3259 {
3260 r = bfd_reloc_notsupported;
3261 goto check_relocation_error;
3262 }
3263
3264 elf_append_rela (output_bfd, sreloc, &outrel);
3265
3266 /* If this reloc is against an external symbol, we do
3267 not want to fiddle with the addend. Otherwise, we
3268 need to include the symbol value so that it becomes
3269 an addend for the dynamic reloc. */
3270 if (! relocate)
3271 continue;
3272 }
3273
3274 break;
3275
3276 case R_X86_64_TLSGD:
3277 case R_X86_64_GOTPC32_TLSDESC:
3278 case R_X86_64_TLSDESC_CALL:
3279 case R_X86_64_GOTTPOFF:
3280 tls_type = GOT_UNKNOWN;
3281 if (h == NULL && local_got_offsets)
3282 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3283 else if (h != NULL)
3284 tls_type = elf_x86_hash_entry (h)->tls_type;
3285
3286 r_type_tls = r_type;
3287 if (! elf_x86_64_tls_transition (info, input_bfd,
3288 input_section, contents,
3289 symtab_hdr, sym_hashes,
3290 &r_type_tls, tls_type, rel,
3291 relend, h, r_symndx, TRUE))
3292 return FALSE;
3293
3294 if (r_type_tls == R_X86_64_TPOFF32)
3295 {
3296 bfd_vma roff = rel->r_offset;
3297
3298 BFD_ASSERT (! unresolved_reloc);
3299
3300 if (r_type == R_X86_64_TLSGD)
3301 {
3302 /* GD->LE transition. For 64bit, change
3303 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3304 .word 0x6666; rex64; call __tls_get_addr@PLT
3305 or
3306 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3307 .byte 0x66; rex64
3308 call *__tls_get_addr@GOTPCREL(%rip)
3309 which may be converted to
3310 addr32 call __tls_get_addr
3311 into:
3312 movq %fs:0, %rax
3313 leaq foo@tpoff(%rax), %rax
3314 For 32bit, change
3315 leaq foo@tlsgd(%rip), %rdi
3316 .word 0x6666; rex64; call __tls_get_addr@PLT
3317 or
3318 leaq foo@tlsgd(%rip), %rdi
3319 .byte 0x66; rex64
3320 call *__tls_get_addr@GOTPCREL(%rip)
3321 which may be converted to
3322 addr32 call __tls_get_addr
3323 into:
3324 movl %fs:0, %eax
3325 leaq foo@tpoff(%rax), %rax
3326 For largepic, change:
3327 leaq foo@tlsgd(%rip), %rdi
3328 movabsq $__tls_get_addr@pltoff, %rax
3329 addq %r15, %rax
3330 call *%rax
3331 into:
3332 movq %fs:0, %rax
3333 leaq foo@tpoff(%rax), %rax
3334 nopw 0x0(%rax,%rax,1) */
3335 int largepic = 0;
3336 if (ABI_64_P (output_bfd))
3337 {
3338 if (contents[roff + 5] == 0xb8)
3339 {
3340 memcpy (contents + roff - 3,
3341 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3342 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3343 largepic = 1;
3344 }
3345 else
3346 memcpy (contents + roff - 4,
3347 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3348 16);
3349 }
3350 else
3351 memcpy (contents + roff - 3,
3352 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3353 15);
3354 bfd_put_32 (output_bfd,
3355 elf_x86_64_tpoff (info, relocation),
3356 contents + roff + 8 + largepic);
3357 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3358 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3359 rel++;
3360 wrel++;
3361 continue;
3362 }
3363 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3364 {
3365 /* GDesc -> LE transition.
3366 It's originally something like:
3367 leaq x@tlsdesc(%rip), %rax
3368
3369 Change it to:
3370 movl $x@tpoff, %rax. */
3371
3372 unsigned int val, type;
3373
3374 type = bfd_get_8 (input_bfd, contents + roff - 3);
3375 val = bfd_get_8 (input_bfd, contents + roff - 1);
3376 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3377 contents + roff - 3);
3378 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3379 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3380 contents + roff - 1);
3381 bfd_put_32 (output_bfd,
3382 elf_x86_64_tpoff (info, relocation),
3383 contents + roff);
3384 continue;
3385 }
3386 else if (r_type == R_X86_64_TLSDESC_CALL)
3387 {
3388 /* GDesc -> LE transition.
3389 It's originally:
3390 call *(%rax)
3391 Turn it into:
3392 xchg %ax,%ax. */
3393 bfd_put_8 (output_bfd, 0x66, contents + roff);
3394 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3395 continue;
3396 }
3397 else if (r_type == R_X86_64_GOTTPOFF)
3398 {
3399 /* IE->LE transition:
3400 For 64bit, originally it can be one of:
3401 movq foo@gottpoff(%rip), %reg
3402 addq foo@gottpoff(%rip), %reg
3403 We change it into:
3404 movq $foo, %reg
3405 leaq foo(%reg), %reg
3406 addq $foo, %reg.
3407 For 32bit, originally it can be one of:
3408 movq foo@gottpoff(%rip), %reg
3409 addl foo@gottpoff(%rip), %reg
3410 We change it into:
3411 movq $foo, %reg
3412 leal foo(%reg), %reg
3413 addl $foo, %reg. */
3414
3415 unsigned int val, type, reg;
3416
3417 if (roff >= 3)
3418 val = bfd_get_8 (input_bfd, contents + roff - 3);
3419 else
3420 val = 0;
3421 type = bfd_get_8 (input_bfd, contents + roff - 2);
3422 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3423 reg >>= 3;
3424 if (type == 0x8b)
3425 {
3426 /* movq */
3427 if (val == 0x4c)
3428 bfd_put_8 (output_bfd, 0x49,
3429 contents + roff - 3);
3430 else if (!ABI_64_P (output_bfd) && val == 0x44)
3431 bfd_put_8 (output_bfd, 0x41,
3432 contents + roff - 3);
3433 bfd_put_8 (output_bfd, 0xc7,
3434 contents + roff - 2);
3435 bfd_put_8 (output_bfd, 0xc0 | reg,
3436 contents + roff - 1);
3437 }
3438 else if (reg == 4)
3439 {
3440 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3441 is special */
3442 if (val == 0x4c)
3443 bfd_put_8 (output_bfd, 0x49,
3444 contents + roff - 3);
3445 else if (!ABI_64_P (output_bfd) && val == 0x44)
3446 bfd_put_8 (output_bfd, 0x41,
3447 contents + roff - 3);
3448 bfd_put_8 (output_bfd, 0x81,
3449 contents + roff - 2);
3450 bfd_put_8 (output_bfd, 0xc0 | reg,
3451 contents + roff - 1);
3452 }
3453 else
3454 {
3455 /* addq/addl -> leaq/leal */
3456 if (val == 0x4c)
3457 bfd_put_8 (output_bfd, 0x4d,
3458 contents + roff - 3);
3459 else if (!ABI_64_P (output_bfd) && val == 0x44)
3460 bfd_put_8 (output_bfd, 0x45,
3461 contents + roff - 3);
3462 bfd_put_8 (output_bfd, 0x8d,
3463 contents + roff - 2);
3464 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3465 contents + roff - 1);
3466 }
3467 bfd_put_32 (output_bfd,
3468 elf_x86_64_tpoff (info, relocation),
3469 contents + roff);
3470 continue;
3471 }
3472 else
3473 BFD_ASSERT (FALSE);
3474 }
3475
3476 if (htab->elf.sgot == NULL)
3477 abort ();
3478
3479 if (h != NULL)
3480 {
3481 off = h->got.offset;
3482 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3483 }
3484 else
3485 {
3486 if (local_got_offsets == NULL)
3487 abort ();
3488
3489 off = local_got_offsets[r_symndx];
3490 offplt = local_tlsdesc_gotents[r_symndx];
3491 }
3492
3493 if ((off & 1) != 0)
3494 off &= ~1;
3495 else
3496 {
3497 Elf_Internal_Rela outrel;
3498 int dr_type, indx;
3499 asection *sreloc;
3500
3501 if (htab->elf.srelgot == NULL)
3502 abort ();
3503
3504 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3505
3506 if (GOT_TLS_GDESC_P (tls_type))
3507 {
3508 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3509 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3510 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3511 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3512 + htab->elf.sgotplt->output_offset
3513 + offplt
3514 + htab->sgotplt_jump_table_size);
3515 sreloc = htab->elf.srelplt;
3516 if (indx == 0)
3517 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3518 else
3519 outrel.r_addend = 0;
3520 elf_append_rela (output_bfd, sreloc, &outrel);
3521 }
3522
3523 sreloc = htab->elf.srelgot;
3524
3525 outrel.r_offset = (htab->elf.sgot->output_section->vma
3526 + htab->elf.sgot->output_offset + off);
3527
3528 if (GOT_TLS_GD_P (tls_type))
3529 dr_type = R_X86_64_DTPMOD64;
3530 else if (GOT_TLS_GDESC_P (tls_type))
3531 goto dr_done;
3532 else
3533 dr_type = R_X86_64_TPOFF64;
3534
3535 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3536 outrel.r_addend = 0;
3537 if ((dr_type == R_X86_64_TPOFF64
3538 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3539 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3540 outrel.r_info = htab->r_info (indx, dr_type);
3541
3542 elf_append_rela (output_bfd, sreloc, &outrel);
3543
3544 if (GOT_TLS_GD_P (tls_type))
3545 {
3546 if (indx == 0)
3547 {
3548 BFD_ASSERT (! unresolved_reloc);
3549 bfd_put_64 (output_bfd,
3550 relocation - _bfd_x86_elf_dtpoff_base (info),
3551 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3552 }
3553 else
3554 {
3555 bfd_put_64 (output_bfd, 0,
3556 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3557 outrel.r_info = htab->r_info (indx,
3558 R_X86_64_DTPOFF64);
3559 outrel.r_offset += GOT_ENTRY_SIZE;
3560 elf_append_rela (output_bfd, sreloc,
3561 &outrel);
3562 }
3563 }
3564
3565 dr_done:
3566 if (h != NULL)
3567 h->got.offset |= 1;
3568 else
3569 local_got_offsets[r_symndx] |= 1;
3570 }
3571
3572 if (off >= (bfd_vma) -2
3573 && ! GOT_TLS_GDESC_P (tls_type))
3574 abort ();
3575 if (r_type_tls == r_type)
3576 {
3577 if (r_type == R_X86_64_GOTPC32_TLSDESC
3578 || r_type == R_X86_64_TLSDESC_CALL)
3579 relocation = htab->elf.sgotplt->output_section->vma
3580 + htab->elf.sgotplt->output_offset
3581 + offplt + htab->sgotplt_jump_table_size;
3582 else
3583 relocation = htab->elf.sgot->output_section->vma
3584 + htab->elf.sgot->output_offset + off;
3585 unresolved_reloc = FALSE;
3586 }
3587 else
3588 {
3589 bfd_vma roff = rel->r_offset;
3590
3591 if (r_type == R_X86_64_TLSGD)
3592 {
3593 /* GD->IE transition. For 64bit, change
3594 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3595 .word 0x6666; rex64; call __tls_get_addr@PLT
3596 or
3597 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3598 .byte 0x66; rex64
3599 call *__tls_get_addr@GOTPCREL(%rip
3600 which may be converted to
3601 addr32 call __tls_get_addr
3602 into:
3603 movq %fs:0, %rax
3604 addq foo@gottpoff(%rip), %rax
3605 For 32bit, change
3606 leaq foo@tlsgd(%rip), %rdi
3607 .word 0x6666; rex64; call __tls_get_addr@PLT
3608 or
3609 leaq foo@tlsgd(%rip), %rdi
3610 .byte 0x66; rex64;
3611 call *__tls_get_addr@GOTPCREL(%rip)
3612 which may be converted to
3613 addr32 call __tls_get_addr
3614 into:
3615 movl %fs:0, %eax
3616 addq foo@gottpoff(%rip), %rax
3617 For largepic, change:
3618 leaq foo@tlsgd(%rip), %rdi
3619 movabsq $__tls_get_addr@pltoff, %rax
3620 addq %r15, %rax
3621 call *%rax
3622 into:
3623 movq %fs:0, %rax
3624 addq foo@gottpoff(%rax), %rax
3625 nopw 0x0(%rax,%rax,1) */
3626 int largepic = 0;
3627 if (ABI_64_P (output_bfd))
3628 {
3629 if (contents[roff + 5] == 0xb8)
3630 {
3631 memcpy (contents + roff - 3,
3632 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3633 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3634 largepic = 1;
3635 }
3636 else
3637 memcpy (contents + roff - 4,
3638 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3639 16);
3640 }
3641 else
3642 memcpy (contents + roff - 3,
3643 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3644 15);
3645
3646 relocation = (htab->elf.sgot->output_section->vma
3647 + htab->elf.sgot->output_offset + off
3648 - roff
3649 - largepic
3650 - input_section->output_section->vma
3651 - input_section->output_offset
3652 - 12);
3653 bfd_put_32 (output_bfd, relocation,
3654 contents + roff + 8 + largepic);
3655 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3656 rel++;
3657 wrel++;
3658 continue;
3659 }
3660 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3661 {
3662 /* GDesc -> IE transition.
3663 It's originally something like:
3664 leaq x@tlsdesc(%rip), %rax
3665
3666 Change it to:
3667 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3668
3669 /* Now modify the instruction as appropriate. To
3670 turn a leaq into a movq in the form we use it, it
3671 suffices to change the second byte from 0x8d to
3672 0x8b. */
3673 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3674
3675 bfd_put_32 (output_bfd,
3676 htab->elf.sgot->output_section->vma
3677 + htab->elf.sgot->output_offset + off
3678 - rel->r_offset
3679 - input_section->output_section->vma
3680 - input_section->output_offset
3681 - 4,
3682 contents + roff);
3683 continue;
3684 }
3685 else if (r_type == R_X86_64_TLSDESC_CALL)
3686 {
3687 /* GDesc -> IE transition.
3688 It's originally:
3689 call *(%rax)
3690
3691 Change it to:
3692 xchg %ax, %ax. */
3693
3694 bfd_put_8 (output_bfd, 0x66, contents + roff);
3695 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3696 continue;
3697 }
3698 else
3699 BFD_ASSERT (FALSE);
3700 }
3701 break;
3702
3703 case R_X86_64_TLSLD:
3704 if (! elf_x86_64_tls_transition (info, input_bfd,
3705 input_section, contents,
3706 symtab_hdr, sym_hashes,
3707 &r_type, GOT_UNKNOWN, rel,
3708 relend, h, r_symndx, TRUE))
3709 return FALSE;
3710
3711 if (r_type != R_X86_64_TLSLD)
3712 {
3713 /* LD->LE transition:
3714 leaq foo@tlsld(%rip), %rdi
3715 call __tls_get_addr@PLT
3716 For 64bit, we change it into:
3717 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3718 For 32bit, we change it into:
3719 nopl 0x0(%rax); movl %fs:0, %eax
3720 Or
3721 leaq foo@tlsld(%rip), %rdi;
3722 call *__tls_get_addr@GOTPCREL(%rip)
3723 which may be converted to
3724 addr32 call __tls_get_addr
3725 For 64bit, we change it into:
3726 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3727 For 32bit, we change it into:
3728 nopw 0x0(%rax); movl %fs:0, %eax
3729 For largepic, change:
3730 leaq foo@tlsgd(%rip), %rdi
3731 movabsq $__tls_get_addr@pltoff, %rax
3732 addq %rbx, %rax
3733 call *%rax
3734 into
3735 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3736 movq %fs:0, %eax */
3737
3738 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3739 if (ABI_64_P (output_bfd))
3740 {
3741 if (contents[rel->r_offset + 5] == 0xb8)
3742 memcpy (contents + rel->r_offset - 3,
3743 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3744 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3745 else if (contents[rel->r_offset + 4] == 0xff
3746 || contents[rel->r_offset + 4] == 0x67)
3747 memcpy (contents + rel->r_offset - 3,
3748 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3749 13);
3750 else
3751 memcpy (contents + rel->r_offset - 3,
3752 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3753 }
3754 else
3755 {
3756 if (contents[rel->r_offset + 4] == 0xff)
3757 memcpy (contents + rel->r_offset - 3,
3758 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3759 13);
3760 else
3761 memcpy (contents + rel->r_offset - 3,
3762 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3763 }
3764 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3765 and R_X86_64_PLTOFF64. */
3766 rel++;
3767 wrel++;
3768 continue;
3769 }
3770
3771 if (htab->elf.sgot == NULL)
3772 abort ();
3773
3774 off = htab->tls_ld_or_ldm_got.offset;
3775 if (off & 1)
3776 off &= ~1;
3777 else
3778 {
3779 Elf_Internal_Rela outrel;
3780
3781 if (htab->elf.srelgot == NULL)
3782 abort ();
3783
3784 outrel.r_offset = (htab->elf.sgot->output_section->vma
3785 + htab->elf.sgot->output_offset + off);
3786
3787 bfd_put_64 (output_bfd, 0,
3788 htab->elf.sgot->contents + off);
3789 bfd_put_64 (output_bfd, 0,
3790 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3791 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3792 outrel.r_addend = 0;
3793 elf_append_rela (output_bfd, htab->elf.srelgot,
3794 &outrel);
3795 htab->tls_ld_or_ldm_got.offset |= 1;
3796 }
3797 relocation = htab->elf.sgot->output_section->vma
3798 + htab->elf.sgot->output_offset + off;
3799 unresolved_reloc = FALSE;
3800 break;
3801
3802 case R_X86_64_DTPOFF32:
3803 if (!bfd_link_executable (info)
3804 || (input_section->flags & SEC_CODE) == 0)
3805 relocation -= _bfd_x86_elf_dtpoff_base (info);
3806 else
3807 relocation = elf_x86_64_tpoff (info, relocation);
3808 break;
3809
3810 case R_X86_64_TPOFF32:
3811 case R_X86_64_TPOFF64:
3812 BFD_ASSERT (bfd_link_executable (info));
3813 relocation = elf_x86_64_tpoff (info, relocation);
3814 break;
3815
3816 case R_X86_64_DTPOFF64:
3817 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3818 relocation -= _bfd_x86_elf_dtpoff_base (info);
3819 break;
3820
3821 default:
3822 break;
3823 }
3824
3825 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3826 because such sections are not SEC_ALLOC and thus ld.so will
3827 not process them. */
3828 if (unresolved_reloc
3829 && !((input_section->flags & SEC_DEBUGGING) != 0
3830 && h->def_dynamic)
3831 && _bfd_elf_section_offset (output_bfd, info, input_section,
3832 rel->r_offset) != (bfd_vma) -1)
3833 {
3834 switch (r_type)
3835 {
3836 case R_X86_64_32S:
3837 sec = h->root.u.def.section;
3838 if ((info->nocopyreloc
3839 || (eh->def_protected
3840 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3841 && !(h->root.u.def.section->flags & SEC_CODE))
3842 return elf_x86_64_need_pic (info, input_bfd, input_section,
3843 h, NULL, NULL, howto);
3844 /* Fall through. */
3845
3846 default:
3847 _bfd_error_handler
3848 /* xgettext:c-format */
3849 (_("%pB(%pA+%#" PRIx64 "): "
3850 "unresolvable %s relocation against symbol `%s'"),
3851 input_bfd,
3852 input_section,
3853 (uint64_t) rel->r_offset,
3854 howto->name,
3855 h->root.root.string);
3856 return FALSE;
3857 }
3858 }
3859
3860 do_relocation:
3861 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3862 contents, rel->r_offset,
3863 relocation, rel->r_addend);
3864
3865 check_relocation_error:
3866 if (r != bfd_reloc_ok)
3867 {
3868 const char *name;
3869
3870 if (h != NULL)
3871 name = h->root.root.string;
3872 else
3873 {
3874 name = bfd_elf_string_from_elf_section (input_bfd,
3875 symtab_hdr->sh_link,
3876 sym->st_name);
3877 if (name == NULL)
3878 return FALSE;
3879 if (*name == '\0')
3880 name = bfd_section_name (input_bfd, sec);
3881 }
3882
3883 if (r == bfd_reloc_overflow)
3884 {
3885 if (converted_reloc)
3886 {
3887 info->callbacks->einfo
3888 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3889 return FALSE;
3890 }
3891 (*info->callbacks->reloc_overflow)
3892 (info, (h ? &h->root : NULL), name, howto->name,
3893 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3894 }
3895 else
3896 {
3897 _bfd_error_handler
3898 /* xgettext:c-format */
3899 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3900 input_bfd, input_section,
3901 (uint64_t) rel->r_offset, name, (int) r);
3902 return FALSE;
3903 }
3904 }
3905
3906 if (wrel != rel)
3907 *wrel = *rel;
3908 }
3909
3910 if (wrel != rel)
3911 {
3912 Elf_Internal_Shdr *rel_hdr;
3913 size_t deleted = rel - wrel;
3914
3915 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3916 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3917 if (rel_hdr->sh_size == 0)
3918 {
3919 /* It is too late to remove an empty reloc section. Leave
3920 one NONE reloc.
3921 ??? What is wrong with an empty section??? */
3922 rel_hdr->sh_size = rel_hdr->sh_entsize;
3923 deleted -= 1;
3924 }
3925 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3926 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3927 input_section->reloc_count -= deleted;
3928 }
3929
3930 return TRUE;
3931 }
3932
3933 /* Finish up dynamic symbol handling. We set the contents of various
3934 dynamic sections here. */
3935
3936 static bfd_boolean
3937 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3938 struct bfd_link_info *info,
3939 struct elf_link_hash_entry *h,
3940 Elf_Internal_Sym *sym)
3941 {
3942 struct elf_x86_link_hash_table *htab;
3943 bfd_boolean use_plt_second;
3944 struct elf_x86_link_hash_entry *eh;
3945 bfd_boolean local_undefweak;
3946
3947 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3948 if (htab == NULL)
3949 return FALSE;
3950
3951 /* Use the second PLT section only if there is .plt section. */
3952 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3953
3954 eh = (struct elf_x86_link_hash_entry *) h;
3955 if (eh->no_finish_dynamic_symbol)
3956 abort ();
3957
3958 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3959 resolved undefined weak symbols in executable so that their
3960 references have value 0 at run-time. */
3961 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3962
3963 if (h->plt.offset != (bfd_vma) -1)
3964 {
3965 bfd_vma plt_index;
3966 bfd_vma got_offset, plt_offset;
3967 Elf_Internal_Rela rela;
3968 bfd_byte *loc;
3969 asection *plt, *gotplt, *relplt, *resolved_plt;
3970 const struct elf_backend_data *bed;
3971 bfd_vma plt_got_pcrel_offset;
3972
3973 /* When building a static executable, use .iplt, .igot.plt and
3974 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3975 if (htab->elf.splt != NULL)
3976 {
3977 plt = htab->elf.splt;
3978 gotplt = htab->elf.sgotplt;
3979 relplt = htab->elf.srelplt;
3980 }
3981 else
3982 {
3983 plt = htab->elf.iplt;
3984 gotplt = htab->elf.igotplt;
3985 relplt = htab->elf.irelplt;
3986 }
3987
3988 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3989
3990 /* Get the index in the procedure linkage table which
3991 corresponds to this symbol. This is the index of this symbol
3992 in all the symbols for which we are making plt entries. The
3993 first entry in the procedure linkage table is reserved.
3994
3995 Get the offset into the .got table of the entry that
3996 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3997 bytes. The first three are reserved for the dynamic linker.
3998
3999 For static executables, we don't reserve anything. */
4000
4001 if (plt == htab->elf.splt)
4002 {
4003 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4004 - htab->plt.has_plt0);
4005 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4006 }
4007 else
4008 {
4009 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4010 got_offset = got_offset * GOT_ENTRY_SIZE;
4011 }
4012
4013 /* Fill in the entry in the procedure linkage table. */
4014 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4015 htab->plt.plt_entry_size);
4016 if (use_plt_second)
4017 {
4018 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4019 htab->non_lazy_plt->plt_entry,
4020 htab->non_lazy_plt->plt_entry_size);
4021
4022 resolved_plt = htab->plt_second;
4023 plt_offset = eh->plt_second.offset;
4024 }
4025 else
4026 {
4027 resolved_plt = plt;
4028 plt_offset = h->plt.offset;
4029 }
4030
4031 /* Insert the relocation positions of the plt section. */
4032
4033 /* Put offset the PC-relative instruction referring to the GOT entry,
4034 subtracting the size of that instruction. */
4035 plt_got_pcrel_offset = (gotplt->output_section->vma
4036 + gotplt->output_offset
4037 + got_offset
4038 - resolved_plt->output_section->vma
4039 - resolved_plt->output_offset
4040 - plt_offset
4041 - htab->plt.plt_got_insn_size);
4042
4043 /* Check PC-relative offset overflow in PLT entry. */
4044 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4045 /* xgettext:c-format */
4046 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4047 output_bfd, h->root.root.string);
4048
4049 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4050 (resolved_plt->contents + plt_offset
4051 + htab->plt.plt_got_offset));
4052
4053 /* Fill in the entry in the global offset table, initially this
4054 points to the second part of the PLT entry. Leave the entry
4055 as zero for undefined weak symbol in PIE. No PLT relocation
4056 against undefined weak symbol in PIE. */
4057 if (!local_undefweak)
4058 {
4059 if (htab->plt.has_plt0)
4060 bfd_put_64 (output_bfd, (plt->output_section->vma
4061 + plt->output_offset
4062 + h->plt.offset
4063 + htab->lazy_plt->plt_lazy_offset),
4064 gotplt->contents + got_offset);
4065
4066 /* Fill in the entry in the .rela.plt section. */
4067 rela.r_offset = (gotplt->output_section->vma
4068 + gotplt->output_offset
4069 + got_offset);
4070 if (PLT_LOCAL_IFUNC_P (info, h))
4071 {
4072 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4073 h->root.root.string,
4074 h->root.u.def.section->owner);
4075
4076 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4077 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4078 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4079 rela.r_addend = (h->root.u.def.value
4080 + h->root.u.def.section->output_section->vma
4081 + h->root.u.def.section->output_offset);
4082 /* R_X86_64_IRELATIVE comes last. */
4083 plt_index = htab->next_irelative_index--;
4084 }
4085 else
4086 {
4087 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4088 rela.r_addend = 0;
4089 plt_index = htab->next_jump_slot_index++;
4090 }
4091
4092 /* Don't fill the second and third slots in PLT entry for
4093 static executables nor without PLT0. */
4094 if (plt == htab->elf.splt && htab->plt.has_plt0)
4095 {
4096 bfd_vma plt0_offset
4097 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4098
4099 /* Put relocation index. */
4100 bfd_put_32 (output_bfd, plt_index,
4101 (plt->contents + h->plt.offset
4102 + htab->lazy_plt->plt_reloc_offset));
4103
4104 /* Put offset for jmp .PLT0 and check for overflow. We don't
4105 check relocation index for overflow since branch displacement
4106 will overflow first. */
4107 if (plt0_offset > 0x80000000)
4108 /* xgettext:c-format */
4109 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4110 output_bfd, h->root.root.string);
4111 bfd_put_32 (output_bfd, - plt0_offset,
4112 (plt->contents + h->plt.offset
4113 + htab->lazy_plt->plt_plt_offset));
4114 }
4115
4116 bed = get_elf_backend_data (output_bfd);
4117 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4118 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4119 }
4120 }
4121 else if (eh->plt_got.offset != (bfd_vma) -1)
4122 {
4123 bfd_vma got_offset, plt_offset;
4124 asection *plt, *got;
4125 bfd_boolean got_after_plt;
4126 int32_t got_pcrel_offset;
4127
4128 /* Set the entry in the GOT procedure linkage table. */
4129 plt = htab->plt_got;
4130 got = htab->elf.sgot;
4131 got_offset = h->got.offset;
4132
4133 if (got_offset == (bfd_vma) -1
4134 || (h->type == STT_GNU_IFUNC && h->def_regular)
4135 || plt == NULL
4136 || got == NULL)
4137 abort ();
4138
4139 /* Use the non-lazy PLT entry template for the GOT PLT since they
4140 are the identical. */
4141 /* Fill in the entry in the GOT procedure linkage table. */
4142 plt_offset = eh->plt_got.offset;
4143 memcpy (plt->contents + plt_offset,
4144 htab->non_lazy_plt->plt_entry,
4145 htab->non_lazy_plt->plt_entry_size);
4146
4147 /* Put offset the PC-relative instruction referring to the GOT
4148 entry, subtracting the size of that instruction. */
4149 got_pcrel_offset = (got->output_section->vma
4150 + got->output_offset
4151 + got_offset
4152 - plt->output_section->vma
4153 - plt->output_offset
4154 - plt_offset
4155 - htab->non_lazy_plt->plt_got_insn_size);
4156
4157 /* Check PC-relative offset overflow in GOT PLT entry. */
4158 got_after_plt = got->output_section->vma > plt->output_section->vma;
4159 if ((got_after_plt && got_pcrel_offset < 0)
4160 || (!got_after_plt && got_pcrel_offset > 0))
4161 /* xgettext:c-format */
4162 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4163 output_bfd, h->root.root.string);
4164
4165 bfd_put_32 (output_bfd, got_pcrel_offset,
4166 (plt->contents + plt_offset
4167 + htab->non_lazy_plt->plt_got_offset));
4168 }
4169
4170 if (!local_undefweak
4171 && !h->def_regular
4172 && (h->plt.offset != (bfd_vma) -1
4173 || eh->plt_got.offset != (bfd_vma) -1))
4174 {
4175 /* Mark the symbol as undefined, rather than as defined in
4176 the .plt section. Leave the value if there were any
4177 relocations where pointer equality matters (this is a clue
4178 for the dynamic linker, to make function pointer
4179 comparisons work between an application and shared
4180 library), otherwise set it to zero. If a function is only
4181 called from a binary, there is no need to slow down
4182 shared libraries because of that. */
4183 sym->st_shndx = SHN_UNDEF;
4184 if (!h->pointer_equality_needed)
4185 sym->st_value = 0;
4186 }
4187
4188 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4189
4190 /* Don't generate dynamic GOT relocation against undefined weak
4191 symbol in executable. */
4192 if (h->got.offset != (bfd_vma) -1
4193 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4194 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4195 && !local_undefweak)
4196 {
4197 Elf_Internal_Rela rela;
4198 asection *relgot = htab->elf.srelgot;
4199
4200 /* This symbol has an entry in the global offset table. Set it
4201 up. */
4202 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4203 abort ();
4204
4205 rela.r_offset = (htab->elf.sgot->output_section->vma
4206 + htab->elf.sgot->output_offset
4207 + (h->got.offset &~ (bfd_vma) 1));
4208
4209 /* If this is a static link, or it is a -Bsymbolic link and the
4210 symbol is defined locally or was forced to be local because
4211 of a version file, we just want to emit a RELATIVE reloc.
4212 The entry in the global offset table will already have been
4213 initialized in the relocate_section function. */
4214 if (h->def_regular
4215 && h->type == STT_GNU_IFUNC)
4216 {
4217 if (h->plt.offset == (bfd_vma) -1)
4218 {
4219 /* STT_GNU_IFUNC is referenced without PLT. */
4220 if (htab->elf.splt == NULL)
4221 {
4222 /* use .rel[a].iplt section to store .got relocations
4223 in static executable. */
4224 relgot = htab->elf.irelplt;
4225 }
4226 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4227 {
4228 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4229 h->root.root.string,
4230 h->root.u.def.section->owner);
4231
4232 rela.r_info = htab->r_info (0,
4233 R_X86_64_IRELATIVE);
4234 rela.r_addend = (h->root.u.def.value
4235 + h->root.u.def.section->output_section->vma
4236 + h->root.u.def.section->output_offset);
4237 }
4238 else
4239 goto do_glob_dat;
4240 }
4241 else if (bfd_link_pic (info))
4242 {
4243 /* Generate R_X86_64_GLOB_DAT. */
4244 goto do_glob_dat;
4245 }
4246 else
4247 {
4248 asection *plt;
4249 bfd_vma plt_offset;
4250
4251 if (!h->pointer_equality_needed)
4252 abort ();
4253
		  /* For a non-shared object, we can't use .got.plt, which
		     contains the real function address, if we need pointer
		     equality.  We load the GOT entry with the PLT entry.  */
4257 if (htab->plt_second != NULL)
4258 {
4259 plt = htab->plt_second;
4260 plt_offset = eh->plt_second.offset;
4261 }
4262 else
4263 {
4264 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4265 plt_offset = h->plt.offset;
4266 }
4267 bfd_put_64 (output_bfd, (plt->output_section->vma
4268 + plt->output_offset
4269 + plt_offset),
4270 htab->elf.sgot->contents + h->got.offset);
4271 return TRUE;
4272 }
4273 }
4274 else if (bfd_link_pic (info)
4275 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4276 {
4277 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4278 return FALSE;
4279 BFD_ASSERT((h->got.offset & 1) != 0);
4280 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4281 rela.r_addend = (h->root.u.def.value
4282 + h->root.u.def.section->output_section->vma
4283 + h->root.u.def.section->output_offset);
4284 }
4285 else
4286 {
4287 BFD_ASSERT((h->got.offset & 1) == 0);
4288 do_glob_dat:
4289 bfd_put_64 (output_bfd, (bfd_vma) 0,
4290 htab->elf.sgot->contents + h->got.offset);
4291 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4292 rela.r_addend = 0;
4293 }
4294
4295 elf_append_rela (output_bfd, relgot, &rela);
4296 }
4297
4298 if (h->needs_copy)
4299 {
4300 Elf_Internal_Rela rela;
4301 asection *s;
4302
4303 /* This symbol needs a copy reloc. Set it up. */
4304 VERIFY_COPY_RELOC (h, htab)
4305
4306 rela.r_offset = (h->root.u.def.value
4307 + h->root.u.def.section->output_section->vma
4308 + h->root.u.def.section->output_offset);
4309 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4310 rela.r_addend = 0;
4311 if (h->root.u.def.section == htab->elf.sdynrelro)
4312 s = htab->elf.sreldynrelro;
4313 else
4314 s = htab->elf.srelbss;
4315 elf_append_rela (output_bfd, s, &rela);
4316 }
4317
4318 return TRUE;
4319 }
4320
4321 /* Finish up local dynamic symbol handling. We set the contents of
4322 various dynamic sections here. */
4323
4324 static bfd_boolean
4325 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4326 {
4327 struct elf_link_hash_entry *h
4328 = (struct elf_link_hash_entry *) *slot;
4329 struct bfd_link_info *info
4330 = (struct bfd_link_info *) inf;
4331
4332 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4333 info, h, NULL);
4334 }
4335
4336 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4337 here since undefined weak symbol may not be dynamic and may not be
4338 called for elf_x86_64_finish_dynamic_symbol. */
4339
4340 static bfd_boolean
4341 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4342 void *inf)
4343 {
4344 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4345 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4346
4347 if (h->root.type != bfd_link_hash_undefweak
4348 || h->dynindx != -1)
4349 return TRUE;
4350
4351 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4352 info, h, NULL);
4353 }
4354
4355 /* Used to decide how to sort relocs in an optimal manner for the
4356 dynamic linker, before writing them out. */
4357
4358 static enum elf_reloc_type_class
4359 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4360 const asection *rel_sec ATTRIBUTE_UNUSED,
4361 const Elf_Internal_Rela *rela)
4362 {
4363 bfd *abfd = info->output_bfd;
4364 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4365 struct elf_x86_link_hash_table *htab
4366 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4367
4368 if (htab->elf.dynsym != NULL
4369 && htab->elf.dynsym->contents != NULL)
4370 {
4371 /* Check relocation against STT_GNU_IFUNC symbol if there are
4372 dynamic symbols. */
4373 unsigned long r_symndx = htab->r_sym (rela->r_info);
4374 if (r_symndx != STN_UNDEF)
4375 {
4376 Elf_Internal_Sym sym;
4377 if (!bed->s->swap_symbol_in (abfd,
4378 (htab->elf.dynsym->contents
4379 + r_symndx * bed->s->sizeof_sym),
4380 0, &sym))
4381 abort ();
4382
4383 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4384 return reloc_class_ifunc;
4385 }
4386 }
4387
4388 switch ((int) ELF32_R_TYPE (rela->r_info))
4389 {
4390 case R_X86_64_IRELATIVE:
4391 return reloc_class_ifunc;
4392 case R_X86_64_RELATIVE:
4393 case R_X86_64_RELATIVE64:
4394 return reloc_class_relative;
4395 case R_X86_64_JUMP_SLOT:
4396 return reloc_class_plt;
4397 case R_X86_64_COPY:
4398 return reloc_class_copy;
4399 default:
4400 return reloc_class_normal;
4401 }
4402 }
4403
4404 /* Finish up the dynamic sections. */
4405
static bfd_boolean
elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
				    struct bfd_link_info *info)
{
  struct elf_x86_link_hash_table *htab;

  /* Let the generic x86 code fill in the common dynamic section
     entries first; it returns the hash table on success.  */
  htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
  if (htab == NULL)
    return FALSE;

  if (! htab->elf.dynamic_sections_created)
    return TRUE;

  if (htab->elf.splt && htab->elf.splt->size > 0)
    {
      /* Record the size of a PLT entry in the output section
	 header.  */
      elf_section_data (htab->elf.splt->output_section)
	->this_hdr.sh_entsize = htab->plt.plt_entry_size;

      if (htab->plt.has_plt0)
	{
	  /* Fill in the special first entry in the procedure linkage
	     table.  */
	  memcpy (htab->elf.splt->contents,
		  htab->lazy_plt->plt0_entry,
		  htab->lazy_plt->plt0_entry_size);
	  /* Add offset for pushq GOT+8(%rip), since the instruction
	     uses 6 bytes subtract this value.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 8
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - 6),
		      (htab->elf.splt->contents
		       + htab->lazy_plt->plt0_got1_offset));
	  /* Add offset for the PC-relative instruction accessing
	     GOT+16, subtracting the offset to the end of that
	     instruction.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 16
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->lazy_plt->plt0_got2_insn_end),
		      (htab->elf.splt->contents
		       + htab->lazy_plt->plt0_got2_offset));
	}

      /* If there is a TLSDESC trampoline, initialize its GOT word to
	 zero and fill in the trampoline's PC-relative operands.  */
      if (htab->tlsdesc_plt)
	{
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->elf.sgot->contents + htab->tlsdesc_got);

	  memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
		  htab->lazy_plt->plt_tlsdesc_entry,
		  htab->lazy_plt->plt_tlsdesc_entry_size);

	  /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
	     bytes and the instruction uses 6 bytes, subtract these
	     values.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 8
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->tlsdesc_plt
		       - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
		      (htab->elf.splt->contents
		       + htab->tlsdesc_plt
		       + htab->lazy_plt->plt_tlsdesc_got1_offset));
	  /* Add offset for indirect branch via GOT+TDG, where TDG
	     stands for htab->tlsdesc_got, subtracting the offset
	     to the end of that instruction.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgot->output_section->vma
		       + htab->elf.sgot->output_offset
		       + htab->tlsdesc_got
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->tlsdesc_plt
		       - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
		      (htab->elf.splt->contents
		       + htab->tlsdesc_plt
		       + htab->lazy_plt->plt_tlsdesc_got2_offset));
	}
    }

  /* Fill PLT entries for undefined weak symbols in PIE.  */
  if (bfd_link_pie (info))
    bfd_hash_traverse (&info->hash->table,
		       elf_x86_64_pie_finish_undefweak_symbol,
		       info);

  return TRUE;
}
4504
4505 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4506 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4507 It has to be done before elf_link_sort_relocs is called so that
4508 dynamic relocations are properly sorted. */
4509
4510 static bfd_boolean
4511 elf_x86_64_output_arch_local_syms
4512 (bfd *output_bfd ATTRIBUTE_UNUSED,
4513 struct bfd_link_info *info,
4514 void *flaginfo ATTRIBUTE_UNUSED,
4515 int (*func) (void *, const char *,
4516 Elf_Internal_Sym *,
4517 asection *,
4518 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4519 {
4520 struct elf_x86_link_hash_table *htab
4521 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4522 if (htab == NULL)
4523 return FALSE;
4524
4525 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4526 htab_traverse (htab->loc_hash_table,
4527 elf_x86_64_finish_local_dynamic_symbol,
4528 info);
4529
4530 return TRUE;
4531 }
4532
4533 /* Forward declaration. */
4534 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4535
4536 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4537 dynamic relocations. */
4538
static long
elf_x86_64_get_synthetic_symtab (bfd *abfd,
				 long symcount ATTRIBUTE_UNUSED,
				 asymbol **syms ATTRIBUTE_UNUSED,
				 long dynsymcount,
				 asymbol **dynsyms,
				 asymbol **ret)
{
  long count, i, n;
  int j;
  bfd_byte *plt_contents;
  long relsize;
  const struct elf_x86_lazy_plt_layout *lazy_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
  const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
  const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
  asection *plt;
  enum elf_x86_plt_type plt_type;
  /* Candidate PLT sections to scan; the exact flavor of each is
     detected below by matching its contents against the known
     templates.  */
  struct elf_x86_plt plts[] =
    {
      { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
      { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
      { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
      { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
      { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
    };

  *ret = NULL;

  /* Only shared objects and executables can have PLT entries.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
  if (relsize <= 0)
    return -1;

  /* Pick the PLT template set for this target variant (NaCl vs.
     regular, LP64 vs. x32 for the IBT templates).  */
  if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
    {
      lazy_plt = &elf_x86_64_lazy_plt;
      non_lazy_plt = &elf_x86_64_non_lazy_plt;
      lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
      non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
      if (ABI_64_P (abfd))
	{
	  lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
	  non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
	}
      else
	{
	  lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
	  non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
	}
    }
  else
    {
      lazy_plt = &elf_x86_64_nacl_plt;
      non_lazy_plt = NULL;
      lazy_bnd_plt = NULL;
      non_lazy_bnd_plt = NULL;
      lazy_ibt_plt = NULL;
      non_lazy_ibt_plt = NULL;
    }

  count = 0;
  for (j = 0; plts[j].name != NULL; j++)
    {
      plt = bfd_get_section_by_name (abfd, plts[j].name);
      if (plt == NULL || plt->size == 0)
	continue;

      /* Get the PLT section contents.  */
      plt_contents = (bfd_byte *) bfd_malloc (plt->size);
      if (plt_contents == NULL)
	break;
      if (!bfd_get_section_contents (abfd, (asection *) plt,
				     plt_contents, 0, plt->size))
	{
	  free (plt_contents);
	  break;
	}

      /* Check what kind of PLT it is.  */
      plt_type = plt_unknown;
      if (plts[j].type == plt_unknown
	  && (plt->size >= (lazy_plt->plt_entry_size
			    + lazy_plt->plt_entry_size)))
	{
	  /* Match lazy PLT first.  Need to check the first two
	     instructions.  */
	  if ((memcmp (plt_contents, lazy_plt->plt0_entry,
		       lazy_plt->plt0_got1_offset) == 0)
	      && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
			  2) == 0))
	    plt_type = plt_lazy;
	  else if (lazy_bnd_plt != NULL
		   && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
			       lazy_bnd_plt->plt0_got1_offset) == 0)
		   && (memcmp (plt_contents + 6,
			       lazy_bnd_plt->plt0_entry + 6, 3) == 0))
	    {
	      plt_type = plt_lazy | plt_second;
	      /* The first entry in the lazy IBT PLT is the same as the
		 lazy BND PLT.  Disambiguate by comparing the second
		 entry against the IBT template.  */
	      if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
			   lazy_ibt_plt->plt_entry,
			   lazy_ibt_plt->plt_got_offset) == 0))
		lazy_plt = lazy_ibt_plt;
	      else
		lazy_plt = lazy_bnd_plt;
	    }
	}

      if (non_lazy_plt != NULL
	  && (plt_type == plt_unknown || plt_type == plt_non_lazy)
	  && plt->size >= non_lazy_plt->plt_entry_size)
	{
	  /* Match non-lazy PLT.  */
	  if (memcmp (plt_contents, non_lazy_plt->plt_entry,
		      non_lazy_plt->plt_got_offset) == 0)
	    plt_type = plt_non_lazy;
	}

      if (plt_type == plt_unknown || plt_type == plt_second)
	{
	  if (non_lazy_bnd_plt != NULL
	      && plt->size >= non_lazy_bnd_plt->plt_entry_size
	      && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
			  non_lazy_bnd_plt->plt_got_offset) == 0))
	    {
	      /* Match BND PLT.  */
	      plt_type = plt_second;
	      non_lazy_plt = non_lazy_bnd_plt;
	    }
	  else if (non_lazy_ibt_plt != NULL
		   && plt->size >= non_lazy_ibt_plt->plt_entry_size
		   && (memcmp (plt_contents,
			       non_lazy_ibt_plt->plt_entry,
			       non_lazy_ibt_plt->plt_got_offset) == 0))
	    {
	      /* Match IBT PLT.  */
	      plt_type = plt_second;
	      non_lazy_plt = non_lazy_ibt_plt;
	    }
	}

      if (plt_type == plt_unknown)
	{
	  /* Unrecognized PLT layout; skip this section.  */
	  free (plt_contents);
	  continue;
	}

      plts[j].sec = plt;
      plts[j].type = plt_type;

      /* Record per-section entry geometry from the matched template.  */
      if ((plt_type & plt_lazy))
	{
	  plts[j].plt_got_offset = lazy_plt->plt_got_offset;
	  plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
	  plts[j].plt_entry_size = lazy_plt->plt_entry_size;
	  /* Skip PLT0 in lazy PLT.  */
	  i = 1;
	}
      else
	{
	  plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
	  plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
	  plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
	  i = 0;
	}

      /* Skip lazy PLT when the second PLT is used.  */
      if (plt_type == (plt_lazy | plt_second))
	plts[j].count = 0;
      else
	{
	  n = plt->size / plts[j].plt_entry_size;
	  plts[j].count = n;
	  count += n - i;
	}

      plts[j].contents = plt_contents;
    }

  /* Let the generic x86 code turn the collected PLT descriptions into
     synthetic symbols; it takes ownership of the contents buffers.  */
  return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
					    (bfd_vma) 0, plts, dynsyms,
					    ret);
}
4731
4732 /* Handle an x86-64 specific section when reading an object file. This
4733 is called when elfcode.h finds a section with an unknown type. */
4734
4735 static bfd_boolean
4736 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4737 const char *name, int shindex)
4738 {
4739 if (hdr->sh_type != SHT_X86_64_UNWIND)
4740 return FALSE;
4741
4742 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4743 return FALSE;
4744
4745 return TRUE;
4746 }
4747
4748 /* Hook called by the linker routine which adds symbols from an object
4749 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4750 of .bss. */
4751
4752 static bfd_boolean
4753 elf_x86_64_add_symbol_hook (bfd *abfd,
4754 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4755 Elf_Internal_Sym *sym,
4756 const char **namep ATTRIBUTE_UNUSED,
4757 flagword *flagsp ATTRIBUTE_UNUSED,
4758 asection **secp,
4759 bfd_vma *valp)
4760 {
4761 asection *lcomm;
4762
4763 switch (sym->st_shndx)
4764 {
4765 case SHN_X86_64_LCOMMON:
4766 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4767 if (lcomm == NULL)
4768 {
4769 lcomm = bfd_make_section_with_flags (abfd,
4770 "LARGE_COMMON",
4771 (SEC_ALLOC
4772 | SEC_IS_COMMON
4773 | SEC_LINKER_CREATED));
4774 if (lcomm == NULL)
4775 return FALSE;
4776 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4777 }
4778 *secp = lcomm;
4779 *valp = sym->st_size;
4780 return TRUE;
4781 }
4782
4783 return TRUE;
4784 }
4785
4786
4787 /* Given a BFD section, try to locate the corresponding ELF section
4788 index. */
4789
4790 static bfd_boolean
4791 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4792 asection *sec, int *index_return)
4793 {
4794 if (sec == &_bfd_elf_large_com_section)
4795 {
4796 *index_return = SHN_X86_64_LCOMMON;
4797 return TRUE;
4798 }
4799 return FALSE;
4800 }
4801
4802 /* Process a symbol. */
4803
4804 static void
4805 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4806 asymbol *asym)
4807 {
4808 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4809
4810 switch (elfsym->internal_elf_sym.st_shndx)
4811 {
4812 case SHN_X86_64_LCOMMON:
4813 asym->section = &_bfd_elf_large_com_section;
4814 asym->value = elfsym->internal_elf_sym.st_size;
4815 /* Common symbol doesn't set BSF_GLOBAL. */
4816 asym->flags &= ~BSF_GLOBAL;
4817 break;
4818 }
4819 }
4820
4821 static bfd_boolean
4822 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4823 {
4824 return (sym->st_shndx == SHN_COMMON
4825 || sym->st_shndx == SHN_X86_64_LCOMMON);
4826 }
4827
4828 static unsigned int
4829 elf_x86_64_common_section_index (asection *sec)
4830 {
4831 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4832 return SHN_COMMON;
4833 else
4834 return SHN_X86_64_LCOMMON;
4835 }
4836
4837 static asection *
4838 elf_x86_64_common_section (asection *sec)
4839 {
4840 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4841 return bfd_com_section_ptr;
4842 else
4843 return &_bfd_elf_large_com_section;
4844 }
4845
4846 static bfd_boolean
4847 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4848 const Elf_Internal_Sym *sym,
4849 asection **psec,
4850 bfd_boolean newdef,
4851 bfd_boolean olddef,
4852 bfd *oldbfd,
4853 const asection *oldsec)
4854 {
4855 /* A normal common symbol and a large common symbol result in a
4856 normal common symbol. We turn the large common symbol into a
4857 normal one. */
4858 if (!olddef
4859 && h->root.type == bfd_link_hash_common
4860 && !newdef
4861 && bfd_is_com_section (*psec)
4862 && oldsec != *psec)
4863 {
4864 if (sym->st_shndx == SHN_COMMON
4865 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4866 {
4867 h->root.u.c.p->section
4868 = bfd_make_section_old_way (oldbfd, "COMMON");
4869 h->root.u.c.p->section->flags = SEC_ALLOC;
4870 }
4871 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4872 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4873 *psec = bfd_com_section_ptr;
4874 }
4875
4876 return TRUE;
4877 }
4878
4879 static int
4880 elf_x86_64_additional_program_headers (bfd *abfd,
4881 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4882 {
4883 asection *s;
4884 int count = 0;
4885
4886 /* Check to see if we need a large readonly segment. */
4887 s = bfd_get_section_by_name (abfd, ".lrodata");
4888 if (s && (s->flags & SEC_LOAD))
4889 count++;
4890
4891 /* Check to see if we need a large data segment. Since .lbss sections
4892 is placed right after the .bss section, there should be no need for
4893 a large data segment just because of .lbss. */
4894 s = bfd_get_section_by_name (abfd, ".ldata");
4895 if (s && (s->flags & SEC_LOAD))
4896 count++;
4897
4898 return count;
4899 }
4900
4901 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4902
4903 static bfd_boolean
4904 elf_x86_64_relocs_compatible (const bfd_target *input,
4905 const bfd_target *output)
4906 {
4907 return ((xvec_get_elf_backend_data (input)->s->elfclass
4908 == xvec_get_elf_backend_data (output)->s->elfclass)
4909 && _bfd_elf_relocs_compatible (input, output));
4910 }
4911
4912 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4913 with GNU properties if found. Otherwise, return NULL. */
4914
4915 static bfd *
4916 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4917 {
4918 struct elf_x86_init_table init_table;
4919
4920 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4921 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4922 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4923 != (int) R_X86_64_GNU_VTINHERIT)
4924 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4925 != (int) R_X86_64_GNU_VTENTRY))
4926 abort ();
4927
4928 /* This is unused for x86-64. */
4929 init_table.plt0_pad_byte = 0x90;
4930
4931 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4932 {
4933 if (info->bndplt)
4934 {
4935 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4936 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4937 }
4938 else
4939 {
4940 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4941 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4942 }
4943
4944 if (ABI_64_P (info->output_bfd))
4945 {
4946 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4947 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4948 }
4949 else
4950 {
4951 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4952 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4953 }
4954 }
4955 else
4956 {
4957 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4958 init_table.non_lazy_plt = NULL;
4959 init_table.lazy_ibt_plt = NULL;
4960 init_table.non_lazy_ibt_plt = NULL;
4961 }
4962
4963 if (ABI_64_P (info->output_bfd))
4964 {
4965 init_table.r_info = elf64_r_info;
4966 init_table.r_sym = elf64_r_sym;
4967 }
4968 else
4969 {
4970 init_table.r_info = elf32_r_info;
4971 init_table.r_sym = elf32_r_sym;
4972 }
4973
4974 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4975 }
4976
4977 static const struct bfd_elf_special_section
4978 elf_x86_64_special_sections[]=
4979 {
4980 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4981 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4982 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4983 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4984 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4985 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4986 { NULL, 0, 0, 0, 0 }
4987 };
4988
4989 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4990 #define TARGET_LITTLE_NAME "elf64-x86-64"
4991 #define ELF_ARCH bfd_arch_i386
4992 #define ELF_TARGET_ID X86_64_ELF_DATA
4993 #define ELF_MACHINE_CODE EM_X86_64
4994 #if DEFAULT_LD_Z_SEPARATE_CODE
4995 # define ELF_MAXPAGESIZE 0x1000
4996 #else
4997 # define ELF_MAXPAGESIZE 0x200000
4998 #endif
4999 #define ELF_MINPAGESIZE 0x1000
5000 #define ELF_COMMONPAGESIZE 0x1000
5001
5002 #define elf_backend_can_gc_sections 1
5003 #define elf_backend_can_refcount 1
5004 #define elf_backend_want_got_plt 1
5005 #define elf_backend_plt_readonly 1
5006 #define elf_backend_want_plt_sym 0
5007 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5008 #define elf_backend_rela_normal 1
5009 #define elf_backend_plt_alignment 4
5010 #define elf_backend_extern_protected_data 1
5011 #define elf_backend_caches_rawsize 1
5012 #define elf_backend_dtrel_excludes_plt 1
5013 #define elf_backend_want_dynrelro 1
5014
5015 #define elf_info_to_howto elf_x86_64_info_to_howto
5016
5017 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5018 #define bfd_elf64_bfd_reloc_name_lookup \
5019 elf_x86_64_reloc_name_lookup
5020
5021 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5022 #define elf_backend_check_relocs elf_x86_64_check_relocs
5023 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5024 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5025 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5026 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5027 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5028 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5029 #ifdef CORE_HEADER
5030 #define elf_backend_write_core_note elf_x86_64_write_core_note
5031 #endif
5032 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5033 #define elf_backend_relocate_section elf_x86_64_relocate_section
5034 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5035 #define elf_backend_object_p elf64_x86_64_elf_object_p
5036 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5037
5038 #define elf_backend_section_from_shdr \
5039 elf_x86_64_section_from_shdr
5040
5041 #define elf_backend_section_from_bfd_section \
5042 elf_x86_64_elf_section_from_bfd_section
5043 #define elf_backend_add_symbol_hook \
5044 elf_x86_64_add_symbol_hook
5045 #define elf_backend_symbol_processing \
5046 elf_x86_64_symbol_processing
5047 #define elf_backend_common_section_index \
5048 elf_x86_64_common_section_index
5049 #define elf_backend_common_section \
5050 elf_x86_64_common_section
5051 #define elf_backend_common_definition \
5052 elf_x86_64_common_definition
5053 #define elf_backend_merge_symbol \
5054 elf_x86_64_merge_symbol
5055 #define elf_backend_special_sections \
5056 elf_x86_64_special_sections
5057 #define elf_backend_additional_program_headers \
5058 elf_x86_64_additional_program_headers
5059 #define elf_backend_setup_gnu_properties \
5060 elf_x86_64_link_setup_gnu_properties
5061 #define elf_backend_hide_symbol \
5062 _bfd_x86_elf_hide_symbol
5063
5064 #undef elf64_bed
5065 #define elf64_bed elf64_x86_64_bed
5066
5067 #include "elf64-target.h"
5068
5069 /* CloudABI support. */
5070
5071 #undef TARGET_LITTLE_SYM
5072 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5073 #undef TARGET_LITTLE_NAME
5074 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5075
5076 #undef ELF_OSABI
5077 #define ELF_OSABI ELFOSABI_CLOUDABI
5078
5079 #undef elf64_bed
5080 #define elf64_bed elf64_x86_64_cloudabi_bed
5081
5082 #include "elf64-target.h"
5083
5084 /* FreeBSD support. */
5085
5086 #undef TARGET_LITTLE_SYM
5087 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5088 #undef TARGET_LITTLE_NAME
5089 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5090
5091 #undef ELF_OSABI
5092 #define ELF_OSABI ELFOSABI_FREEBSD
5093
5094 #undef elf64_bed
5095 #define elf64_bed elf64_x86_64_fbsd_bed
5096
5097 #include "elf64-target.h"
5098
5099 /* Solaris 2 support. */
5100
5101 #undef TARGET_LITTLE_SYM
5102 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5103 #undef TARGET_LITTLE_NAME
5104 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5105
/* Backend data selecting Solaris-specific behavior.  */
static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
  {
    is_solaris				/* os */
  };
5110
5111 #undef elf_backend_arch_data
5112 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5113
5114 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5115 objects won't be recognized. */
5116 #undef ELF_OSABI
5117
5118 #undef elf64_bed
5119 #define elf64_bed elf64_x86_64_sol2_bed
5120
5121 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5122 boundary. */
5123 #undef elf_backend_static_tls_alignment
5124 #define elf_backend_static_tls_alignment 16
5125
5126 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5127
5128 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5129 File, p.63. */
5130 #undef elf_backend_want_plt_sym
5131 #define elf_backend_want_plt_sym 1
5132
5133 #undef elf_backend_strtab_flags
5134 #define elf_backend_strtab_flags SHF_STRINGS
5135
static bfd_boolean
elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
						  bfd *obfd ATTRIBUTE_UNUSED,
						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
{
  /* Hook for copying sh_info/sh_link of Solaris-specific section
     types from input to output.  Currently a stub: returning FALSE
     tells the caller the fields were not handled.
     PR 19938: FIXME: Need to add code for setting the sh_info
     and sh_link fields of Solaris specific section types.  */
  return FALSE;
}
5146
5147 #undef elf_backend_copy_special_section_fields
5148 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5149
5150 #include "elf64-target.h"
5151
5152 /* Native Client support. */
5153
static bfd_boolean
elf64_x86_64_nacl_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for a NaCl x86-64 ELF64 file.
     Always succeeds.  */
  bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
  return TRUE;
}
5161
#undef	TARGET_LITTLE_SYM
#define	TARGET_LITTLE_SYM		x86_64_elf64_nacl_vec
#undef	TARGET_LITTLE_NAME
#define	TARGET_LITTLE_NAME		"elf32-x86-64-nacl"
#undef	elf64_bed
#define	elf64_bed			elf64_x86_64_nacl_bed

/* NaCl uses a uniform 64K page size for all three page-size macros.  */
#undef	ELF_MAXPAGESIZE
#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE
#define	ELF_MAXPAGESIZE			0x10000
#define	ELF_MINPAGESIZE			0x10000
#define	ELF_COMMONPAGESIZE		0x10000

/* Restore defaults.  */
#undef	ELF_OSABI
#undef	elf_backend_static_tls_alignment
#undef	elf_backend_want_plt_sym
#define	elf_backend_want_plt_sym	0
#undef	elf_backend_strtab_flags
#undef	elf_backend_copy_special_section_fields

/* NaCl uses substantially different PLT entries for the same effects.  */

/* Each PLT entry is 64 bytes, aligned to 2**5 = 32 bytes.  */
#undef	elf_backend_plt_alignment
#define	elf_backend_plt_alignment	5
#define	NACL_PLT_ENTRY_SIZE		64
#define	NACLMASK			0xe0	/* 32-byte alignment mask.  */
5190
/* The initial (PLT0) entry of a NaCl x86-64 PLT.  The GOT-offset and
   instruction-boundary positions within this template are recorded in
   elf_x86_64_nacl_plt below; keep the two in sync.  The jump target is
   masked to a 32-byte bundle boundary and rebased via %r15, per the
   NaCl sandboxing scheme.  */
static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
  {
    0xff, 0x35, 8, 0, 0, 0,		   /* pushq GOT+8(%rip)		    */
    0x4c, 0x8b, 0x1d, 16, 0, 0, 0,	   /* mov GOT+16(%rip), %r11	    */
    0x41, 0x83, 0xe3, NACLMASK,		   /* and $-32, %r11d		    */
    0x4d, 0x01, 0xfb,			   /* add %r15, %r11		    */
    0x41, 0xff, 0xe3,			   /* jmpq *%r11		    */

    /* 9-byte nop sequence to pad out to the next 32-byte boundary.  */
    0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1)	    */

    /* 32 bytes of nop to pad out to the standard size.  */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,	   /* excess data16 prefixes	    */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	    */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,	   /* excess data16 prefixes	    */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	    */
    0x66,				   /* excess data16 prefix	    */
    0x90				   /* nop			    */
  };
5210
/* A regular (per-symbol) entry of a NaCl x86-64 PLT.  Offsets of the
   GOTPCREL operand, the relocation index and the back-jump displacement
   are recorded in elf_x86_64_nacl_plt below; keep the two in sync.  */
static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
  {
    0x4c, 0x8b, 0x1d, 0, 0, 0, 0,	   /* mov name@GOTPCREL(%rip),%r11  */
    0x41, 0x83, 0xe3, NACLMASK,		   /* and $-32, %r11d		    */
    0x4d, 0x01, 0xfb,			   /* add %r15, %r11		    */
    0x41, 0xff, 0xe3,			   /* jmpq *%r11		    */

    /* 15-byte nop sequence to pad out to the next 32-byte boundary.  */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,	   /* excess data16 prefixes	    */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	    */

    /* Lazy GOT entries point here (32-byte aligned).  */
    0x68,			  /* pushq immediate			    */
    0, 0, 0, 0,			  /* replaced with index into relocation table.  */
    0xe9,			  /* jmp relative			    */
    0, 0, 0, 0,			  /* replaced with offset to start of .plt0.  */

    /* 22 bytes of nop to pad out to the standard size.  */
    0x66, 0x66, 0x66, 0x66, 0x66, 0x66,	   /* excess data16 prefixes	    */
    0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	    */
    0x0f, 0x1f, 0x80, 0, 0, 0, 0,	   /* nopl 0x0(%rax)		    */
  };
5233
5234 /* .eh_frame covering the .plt section. */
5235
/* Canned CIE + FDE describing the NaCl .plt for unwinding.  The two
   4-byte slots marked below are patched at link time with the PC32
   offset to .plt and the .plt size.  */
static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
  {
#if (PLT_CIE_LENGTH != 20				\
     || PLT_FDE_LENGTH != 36				\
     || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8	\
     || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
# error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
#endif
    PLT_CIE_LENGTH, 0, 0, 0,		/* CIE length */
    0, 0, 0, 0,				/* CIE ID */
    1,					/* CIE version */
    'z', 'R', 0,			/* Augmentation string */
    1,					/* Code alignment factor */
    0x78,				/* Data alignment factor */
    16,					/* Return address column */
    1,					/* Augmentation size */
    DW_EH_PE_pcrel | DW_EH_PE_sdata4,	/* FDE encoding */
    DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
    DW_CFA_offset + 16, 1,		/* DW_CFA_offset: r16 (rip) at cfa-8 */
    DW_CFA_nop, DW_CFA_nop,

    PLT_FDE_LENGTH, 0, 0, 0,		/* FDE length */
    PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
    0, 0, 0, 0,				/* R_X86_64_PC32 .plt goes here */
    0, 0, 0, 0,				/* .plt size goes here */
    0,					/* Augmentation size */
    DW_CFA_def_cfa_offset, 16,		/* DW_CFA_def_cfa_offset: 16 */
    DW_CFA_advance_loc + 6,		/* DW_CFA_advance_loc: 6 to __PLT__+6 */
    DW_CFA_def_cfa_offset, 24,		/* DW_CFA_def_cfa_offset: 24 */
    DW_CFA_advance_loc + 58,		/* DW_CFA_advance_loc: 58 to __PLT__+64 */
    DW_CFA_def_cfa_expression,		/* DW_CFA_def_cfa_expression */
    13,					/* Block length */
    /* The expression below evaluates to
       CFA = %rsp + 8 + 8 * ((%rip & 63) >= 37),
       i.e. one extra stack slot is assumed once the PC passes byte 37 of
       a 64-byte PLT entry (presumably after the pushq in the lazy stub —
       verify against the entry layout above).  */
    DW_OP_breg7, 8,			/* DW_OP_breg7 (rsp): 8 */
    DW_OP_breg16, 0,			/* DW_OP_breg16 (rip): 0 */
    DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
    DW_OP_lit3, DW_OP_shl, DW_OP_plus,
    DW_CFA_nop, DW_CFA_nop
  };
5274
/* Lazy-PLT layout descriptor for NaCl x86-64.  All offsets are byte
   positions inside the entry templates above (plt0 and per-symbol
   entries are both NACL_PLT_ENTRY_SIZE bytes); the PIC entries reuse
   the non-PIC templates.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
  {
    elf_x86_64_nacl_plt0_entry,		     /* plt0_entry */
    NACL_PLT_ENTRY_SIZE,		     /* plt0_entry_size */
    elf_x86_64_nacl_plt_entry,		     /* plt_entry */
    NACL_PLT_ENTRY_SIZE,		     /* plt_entry_size */
    elf_x86_64_nacl_plt0_entry,		     /* plt_tlsdesc_entry */
    NACL_PLT_ENTRY_SIZE,		     /* plt_tlsdesc_entry_size */
    2,					     /* plt_tlsdesc_got1_offset */
    9,					     /* plt_tlsdesc_got2_offset */
    6,					     /* plt_tlsdesc_got1_insn_end */
    13,					     /* plt_tlsdesc_got2_insn_end */
    2,					     /* plt0_got1_offset */
    9,					     /* plt0_got2_offset */
    13,					     /* plt0_got2_insn_end */
    3,					     /* plt_got_offset */
    33,					     /* plt_reloc_offset */
    38,					     /* plt_plt_offset */
    7,					     /* plt_got_insn_size */
    42,					     /* plt_plt_insn_end */
    32,					     /* plt_lazy_offset */
    elf_x86_64_nacl_plt0_entry,		     /* pic_plt0_entry */
    elf_x86_64_nacl_plt_entry,		     /* pic_plt_entry */
    elf_x86_64_nacl_eh_frame_plt,	     /* eh_frame_plt */
    sizeof (elf_x86_64_nacl_eh_frame_plt)    /* eh_frame_plt_size */
  };
5301
/* Per-OS backend data marking this vector as NaCl.  */
static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
  {
    is_nacl				     /* os */
  };

#undef	elf_backend_arch_data
#define elf_backend_arch_data	&elf_x86_64_nacl_arch_bed

/* NaCl-specific backend hooks: object recognition plus segment/program
   header massaging and final-write processing from elf-nacl.c.  */
#undef	elf_backend_object_p
#define elf_backend_object_p			elf64_x86_64_nacl_elf_object_p
#undef	elf_backend_modify_segment_map
#define	elf_backend_modify_segment_map		nacl_modify_segment_map
#undef	elf_backend_modify_program_headers
#define	elf_backend_modify_program_headers	nacl_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	nacl_final_write_processing

#include "elf64-target.h"
5320
5321 /* Native Client x32 support. */
5322
5323 static bfd_boolean
5324 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5325 {
5326 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5327 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5328 return TRUE;
5329 }
5330
#undef	TARGET_LITTLE_SYM
#define	TARGET_LITTLE_SYM		x86_64_elf32_nacl_vec
#undef	TARGET_LITTLE_NAME
#define	TARGET_LITTLE_NAME		"elf32-x86-64-nacl"
#undef	elf32_bed
#define	elf32_bed			elf32_x86_64_nacl_bed

/* Reuse the 64-bit reloc and synthetic-symbol handlers for the ELF32
   container; x86-64 relocation numbering is identical in both widths.  */
#define bfd_elf32_bfd_reloc_type_lookup	\
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_nacl_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

/* NOTE(review): elf32_bed is redefined to elf32_x86_64_bed before the
   include below, overriding the elf32_x86_64_nacl_bed definition above —
   confirm this ordering against upstream binutils.  */
#undef	elf32_bed
#define	elf32_bed			elf32_x86_64_bed

#include "elf32-target.h"

/* Restore defaults.  */
#undef	elf_backend_object_p
#define elf_backend_object_p		elf64_x86_64_elf_object_p
#undef	elf_backend_bfd_from_remote_memory
#undef	elf_backend_size_info
#undef	elf_backend_modify_segment_map
#undef	elf_backend_modify_program_headers
#undef	elf_backend_final_write_processing
5370
5371 /* Intel L1OM support. */
5372
5373 static bfd_boolean
5374 elf64_l1om_elf_object_p (bfd *abfd)
5375 {
5376 /* Set the right machine number for an L1OM elf64 file. */
5377 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5378 return TRUE;
5379 }
5380
#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    l1om_elf64_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-l1om"
#undef	ELF_ARCH
#define ELF_ARCH			    bfd_arch_l1om

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_L1OM

#undef	ELF_OSABI

#undef  elf64_bed
#define elf64_bed			    elf64_l1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p		    elf64_l1om_elf_object_p

/* Restore defaults.  */
#undef	ELF_MAXPAGESIZE
#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE
/* With -z separate-code the maximum page size shrinks to 4K so code and
   data never share a page.  */
#if DEFAULT_LD_Z_SEPARATE_CODE
# define ELF_MAXPAGESIZE		0x1000
#else
# define ELF_MAXPAGESIZE		0x200000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment	4
#undef	elf_backend_arch_data
#define	elf_backend_arch_data		&elf_x86_64_arch_bed

#include "elf64-target.h"

/* FreeBSD L1OM support.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    l1om_elf64_fbsd_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-l1om-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef  elf64_bed
#define elf64_bed			    elf64_l1om_fbsd_bed

#include "elf64-target.h"
5431
5432 /* Intel K1OM support. */
5433
5434 static bfd_boolean
5435 elf64_k1om_elf_object_p (bfd *abfd)
5436 {
5437 /* Set the right machine number for an K1OM elf64 file. */
5438 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5439 return TRUE;
5440 }
5441
#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    k1om_elf64_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-k1om"
#undef	ELF_ARCH
#define ELF_ARCH			    bfd_arch_k1om

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_K1OM

#undef	ELF_OSABI

#undef  elf64_bed
#define elf64_bed			    elf64_k1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p		    elf64_k1om_elf_object_p

/* Drop the Solaris TLS alignment and plt-symbol requirements for this
   vector.  */
#undef  elf_backend_static_tls_alignment

#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	    0

#include "elf64-target.h"

/* FreeBSD K1OM support.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    k1om_elf64_fbsd_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-k1om-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef  elf64_bed
#define elf64_bed			    elf64_k1om_fbsd_bed

#include "elf64-target.h"
5481
/* 32bit x86-64 support.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf32_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf32-x86-64"
/* NOTE(review): elf32_bed is left undefined here, so the include below
   falls back to its default bed name — confirm against upstream, which
   defines elf32_bed to elf32_x86_64_bed in this section.  */
#undef	elf32_bed

#undef ELF_ARCH
#define ELF_ARCH			    bfd_arch_i386

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_X86_64

#undef	ELF_OSABI

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#include "elf32-target.h"
5511