/* X86-64 specific support for ELF
2 Copyright (C) 2000-2025 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh (at) suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25 #include "sframe.h"
26
27 #include "opcode/i386.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
38 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
39 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
40 since they are the same. */
41
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 0, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 8, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 4, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 4, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 4, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 4, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 8, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
67 false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 8, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
70 false),
71 HOWTO(R_X86_64_RELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
73 false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 4, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
76 true),
77 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 4, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 2, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 2, 16, true, 0, complain_overflow_bitfield,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 1, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 1, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 8, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
93 false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
96 false),
97 HOWTO(R_X86_64_TPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
99 false),
100 HOWTO(R_X86_64_TLSGD, 0, 4, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
102 true),
103 HOWTO(R_X86_64_TLSLD, 0, 4, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
105 true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
108 false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
111 true),
112 HOWTO(R_X86_64_TPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
114 false),
115 HOWTO(R_X86_64_PC64, 0, 8, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
120 false),
121 HOWTO(R_X86_64_GOTPC32, 0, 4, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
123 true),
124 HOWTO(R_X86_64_GOT64, 0, 8, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 8, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
129 true),
130 HOWTO(R_X86_64_GOTPC64, 0, 8, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
132 true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 8, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
135 false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 8, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
138 false),
139 HOWTO(R_X86_64_SIZE32, 0, 4, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 8, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, false, 0,
149 complain_overflow_dont, bfd_elf_generic_reloc,
150 "R_X86_64_TLSDESC_CALL",
151 false, 0, 0, false),
152 HOWTO(R_X86_64_TLSDESC, 0, 8, 64, false, 0,
153 complain_overflow_dont, bfd_elf_generic_reloc,
154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
155 HOWTO(R_X86_64_IRELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
157 false),
158 HOWTO(R_X86_64_RELATIVE64, 0, 8, 64, false, 0, complain_overflow_dont,
159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
160 false),
161 HOWTO(R_X86_64_PC32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
163 true),
164 HOWTO(R_X86_64_PLT32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
166 true),
167 HOWTO(R_X86_64_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
169 true),
170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
172 true),
173 HOWTO(R_X86_64_CODE_4_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTPCRELX", false, 0, 0xffffffff,
175 true),
176 HOWTO(R_X86_64_CODE_4_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTTPOFF", false, 0, 0xffffffff,
178 true),
179 HOWTO(R_X86_64_CODE_4_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
180 complain_overflow_bitfield, bfd_elf_generic_reloc,
181 "R_X86_64_CODE_4_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
182 HOWTO(R_X86_64_CODE_5_GOTPCRELX, 0, 4, 32, true, 0,
183 complain_overflow_signed, bfd_elf_generic_reloc,
184 "R_X86_64_CODE_5_GOTPCRELX", false, 0, 0xffffffff, true),
185 HOWTO(R_X86_64_CODE_5_GOTTPOFF, 0, 4, 32, true, 0,
186 complain_overflow_signed, bfd_elf_generic_reloc,
187 "R_X86_64_CODE_5_GOTTPOFF", false, 0, 0xffffffff, true),
188 HOWTO(R_X86_64_CODE_5_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
189 complain_overflow_bitfield, bfd_elf_generic_reloc,
190 "R_X86_64_CODE_5_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
191 HOWTO(R_X86_64_CODE_6_GOTPCRELX, 0, 4, 32, true, 0,
192 complain_overflow_signed, bfd_elf_generic_reloc,
193 "R_X86_64_CODE_6_GOTPCRELX", false, 0, 0xffffffff, true),
194 HOWTO(R_X86_64_CODE_6_GOTTPOFF, 0, 4, 32, true, 0,
195 complain_overflow_signed, bfd_elf_generic_reloc,
196 "R_X86_64_CODE_6_GOTTPOFF", false, 0, 0xffffffff, true),
197 HOWTO(R_X86_64_CODE_6_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
198 complain_overflow_bitfield, bfd_elf_generic_reloc,
199 "R_X86_64_CODE_6_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
200
201 /* We have a gap in the reloc numbers here.
202 R_X86_64_standard counts the number up to this point, and
203 R_X86_64_vt_offset is the value to subtract from a reloc type of
204 R_X86_64_GNU_VT* to form an index into this table. */
205 #define R_X86_64_standard (R_X86_64_CODE_6_GOTPC32_TLSDESC + 1)
206 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
207
208 /* GNU extension to record C++ vtable hierarchy. */
209 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 8, 0, false, 0, complain_overflow_dont,
210 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
211
212 /* GNU extension to record C++ vtable member usage. */
213 HOWTO (R_X86_64_GNU_VTENTRY, 0, 8, 0, false, 0, complain_overflow_dont,
214 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
215 false),
216
217 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
218 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_bitfield,
219 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
220 false)
221 };
222
223 /* Map BFD relocs to the x86_64 elf relocs. */
struct elf_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;			/* Corresponding R_X86_64_* number.  */
};
229
/* Table mapping each supported BFD reloc code to its x86-64 ELF
   relocation number; searched linearly by
   elf_x86_64_reloc_type_lookup.  */
static const struct elf_reloc_map x86_64_reloc_map[] =
{
  { BFD_RELOC_NONE,		R_X86_64_NONE, },
  { BFD_RELOC_64,		R_X86_64_64,   },
  { BFD_RELOC_32_PCREL,		R_X86_64_PC32, },
  { BFD_RELOC_X86_64_GOT32,	R_X86_64_GOT32,},
  { BFD_RELOC_X86_64_PLT32,	R_X86_64_PLT32,},
  { BFD_RELOC_X86_64_COPY,	R_X86_64_COPY, },
  { BFD_RELOC_X86_64_GLOB_DAT,	R_X86_64_GLOB_DAT, },
  { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
  { BFD_RELOC_X86_64_RELATIVE,	R_X86_64_RELATIVE, },
  { BFD_RELOC_X86_64_GOTPCREL,	R_X86_64_GOTPCREL, },
  { BFD_RELOC_32,		R_X86_64_32, },
  { BFD_RELOC_X86_64_32S,	R_X86_64_32S, },
  { BFD_RELOC_16,		R_X86_64_16, },
  { BFD_RELOC_16_PCREL,		R_X86_64_PC16, },
  { BFD_RELOC_8,		R_X86_64_8, },
  { BFD_RELOC_8_PCREL,		R_X86_64_PC8, },
  { BFD_RELOC_X86_64_DTPMOD64,	R_X86_64_DTPMOD64, },
  { BFD_RELOC_X86_64_DTPOFF64,	R_X86_64_DTPOFF64, },
  { BFD_RELOC_X86_64_TPOFF64,	R_X86_64_TPOFF64, },
  { BFD_RELOC_X86_64_TLSGD,	R_X86_64_TLSGD, },
  { BFD_RELOC_X86_64_TLSLD,	R_X86_64_TLSLD, },
  { BFD_RELOC_X86_64_DTPOFF32,	R_X86_64_DTPOFF32, },
  { BFD_RELOC_X86_64_GOTTPOFF,	R_X86_64_GOTTPOFF, },
  { BFD_RELOC_X86_64_TPOFF32,	R_X86_64_TPOFF32, },
  { BFD_RELOC_64_PCREL,		R_X86_64_PC64, },
  { BFD_RELOC_X86_64_GOTOFF64,	R_X86_64_GOTOFF64, },
  { BFD_RELOC_X86_64_GOTPC32,	R_X86_64_GOTPC32, },
  { BFD_RELOC_X86_64_GOT64,	R_X86_64_GOT64, },
  { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
  { BFD_RELOC_X86_64_GOTPC64,	R_X86_64_GOTPC64, },
  { BFD_RELOC_X86_64_GOTPLT64,	R_X86_64_GOTPLT64, },
  { BFD_RELOC_X86_64_PLTOFF64,	R_X86_64_PLTOFF64, },
  { BFD_RELOC_SIZE32,		R_X86_64_SIZE32, },
  { BFD_RELOC_SIZE64,		R_X86_64_SIZE64, },
  { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
  { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
  { BFD_RELOC_X86_64_TLSDESC,	R_X86_64_TLSDESC, },
  { BFD_RELOC_X86_64_IRELATIVE,	R_X86_64_IRELATIVE, },
  { BFD_RELOC_X86_64_PC32_BND,	R_X86_64_PC32_BND, },
  { BFD_RELOC_X86_64_PLT32_BND,	R_X86_64_PLT32_BND, },
  { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
  { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
  { BFD_RELOC_X86_64_CODE_4_GOTPCRELX, R_X86_64_CODE_4_GOTPCRELX, },
  { BFD_RELOC_X86_64_CODE_4_GOTTPOFF, R_X86_64_CODE_4_GOTTPOFF, },
  { BFD_RELOC_X86_64_CODE_4_GOTPC32_TLSDESC, R_X86_64_CODE_4_GOTPC32_TLSDESC, },
  { BFD_RELOC_X86_64_CODE_5_GOTPCRELX, R_X86_64_CODE_5_GOTPCRELX, },
  { BFD_RELOC_X86_64_CODE_5_GOTTPOFF, R_X86_64_CODE_5_GOTTPOFF, },
  { BFD_RELOC_X86_64_CODE_5_GOTPC32_TLSDESC, R_X86_64_CODE_5_GOTPC32_TLSDESC, },
  { BFD_RELOC_X86_64_CODE_6_GOTPCRELX, R_X86_64_CODE_6_GOTPCRELX, },
  { BFD_RELOC_X86_64_CODE_6_GOTTPOFF, R_X86_64_CODE_6_GOTTPOFF, },
  { BFD_RELOC_X86_64_CODE_6_GOTPC32_TLSDESC, R_X86_64_CODE_6_GOTPC32_TLSDESC, },
  { BFD_RELOC_VTABLE_INHERIT,	R_X86_64_GNU_VTINHERIT, },
  { BFD_RELOC_VTABLE_ENTRY,	R_X86_64_GNU_VTENTRY, },
};
286
287 static reloc_howto_type *
288 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
289 {
290 unsigned i;
291
292 if (r_type == (unsigned int) R_X86_64_32)
293 {
294 if (ABI_64_P (abfd))
295 i = r_type;
296 else
297 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
298 }
299 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
300 || r_type >= (unsigned int) R_X86_64_max)
301 {
302 if (r_type >= (unsigned int) R_X86_64_standard)
303 {
304 /* xgettext:c-format */
305 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
306 abfd, r_type);
307 bfd_set_error (bfd_error_bad_value);
308 return NULL;
309 }
310 i = r_type;
311 }
312 else
313 i = r_type - (unsigned int) R_X86_64_vt_offset;
314 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
315 return &x86_64_elf_howto_table[i];
316 }
317
318 /* Given a BFD reloc type, return a HOWTO structure. */
319 static reloc_howto_type *
320 elf_x86_64_reloc_type_lookup (bfd *abfd,
321 bfd_reloc_code_real_type code)
322 {
323 unsigned int i;
324
325 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
326 i++)
327 {
328 if (x86_64_reloc_map[i].bfd_reloc_val == code)
329 return elf_x86_64_rtype_to_howto (abfd,
330 x86_64_reloc_map[i].elf_reloc_val);
331 }
332 return NULL;
333 }
334
335 static reloc_howto_type *
336 elf_x86_64_reloc_name_lookup (bfd *abfd,
337 const char *r_name)
338 {
339 unsigned int i;
340
341 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
342 {
343 /* Get x32 R_X86_64_32. */
344 reloc_howto_type *reloc
345 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
346 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
347 return reloc;
348 }
349
350 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
351 if (x86_64_elf_howto_table[i].name != NULL
352 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
353 return &x86_64_elf_howto_table[i];
354
355 return NULL;
356 }
357
358 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
359
360 static bool
361 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
362 Elf_Internal_Rela *dst)
363 {
364 unsigned r_type;
365
366 r_type = ELF32_R_TYPE (dst->r_info);
367 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
368 if (cache_ptr->howto == NULL)
369 return false;
370 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
371 return true;
372 }
373
374 /* Support for core dump NOTE sections. */
/* Parse a NT_PRSTATUS core note, distinguishing the x32 and LP64
   layouts of struct elf_prstatus by the note size, and create a
   ".reg/<pid>" pseudosection covering the general registers.  */
static bool
elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return false;

    case 296:		/* sizeof (struct elf_prstatus) on Linux/x32 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg */
      offset = 72;
      size = 216;

      break;

    case 336:		/* sizeof (struct elf_prstatus) on Linux/x86_64 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
	= bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
	= bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 216;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
420
/* Parse a NT_PRPSINFO core note; the note size selects between the
   two 32-bit layouts (ugid16/ugid32) and the 64-bit layout.  Extracts
   pid, program name and command line into the core tdata.  */
static bool
elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
    default:
      return false;

    case 124:
      /* sizeof (struct elf_external_linux_prpsinfo32_ugid16).  */
      elf_tdata (abfd)->core->pid
	= bfd_get_32 (abfd, note->descdata + 12);
      elf_tdata (abfd)->core->program
	= _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
      elf_tdata (abfd)->core->command
	= _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
      break;

    case 128:
      /* sizeof (struct elf_external_linux_prpsinfo32_ugid32).  */
      elf_tdata (abfd)->core->pid
	= bfd_get_32 (abfd, note->descdata + 12);
      elf_tdata (abfd)->core->program
	= _bfd_elfcore_strndup (abfd, note->descdata + 32, 16);
      elf_tdata (abfd)->core->command
	= _bfd_elfcore_strndup (abfd, note->descdata + 48, 80);
      break;

    case 136:
      /* sizeof (struct elf_prpsinfo) on Linux/x86_64.  */
      elf_tdata (abfd)->core->pid
	= bfd_get_32 (abfd, note->descdata + 24);
      elf_tdata (abfd)->core->program
	= _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
      elf_tdata (abfd)->core->command
	= _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
    }

  /* Note that for some reason, a spurious space is tacked
     onto the end of the args in some (at least one anyway)
     implementations, so strip it off if it exists.  */

  {
    /* NOTE(review): _bfd_elfcore_strndup is assumed to have returned a
       valid string here; strlen would fault on NULL — confirm the
       helper's failure behavior against other users.  */
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return true;
}
473
474 #ifdef CORE_HEADER
475 # if GCC_VERSION >= 8000
476 # pragma GCC diagnostic push
477 # pragma GCC diagnostic ignored "-Wstringop-truncation"
478 # endif
/* Write a Linux core note of NOTE_TYPE into BUF, choosing the 32-bit,
   x32 or 64-bit structure layout from the output BFD's ELF class and
   machine.  The variable arguments depend on NOTE_TYPE:
     NT_PRPSINFO: (const char *fname, const char *psargs)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   Returns the (possibly reallocated) note buffer, or NULL for an
   unsupported NOTE_TYPE.  */
static char *
elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
			    int note_type, ...)
{
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  va_list ap;
  const char *fname, *psargs;
  long pid;
  int cursig;
  const void *gregs;

  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      va_start (ap, note_type);
      fname = va_arg (ap, const char *);
      psargs = va_arg (ap, const char *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  prpsinfo32_t data;
	  memset (&data, 0, sizeof (data));
	  /* Deliberately unterminated fixed-width copies; the
	     -Wstringop-truncation pragma around this function
	     suppresses the resulting GCC warning.  */
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      else
	{
	  prpsinfo64_t data;
	  memset (&data, 0, sizeof (data));
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      /* NOTREACHED */

    case NT_PRSTATUS:
      va_start (ap, note_type);
      pid = va_arg (ap, long);
      cursig = va_arg (ap, int);
      gregs = va_arg (ap, const void *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  /* ELFCLASS32 + EM_X86_64 is the x32 ABI; plain ELFCLASS32
	     otherwise is i386.  */
	  if (bed->elf_machine_code == EM_X86_64)
	    {
	      prstatusx32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	  else
	    {
	      prstatus32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	}
      else
	{
	  prstatus64_t prstat;
	  memset (&prstat, 0, sizeof (prstat));
	  prstat.pr_pid = pid;
	  prstat.pr_cursig = cursig;
	  memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &prstat, sizeof (prstat));
	}
    }
  /* NOTREACHED */
}
564 # if GCC_VERSION >= 8000
565 # pragma GCC diagnostic pop
566 # endif
567 #endif
568
569 /* Functions for the x86-64 ELF linker. */
571
572 /* The size in bytes of an entry in the global offset table. */
573
574 #define GOT_ENTRY_SIZE 8
575
576 /* The size in bytes of an entry in the lazy procedure linkage table. */
577
578 #define LAZY_PLT_ENTRY_SIZE 16
579
580 /* The size in bytes of an entry in the non-lazy procedure linkage
581 table. */
582
583 #define NON_LAZY_PLT_ENTRY_SIZE 8
584
585 /* The first entry in a lazy procedure linkage table looks like this.
586 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
587 works. */
588
/* PLT0: pushes the link map pointer (GOT+8) and jumps to the dynamic
   resolver whose address the loader stores at GOT+16.  */
static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)  */
  0xff, 0x25, 16, 0, 0, 0,	/* jmpq *GOT+16(%rip) */
  0x0f, 0x1f, 0x40, 0x00	/* nopl 0(%rax)       */
};
595
596 /* Subsequent entries in a lazy procedure linkage table look like this. */
597
/* Regular lazy PLT entry: indirect jump through the symbol's GOT
   slot, falling through on first use to push the reloc index and
   enter PLT0.  Placeholder displacements are patched at link time.  */
static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x68,		/* pushq immediate */
  0, 0, 0, 0,	/* replaced with index into relocation table.  */
  0xe9,		/* jmp relative */
  0, 0, 0, 0	/* replaced with offset to start of .plt0.  */
};
607
/* The first entry in a lazy procedure linkage table with BND prefix
   looks like this.  */
610
/* BND PLT0: like the regular PLT0 but the resolver jump carries an
   0xf2 (bnd) prefix so MPX bound registers survive the call.  */
static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	  /* pushq GOT+8(%rip)	    */
  0xf2, 0xff, 0x25, 16, 0, 0, 0,  /* bnd jmpq *GOT+16(%rip) */
  0x0f, 0x1f, 0			  /* nopl (%rax)	    */
};
617
/* Subsequent entries for branches with BND prefix in a lazy procedure
   linkage table look like this.  */
620
/* Lazy BND PLT entry: push reloc index first, then bnd-prefixed jump;
   displacements are patched at link time.  */
static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative	      */
  0x0f, 0x1f, 0x44, 0, 0	/* nopl 0(%rax,%rax,1)	      */
};
627
/* The first entry in the IBT-enabled lazy procedure linkage table is
   the same as the lazy PLT with BND prefix so that bound registers are
   preserved when control is passed to the dynamic linker.  Subsequent
   entries for an IBT-enabled lazy procedure linkage table look like
   this.  */
633
/* IBT+BND lazy PLT entry: starts with endbr64 so it is a valid
   indirect-branch target, then the BND-style push/jump sequence.  */
static const bfd_byte elf_x86_64_lazy_bnd_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		*/
  0x68, 0, 0, 0, 0,		/* pushq immediate	*/
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative	*/
  0x90				/* nop			*/
};
641
642 /* The first entry in the IBT-enabled lazy procedure linkage table
643 is the same as the normal lazy PLT. Subsequent entries for an
644 IBT-enabled lazy procedure linkage table look like this. */
645
/* IBT lazy PLT entry: endbr64 landing pad, push of the reloc index,
   and a plain relative jump back to PLT0.  */
static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		*/
  0x68, 0, 0, 0, 0,		/* pushq immediate	*/
  0xe9, 0, 0, 0, 0,		/* jmpq relative	*/
  0x66, 0x90			/* xchg %ax,%ax		*/
};
653
/* Entries in the non-lazy procedure linkage table look like this.  */
655
/* Non-lazy PLT entry: a single indirect jump through the symbol's GOT
   slot; no lazy-resolution push/jump tail is needed.  */
static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x66, 0x90	/* xchg %ax,%ax */
};
662
/* Entries for branches with BND prefix in the non-lazy procedure
   linkage table look like this.  */
665
/* Non-lazy BND PLT entry: bnd-prefixed indirect jump through the GOT
   slot.  */
static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xf2, 0xff, 0x25,	/* bnd jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,		/* replaced with offset to this symbol in .got.  */
  0x90			/* nop */
};
672
/* Entries for IBT-enabled branches with BND prefix in the non-lazy
   procedure linkage table look like this.  They have the same size as
   the lazy PLT entry.  */
676
/* Non-lazy IBT+BND PLT entry: endbr64 landing pad followed by a
   bnd-prefixed indirect jump through the GOT slot.  */
static const bfd_byte elf_x86_64_non_lazy_bnd_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0xf2, 0xff, 0x25,		/* bnd jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x0f, 0x1f, 0x44, 0x00, 0x00	/* nopl 0x0(%rax,%rax,1)      */
};
684
/* Entries for IBT-enabled branches in the non-lazy procedure linkage
   table look like this.  They have the same size as the lazy PLT
   entry.  */
688
/* Non-lazy IBT PLT entry: endbr64 landing pad followed by a plain
   indirect jump through the GOT slot.  */
static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		    */
  0xff, 0x25,			/* jmpq *name@GOTPC(%rip)   */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
};
696
697 /* The TLSDESC entry in a lazy procedure linkage table. */
/* TLSDESC trampoline: endbr64, push the link map pointer (GOT+8),
   then jump through the TLSDESC resolver slot in the GOT.  */
static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		  */
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)	  */
  0xff, 0x25, 16, 0, 0, 0	/* jmpq *GOT+TDG(%rip)	  */
};
704
705 /* .eh_frame covering the lazy .plt section. */
706
/* Hand-written CIE plus one FDE; the FDE's .plt address and size
   fields are filled in at link time.  The DWARF expression computes
   the CFA from the offset within a 16-byte PLT entry (DW_OP_lit11
   is the split point for this PLT layout).  */
static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
739
740 /* .eh_frame covering the lazy BND .plt section. */
741
/* Same CIE/FDE shape as elf_x86_64_eh_frame_lazy_plt; only the
   DW_OP_lit split point differs (5 for the BND entry layout).  */
static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
774
775 /* .eh_frame covering the lazy .plt section with IBT-enabled and BND
776 prefix. */
777
/* Same CIE/FDE shape as elf_x86_64_eh_frame_lazy_plt; only the
   DW_OP_lit split point differs (10 for the IBT+BND entry layout).  */
static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_ibt_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
810
811 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
812
/* Same CIE/FDE shape as elf_x86_64_eh_frame_lazy_plt; only the
   DW_OP_lit split point differs (9 for the IBT entry layout).  */
static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
845
/* .eh_frame covering the non-lazy .plt section.  */

static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
{
#define PLT_GOT_FDE_LENGTH 20
  /* CIE: common unwind preamble for the FDE below.  */
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor (SLEB128 -8) */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  /* FDE: only padding nops follow the augmentation — the non-lazy PLT
     entries never change the CFA, so no CFA instructions are needed.  */
  PLT_GOT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* the start of non-lazy .plt goes here */
  0, 0, 0, 0,			/* non-lazy .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
872
/* .sframe FRE covering the start of the PLT0 entry: CFA tracked as
   SP + 16 from offset 0.  */
static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre1 =
{
  0, /* SFrame FRE start address.  */
  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
880
/* .sframe FRE covering the PLT0 entry from offset 6: CFA tracked as
   SP + 24.  */
static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre2 =
{
  6, /* SFrame FRE start address.  */
  {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
888
/* .sframe FRE covering the start of a lazy PLTn entry: CFA tracked as
   SP + 8 from offset 0.  */
static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre1 =
{
  0, /* SFrame FRE start address.  */
  {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
896
/* .sframe FRE covering a lazy PLTn entry from offset 11 (after its
   push): CFA tracked as SP + 16.  */
static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre2 =
{
  11, /* SFrame FRE start address.  */
  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
904
/* .sframe FRE covering an IBT lazy PLTn entry from offset 9 (the push
   happens earlier relative to a non-IBT entry because of the leading
   endbr64): CFA tracked as SP + 16.  */
static const sframe_frame_row_entry elf_x86_64_sframe_ibt_pltn_fre2 =
{
  9, /* SFrame FRE start address.  */
  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
912
/* .sframe FRE covering a second (.plt.sec style) PLTn entry: CFA
   tracked as SP + 8 throughout, no push occurs.  */
static const sframe_frame_row_entry elf_x86_64_sframe_sec_pltn_fre1 =
{
  0, /* SFrame FRE start address.  */
  {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
920
/* .sframe FRE covering a .plt.got section entry: CFA tracked as
   SP + 16 from offset 0.  */
static const sframe_frame_row_entry elf_x86_64_sframe_pltgot_fre1 =
{
  0, /* SFrame FRE start address.  */
  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes: offsets, first is CFA.  */
  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
};
928
/* SFrame helper object for non-lazy PLT.  Groups the per-region entry
   sizes, FRE counts and FRE pointers consumed by the generic x86
   SFrame generation code.  */
static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_plt =
{
  LAZY_PLT_ENTRY_SIZE,
  2, /* Number of FREs for PLT0.  */
  /* Array of SFrame FREs for plt0.  */
  { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
  LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for PLTn.  */
  /* Array of SFrame FREs for plt.  */
  { &elf_x86_64_sframe_sec_pltn_fre1 },
  0,
  0, /* There is no second PLT necessary.  */
  { }, /* No FREs for the (absent) second PLT.  */
  NON_LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for PLT GOT.  */
  /* Array of SFrame FREs for PLT GOT.  */
  { &elf_x86_64_sframe_pltgot_fre1 },
};
948
/* SFrame helper object for non-lazy IBT enabled PLT.  Same shape as
   elf_x86_64_sframe_non_lazy_plt; only the PLT GOT entry size
   differs (LAZY_PLT_ENTRY_SIZE rather than NON_LAZY_PLT_ENTRY_SIZE).  */
static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_ibt_plt =
{
  LAZY_PLT_ENTRY_SIZE,
  2, /* Number of FREs for PLT0.  */
  /* Array of SFrame FREs for plt0.  */
  { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
  LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for PLTn.  */
  /* Array of SFrame FREs for plt.  */
  { &elf_x86_64_sframe_sec_pltn_fre1 },
  0,
  0, /* There is no second PLT necessary.  */
  { }, /* No FREs for the (absent) second PLT.  */
  LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for PLT GOT.  */
  /* Array of SFrame FREs for PLT GOT.  */
  { &elf_x86_64_sframe_pltgot_fre1 },
};
968
/* SFrame helper object for lazy PLT.  Unlike the non-lazy variants,
   PLTn entries need two FREs (the stack changes mid-entry at the
   push) and a second PLT region is present.  */
static const struct elf_x86_sframe_plt elf_x86_64_sframe_plt =
{
  LAZY_PLT_ENTRY_SIZE,
  2, /* Number of FREs for PLT0.  */
  /* Array of SFrame FREs for plt0.  */
  { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
  LAZY_PLT_ENTRY_SIZE,
  2, /* Number of FREs for PLTn.  */
  /* Array of SFrame FREs for plt.  */
  { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_pltn_fre2 },
  NON_LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for second PLT.  */
  /* Array of SFrame FREs for second PLT.  */
  { &elf_x86_64_sframe_sec_pltn_fre1 },
  NON_LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for PLT GOT.  */
  /* Array of SFrame FREs for PLT GOT.  */
  { &elf_x86_64_sframe_pltgot_fre1 },
};
989
/* SFrame helper object for lazy PLT with IBT.  Uses the IBT-specific
   second FRE for PLTn (push completes at a different offset due to
   the leading endbr64) and IBT-sized second PLT/GOT entries.  */
static const struct elf_x86_sframe_plt elf_x86_64_sframe_ibt_plt =
{
  LAZY_PLT_ENTRY_SIZE,
  2, /* Number of FREs for PLT0.  */
  /* Array of SFrame FREs for plt0.  */
  { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
  LAZY_PLT_ENTRY_SIZE,
  2, /* Number of FREs for PLTn.  */
  /* Array of SFrame FREs for plt.  */
  { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_ibt_pltn_fre2 },
  LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for second PLT.  */
  /* Array of SFrame FREs for second plt.  */
  { &elf_x86_64_sframe_sec_pltn_fre1 },
  LAZY_PLT_ENTRY_SIZE,
  1, /* Number of FREs for PLT GOT.  */
  /* Array of SFrame FREs for PLT GOT.  */
  { &elf_x86_64_sframe_pltgot_fre1 },
};
1010
/* These are the standard parameters: layout of the plain lazy PLT
   (no BND prefixes, no IBT landing pads).  Offsets are byte positions
   of relocatable fields/instruction boundaries within an entry.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
{
  elf_x86_64_lazy_plt0_entry,		/* plt0_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt0_entry_size */
  elf_x86_64_lazy_plt_entry,		/* plt_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_entry_size */
  elf_x86_64_tlsdesc_plt_entry,		/* plt_tlsdesc_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_tlsdesc_entry_size */
  6,					/* plt_tlsdesc_got1_offset */
  12,					/* plt_tlsdesc_got2_offset */
  10,					/* plt_tlsdesc_got1_insn_end */
  16,					/* plt_tlsdesc_got2_insn_end */
  2,					/* plt0_got1_offset */
  8,					/* plt0_got2_offset */
  12,					/* plt0_got2_insn_end */
  2,					/* plt_got_offset */
  7,					/* plt_reloc_offset */
  12,					/* plt_plt_offset */
  6,					/* plt_got_insn_size */
  LAZY_PLT_ENTRY_SIZE,			/* plt_plt_insn_end */
  6,					/* plt_lazy_offset */
  elf_x86_64_lazy_plt0_entry,		/* pic_plt0_entry */
  elf_x86_64_lazy_plt_entry,		/* pic_plt_entry */
  elf_x86_64_eh_frame_lazy_plt,		/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
};
1038
/* Layout of the plain non-lazy PLT (no PLT0; identical PIC and
   non-PIC entries).  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
{
  elf_x86_64_non_lazy_plt_entry,	/* plt_entry */
  elf_x86_64_non_lazy_plt_entry,	/* pic_plt_entry */
  NON_LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
  2,					/* plt_got_offset */
  6,					/* plt_got_insn_size */
  elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
1049
/* Layout of the lazy PLT with BND (MPX) prefixes.  The "1+" in the
   offsets accounts for the one-byte 0xf2 BND prefix preceding each
   branch instruction.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
{
  elf_x86_64_lazy_bnd_plt0_entry,	/* plt0_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt0_entry_size */
  elf_x86_64_lazy_bnd_plt_entry,	/* plt_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_entry_size */
  elf_x86_64_tlsdesc_plt_entry,		/* plt_tlsdesc_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_tlsdesc_entry_size */
  6,					/* plt_tlsdesc_got1_offset */
  12,					/* plt_tlsdesc_got2_offset */
  10,					/* plt_tlsdesc_got1_insn_end */
  16,					/* plt_tlsdesc_got2_insn_end */
  2,					/* plt0_got1_offset */
  1+8,					/* plt0_got2_offset */
  1+12,					/* plt0_got2_insn_end */
  1+2,					/* plt_got_offset */
  1,					/* plt_reloc_offset */
  7,					/* plt_plt_offset */
  1+6,					/* plt_got_insn_size */
  11,					/* plt_plt_insn_end */
  0,					/* plt_lazy_offset */
  elf_x86_64_lazy_bnd_plt0_entry,	/* pic_plt0_entry */
  elf_x86_64_lazy_bnd_plt_entry,	/* pic_plt_entry */
  elf_x86_64_eh_frame_lazy_bnd_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
};
1076
/* Layout of the non-lazy PLT with BND (MPX) prefixes; "1+" skips the
   one-byte BND prefix.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
{
  elf_x86_64_non_lazy_bnd_plt_entry,	/* plt_entry */
  elf_x86_64_non_lazy_bnd_plt_entry,	/* pic_plt_entry */
  NON_LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
  1+2,					/* plt_got_offset */
  1+6,					/* plt_got_insn_size */
  elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
1087
/* Layout of the lazy PLT with both BND prefixes and IBT.  "4+"
   accounts for the 4-byte endbr64 landing pad, "1+" for the BND
   prefix.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_ibt_plt =
{
  elf_x86_64_lazy_bnd_plt0_entry,	/* plt0_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt0_entry_size */
  elf_x86_64_lazy_bnd_ibt_plt_entry,	/* plt_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_entry_size */
  elf_x86_64_tlsdesc_plt_entry,		/* plt_tlsdesc_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_tlsdesc_entry_size */
  6,					/* plt_tlsdesc_got1_offset */
  12,					/* plt_tlsdesc_got2_offset */
  10,					/* plt_tlsdesc_got1_insn_end */
  16,					/* plt_tlsdesc_got2_insn_end */
  2,					/* plt0_got1_offset */
  1+8,					/* plt0_got2_offset */
  1+12,					/* plt0_got2_insn_end */
  4+1+2,				/* plt_got_offset */
  4+1,					/* plt_reloc_offset */
  4+1+6,				/* plt_plt_offset */
  4+1+6,				/* plt_got_insn_size */
  4+1+5+5,				/* plt_plt_insn_end */
  0,					/* plt_lazy_offset */
  elf_x86_64_lazy_bnd_plt0_entry,	/* pic_plt0_entry */
  elf_x86_64_lazy_bnd_ibt_plt_entry,	/* pic_plt_entry */
  elf_x86_64_eh_frame_lazy_bnd_ibt_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_lazy_bnd_ibt_plt) /* eh_frame_plt_size */
};
1114
/* Layout of the lazy PLT with IBT (no BND prefixes); "4+" skips the
   4-byte endbr64 landing pad at the start of each entry.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
{
  elf_x86_64_lazy_plt0_entry,		/* plt0_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt0_entry_size */
  elf_x86_64_lazy_ibt_plt_entry,	/* plt_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_entry_size */
  elf_x86_64_tlsdesc_plt_entry,		/* plt_tlsdesc_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_tlsdesc_entry_size */
  6,					/* plt_tlsdesc_got1_offset */
  12,					/* plt_tlsdesc_got2_offset */
  10,					/* plt_tlsdesc_got1_insn_end */
  16,					/* plt_tlsdesc_got2_insn_end */
  2,					/* plt0_got1_offset */
  8,					/* plt0_got2_offset */
  12,					/* plt0_got2_insn_end */
  4+2,					/* plt_got_offset */
  4+1,					/* plt_reloc_offset */
  4+6,					/* plt_plt_offset */
  4+6,					/* plt_got_insn_size */
  4+5+5,				/* plt_plt_insn_end */
  0,					/* plt_lazy_offset */
  elf_x86_64_lazy_plt0_entry,		/* pic_plt0_entry */
  elf_x86_64_lazy_ibt_plt_entry,	/* pic_plt_entry */
  elf_x86_64_eh_frame_lazy_ibt_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
};
1141
/* Layout of the non-lazy PLT with BND and IBT; "4+1+" skips the
   endbr64 landing pad plus BND prefix.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_ibt_plt =
{
  elf_x86_64_non_lazy_bnd_ibt_plt_entry, /* plt_entry */
  elf_x86_64_non_lazy_bnd_ibt_plt_entry, /* pic_plt_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_entry_size */
  4+1+2,				/* plt_got_offset */
  4+1+6,				/* plt_got_insn_size */
  elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
1152
/* Layout of the non-lazy PLT with IBT; "4+" skips the endbr64
   landing pad.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
{
  elf_x86_64_non_lazy_ibt_plt_entry,	/* plt_entry */
  elf_x86_64_non_lazy_ibt_plt_entry,	/* pic_plt_entry */
  LAZY_PLT_ENTRY_SIZE,			/* plt_entry_size */
  4+2,					/* plt_got_offset */
  4+6,					/* plt_got_insn_size */
  elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
  sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
1163
1164 static bool
1165 elf64_x86_64_elf_object_p (bfd *abfd)
1166 {
1167 /* Set the right machine number for an x86-64 elf64 file. */
1168 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1169 return true;
1170 }
1171
1172 static bool
1173 elf32_x86_64_elf_object_p (bfd *abfd)
1174 {
1175 /* Set the right machine number for an x86-64 elf32 file. */
1176 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1177 return true;
1178 }
1179
/* Check whether the TLS access code sequence at REL supports a
   transition from R_TYPE.  Returns elf_x86_tls_error_none when the
   instruction bytes around the relocation match one of the recognized
   sequences, otherwise an error code describing why the transition is
   not possible.  (Despite the historical name, this returns an enum,
   not a bool.)  */

static enum elf_x86_tls_error_type
elf_x86_64_check_tls_transition (bfd *abfd,
				 struct bfd_link_info *info,
				 asection *sec,
				 bfd_byte *contents,
				 Elf_Internal_Shdr *symtab_hdr,
				 struct elf_link_hash_entry **sym_hashes,
				 unsigned int r_type,
				 const Elf_Internal_Rela *rel,
				 const Elf_Internal_Rela *relend)
{
  unsigned int val;
  unsigned long r_symndx;
  bool largepic = false;
  struct elf_link_hash_entry *h;
  bfd_vma offset;
  struct elf_x86_link_hash_table *htab;
  bfd_byte *call;
  bool indirect_call;

  htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
  offset = rel->r_offset;
  switch (r_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_TLSLD:
      /* Both models need a following relocation (against
	 __tls_get_addr, verified below).  */
      if ((rel + 1) >= relend)
	return elf_x86_tls_error_yes;

      if (r_type == R_X86_64_TLSGD)
	{
	  /* Check transition from GD access model.  For 64bit, only
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
	     which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For 32bit, only
		leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
	     which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic,
	     we also support:
		leaq foo@tlsgd(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $r15, %rax
		call *%rax
	     or
		leaq foo@tlsgd(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $rbx, %rax
		call *%rax  */

	  static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };

	  if ((offset + 12) > sec->size)
	    return elf_x86_tls_error_yes;

	  /* The call sequence starts 4 bytes after the relocation
	     offset (past the leaq's 32-bit displacement).  */
	  call = contents + offset + 4;
	  if (call[0] != 0x66
	      || !((call[1] == 0x48
		    && call[2] == 0xff
		    && call[3] == 0x15)
		   || (call[1] == 0x48
		       && call[2] == 0x67
		       && call[3] == 0xe8)
		   || (call[1] == 0x66
		       && call[2] == 0x48
		       && call[3] == 0xe8)))
	    {
	      /* Not one of the small-model sequences; try the
		 largepic movabsq/addq/call *%rax pattern.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || offset < 3
		  || memcmp (call - 7, leaq + 1, 3) != 0
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return elf_x86_tls_error_yes;
	      largepic = true;
	    }
	  else if (ABI_64_P (abfd))
	    {
	      /* 64bit: the leaq must carry the 0x66 data16 prefix.  */
	      if (offset < 4
		  || memcmp (contents + offset - 4, leaq, 4) != 0)
		return elf_x86_tls_error_yes;
	    }
	  else
	    {
	      /* 32bit (x32): no data16 prefix on the leaq.  */
	      if (offset < 3
		  || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
		return elf_x86_tls_error_yes;
	    }
	  indirect_call = call[2] == 0xff;
	}
      else
	{
	  /* Check transition from LD access model.  Only
		leaq foo@tlsld(%rip), %rdi;
		call __tls_get_addr@PLT
	     or
		leaq foo@tlsld(%rip), %rdi;
		call *__tls_get_addr@GOTPCREL(%rip)
	     which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic
	     we also support:
		leaq foo@tlsld(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $r15, %rax
		call *%rax
	     or
		leaq foo@tlsld(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq $rbx, %rax
		call *%rax  */

	  static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };

	  if (offset < 3 || (offset + 9) > sec->size)
	    return elf_x86_tls_error_yes;

	  if (memcmp (contents + offset - 3, lea, 3) != 0)
	    return elf_x86_tls_error_yes;

	  /* The call sequence starts 4 bytes after the relocation
	     offset (past the leaq's 32-bit displacement).  */
	  call = contents + offset + 4;
	  if (!(call[0] == 0xe8
		|| (call[0] == 0xff && call[1] == 0x15)
		|| (call[0] == 0x67 && call[1] == 0xe8)))
	    {
	      /* Not a direct/indirect call; try the largepic
		 movabsq/addq/call *%rax pattern.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return elf_x86_tls_error_yes;
	      largepic = true;
	    }
	  indirect_call = call[0] == 0xff;
	}

      /* The next relocation must be against a symbol the hash table
	 has flagged as __tls_get_addr, and of a relocation type that
	 matches the call form found above.  */
      r_symndx = htab->r_sym (rel[1].r_info);
      if (r_symndx < symtab_hdr->sh_info)
	return elf_x86_tls_error_yes;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      if (h == NULL
	  || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
	return elf_x86_tls_error_yes;
      else
	{
	  r_type = (ELF32_R_TYPE (rel[1].r_info)
		    & ~R_X86_64_converted_reloc_bit);
	  if (largepic)
	    return (r_type == R_X86_64_PLTOFF64
		    ? elf_x86_tls_error_none
		    : elf_x86_tls_error_yes);
	  else if (indirect_call)
	    return ((r_type == R_X86_64_GOTPCRELX
		     || r_type == R_X86_64_GOTPCREL)
		    ? elf_x86_tls_error_none
		    : elf_x86_tls_error_yes);
	  else
	    return ((r_type == R_X86_64_PC32
		     || r_type == R_X86_64_PLT32)
		    ? elf_x86_tls_error_none
		    : elf_x86_tls_error_yes);
	}

    case R_X86_64_CODE_4_GOTTPOFF:
      /* Check transition from IE access model:
		mov foo@gottpoff(%rip), %reg
		add foo@gottpoff(%rip), %reg
	 where reg is one of r16 to r31.
	 For x32 also:
		movrs foo@gottpoff(%rip), %reg
	 where no REX prefix is present.  */

      if (offset < 4
	  || (offset + 4) > sec->size)
	return elf_x86_tls_error_yes;

      /* 0x0f 0x38 0x8b is the un-prefixed MOVRS opcode (x32 only).  */
      if (!ABI_64_P (abfd)
	  && contents[offset - 4] == 0x0f
	  && contents[offset - 3] == 0x38
	  && contents[offset - 2] == 0x8b)
	goto check_gottpoff_modrm;

      /* Otherwise the instruction must start with a REX2 (0xd5)
	 prefix.  */
      if (contents[offset - 4] != 0xd5)
	return elf_x86_tls_error_yes;

      goto check_gottpoff;

    case R_X86_64_CODE_5_GOTTPOFF:
      /* Check transition from IE access model:
		movrs foo@gottpoff(%rip), %reg
	 where reg isn't one of r16 to r31.  */
      if (offset < 5
	  || (offset + 4) > sec->size
	  || (contents[offset - 5] | (ABI_64_P (abfd) ? 7 : 0xf)) != 0x4f
	  || contents[offset - 4] != 0x0f
	  || contents[offset - 3] != 0x38
	  || contents[offset - 2] != 0x8b)
	return elf_x86_tls_error_yes;

      goto check_gottpoff_modrm;

    case R_X86_64_CODE_6_GOTTPOFF:
      /* Check transition from IE access model:
		add %reg1, foo@gottpoff(%rip), %reg2
		movrs foo@gottpoff(%rip), %reg
	 where reg1/reg2/reg are one of r16 to r31.  */

      /* The instruction must start with an EVEX (0x62) prefix.  */
      if (offset < 6
	  || (offset + 4) > sec->size
	  || contents[offset - 6] != 0x62)
	return elf_x86_tls_error_yes;

      val = bfd_get_8 (abfd, contents + offset - 2);
      if (val != 0x01 && val != 0x03 && val != 0x8b)
	return elf_x86_tls_error_add_movrs;

      goto check_gottpoff_modrm;

    case R_X86_64_GOTTPOFF:
      /* Check transition from IE access model:
		mov foo@gottpoff(%rip), %reg
		add foo@gottpoff(%rip), %reg
	 */

      /* Check REX prefix first.  */
      if (offset >= 3 && (offset + 4) <= sec->size)
	{
	  val = bfd_get_8 (abfd, contents + offset - 3);
	  if (val != 0x48 && val != 0x4c)
	    {
	      /* X32 may have 0x44 REX prefix or no REX prefix.  */
	      if (ABI_64_P (abfd))
		return elf_x86_tls_error_yes;
	    }
	}
      else
	{
	  /* X32 may not have any REX prefix.  */
	  if (ABI_64_P (abfd))
	    return elf_x86_tls_error_yes;
	  if (offset < 2 || (offset + 3) > sec->size)
	    return elf_x86_tls_error_yes;
	}

    check_gottpoff:
      /* Opcode must be mov (0x8b) or add (0x03).  */
      val = bfd_get_8 (abfd, contents + offset - 2);
      if (val != 0x8b && val != 0x03)
	return elf_x86_tls_error_add_mov;

    check_gottpoff_modrm:
      /* ModRM must select RIP-relative addressing (mod=00, rm=101).  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return ((val & 0xc7) == 5
	      ? elf_x86_tls_error_none
	      : elf_x86_tls_error_yes);

    case R_X86_64_CODE_4_GOTPC32_TLSDESC:
      /* Check transition from GDesc access model:
		lea x@tlsdesc(%rip), %reg
	 where reg is one of r16 to r31.  */

      /* Must carry a REX2 (0xd5) prefix.  */
      if (offset < 4
	  || (offset + 4) > sec->size
	  || contents[offset - 4] != 0xd5)
	return elf_x86_tls_error_yes;

      goto check_tlsdesc;

    case R_X86_64_GOTPC32_TLSDESC:
      /* Check transition from GDesc access model:
		leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
		rex leal x@tlsdesc(%rip), %eax <--- X32 mode.

	 Make sure it's a leaq adding rip to a 32-bit offset
	 into any register, although it's probably almost always
	 going to be rax.  */

      if (offset < 3 || (offset + 4) > sec->size)
	return elf_x86_tls_error_yes;

      /* Mask off REX.R: any of rax..r15 as destination is fine.  */
      val = bfd_get_8 (abfd, contents + offset - 3);
      val &= 0xfb;
      if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
	return elf_x86_tls_error_yes;

    check_tlsdesc:
      /* Opcode must be lea (0x8d).  */
      if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
	return elf_x86_tls_error_lea;

      /* ModRM must select RIP-relative addressing (mod=00, rm=101).  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return ((val & 0xc7) == 0x05
	      ? elf_x86_tls_error_none
	      : elf_x86_tls_error_yes);

    case R_X86_64_TLSDESC_CALL:
      /* It has been checked in elf_x86_64_tls_transition.  */
      return elf_x86_tls_error_none;

    default:
      /* Callers only pass the TLS relocation types handled above.  */
      abort ();
    }
}
1503
/* Return TRUE if the TLS access transition is OK or no transition
   will be performed; FALSE (with an error reported) when the insn
   sequence does not permit the transition.  Update *R_TYPE to the
   relocation type to transition to.  */

static bool
elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
			   asection *sec, bfd_byte *contents,
			   Elf_Internal_Shdr *symtab_hdr,
			   struct elf_link_hash_entry **sym_hashes,
			   unsigned int *r_type, int tls_type,
			   const Elf_Internal_Rela *rel,
			   const Elf_Internal_Rela *relend,
			   struct elf_link_hash_entry *h,
			   Elf_Internal_Sym *sym,
			   bool from_relocate_section)
{
  unsigned int from_type = *r_type;
  unsigned int to_type = from_type;
  bool check = true;
  bfd_vma offset;
  bfd_byte *call;

  /* Skip TLS transition for functions.  */
  if (h != NULL
      && (h->type == STT_FUNC
	  || h->type == STT_GNU_IFUNC))
    return true;

  switch (from_type)
    {
    case R_X86_64_TLSDESC_CALL:
      /* Check valid GDesc call:
		call *x@tlscall(%rax) <--- LP64 mode.
		call *x@tlscall(%eax) <--- X32 mode.
	 */
      offset = rel->r_offset;
      call = NULL;
      if (offset + 2 <= sec->size)
	{
	  unsigned int prefix;
	  call = contents + offset;
	  prefix = 0;
	  if (!ABI_64_P (abfd))
	    {
	      /* Check for call *x@tlscall(%eax): an addr32 (0x67)
		 prefix may precede the call in X32 mode.  */
	      if (call[0] == 0x67)
		{
		  prefix = 1;
		  if (offset + 3 > sec->size)
		    call = NULL;
		}
	    }

	  /* Make sure that it's a call *x@tlscall(%rax)
	     (opcode 0xff /2 with ModRM 0x10).  */
	  if (call != NULL
	      && (call[prefix] != 0xff || call[1 + prefix] != 0x10))
	    call = NULL;
	}

      if (call == NULL)
	{
	  _bfd_x86_elf_link_report_tls_transition_error
	    (info, abfd, sec, symtab_hdr, h, sym, rel,
	     "R_X86_64_TLSDESC_CALL", NULL,
	     elf_x86_tls_error_indirect_call);
	  return false;
	}

      /* Fall through.  */

    case R_X86_64_TLSGD:
    case R_X86_64_GOTPC32_TLSDESC:
    case R_X86_64_CODE_4_GOTPC32_TLSDESC:
    case R_X86_64_GOTTPOFF:
    case R_X86_64_CODE_4_GOTTPOFF:
    case R_X86_64_CODE_5_GOTTPOFF:
    case R_X86_64_CODE_6_GOTTPOFF:
      if (bfd_link_executable (info))
	{
	  /* In an executable: local symbols go straight to LE;
	     others at least to IE.  */
	  if (h == NULL)
	    to_type = R_X86_64_TPOFF32;
	  else
	    to_type = R_X86_64_GOTTPOFF;
	}

      /* When we are called from elf_x86_64_relocate_section, there may
	 be additional transitions based on TLS_TYPE.  */
      if (from_relocate_section)
	{
	  unsigned int new_to_type = to_type;

	  if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
	    new_to_type = R_X86_64_TPOFF32;

	  if (to_type == R_X86_64_TLSGD
	      || to_type == R_X86_64_GOTPC32_TLSDESC
	      || to_type == R_X86_64_CODE_4_GOTPC32_TLSDESC
	      || to_type == R_X86_64_TLSDESC_CALL)
	    {
	      if (tls_type == GOT_TLS_IE)
		new_to_type = R_X86_64_GOTTPOFF;
	    }

	  /* We checked the transition before when we were called from
	     elf_x86_64_scan_relocs.  We only want to check the new
	     transition which hasn't been checked before.  */
	  check = (new_to_type != to_type
		   && (from_type == to_type
		       || (from_type == R_X86_64_CODE_4_GOTTPOFF
			   && to_type == R_X86_64_GOTTPOFF)
		       || (from_type == R_X86_64_CODE_5_GOTTPOFF
			   && to_type == R_X86_64_GOTTPOFF)
		       || (from_type == R_X86_64_CODE_6_GOTTPOFF
			   && to_type == R_X86_64_GOTTPOFF)));
	  to_type = new_to_type;
	}

      break;

    case R_X86_64_TLSLD:
      if (bfd_link_executable (info))
	to_type = R_X86_64_TPOFF32;
      break;

    default:
      /* Not a TLS relocation we transition; nothing to do.  */
      return true;
    }

  /* Return TRUE if there is no transition.  The CODE_*_GOTTPOFF
     variants count as "no transition" to plain GOTTPOFF since they
     are just differently-prefixed IE sequences.  */
  if (from_type == to_type
      || (from_type == R_X86_64_CODE_4_GOTTPOFF
	  && to_type == R_X86_64_GOTTPOFF)
      || (from_type == R_X86_64_CODE_5_GOTTPOFF
	  && to_type == R_X86_64_GOTTPOFF)
      || (from_type == R_X86_64_CODE_6_GOTTPOFF
	  && to_type == R_X86_64_GOTTPOFF))
    return true;

  /* Check if the transition can be performed.  */
  enum elf_x86_tls_error_type tls_error;
  if (check
      && ((tls_error = elf_x86_64_check_tls_transition (abfd, info, sec,
							contents,
							symtab_hdr,
							sym_hashes,
							from_type, rel,
							relend))
	  != elf_x86_tls_error_none))

    {
      reloc_howto_type *from, *to;

      from = &x86_64_elf_howto_table[from_type];
      to = &x86_64_elf_howto_table[to_type];

      if (from == NULL || to == NULL)
	return false;

      _bfd_x86_elf_link_report_tls_transition_error
	(info, abfd, sec, symtab_hdr, h, sym, rel, from->name,
	 to->name, tls_error);

      return false;
    }

  *r_type = to_type;
  return true;
}
1671
/* Report an error for a relocation HOWTO against symbol H/ISYM that
   cannot be used when making a shared object, PIE or PDE.  Marks SEC
   as failed, sets bfd_error_bad_value and always returns false so it
   can be used directly in a return statement.  */
static bool
elf_x86_64_need_pic (struct bfd_link_info *info,
		     bfd *input_bfd, asection *sec,
		     struct elf_link_hash_entry *h,
		     Elf_Internal_Shdr *symtab_hdr,
		     Elf_Internal_Sym *isym,
		     reloc_howto_type *howto)
{
  const char *v = "";
  const char *und = "";
  const char *pic = "";
  const char *object;

  const char *name;
  if (h)
    {
      name = h->root.root.string;
      switch (ELF_ST_VISIBILITY (h->other))
	{
	case STV_HIDDEN:
	  v = _("hidden symbol ");
	  break;
	case STV_INTERNAL:
	  v = _("internal symbol ");
	  break;
	case STV_PROTECTED:
	  v = _("protected symbol ");
	  break;
	default:
	  if (((struct elf_x86_link_hash_entry *) h)->def_protected)
	    v = _("protected symbol ");
	  else
	    v = _("symbol ");
	  /* NULL here means a "recompile with -fPIC/-fPIE" hint
	     will be filled in below; non-default visibility keeps
	     the empty string (no hint).  */
	  pic = NULL;
	  break;
	}

      if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
	und = _("undefined ");
    }
  else
    {
      name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
      pic = NULL;
    }

  if (bfd_link_dll (info))
    {
      object = _("a shared object");
      if (!pic)
	pic = _("; recompile with -fPIC");
    }
  else
    {
      if (bfd_link_pie (info))
	object = _("a PIE object");
      else
	object = _("a PDE object");
      if (!pic)
	pic = _("; recompile with -fPIE");
    }

  /* xgettext:c-format */
  _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
			"not be used when making %s%s"),
		      input_bfd, howto->name, und, v, name,
		      object, pic);
  bfd_set_error (bfd_error_bad_value);
  sec->check_relocs_failed = 1;
  return false;
}
1743
/* Move the R bits to the B bits in EVEX payload byte 1.  R3 lives in
   bit 7 (stored non-inverted here), R4 in bit 4 (stored inverted);
   their destinations are B3 (bit 5) and B4 (bit 3).  When COPY is
   false, the R bits are forced back to 1, which in the inverted
   encoding means "no register extension".  */
static unsigned int evex_move_r_to_b (unsigned int byte1, bool copy)
{
  unsigned int b3 = (byte1 >> 7) & 1;	     /* Value taken from R3.  */
  unsigned int b4 = ((byte1 >> 4) & 1) ^ 1;  /* R4 is stored inverted.  */

  /* Clear B3/B4, then deposit the values carried over from R3/R4.  */
  byte1 &= ~((1u << 5) | (1u << 3));
  byte1 |= (b3 << 5) | (b4 << 3);

  /* Set both R bits, as they're inverted.  */
  if (!copy)
    byte1 |= (1u << 7) | (1u << 4);

  return byte1;
}
1756
1757 /* With the local symbol, foo, we convert
1758 mov foo@GOTPCREL(%rip), %reg
1759 movrs foo@GOTPCREL(%rip), %reg
1760 to
1761 lea foo(%rip), %reg
1762 and convert
1763 call/jmp *foo@GOTPCREL(%rip)
1764 to
1765 nop call foo/jmp foo nop
1766 When PIC is false, convert
1767 test %reg, foo@GOTPCREL(%rip)
1768 to
1769 test $foo, %reg
1770 and convert
1771 push foo@GOTPCREL(%rip)
1772 to
1773 push $foo
1774 and convert
1775 binop foo@GOTPCREL(%rip), %reg
1776 to
1777 binop $foo, %reg
1778 where binop is one of adc, add, and, cmp, imul, or, sbb, sub, xor
1779 instructions. */
1780
/* Try to convert the GOT-indirect instruction whose 32-bit displacement
   is relocated by IREL (type *R_TYPE_P, one of the GOTPCREL* flavors)
   into a direct reference to the symbol, as described in the comment
   above.  CONTENTS is the writable contents of INPUT_SECTION in ABFD;
   H is the global symbol of the relocation, or NULL for a local symbol.
   When a conversion is performed, the instruction bytes and IREL are
   rewritten in place, *R_TYPE_P is set to the converted relocation type
   and *CONVERTED is set to true.  Returns false on a hard error, true
   otherwise -- including the many cases where conversion is skipped.  */

static bool
elf_x86_64_convert_load_reloc (bfd *abfd,
			       asection *input_section,
			       bfd_byte *contents,
			       unsigned int *r_type_p,
			       Elf_Internal_Rela *irel,
			       struct elf_link_hash_entry *h,
			       bool *converted,
			       struct bfd_link_info *link_info)
{
  struct elf_x86_link_hash_table *htab;
  bool is_pic;
  bool no_overflow;
  bool relocx;
  bool is_branch = false;
  bool to_reloc_pc32;
  bool abs_symbol;
  bool local_ref;
  asection *tsec = NULL;
  bfd_signed_vma raddend;
  unsigned int opcode;
  unsigned int modrm;
  unsigned char evex[3] = { 0, 0, 0 };
  unsigned int r_type = *r_type_p;
  unsigned int r_symndx;
  bfd_vma roff = irel->r_offset;
  bfd_vma abs_relocation;
  reloc_howto_type *howto;
  bfd_reloc_status_type r;
  Elf_Internal_Sym *isym;
  bfd_vma relocation;

  /* Make sure the prefix/opcode bytes implied by the relocation type
     actually fit before the relocated displacement at ROFF, and note
     whether this is one of the relaxable *X relocations.  */
  switch (r_type)
    {
    default:
      /* R_X86_64_GOTPCREL or R_X86_64_GOTPCRELX: opcode + ModRM.  */
      if (roff < 2)
	return true;
      relocx = (r_type == R_X86_64_GOTPCRELX);
      break;

    case R_X86_64_REX_GOTPCRELX:
      /* REX prefix + opcode + ModRM.  */
      if (roff < 3)
	return true;
      relocx = true;
      break;

    case R_X86_64_CODE_4_GOTPCRELX:
      if (roff < 4)
	return true;

      /* Skip if this isn't a REX2 instruction, nor un-prefixed MOVRS
	 (0f 38 8b).  */
      opcode = bfd_get_8 (abfd, contents + roff - 4);
      if (opcode != 0xd5
	  && (opcode != 0x0f
	      || bfd_get_8 (abfd, contents + roff - 3) != 0x38
	      || bfd_get_8 (abfd, contents + roff - 2) != 0x8b))
	return true;

      relocx = true;
      break;

    case R_X86_64_CODE_5_GOTPCRELX:
      if (roff < 5)
	return true;

      /* Skip if this isn't REX-prefixed MOVRS (REX has high nibble
	 0x4, hence the "| 0xf" test).  */
      if ((bfd_get_8 (abfd, contents + roff - 5) | 0xf) != 0x4f
	  || bfd_get_8 (abfd, contents + roff - 4) != 0x0f
	  || bfd_get_8 (abfd, contents + roff - 3) != 0x38
	  || bfd_get_8 (abfd, contents + roff - 2) != 0x8b)
	return true;

      relocx = true;
      break;

    case R_X86_64_CODE_6_GOTPCRELX:
      if (roff < 6)
	return true;

      /* Skip if this isn't an EVEX instruction.  */
      if (bfd_get_8 (abfd, contents + roff - 6) != 0x62)
	return true;

      /* Capture the three EVEX payload bytes for later rewriting.  */
      evex[0] = bfd_get_8 (abfd, contents + roff - 5);
      evex[1] = bfd_get_8 (abfd, contents + roff - 4);
      evex[2] = bfd_get_8 (abfd, contents + roff - 3);

      /* Skip if this isn't a Map 4 NP instruction.  */
      if ((evex[0] & 7) != 4
	  || (evex[1] & 3) != 0
	  || (evex[2] & 0xe0) != 0)
	return true;

      relocx = true;
      break;
    }

  raddend = irel->r_addend;
  /* Addend for 32-bit PC-relative relocation must be -4.  */
  if (raddend != -4)
    return true;

  htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
  is_pic = bfd_link_pic (link_info);

  /* TRUE if --no-relax is used.  */
  no_overflow = link_info->disable_target_specific_optimizations > 1;

  r_symndx = htab->r_sym (irel->r_info);

  /* For all handled encodings the opcode byte is at ROFF - 2 and the
     ModRM byte at ROFF - 1, immediately before the displacement.  */
  opcode = bfd_get_8 (abfd, contents + roff - 2);
  modrm = bfd_get_8 (abfd, contents + roff - 1);
  if (opcode == 0xff)
    {
      /* Group FF: only indirect CALL/JMP (branches) and PUSH are
	 convertible; any other /reg form is left alone.  */
      switch (modrm & 0x38)
	{
	case 0x10: /* CALL */
	case 0x20: /* JMP */
	  is_branch = true;
	  break;

	case 0x30: /* PUSH */
	  break;

	default:
	  return true;
	}
    }

  /* Convert mov to lea since it has been done for a while.  For mov
     (opcode 0x8b) even plain R_X86_64_GOTPCREL is accepted.  */
  if (opcode != 0x8b)
    {
      /* Only convert R_X86_64_GOTPCRELX, R_X86_64_REX_GOTPCRELX
	 and R_X86_64_CODE_<n>_GOTPCRELX for call, jmp or one of adc,
	 add, and, cmp, or, sbb, sub, test, xor instructions.  */
      if (!relocx)
	return true;
    }

  /* We convert only to R_X86_64_PC32:
     1. Branch.
     2. R_X86_64_GOTPCREL since we can't modify REX byte.
     3. no_overflow is true.
     4. PIC.
     */
  to_reloc_pc32 = (is_branch
		   || !relocx
		   || no_overflow
		   || is_pic);

  abs_symbol = false;
  abs_relocation = 0;

  /* Get the symbol referred to by the reloc.  */
  if (h == NULL)
    {
      isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd,
				    r_symndx);

      /* Skip relocation against undefined symbols.  */
      if (isym->st_shndx == SHN_UNDEF)
	return true;

      local_ref = true;
      if (isym->st_shndx == SHN_ABS)
	{
	  tsec = bfd_abs_section_ptr;
	  abs_symbol = true;
	  abs_relocation = isym->st_value;
	}
      else if (isym->st_shndx == SHN_COMMON)
	tsec = bfd_com_section_ptr;
      else if (isym->st_shndx == SHN_X86_64_LCOMMON)
	tsec = &_bfd_elf_large_com_section;
      else
	tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
    }
  else
    {
      /* Undefined weak symbol is only bound locally in executable
	 and its reference is resolved as 0 without relocation
	 overflow.  We can only perform this optimization for
	 GOTPCRELX relocations since we need to modify REX byte.
	 It is OK to convert mov with R_X86_64_GOTPCREL to
	 R_X86_64_PC32.  */
      struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);

      isym = NULL;
      tsec = NULL;

      abs_symbol = ABS_SYMBOL_P (h);
      abs_relocation = h->root.u.def.value;

      /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P.  */
      local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
      if ((relocx || opcode == 0x8b)
	  && (h->root.type == bfd_link_hash_undefweak
	      && !eh->linker_def
	      && local_ref))
	{
	  if (is_branch)
	    {
	      /* Skip for branch instructions since R_X86_64_PC32
		 may overflow.  */
	      if (no_overflow)
		return true;
	    }
	  else if (relocx)
	    {
	      /* For non-branch instructions, we can convert to
		 R_X86_64_32/R_X86_64_32S since we know if there
		 is a REX byte.  */
	      to_reloc_pc32 = false;
	    }

	  /* Since we don't know the current PC when PIC is true,
	     we can't convert to R_X86_64_PC32.  */
	  if (to_reloc_pc32 && is_pic)
	    return true;

	  goto convert;
	}
      /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
	 ld.so may use its link-time address.  */
      else if (h->start_stop
	       || eh->linker_def
	       || ((h->def_regular
		    || h->root.type == bfd_link_hash_defined
		    || h->root.type == bfd_link_hash_defweak)
		   && h != htab->elf.hdynamic
		   && local_ref))
	{
	  /* bfd_link_hash_new or bfd_link_hash_undefined is
	     set by an assignment in a linker script in
	     bfd_elf_record_link_assignment.  start_stop is set
	     on __start_SECNAME/__stop_SECNAME which mark section
	     SECNAME.  */
	  if (h->start_stop
	      || eh->linker_def
	      || (h->def_regular
		  && (h->root.type == bfd_link_hash_new
		      || h->root.type == bfd_link_hash_undefined
		      || ((h->root.type == bfd_link_hash_defined
			   || h->root.type == bfd_link_hash_defweak)
			  && h->root.u.def.section == bfd_und_section_ptr))))
	    {
	      /* Skip since R_X86_64_32/R_X86_64_32S may overflow.  */
	      if (no_overflow)
		return true;
	      if (h->start_stop)
		tsec = h->root.u.def.section;
	      else if (h == htab->elf.hehdr_start)
		{
		  /* Use the lowest-addressed section to estimate the
		     __ehdr_start symbol value.  */
		  asection *sec;
		  tsec = NULL;
		  for (sec = link_info->output_bfd->sections;
		       sec != NULL;
		       sec = sec->next)
		    if ((sec->flags & SEC_LOAD) != 0
			&& (tsec == NULL || tsec->vma > sec->vma))
		      tsec = sec;

		}
	      goto convert;
	    }
	  tsec = h->root.u.def.section;
	}
      else
	return true;
    }

  /* A defined symbol must have a target section by now.  */
  if (tsec == NULL)
    return false;

  /* Don't convert GOTPCREL relocation against large section.  */
  if (elf_section_data (tsec) != NULL
      && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
    return true;

  /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow.  */
  if (no_overflow)
    return true;

 convert:
  /* Compute relocation value so that it can be used later to check for
     overflow against the converted relocation.  */
  if (h == NULL)
    {
      /* Make a copy of IREL so that _bfd_elf_rela_local_sym won't
	 change IREL.  */
      Elf_Internal_Rela rel = *irel;
      relocation = _bfd_elf_rela_local_sym (link_info->output_bfd, isym,
					    &tsec, &rel);
      /* Use the updated r_addend.  */
      raddend = rel.r_addend;
    }
  else if (tsec != NULL)
    relocation = (h->root.u.def.value
		  + tsec->output_section->vma
		  + tsec->output_offset);
  else
    relocation = 0;

  if (is_branch)
    {
      /* We have "call/jmp *foo@GOTPCREL(%rip)".  */
      unsigned int nop;
      unsigned int disp;
      bfd_vma nop_offset;

      r_type = R_X86_64_PC32;

      /* Skip if the converted relocation will overflow.  */
      howto = &x86_64_elf_howto_table[r_type];
      r = _bfd_final_link_relocate (howto, abfd, input_section,
				    contents, irel->r_offset,
				    relocation, raddend);
      if (r == bfd_reloc_overflow)
	return true;

      /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
	 R_X86_64_PC32.  MODRM is reused below to hold the new direct
	 branch opcode.  */
      if (modrm == 0x25)
	{
	  /* ModRM 0x25 is JMP: convert to "jmp foo nop"; the 4-byte
	     displacement shifts down one byte to follow the E9
	     opcode, and the freed trailing byte becomes a nop.  */
	  modrm = 0xe9;
	  nop = NOP_OPCODE;
	  nop_offset = irel->r_offset + 3;
	  disp = bfd_get_32 (abfd, contents + irel->r_offset);
	  irel->r_offset -= 1;
	  bfd_put_32 (abfd, disp, contents + irel->r_offset);
	}
      else
	{
	  struct elf_x86_link_hash_entry *eh
	    = (struct elf_x86_link_hash_entry *) h;

	  /* Convert to "nop call foo".  ADDR_PREFIX_OPCODE
	     is a nop prefix.  */
	  modrm = 0xe8;
	  /* To support TLS optimization, always use addr32 prefix for
	     "call *__tls_get_addr@GOTPCREL(%rip)".  */
	  if (eh && eh->tls_get_addr)
	    {
	      nop = 0x67;
	      nop_offset = irel->r_offset - 2;
	    }
	  else
	    {
	      nop = htab->params->call_nop_byte;
	      if (htab->params->call_nop_as_suffix)
		{
		  nop_offset = irel->r_offset + 3;
		  disp = bfd_get_32 (abfd, contents + irel->r_offset);
		  irel->r_offset -= 1;
		  bfd_put_32 (abfd, disp, contents + irel->r_offset);
		}
	      else
		nop_offset = irel->r_offset - 2;
	    }
	}
      bfd_put_8 (abfd, nop, contents + nop_offset);
      bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
    }
  else if (r_type == R_X86_64_CODE_6_GOTPCRELX && opcode != 0x8b)
    {
      /* EVEX-encoded (Map 4) test/binop/imul with a GOT memory
	 operand; EVEX[] holds the payload bytes captured above.  */
      bool move_v_r = false;

      /* R_X86_64_PC32 isn't supported.  */
      if (to_reloc_pc32)
	return true;

      if (opcode == 0x85)
	{
	  /* Convert "ctest<cc> %reg, foo@GOTPCREL(%rip)" to
	     "ctest<cc> $foo, %reg".  */
	  modrm = 0xc0 | (modrm & 0x38) >> 3;
	  opcode = 0xf7;
	}
      else if ((opcode | 0x3a) == 0x3b)
	{
	  /* Don't convert (non-NDD) forms with memory destination.  */
	  if (!(evex[2] & 0x10) && (opcode | 0x38) != 0x3b)
	    return true;

	  /* Don't convert non-commutative insns with the memory operand
	     2nd.  */
	  if ((evex[2] & 0x10) && (opcode | 0x38) != 0x3b
	      && (opcode == 0x19 /* SBB */
		  || opcode == 0x29 /* SUB */))
	    return true;

	  /* Convert "binop foo@GOTPCREL(%rip), %reg" to
	     "binop $foo, %reg", or alike for 3-operand forms.  */
	  modrm = 0xc0 | ((modrm & 0x38) >> 3) | (opcode & 0x38);
	  opcode = 0x81;
	}
      else if (opcode == 0xaf)
	{
	  if (!(evex[2] & 0x10))
	    {
	      /* Convert "imul foo@GOTPCREL(%rip), %reg" to
		 "imul $foo, %reg, %reg".  */
	      modrm = 0xc0 | ((modrm & 0x38) >> 3) | (modrm & 0x38);
	    }
	  else
	    {
	      /* Convert "imul foo@GOTPCREL(%rip), %reg1, %reg2" to
		 "imul $foo, %reg1, %reg2".  */
	      modrm = 0xc0 | ((modrm & 0x38) >> 3) | (~evex[1] & 0x38);
	      move_v_r = true;
	    }
	  opcode = 0x69;
	}
      else
	return true;

      /* Use R_X86_64_32 with 32-bit operand to avoid relocation
	 overflow when sign-extending imm32 to 64 bits.  EVEX[1] bit 7
	 is the W bit: set means a 64-bit operation, so the imm32 is
	 sign-extended and R_X86_64_32S is needed.  */
      r_type = evex[1] & 0x80 ? R_X86_64_32S : R_X86_64_32;

      /* Skip if the converted relocation will overflow.  */
      howto = elf_x86_64_rtype_to_howto (abfd, r_type);
      r = _bfd_final_link_relocate (howto, abfd, input_section,
				    contents, irel->r_offset,
				    relocation, 0);
      if (r == bfd_reloc_overflow)
	return true;

      if (abs_relocation) /* NOTE(review): this guards on the value
			     rather than on abs_symbol; an absolute
			     symbol with value 0 skips the range check
			     (harmless -- 0 always fits).  Consider
			     testing abs_symbol instead, as the
			     intent suggests.  */
	{
	  /* Check if R_X86_64_32S/R_X86_64_32 fits.  */
	  if (r_type == R_X86_64_32S)
	    {
	      if ((abs_relocation + 0x80000000) > 0xffffffff)
		return true;
	    }
	  else
	    {
	      if (abs_relocation > 0xffffffff)
		return true;
	    }
	}

      bfd_put_8 (abfd, opcode, contents + roff - 2);
      bfd_put_8 (abfd, modrm, contents + roff - 1);

      evex[0] = evex_move_r_to_b (evex[0], opcode == 0x69 && !move_v_r);
      if (move_v_r)
	{
	  /* Move the top two V bits to the R bits in EVEX payload byte 1.
	     Note that evex_move_r_to_b() set both R bits.  */
	  if (!(evex[1] & (1 << 6)))
	    evex[0] &= ~(1 << 7); /* V3 -> R3 */
	  if (!(evex[2] & (1 << 3)))
	    evex[0] &= ~(1 << 4); /* V4 -> R4 */
	  /* Set all V bits, as they're inverted.  */
	  evex[1] |= 0xf << 3;
	  evex[2] |= 1 << 3;
	  /* Clear the ND (ZU) bit (it ought to be ignored anyway).  */
	  evex[2] &= ~(1 << 4);
	  bfd_put_8 (abfd, evex[2], contents + roff - 3);
	  bfd_put_8 (abfd, evex[1], contents + roff - 4);
	}
      bfd_put_8 (abfd, evex[0], contents + roff - 5);

      /* No addend for R_X86_64_32/R_X86_64_32S relocations.  */
      irel->r_addend = 0;
    }
  else
    {
      /* Legacy/REX/REX2-encoded mov, test, binop, imul or push.  */
      unsigned int rex = 0;
      unsigned int rex_mask = REX_R;
      unsigned int rex2 = 0;
      unsigned int rex2_mask = REX_R | REX_R << 4;
      unsigned int movrs = 0;
      bool rex_w = false;

      if (r_type == R_X86_64_CODE_6_GOTPCRELX)
	{
	  /* Synthesize a REX2 prefix from EVEX, just enough for the LEA
	     and MOV case below.  */
	  unsigned int p;

	  p = bfd_get_8 (abfd, contents + roff - 5);
	  if (!(p & 0x80))
	    rex2 |= REX_R;
	  if (!(p & 0x10))
	    rex2 |= REX_R << 4;
	  if (bfd_get_8 (abfd, contents + roff - 4) & 0x80)
	    {
	      rex2 |= REX_W;
	      rex_w = true;
	    }
	  movrs = 6;
	}
      else if (r_type == R_X86_64_CODE_5_GOTPCRELX)
	{
	  rex = bfd_get_8 (abfd, contents + roff - 5);
	  rex_w = (rex & REX_W) != 0;
	  movrs = 5;
	}
      else if (r_type == R_X86_64_CODE_4_GOTPCRELX)
	{
	  if (bfd_get_8 (abfd, contents + roff - 4) == 0xd5)
	    {
	      /* Make sure even an all-zero payload leaves a non-zero value
		 in the variable.  */
	      rex2 = bfd_get_8 (abfd, contents + roff - 3) | 0x100;
	      rex2_mask |= 0x100;
	      rex_w = (rex2 & REX_W) != 0;
	    }
	  else if (bfd_get_8 (abfd, contents + roff - 4) == 0x0f)
	    movrs = 4;
	}
      else if (r_type == R_X86_64_REX_GOTPCRELX)
	{
	  rex = bfd_get_8 (abfd, contents + roff - 3);
	  rex_w = (rex & REX_W) != 0;
	}

      if (opcode == 0x8b)
	{
	  /* For a locally-resolved absolute symbol we can use an
	     absolute immediate instead of a PC-relative form.  */
	  if (abs_symbol && local_ref && relocx)
	    to_reloc_pc32 = false;

	  if (to_reloc_pc32)
	    {
	      /* Convert "mov foo@GOTPCREL(%rip), %reg" to
		 "lea foo(%rip), %reg".  */
	      opcode = 0x8d;
	      r_type = R_X86_64_PC32;

	      /* Skip if the converted relocation will overflow.  */
	      howto = &x86_64_elf_howto_table[r_type];
	      r = _bfd_final_link_relocate (howto, abfd, input_section,
					    contents, irel->r_offset,
					    relocation,
					    raddend);
	      if (r == bfd_reloc_overflow)
		return true;

	      /* For MOVRS move a possible REX prefix as necessary.  */
	      if (movrs == 5)
		bfd_put_8 (abfd, rex, contents + roff - 3);
	    }
	  else
	    {
	      /* Convert "mov foo@GOTPCREL(%rip), %reg" to
		 "mov $foo, %reg".  */
	      opcode = 0xc7;
	      modrm = 0xc0 | (modrm & 0x38) >> 3;
	      if (rex_w && ABI_64_P (link_info->output_bfd))
		{
		  /* Keep the REX_W bit in REX byte for LP64.  */
		  r_type = R_X86_64_32S;
		  goto rewrite_modrm_rex;
		}
	      else
		{
		  /* If the REX_W bit in REX byte isn't needed,
		     use R_X86_64_32 and clear the W bit to avoid
		     sign-extend imm32 to imm64.  */
		  r_type = R_X86_64_32;
		  /* Clear the W bit in REX byte and REX2 payload.  */
		  rex_mask |= REX_W;
		  rex2_mask |= REX_W;
		  goto rewrite_modrm_rex;
		}
	    }
	}
      else
	{
	  /* R_X86_64_PC32 isn't supported.  */
	  if (to_reloc_pc32)
	    return true;

	  if (opcode == 0x85 && !(rex2 & (REX2_M << 4)))
	    {
	      /* Convert "test %reg, foo@GOTPCREL(%rip)" to
		 "test $foo, %reg".  */
	      modrm = 0xc0 | (modrm & 0x38) >> 3;
	      opcode = 0xf7;
	    }
	  else if ((opcode | 0x38) == 0x3b && !(rex2 & (REX2_M << 4)))
	    {
	      /* Convert "binop foo@GOTPCREL(%rip), %reg" to
		 "binop $foo, %reg".  */
	      modrm = 0xc0 | ((modrm & 0x38) >> 3) | (opcode & 0x38);
	      opcode = 0x81;
	    }
	  else if (opcode == 0xaf && (rex2 & (REX2_M << 4)))
	    {
	      /* Convert "imul foo@GOTPCREL(%rip), %reg" to
		 "imul $foo, %reg, %reg".  */
	      modrm = 0xc0 | ((modrm & 0x38) >> 3) | (modrm & 0x38);
	      rex_mask = 0;
	      rex2_mask = REX2_M << 4;
	      opcode = 0x69;
	    }
	  else if (opcode == 0xff && !(rex2 & (REX2_M << 4)))
	    {
	      /* Convert "push foo@GOTPCREL(%rip)" to
		 "push $foo".  The instruction shrinks, so pad with
		 meaningless CS segment-override prefixes (0x2e).  */
	      bfd_put_8 (abfd, 0x68, contents + roff - 1);
	      if (rex)
		{
		  bfd_put_8 (abfd, 0x2e, contents + roff - 3);
		  bfd_put_8 (abfd, rex, contents + roff - 2);
		}
	      else if (rex2)
		{
		  bfd_put_8 (abfd, 0x2e, contents + roff - 4);
		  bfd_put_8 (abfd, 0xd5, contents + roff - 3);
		  bfd_put_8 (abfd, rex2, contents + roff - 2);
		}
	      else
		bfd_put_8 (abfd, 0x2e, contents + roff - 2);

	      r_type = R_X86_64_32S;
	      /* No addend for R_X86_64_32S relocations.  */
	      irel->r_addend = 0;
	      goto finish;
	    }
	  else
	    return true;

	  /* Use R_X86_64_32 with 32-bit operand to avoid relocation
	     overflow when sign-extending imm32 to imm64.  */
	  r_type = rex_w ? R_X86_64_32S : R_X86_64_32;

	rewrite_modrm_rex:
	  /* Skip if the converted relocation will overflow.  */
	  howto = elf_x86_64_rtype_to_howto (abfd, r_type);
	  r = _bfd_final_link_relocate (howto, abfd, input_section,
					contents, irel->r_offset,
					relocation, 0);
	  if (r == bfd_reloc_overflow)
	    return true;

	  if (abs_relocation)
	    {
	      /* Check if R_X86_64_32S/R_X86_64_32 fits.  */
	      if (r_type == R_X86_64_32S)
		{
		  if ((abs_relocation + 0x80000000) > 0xffffffff)
		    return true;
		}
	      else
		{
		  if (abs_relocation > 0xffffffff)
		    return true;
		}
	    }

	  bfd_put_8 (abfd, modrm, contents + roff - 1);

	  if (rex)
	    {
	      /* Move the R bit to the B bit in REX byte.  */
	      rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
	      bfd_put_8 (abfd, rex, contents + roff - 3);
	    }
	  else if (rex2)
	    {
	      /* Move the R bits to the B bits in REX2 payload byte.  */
	      rex2 = ((rex2 & ~rex2_mask)
		      | (rex2 & (REX_R | REX_R << 4)) >> 2);
	      bfd_put_8 (abfd, rex2, contents + roff - 3);
	    }

	  /* No addend for R_X86_64_32/R_X86_64_32S relocations.  */
	  irel->r_addend = 0;
	}

      bfd_put_8 (abfd, opcode, contents + roff - 2);

      /* For MOVRS zap the 0f38 or EVEX prefix, applying meaningless CS
	 segment overrides instead.  When necessary also install the REX2
	 prefix and payload (which may not have been written yet).  */
      if (movrs)
	{
	  bfd_put_8 (abfd, 0x2e, contents + roff - movrs);
	  bfd_put_8 (abfd, 0x2e, contents + roff - movrs + 1);
	  if (movrs == 6)
	    {
	      bfd_put_8 (abfd, 0xd5, contents + roff - 4);
	      bfd_put_8 (abfd, rex2, contents + roff - 3);
	    }
	}
    }

 finish:
  /* Record the converted relocation; the converted-reloc bit is
     stripped again during relocation processing.  */
  *r_type_p = r_type;
  irel->r_info = htab->r_info (r_symndx,
			       r_type | R_X86_64_converted_reloc_bit);

  *converted = true;

  return true;
}
2484
2485 /* Look through the relocs for a section during the first phase, and
2486 calculate needed space in the global offset table, and procedure
2487 linkage table. */
2488
2489 static bool
2490 elf_x86_64_scan_relocs (bfd *abfd, struct bfd_link_info *info,
2491 asection *sec,
2492 const Elf_Internal_Rela *relocs)
2493 {
2494 struct elf_x86_link_hash_table *htab;
2495 Elf_Internal_Shdr *symtab_hdr;
2496 struct elf_link_hash_entry **sym_hashes;
2497 const Elf_Internal_Rela *rel;
2498 const Elf_Internal_Rela *rel_end;
2499 bfd_byte *contents;
2500 bool converted;
2501
2502 if (bfd_link_relocatable (info))
2503 return true;
2504
2505 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2506 if (htab == NULL)
2507 {
2508 sec->check_relocs_failed = 1;
2509 return false;
2510 }
2511
2512 BFD_ASSERT (is_x86_elf (abfd, htab));
2513
2514 /* Get the section contents. */
2515 if (elf_section_data (sec)->this_hdr.contents != NULL)
2516 contents = elf_section_data (sec)->this_hdr.contents;
2517 else if (!_bfd_elf_mmap_section_contents (abfd, sec, &contents))
2518 {
2519 sec->check_relocs_failed = 1;
2520 return false;
2521 }
2522
2523 symtab_hdr = &elf_symtab_hdr (abfd);
2524 sym_hashes = elf_sym_hashes (abfd);
2525
2526 converted = false;
2527
2528 rel_end = relocs + sec->reloc_count;
2529 for (rel = relocs; rel < rel_end; rel++)
2530 {
2531 unsigned int r_type;
2532 unsigned int r_symndx;
2533 struct elf_link_hash_entry *h;
2534 struct elf_x86_link_hash_entry *eh;
2535 Elf_Internal_Sym *isym;
2536 const char *name;
2537 bool size_reloc;
2538 bool converted_reloc;
2539 bool no_dynreloc;
2540 reloc_howto_type *howto;
2541
2542 r_symndx = htab->r_sym (rel->r_info);
2543 r_type = ELF32_R_TYPE (rel->r_info);
2544
2545 /* Don't check R_X86_64_NONE. */
2546 if (r_type == R_X86_64_NONE)
2547 continue;
2548
2549 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2550 {
2551 /* xgettext:c-format */
2552 _bfd_error_handler (_("%pB: bad symbol index: %d"),
2553 abfd, r_symndx);
2554 goto error_return;
2555 }
2556
2557 howto = elf_x86_64_rtype_to_howto (abfd, r_type);
2558 if (howto == NULL)
2559 {
2560 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
2561 abfd, r_type);
2562 goto error_return;
2563 }
2564 if (!bfd_reloc_offset_in_range (howto, abfd, sec, rel->r_offset))
2565 {
2566 /* xgettext:c-format */
2567 _bfd_error_handler
2568 (_("%pB: bad reloc offset (%#" PRIx64 " > %#" PRIx64 ") for"
2569 " section `%pA'"), abfd, (uint64_t) rel->r_offset,
2570 (uint64_t) sec->size, sec);
2571 goto error_return;
2572 }
2573
2574 if (r_symndx < symtab_hdr->sh_info)
2575 {
2576 /* A local symbol. */
2577 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2578 abfd, r_symndx);
2579 if (isym == NULL)
2580 goto error_return;
2581
2582 /* Check relocation against local STT_GNU_IFUNC symbol. */
2583 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2584 {
2585 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
2586 true);
2587 if (h == NULL)
2588 goto error_return;
2589
2590 /* Fake a STT_GNU_IFUNC symbol. */
2591 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
2592 isym, NULL);
2593 h->type = STT_GNU_IFUNC;
2594 h->def_regular = 1;
2595 h->ref_regular = 1;
2596 h->forced_local = 1;
2597 h->root.type = bfd_link_hash_defined;
2598 }
2599 else
2600 h = NULL;
2601 }
2602 else
2603 {
2604 isym = NULL;
2605 h = _bfd_elf_get_link_hash_entry (sym_hashes, r_symndx, symtab_hdr);
2606 }
2607
2608 /* Check invalid x32 relocations. */
2609 if (!ABI_64_P (abfd))
2610 switch (r_type)
2611 {
2612 default:
2613 break;
2614
2615 case R_X86_64_DTPOFF64:
2616 case R_X86_64_TPOFF64:
2617 case R_X86_64_PC64:
2618 case R_X86_64_GOTOFF64:
2619 case R_X86_64_GOT64:
2620 case R_X86_64_GOTPCREL64:
2621 case R_X86_64_GOTPC64:
2622 case R_X86_64_GOTPLT64:
2623 case R_X86_64_PLTOFF64:
2624 {
2625 if (h)
2626 name = h->root.root.string;
2627 else
2628 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2629 NULL);
2630 _bfd_error_handler
2631 /* xgettext:c-format */
2632 (_("%pB: relocation %s against symbol `%s' isn't "
2633 "supported in x32 mode"), abfd,
2634 x86_64_elf_howto_table[r_type].name, name);
2635 bfd_set_error (bfd_error_bad_value);
2636 goto error_return;
2637 }
2638 break;
2639 }
2640
2641 eh = (struct elf_x86_link_hash_entry *) h;
2642
2643 if (h != NULL)
2644 {
2645 /* It is referenced by a non-shared object. */
2646 h->ref_regular = 1;
2647 }
2648
2649 converted_reloc = false;
2650 if ((r_type == R_X86_64_GOTPCREL
2651 || r_type == R_X86_64_GOTPCRELX
2652 || r_type == R_X86_64_REX_GOTPCRELX
2653 || r_type == R_X86_64_CODE_4_GOTPCRELX
2654 || r_type == R_X86_64_CODE_5_GOTPCRELX
2655 || r_type == R_X86_64_CODE_6_GOTPCRELX)
2656 && (h == NULL || h->type != STT_GNU_IFUNC))
2657 {
2658 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
2659 if (!elf_x86_64_convert_load_reloc (abfd, sec, contents,
2660 &r_type, irel, h,
2661 &converted_reloc, info))
2662 goto error_return;
2663
2664 if (converted_reloc)
2665 converted = true;
2666 }
2667
2668 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2669 symtab_hdr, &no_dynreloc))
2670 goto error_return;
2671
2672 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2673 symtab_hdr, sym_hashes,
2674 &r_type, GOT_UNKNOWN,
2675 rel, rel_end, h, isym, false))
2676 goto error_return;
2677
2678 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2679 if (h == htab->elf.hgot)
2680 htab->got_referenced = true;
2681
2682 switch (r_type)
2683 {
2684 case R_X86_64_TLSLD:
2685 htab->tls_ld_or_ldm_got.refcount = 1;
2686 goto create_got;
2687
2688 case R_X86_64_TPOFF32:
2689 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2690 {
2691 elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2692 &x86_64_elf_howto_table[r_type]);
2693 goto error_return;
2694 }
2695 if (eh != NULL)
2696 eh->zero_undefweak &= 0x2;
2697 break;
2698
2699 case R_X86_64_GOTTPOFF:
2700 case R_X86_64_CODE_4_GOTTPOFF:
2701 case R_X86_64_CODE_5_GOTTPOFF:
2702 case R_X86_64_CODE_6_GOTTPOFF:
2703 if (!bfd_link_executable (info))
2704 info->flags |= DF_STATIC_TLS;
2705 /* Fall through */
2706
2707 case R_X86_64_GOT32:
2708 case R_X86_64_GOTPCREL:
2709 case R_X86_64_GOTPCRELX:
2710 case R_X86_64_REX_GOTPCRELX:
2711 case R_X86_64_CODE_4_GOTPCRELX:
2712 case R_X86_64_CODE_5_GOTPCRELX:
2713 case R_X86_64_CODE_6_GOTPCRELX:
2714 case R_X86_64_TLSGD:
2715 case R_X86_64_GOT64:
2716 case R_X86_64_GOTPCREL64:
2717 case R_X86_64_GOTPLT64:
2718 case R_X86_64_GOTPC32_TLSDESC:
2719 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
2720 case R_X86_64_TLSDESC_CALL:
2721 /* This symbol requires a global offset table entry. */
2722 {
2723 int tls_type, old_tls_type;
2724
2725 switch (r_type)
2726 {
2727 default:
2728 tls_type = GOT_NORMAL;
2729 if (h)
2730 {
2731 if (ABS_SYMBOL_P (h))
2732 tls_type = GOT_ABS;
2733 }
2734 else if (isym->st_shndx == SHN_ABS)
2735 tls_type = GOT_ABS;
2736 break;
2737 case R_X86_64_TLSGD:
2738 tls_type = GOT_TLS_GD;
2739 break;
2740 case R_X86_64_GOTTPOFF:
2741 case R_X86_64_CODE_4_GOTTPOFF:
2742 case R_X86_64_CODE_5_GOTTPOFF:
2743 case R_X86_64_CODE_6_GOTTPOFF:
2744 tls_type = GOT_TLS_IE;
2745 break;
2746 case R_X86_64_GOTPC32_TLSDESC:
2747 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
2748 case R_X86_64_TLSDESC_CALL:
2749 tls_type = GOT_TLS_GDESC;
2750 break;
2751 }
2752
2753 if (h != NULL)
2754 {
2755 h->got.refcount = 1;
2756 old_tls_type = eh->tls_type;
2757 }
2758 else
2759 {
2760 bfd_signed_vma *local_got_refcounts;
2761
2762 if (!elf_x86_allocate_local_got_info (abfd,
2763 symtab_hdr->sh_info))
2764 goto error_return;
2765
2766 /* This is a global offset table entry for a local symbol. */
2767 local_got_refcounts = elf_local_got_refcounts (abfd);
2768 local_got_refcounts[r_symndx] = 1;
2769 old_tls_type
2770 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2771 }
2772
2773 /* If a TLS symbol is accessed using IE at least once,
2774 there is no point to use dynamic model for it. */
2775 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2776 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2777 || tls_type != GOT_TLS_IE))
2778 {
2779 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2780 tls_type = old_tls_type;
2781 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2782 && GOT_TLS_GD_ANY_P (tls_type))
2783 tls_type |= old_tls_type;
2784 else
2785 {
2786 if (h)
2787 name = h->root.root.string;
2788 else
2789 name = bfd_elf_sym_name (abfd, symtab_hdr,
2790 isym, NULL);
2791 _bfd_error_handler
2792 /* xgettext:c-format */
2793 (_("%pB: '%s' accessed both as normal and"
2794 " thread local symbol"),
2795 abfd, name);
2796 bfd_set_error (bfd_error_bad_value);
2797 goto error_return;
2798 }
2799 }
2800
2801 if (old_tls_type != tls_type)
2802 {
2803 if (eh != NULL)
2804 eh->tls_type = tls_type;
2805 else
2806 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2807 }
2808 }
2809 /* Fall through */
2810
2811 case R_X86_64_GOTOFF64:
2812 case R_X86_64_GOTPC32:
2813 case R_X86_64_GOTPC64:
2814 create_got:
2815 if (eh != NULL)
2816 eh->zero_undefweak &= 0x2;
2817 break;
2818
2819 case R_X86_64_PLT32:
2820 /* This symbol requires a procedure linkage table entry. We
2821 actually build the entry in adjust_dynamic_symbol,
2822 because this might be a case of linking PIC code which is
2823 never referenced by a dynamic object, in which case we
2824 don't need to generate a procedure linkage table entry
2825 after all. */
2826
2827 /* If this is a local symbol, we resolve it directly without
2828 creating a procedure linkage table entry. */
2829 if (h == NULL)
2830 continue;
2831
2832 eh->zero_undefweak &= 0x2;
2833 h->needs_plt = 1;
2834 h->plt.refcount = 1;
2835 break;
2836
2837 case R_X86_64_PLTOFF64:
2838 /* This tries to form the 'address' of a function relative
2839 to GOT. For global symbols we need a PLT entry. */
2840 if (h != NULL)
2841 {
2842 h->needs_plt = 1;
2843 h->plt.refcount = 1;
2844 }
2845 goto create_got;
2846
2847 case R_X86_64_SIZE32:
2848 case R_X86_64_SIZE64:
2849 size_reloc = true;
2850 goto do_size;
2851
2852 case R_X86_64_32:
2853 if (!ABI_64_P (abfd))
2854 goto pointer;
2855 /* Fall through. */
2856 case R_X86_64_8:
2857 case R_X86_64_16:
2858 case R_X86_64_32S:
2859 /* Check relocation overflow as these relocs may lead to
2860 run-time relocation overflow. Don't error out for
2861 sections we don't care about, such as debug sections or
2862 when relocation overflow check is disabled. */
2863 if (!htab->params->no_reloc_overflow_check
2864 && !converted_reloc
2865 && (bfd_link_pic (info)
2866 || (bfd_link_executable (info)
2867 && h != NULL
2868 && !h->def_regular
2869 && h->def_dynamic
2870 && (sec->flags & SEC_READONLY) == 0)))
2871 {
2872 elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2873 &x86_64_elf_howto_table[r_type]);
2874 goto error_return;
2875 }
2876 /* Fall through. */
2877
2878 case R_X86_64_PC8:
2879 case R_X86_64_PC16:
2880 case R_X86_64_PC32:
2881 case R_X86_64_PC64:
2882 case R_X86_64_64:
2883 pointer:
2884 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2885 eh->zero_undefweak |= 0x2;
2886 /* We are called after all symbols have been resolved. Only
2887 relocation against STT_GNU_IFUNC symbol must go through
2888 PLT. */
2889 if (h != NULL
2890 && (bfd_link_executable (info)
2891 || h->type == STT_GNU_IFUNC))
2892 {
2893 bool func_pointer_ref = false;
2894
2895 if (r_type == R_X86_64_PC32)
2896 {
2897 /* Since something like ".long foo - ." may be used
2898 as pointer, make sure that PLT is used if foo is
2899 a function defined in a shared library. */
2900 if ((sec->flags & SEC_CODE) == 0)
2901 {
2902 h->pointer_equality_needed = 1;
2903 if (bfd_link_pie (info)
2904 && h->type == STT_FUNC
2905 && !h->def_regular
2906 && h->def_dynamic)
2907 {
2908 h->needs_plt = 1;
2909 h->plt.refcount = 1;
2910 }
2911 }
2912 }
2913 else if (r_type != R_X86_64_PC64)
2914 {
2915 /* At run-time, R_X86_64_64 can be resolved for both
2916 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2917 can only be resolved for x32. Function pointer
2918 reference doesn't need PLT for pointer equality. */
2919 if ((sec->flags & SEC_READONLY) == 0
2920 && (r_type == R_X86_64_64
2921 || (!ABI_64_P (abfd)
2922 && (r_type == R_X86_64_32
2923 || r_type == R_X86_64_32S))))
2924 func_pointer_ref = true;
2925
2926 /* IFUNC symbol needs pointer equality in PDE so that
2927 function pointer reference will be resolved to its
2928 PLT entry directly. */
2929 if (!func_pointer_ref
2930 || (bfd_link_pde (info)
2931 && h->type == STT_GNU_IFUNC))
2932 h->pointer_equality_needed = 1;
2933 }
2934
2935 if (!func_pointer_ref)
2936 {
2937 /* If this reloc is in a read-only section, we might
2938 need a copy reloc. We can't check reliably at this
2939 stage whether the section is read-only, as input
2940 sections have not yet been mapped to output sections.
2941 Tentatively set the flag for now, and correct in
2942 adjust_dynamic_symbol. */
2943 h->non_got_ref = 1;
2944
2945 if (!elf_has_indirect_extern_access (sec->owner))
2946 eh->non_got_ref_without_indirect_extern_access = 1;
2947
2948 /* We may need a .plt entry if the symbol is a function
2949 defined in a shared lib or is a function referenced
2950 from the code or read-only section. */
2951 if (!h->def_regular
2952 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2953 h->plt.refcount = 1;
2954
2955 if (htab->elf.target_os != is_solaris
2956 && h->pointer_equality_needed
2957 && h->type == STT_FUNC
2958 && eh->def_protected
2959 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2960 && h->def_dynamic)
2961 {
2962 /* Disallow non-canonical reference to canonical
2963 protected function. */
2964 _bfd_error_handler
2965 /* xgettext:c-format */
2966 (_("%pB: non-canonical reference to canonical "
2967 "protected function `%s' in %pB"),
2968 abfd, h->root.root.string,
2969 h->root.u.def.section->owner);
2970 bfd_set_error (bfd_error_bad_value);
2971 goto error_return;
2972 }
2973 }
2974 }
2975
2976 size_reloc = false;
2977 do_size:
2978 if (!no_dynreloc
2979 && NEED_DYNAMIC_RELOCATION_P (true, info, true, h, sec,
2980 r_type,
2981 htab->pointer_r_type))
2982 {
2983 struct elf_dyn_relocs *p;
2984 struct elf_dyn_relocs **head;
2985
2986 /* If this is a global symbol, we count the number of
2987 relocations we need for this symbol. */
2988 if (h != NULL)
2989 head = &h->dyn_relocs;
2990 else
2991 {
2992 /* Track dynamic relocs needed for local syms too.
2993 We really need local syms available to do this
2994 easily. Oh well. */
2995 asection *s;
2996 void **vpp;
2997
2998 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2999 abfd, r_symndx);
3000 if (isym == NULL)
3001 goto error_return;
3002
3003 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3004 if (s == NULL)
3005 s = sec;
3006
3007 /* Beware of type punned pointers vs strict aliasing
3008 rules. */
3009 vpp = &(elf_section_data (s)->local_dynrel);
3010 head = (struct elf_dyn_relocs **)vpp;
3011 }
3012
3013 p = *head;
3014 if (p == NULL || p->sec != sec)
3015 {
3016 size_t amt = sizeof *p;
3017
3018 p = ((struct elf_dyn_relocs *)
3019 bfd_alloc (htab->elf.dynobj, amt));
3020 if (p == NULL)
3021 goto error_return;
3022 p->next = *head;
3023 *head = p;
3024 p->sec = sec;
3025 p->count = 0;
3026 p->pc_count = 0;
3027 }
3028
3029 p->count += 1;
3030 /* Count size relocation as PC-relative relocation. */
3031 if (X86_PCREL_TYPE_P (true, r_type) || size_reloc)
3032 p->pc_count += 1;
3033 }
3034 break;
3035
3036 case R_X86_64_CODE_5_GOTPC32_TLSDESC:
3037 case R_X86_64_CODE_6_GOTPC32_TLSDESC:
3038 {
3039 /* These relocations are added only for completeness and
3040 aren't used.  */
3041 if (h)
3042 name = h->root.root.string;
3043 else
3044 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
3045 NULL);
3046 _bfd_error_handler
3047 /* xgettext:c-format */
3048 (_("%pB: unsupported relocation %s against symbol `%s'"),
3049 abfd, x86_64_elf_howto_table[r_type].name, name);
3050 }
3051 break;
3052
3053 /* This relocation describes the C++ object vtable hierarchy.
3054 Reconstruct it for later use during GC. */
3055 case R_X86_64_GNU_VTINHERIT:
3056 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
3057 goto error_return;
3058 break;
3059
3060 /* This relocation describes which C++ vtable entries are actually
3061 used. Record for later use during GC. */
3062 case R_X86_64_GNU_VTENTRY:
3063 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
3064 goto error_return;
3065 break;
3066
3067 default:
3068 break;
3069 }
3070 }
3071
3072 if (elf_section_data (sec)->this_hdr.contents != contents)
3073 {
3074 if (!converted)
3075 _bfd_elf_munmap_section_contents (sec, contents);
3076 else
3077 {
3078 /* Cache the section contents for elf_link_input_bfd if any
3079 load is converted or --no-keep-memory isn't used. */
3080 elf_section_data (sec)->this_hdr.contents = contents;
3081 info->cache_size += sec->size;
3082 }
3083 }
3084
3085 /* Cache relocations if any load is converted. */
3086 if (elf_section_data (sec)->relocs != relocs && converted)
3087 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
3088
3089 return true;
3090
3091 error_return:
3092 if (elf_section_data (sec)->this_hdr.contents != contents)
3093 _bfd_elf_munmap_section_contents (sec, contents);
3094 sec->check_relocs_failed = 1;
3095 return false;
3096 }
3097
3098 static bool
3099 elf_x86_64_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
3100 {
3101 bfd *abfd;
3102
3103 /* Scan relocations after rel_from_abs has been set on __ehdr_start. */
3104 for (abfd = info->input_bfds;
3105 abfd != (bfd *) NULL;
3106 abfd = abfd->link.next)
3107 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
3108 && !_bfd_elf_link_iterate_on_relocs (abfd, info,
3109 elf_x86_64_scan_relocs))
3110 return false;
3111
3112 return _bfd_x86_elf_early_size_sections (output_bfd, info);
3113 }
3114
3115 /* Return the relocation value for @tpoff relocation
3116 if STT_TLS virtual address is ADDRESS. */
3117
3118 static bfd_vma
3119 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3120 {
3121 struct elf_link_hash_table *htab = elf_hash_table (info);
3122 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3123 bfd_vma static_tls_size;
3124
3125 /* If tls_segment is NULL, we should have signalled an error already. */
3126 if (htab->tls_sec == NULL)
3127 return 0;
3128
3129 /* Consider special static TLS alignment requirements. */
3130 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3131 return address - static_tls_size - htab->tls_sec->vma;
3132 }
3133
3134 /* Relocate an x86_64 ELF section. */
3135
3136 static int
3137 elf_x86_64_relocate_section (bfd *output_bfd,
3138 struct bfd_link_info *info,
3139 bfd *input_bfd,
3140 asection *input_section,
3141 bfd_byte *contents,
3142 Elf_Internal_Rela *relocs,
3143 Elf_Internal_Sym *local_syms,
3144 asection **local_sections)
3145 {
3146 struct elf_x86_link_hash_table *htab;
3147 Elf_Internal_Shdr *symtab_hdr;
3148 struct elf_link_hash_entry **sym_hashes;
3149 bfd_vma *local_got_offsets;
3150 bfd_vma *local_tlsdesc_gotents;
3151 Elf_Internal_Rela *rel;
3152 Elf_Internal_Rela *wrel;
3153 Elf_Internal_Rela *relend;
3154 unsigned int plt_entry_size;
3155 bool status;
3156
3157 /* Skip if check_relocs or scan_relocs failed. */
3158 if (input_section->check_relocs_failed)
3159 return false;
3160
3161 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3162 if (htab == NULL)
3163 return false;
3164
3165 if (!is_x86_elf (input_bfd, htab))
3166 {
3167 bfd_set_error (bfd_error_wrong_format);
3168 return false;
3169 }
3170
3171 plt_entry_size = htab->plt.plt_entry_size;
3172 symtab_hdr = &elf_symtab_hdr (input_bfd);
3173 sym_hashes = elf_sym_hashes (input_bfd);
3174 local_got_offsets = elf_local_got_offsets (input_bfd);
3175 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
3176
3177 _bfd_x86_elf_set_tls_module_base (info);
3178
3179 status = true;
3180 rel = wrel = relocs;
3181 relend = relocs + input_section->reloc_count;
3182 for (; rel < relend; wrel++, rel++)
3183 {
3184 unsigned int r_type, r_type_tls;
3185 reloc_howto_type *howto;
3186 unsigned long r_symndx;
3187 struct elf_link_hash_entry *h;
3188 struct elf_x86_link_hash_entry *eh;
3189 Elf_Internal_Sym *sym;
3190 asection *sec;
3191 bfd_vma off, offplt, plt_offset;
3192 bfd_vma relocation;
3193 bool unresolved_reloc;
3194 bfd_reloc_status_type r;
3195 int tls_type;
3196 asection *base_got, *resolved_plt;
3197 bfd_vma st_size;
3198 bool resolved_to_zero;
3199 bool relative_reloc;
3200 bool converted_reloc;
3201 bool need_copy_reloc_in_pie;
3202 bool no_copyreloc_p;
3203
3204 r_type = ELF32_R_TYPE (rel->r_info);
3205 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3206 || r_type == (int) R_X86_64_GNU_VTENTRY)
3207 {
3208 if (wrel != rel)
3209 *wrel = *rel;
3210 continue;
3211 }
3212
3213 r_symndx = htab->r_sym (rel->r_info);
3214 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
3215 if (converted_reloc)
3216 {
3217 r_type &= ~R_X86_64_converted_reloc_bit;
3218 rel->r_info = htab->r_info (r_symndx, r_type);
3219 }
3220
3221 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
3222 if (howto == NULL)
3223 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
3224
3225 h = NULL;
3226 sym = NULL;
3227 sec = NULL;
3228 unresolved_reloc = false;
3229 if (r_symndx < symtab_hdr->sh_info)
3230 {
3231 sym = local_syms + r_symndx;
3232 sec = local_sections[r_symndx];
3233
3234 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3235 &sec, rel);
3236 st_size = sym->st_size;
3237
3238 /* Relocate against local STT_GNU_IFUNC symbol. */
3239 if (!bfd_link_relocatable (info)
3240 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3241 {
3242 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
3243 rel, false);
3244 if (h == NULL)
3245 abort ();
3246
3247 /* Set STT_GNU_IFUNC symbol value. */
3248 h->root.u.def.value = sym->st_value;
3249 h->root.u.def.section = sec;
3250 }
3251 }
3252 else
3253 {
3254 bool warned ATTRIBUTE_UNUSED;
3255 bool ignored ATTRIBUTE_UNUSED;
3256
3257 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3258 r_symndx, symtab_hdr, sym_hashes,
3259 h, sec, relocation,
3260 unresolved_reloc, warned, ignored);
3261 st_size = h->size;
3262 }
3263
3264 if (sec != NULL && discarded_section (sec))
3265 {
3266 _bfd_clear_contents (howto, input_bfd, input_section,
3267 contents, rel->r_offset);
3268 wrel->r_offset = rel->r_offset;
3269 wrel->r_info = 0;
3270 wrel->r_addend = 0;
3271
3272 /* For ld -r, remove relocations in debug sections against
3273 sections defined in discarded sections, including sframe
3274 sections. Not done for eh_frame, whose editing code expects
3275 them to be present. NB: Since sframe code keeps R_X86_64_NONE
3276 reloc as is, its r_offset is wrong, we must not generate
3277 R_X86_64_NONE reloc in sframe section. */
3278 if (bfd_link_relocatable (info)
3279 && ((input_section->flags & SEC_DEBUGGING) != 0
3280 || elf_section_type (input_section) == SHT_GNU_SFRAME))
3281 wrel--;
3282
3283 continue;
3284 }
3285
3286 if (bfd_link_relocatable (info))
3287 {
3288 if (wrel != rel)
3289 *wrel = *rel;
3290 continue;
3291 }
3292
3293 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3294 {
3295 if (r_type == R_X86_64_64)
3296 {
3297 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3298 zero-extend it to 64bit if addend is zero. */
3299 r_type = R_X86_64_32;
3300 memset (contents + rel->r_offset + 4, 0, 4);
3301 }
3302 else if (r_type == R_X86_64_SIZE64)
3303 {
3304 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3305 zero-extend it to 64bit if addend is zero. */
3306 r_type = R_X86_64_SIZE32;
3307 memset (contents + rel->r_offset + 4, 0, 4);
3308 }
3309 }
3310
3311 eh = (struct elf_x86_link_hash_entry *) h;
3312
3313 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3314 it here if it is defined in a non-shared object. */
3315 if (h != NULL
3316 && h->type == STT_GNU_IFUNC
3317 && h->def_regular)
3318 {
3319 bfd_vma plt_index;
3320 const char *name;
3321
3322 if ((input_section->flags & SEC_ALLOC) == 0)
3323 {
3324 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
3325 STT_GNU_IFUNC symbol as STT_FUNC. */
3326 if (elf_section_type (input_section) == SHT_NOTE)
3327 goto skip_ifunc;
3328 /* Dynamic relocs are not propagated for SEC_DEBUGGING
3329 sections because such sections are not SEC_ALLOC and
3330 thus ld.so will not process them. */
3331 if ((input_section->flags & SEC_DEBUGGING) != 0)
3332 continue;
3333 abort ();
3334 }
3335
3336 switch (r_type)
3337 {
3338 default:
3339 break;
3340
3341 case R_X86_64_GOTPCREL:
3342 case R_X86_64_GOTPCRELX:
3343 case R_X86_64_REX_GOTPCRELX:
3344 case R_X86_64_CODE_4_GOTPCRELX:
3345 case R_X86_64_CODE_5_GOTPCRELX:
3346 case R_X86_64_CODE_6_GOTPCRELX:
3347 case R_X86_64_GOTPCREL64:
3348 base_got = htab->elf.sgot;
3349 off = h->got.offset;
3350
3351 if (base_got == NULL)
3352 abort ();
3353
3354 if (off == (bfd_vma) -1)
3355 {
3356 /* We can't use h->got.offset here to save state, or
3357 even just remember the offset, as finish_dynamic_symbol
3358 would use that as offset into .got. */
3359
3360 if (h->plt.offset == (bfd_vma) -1)
3361 abort ();
3362
3363 if (htab->elf.splt != NULL)
3364 {
3365 plt_index = (h->plt.offset / plt_entry_size
3366 - htab->plt.has_plt0);
3367 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3368 base_got = htab->elf.sgotplt;
3369 }
3370 else
3371 {
3372 plt_index = h->plt.offset / plt_entry_size;
3373 off = plt_index * GOT_ENTRY_SIZE;
3374 base_got = htab->elf.igotplt;
3375 }
3376
3377 if (h->dynindx == -1
3378 || h->forced_local
3379 || info->symbolic)
3380 {
3381 /* This references the local definition. We must
3382 initialize this entry in the global offset table.
3383 Since the offset must always be a multiple of 8,
3384 we use the least significant bit to record
3385 whether we have initialized it already.
3386
3387 When doing a dynamic link, we create a .rela.got
3388 relocation entry to initialize the value. This
3389 is done in the finish_dynamic_symbol routine. */
3390 if ((off & 1) != 0)
3391 off &= ~1;
3392 else
3393 {
3394 bfd_put_64 (output_bfd, relocation,
3395 base_got->contents + off);
3396 /* Note that this is harmless for the GOTPLT64
3397 case, as -1 | 1 still is -1. */
3398 h->got.offset |= 1;
3399 }
3400 }
3401 }
3402
3403 relocation = (base_got->output_section->vma
3404 + base_got->output_offset + off);
3405
3406 goto do_relocation;
3407 }
3408
3409 if (h->plt.offset == (bfd_vma) -1)
3410 {
3411 /* Handle static pointers of STT_GNU_IFUNC symbols. */
3412 if (r_type == htab->pointer_r_type
3413 && (input_section->flags & SEC_CODE) == 0)
3414 goto do_ifunc_pointer;
3415 goto bad_ifunc_reloc;
3416 }
3417
3418 /* STT_GNU_IFUNC symbol must go through PLT. */
3419 if (htab->elf.splt != NULL)
3420 {
3421 if (htab->plt_second != NULL)
3422 {
3423 resolved_plt = htab->plt_second;
3424 plt_offset = eh->plt_second.offset;
3425 }
3426 else
3427 {
3428 resolved_plt = htab->elf.splt;
3429 plt_offset = h->plt.offset;
3430 }
3431 }
3432 else
3433 {
3434 resolved_plt = htab->elf.iplt;
3435 plt_offset = h->plt.offset;
3436 }
3437
3438 relocation = (resolved_plt->output_section->vma
3439 + resolved_plt->output_offset + plt_offset);
3440
3441 switch (r_type)
3442 {
3443 default:
3444 bad_ifunc_reloc:
3445 if (h->root.root.string)
3446 name = h->root.root.string;
3447 else
3448 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3449 NULL);
3450 _bfd_error_handler
3451 /* xgettext:c-format */
3452 (_("%pB: relocation %s against STT_GNU_IFUNC "
3453 "symbol `%s' isn't supported"), input_bfd,
3454 howto->name, name);
3455 bfd_set_error (bfd_error_bad_value);
3456 return false;
3457
3458 case R_X86_64_32S:
3459 if (bfd_link_pic (info))
3460 abort ();
3461 goto do_relocation;
3462
3463 case R_X86_64_32:
3464 if (ABI_64_P (output_bfd))
3465 goto do_relocation;
3466 /* FALLTHROUGH */
3467 case R_X86_64_64:
3468 do_ifunc_pointer:
3469 if (rel->r_addend != 0)
3470 {
3471 if (h->root.root.string)
3472 name = h->root.root.string;
3473 else
3474 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3475 sym, NULL);
3476 _bfd_error_handler
3477 /* xgettext:c-format */
3478 (_("%pB: relocation %s against STT_GNU_IFUNC "
3479 "symbol `%s' has non-zero addend: %" PRId64),
3480 input_bfd, howto->name, name, (int64_t) rel->r_addend);
3481 bfd_set_error (bfd_error_bad_value);
3482 return false;
3483 }
3484
3485 /* Generate dynamic relocation only when there is a
3486 non-GOT reference in a shared object or there is no
3487 PLT. */
3488 if ((bfd_link_pic (info) && h->non_got_ref)
3489 || h->plt.offset == (bfd_vma) -1)
3490 {
3491 Elf_Internal_Rela outrel;
3492 asection *sreloc;
3493
3494 /* Need a dynamic relocation to get the real function
3495 address. */
3496 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3497 info,
3498 input_section,
3499 rel->r_offset);
3500 if (outrel.r_offset == (bfd_vma) -1
3501 || outrel.r_offset == (bfd_vma) -2)
3502 abort ();
3503
3504 outrel.r_offset += (input_section->output_section->vma
3505 + input_section->output_offset);
3506
3507 if (POINTER_LOCAL_IFUNC_P (info, h))
3508 {
3509 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
3510 h->root.root.string,
3511 h->root.u.def.section->owner);
3512
3513 /* This symbol is resolved locally. */
3514 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3515 outrel.r_addend = (h->root.u.def.value
3516 + h->root.u.def.section->output_section->vma
3517 + h->root.u.def.section->output_offset);
3518
3519 if (htab->params->report_relative_reloc)
3520 _bfd_x86_elf_link_report_relative_reloc
3521 (info, input_section, h, sym,
3522 "R_X86_64_IRELATIVE", &outrel);
3523 }
3524 else
3525 {
3526 outrel.r_info = htab->r_info (h->dynindx, r_type);
3527 outrel.r_addend = 0;
3528 }
3529
3530 /* Dynamic relocations are stored in
3531 1. .rela.ifunc section in PIC object.
3532 2. .rela.got section in dynamic executable.
3533 3. .rela.iplt section in static executable. */
3534 if (bfd_link_pic (info))
3535 sreloc = htab->elf.irelifunc;
3536 else if (htab->elf.splt != NULL)
3537 sreloc = htab->elf.srelgot;
3538 else
3539 sreloc = htab->elf.irelplt;
3540 elf_append_rela (output_bfd, sreloc, &outrel);
3541
3542 /* If this reloc is against an external symbol, we
3543 do not want to fiddle with the addend. Otherwise,
3544 we need to include the symbol value so that it
3545 becomes an addend for the dynamic reloc. For an
3546 internal symbol, we have updated addend. */
3547 continue;
3548 }
3549 /* FALLTHROUGH */
3550 case R_X86_64_PC32:
3551 case R_X86_64_PC64:
3552 case R_X86_64_PLT32:
3553 goto do_relocation;
3554 }
3555 }
3556
3557 skip_ifunc:
3558 resolved_to_zero = (eh != NULL
3559 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
3560
3561 /* When generating a shared object, the relocations handled here are
3562 copied into the output file to be resolved at run time. */
3563 switch (r_type)
3564 {
3565 case R_X86_64_GOT32:
3566 case R_X86_64_GOT64:
3567 /* Relocation is to the entry for this symbol in the global
3568 offset table. */
3569 case R_X86_64_GOTPCREL:
3570 case R_X86_64_GOTPCRELX:
3571 case R_X86_64_REX_GOTPCRELX:
3572 case R_X86_64_CODE_4_GOTPCRELX:
3573 case R_X86_64_CODE_5_GOTPCRELX:
3574 case R_X86_64_CODE_6_GOTPCRELX:
3575 case R_X86_64_GOTPCREL64:
3576 /* Use global offset table entry as symbol value. */
3577 case R_X86_64_GOTPLT64:
3578 /* This is obsolete and treated the same as GOT64. */
3579 base_got = htab->elf.sgot;
3580
3581 if (htab->elf.sgot == NULL)
3582 abort ();
3583
3584 relative_reloc = false;
3585 if (h != NULL)
3586 {
3587 off = h->got.offset;
3588 if (h->needs_plt
3589 && h->plt.offset != (bfd_vma)-1
3590 && off == (bfd_vma)-1)
3591 {
3592 /* We can't use h->got.offset here to save
3593 state, or even just remember the offset, as
3594 finish_dynamic_symbol would use that as offset into
3595 .got. */
3596 bfd_vma plt_index = (h->plt.offset / plt_entry_size
3597 - htab->plt.has_plt0);
3598 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3599 base_got = htab->elf.sgotplt;
3600 }
3601
3602 if (RESOLVED_LOCALLY_P (info, h, htab))
3603 {
3604 /* We must initialize this entry in the global offset
3605 table. Since the offset must always be a multiple
3606 of 8, we use the least significant bit to record
3607 whether we have initialized it already.
3608
3609 When doing a dynamic link, we create a .rela.got
3610 relocation entry to initialize the value. This is
3611 done in the finish_dynamic_symbol routine. */
3612 if ((off & 1) != 0)
3613 off &= ~1;
3614 else
3615 {
3616 bfd_put_64 (output_bfd, relocation,
3617 base_got->contents + off);
3618 /* Note that this is harmless for the GOTPLT64 case,
3619 as -1 | 1 still is -1. */
3620 h->got.offset |= 1;
3621
3622 /* NB: Don't generate relative relocation here if
3623 it has been generated by DT_RELR. */
3624 if (!info->enable_dt_relr
3625 && GENERATE_RELATIVE_RELOC_P (info, h))
3626 {
3627 /* If this symbol isn't dynamic in PIC,
3628 generate R_X86_64_RELATIVE here. */
3629 eh->no_finish_dynamic_symbol = 1;
3630 relative_reloc = true;
3631 }
3632 }
3633 }
3634 else
3635 unresolved_reloc = false;
3636 }
3637 else
3638 {
3639 if (local_got_offsets == NULL)
3640 abort ();
3641
3642 off = local_got_offsets[r_symndx];
3643
3644 /* The offset must always be a multiple of 8. We use
3645 the least significant bit to record whether we have
3646 already generated the necessary reloc. */
3647 if ((off & 1) != 0)
3648 off &= ~1;
3649 else
3650 {
3651 bfd_put_64 (output_bfd, relocation,
3652 base_got->contents + off);
3653 local_got_offsets[r_symndx] |= 1;
3654
3655 /* NB: GOTPCREL relocations against local absolute
3656 symbol store relocation value in the GOT slot
3657 without relative relocation. Don't generate
3658 relative relocation here if it has been generated
3659 by DT_RELR. */
3660 if (!info->enable_dt_relr
3661 && bfd_link_pic (info)
3662 && !(sym->st_shndx == SHN_ABS
3663 && (r_type == R_X86_64_GOTPCREL
3664 || r_type == R_X86_64_GOTPCRELX
3665 || r_type == R_X86_64_REX_GOTPCRELX
3666 || r_type == R_X86_64_CODE_4_GOTPCRELX
3667 || r_type == R_X86_64_CODE_5_GOTPCRELX
3668 || r_type == R_X86_64_CODE_6_GOTPCRELX)))
3669 relative_reloc = true;
3670 }
3671 }
3672
3673 if (relative_reloc)
3674 {
3675 asection *s;
3676 Elf_Internal_Rela outrel;
3677
3678 /* We need to generate a R_X86_64_RELATIVE reloc
3679 for the dynamic linker. */
3680 s = htab->elf.srelgot;
3681 if (s == NULL)
3682 abort ();
3683
3684 outrel.r_offset = (base_got->output_section->vma
3685 + base_got->output_offset
3686 + off);
3687 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3688 outrel.r_addend = relocation;
3689
3690 if (htab->params->report_relative_reloc)
3691 _bfd_x86_elf_link_report_relative_reloc
3692 (info, input_section, h, sym, "R_X86_64_RELATIVE",
3693 &outrel);
3694
3695 elf_append_rela (output_bfd, s, &outrel);
3696 }
3697
3698 if (off >= (bfd_vma) -2)
3699 abort ();
3700
3701 relocation = base_got->output_section->vma
3702 + base_got->output_offset + off;
3703 if (r_type != R_X86_64_GOTPCREL
3704 && r_type != R_X86_64_GOTPCRELX
3705 && r_type != R_X86_64_REX_GOTPCRELX
3706 && r_type != R_X86_64_CODE_4_GOTPCRELX
3707 && r_type != R_X86_64_CODE_5_GOTPCRELX
3708 && r_type != R_X86_64_CODE_6_GOTPCRELX
3709 && r_type != R_X86_64_GOTPCREL64)
3710 relocation -= htab->elf.sgotplt->output_section->vma
3711 - htab->elf.sgotplt->output_offset;
3712
3713 break;
3714
3715 case R_X86_64_GOTOFF64:
3716 /* Relocation is relative to the start of the global offset
3717 table. */
3718
3719 /* Check to make sure it isn't a protected function or data
3720 symbol for shared library since it may not be local when
3721 used as function address or with copy relocation. We also
3722 need to make sure that a symbol is referenced locally. */
3723 if (bfd_link_pic (info) && h)
3724 {
3725 if (!h->def_regular)
3726 {
3727 const char *v;
3728
3729 switch (ELF_ST_VISIBILITY (h->other))
3730 {
3731 case STV_HIDDEN:
3732 v = _("hidden symbol");
3733 break;
3734 case STV_INTERNAL:
3735 v = _("internal symbol");
3736 break;
3737 case STV_PROTECTED:
3738 v = _("protected symbol");
3739 break;
3740 default:
3741 v = _("symbol");
3742 break;
3743 }
3744
3745 _bfd_error_handler
3746 /* xgettext:c-format */
3747 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3748 " `%s' can not be used when making a shared object"),
3749 input_bfd, v, h->root.root.string);
3750 bfd_set_error (bfd_error_bad_value);
3751 return false;
3752 }
3753 else if (!bfd_link_executable (info)
3754 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3755 && (h->type == STT_FUNC
3756 || h->type == STT_OBJECT)
3757 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3758 {
3759 _bfd_error_handler
3760 /* xgettext:c-format */
3761 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3762 " `%s' can not be used when making a shared object"),
3763 input_bfd,
3764 h->type == STT_FUNC ? "function" : "data",
3765 h->root.root.string);
3766 bfd_set_error (bfd_error_bad_value);
3767 return false;
3768 }
3769 }
3770
3771 /* Note that sgot is not involved in this
3772 calculation. We always want the start of .got.plt. If we
3773 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3774 permitted by the ABI, we might have to change this
3775 calculation. */
3776 relocation -= htab->elf.sgotplt->output_section->vma
3777 + htab->elf.sgotplt->output_offset;
3778 break;
3779
3780 case R_X86_64_GOTPC32:
3781 case R_X86_64_GOTPC64:
3782 /* Use global offset table as symbol value. */
3783 relocation = htab->elf.sgotplt->output_section->vma
3784 + htab->elf.sgotplt->output_offset;
3785 unresolved_reloc = false;
3786 break;
3787
3788 case R_X86_64_PLTOFF64:
3789 /* Relocation is PLT entry relative to GOT. For local
3790 symbols it's the symbol itself relative to GOT. */
3791 if (h != NULL
3792 /* See PLT32 handling. */
3793 && (h->plt.offset != (bfd_vma) -1
3794 || eh->plt_got.offset != (bfd_vma) -1)
3795 && htab->elf.splt != NULL)
3796 {
3797 if (eh->plt_got.offset != (bfd_vma) -1)
3798 {
3799 /* Use the GOT PLT. */
3800 resolved_plt = htab->plt_got;
3801 plt_offset = eh->plt_got.offset;
3802 }
3803 else if (htab->plt_second != NULL)
3804 {
3805 resolved_plt = htab->plt_second;
3806 plt_offset = eh->plt_second.offset;
3807 }
3808 else
3809 {
3810 resolved_plt = htab->elf.splt;
3811 plt_offset = h->plt.offset;
3812 }
3813
3814 relocation = (resolved_plt->output_section->vma
3815 + resolved_plt->output_offset
3816 + plt_offset);
3817 unresolved_reloc = false;
3818 }
3819
3820 relocation -= htab->elf.sgotplt->output_section->vma
3821 + htab->elf.sgotplt->output_offset;
3822 break;
3823
3824 case R_X86_64_PLT32:
3825 /* Relocation is to the entry for this symbol in the
3826 procedure linkage table. */
3827
3828 /* Resolve a PLT32 reloc against a local symbol directly,
3829 without using the procedure linkage table. */
3830 if (h == NULL)
3831 break;
3832
3833 if ((h->plt.offset == (bfd_vma) -1
3834 && eh->plt_got.offset == (bfd_vma) -1)
3835 || htab->elf.splt == NULL)
3836 {
3837 /* We didn't make a PLT entry for this symbol. This
3838 happens when statically linking PIC code, or when
3839 using -Bsymbolic. */
3840 break;
3841 }
3842
3843 use_plt:
3844 if (h->plt.offset != (bfd_vma) -1)
3845 {
3846 if (htab->plt_second != NULL)
3847 {
3848 resolved_plt = htab->plt_second;
3849 plt_offset = eh->plt_second.offset;
3850 }
3851 else
3852 {
3853 resolved_plt = htab->elf.splt;
3854 plt_offset = h->plt.offset;
3855 }
3856 }
3857 else
3858 {
3859 /* Use the GOT PLT. */
3860 resolved_plt = htab->plt_got;
3861 plt_offset = eh->plt_got.offset;
3862 }
3863
3864 relocation = (resolved_plt->output_section->vma
3865 + resolved_plt->output_offset
3866 + plt_offset);
3867 unresolved_reloc = false;
3868 break;
3869
3870 case R_X86_64_SIZE32:
3871 case R_X86_64_SIZE64:
3872 /* Set to symbol size. */
3873 relocation = st_size;
3874 goto direct;
3875
3876 case R_X86_64_PC8:
3877 case R_X86_64_PC16:
3878 case R_X86_64_PC32:
3879 /* Don't complain about -fPIC if the symbol is undefined when
3880 building executable unless it is unresolved weak symbol,
3881 references a dynamic definition in PIE or -z nocopyreloc
3882 is used. */
3883 no_copyreloc_p
3884 = (info->nocopyreloc
3885 || (h != NULL
3886 && !h->root.linker_def
3887 && !h->root.ldscript_def
3888 && eh->def_protected));
3889
3890 if ((input_section->flags & SEC_ALLOC) != 0
3891 && (input_section->flags & SEC_READONLY) != 0
3892 && h != NULL
3893 && ((bfd_link_executable (info)
3894 && ((h->root.type == bfd_link_hash_undefweak
3895 && (eh == NULL
3896 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3897 eh)))
3898 || (bfd_link_pie (info)
3899 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3900 && h->def_dynamic)
3901 || (no_copyreloc_p
3902 && h->def_dynamic
3903 && !(h->root.u.def.section->flags & SEC_CODE))))
3904 || (bfd_link_pie (info)
3905 && h->root.type == bfd_link_hash_undefweak)
3906 || bfd_link_dll (info)))
3907 {
3908 bool fail = false;
3909 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3910 {
3911 /* Symbol is referenced locally. Make sure it is
3912 defined locally. */
3913 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3914 }
3915 else if (bfd_link_pie (info))
3916 {
3917 /* We can only use PC-relative relocations in PIE
3918 from non-code sections. */
3919 if (h->root.type == bfd_link_hash_undefweak
3920 || (h->type == STT_FUNC
3921 && (sec->flags & SEC_CODE) != 0))
3922 fail = true;
3923 }
3924 else if (no_copyreloc_p || bfd_link_dll (info))
3925 {
3926 /* Symbol doesn't need copy reloc and isn't
3927 referenced locally. Don't allow PC-relative
3928 relocations against default and protected
3929 symbols since address of protected function
3930 and location of protected data may not be in
3931 the shared object. */
3932 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3933 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3934 }
3935
3936 if (fail)
3937 return elf_x86_64_need_pic (info, input_bfd, input_section,
3938 h, NULL, NULL, howto);
3939 }
3940 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3941 as function address. */
3942 else if (h != NULL
3943 && (input_section->flags & SEC_CODE) == 0
3944 && bfd_link_pie (info)
3945 && h->type == STT_FUNC
3946 && !h->def_regular
3947 && h->def_dynamic)
3948 goto use_plt;
3949 /* Fall through. */
3950
3951 case R_X86_64_8:
3952 case R_X86_64_16:
3953 case R_X86_64_32:
3954 case R_X86_64_PC64:
3955 case R_X86_64_64:
3956 /* FIXME: The ABI says the linker should make sure the value is
3957 the same when it's zero-extended to 64 bits. */
3958
3959 direct:
3960 if ((input_section->flags & SEC_ALLOC) == 0)
3961 break;
3962
3963 need_copy_reloc_in_pie = (bfd_link_pie (info)
3964 && h != NULL
3965 && (h->needs_copy
3966 || eh->needs_copy
3967 || (h->root.type
3968 == bfd_link_hash_undefined))
3969 && (X86_PCREL_TYPE_P (true, r_type)
3970 || X86_SIZE_TYPE_P (true,
3971 r_type)));
3972
3973 if (GENERATE_DYNAMIC_RELOCATION_P (true, info, eh, r_type, sec,
3974 need_copy_reloc_in_pie,
3975 resolved_to_zero, false))
3976 {
3977 Elf_Internal_Rela outrel;
3978 bool skip, relocate;
3979 bool generate_dynamic_reloc = true;
3980 asection *sreloc;
3981 const char *relative_reloc_name = NULL;
3982
3983 /* When generating a shared object, these relocations
3984 are copied into the output file to be resolved at run
3985 time. */
3986 skip = false;
3987 relocate = false;
3988
3989 outrel.r_offset =
3990 _bfd_elf_section_offset (output_bfd, info, input_section,
3991 rel->r_offset);
3992 if (outrel.r_offset == (bfd_vma) -1)
3993 skip = true;
3994 else if (outrel.r_offset == (bfd_vma) -2)
3995 skip = true, relocate = true;
3996
3997 outrel.r_offset += (input_section->output_section->vma
3998 + input_section->output_offset);
3999
4000 if (skip)
4001 memset (&outrel, 0, sizeof outrel);
4002
4003 else if (COPY_INPUT_RELOC_P (true, info, h, r_type))
4004 {
4005 outrel.r_info = htab->r_info (h->dynindx, r_type);
4006 outrel.r_addend = rel->r_addend;
4007 }
4008 else
4009 {
4010 /* This symbol is local, or marked to become local.
4011 When relocation overflow check is disabled, we
4012 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4013 if (r_type == htab->pointer_r_type
4014 || (r_type == R_X86_64_32
4015 && htab->params->no_reloc_overflow_check))
4016 {
4017 relocate = true;
4018 /* NB: Don't generate relative relocation here if
4019 it has been generated by DT_RELR. */
4020 if (info->enable_dt_relr)
4021 generate_dynamic_reloc = false;
4022 else
4023 {
4024 outrel.r_info =
4025 htab->r_info (0, R_X86_64_RELATIVE);
4026 outrel.r_addend = relocation + rel->r_addend;
4027 relative_reloc_name = "R_X86_64_RELATIVE";
4028 }
4029 }
4030 else if (r_type == R_X86_64_64
4031 && !ABI_64_P (output_bfd))
4032 {
4033 relocate = true;
4034 outrel.r_info = htab->r_info (0,
4035 R_X86_64_RELATIVE64);
4036 outrel.r_addend = relocation + rel->r_addend;
4037 relative_reloc_name = "R_X86_64_RELATIVE64";
4038 /* Check addend overflow. */
4039 if ((outrel.r_addend & 0x80000000)
4040 != (rel->r_addend & 0x80000000))
4041 {
4042 const char *name;
4043 int addend = rel->r_addend;
4044 if (h && h->root.root.string)
4045 name = h->root.root.string;
4046 else
4047 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4048 sym, NULL);
4049 _bfd_error_handler
4050 /* xgettext:c-format */
4051 (_("%pB: addend %s%#x in relocation %s against "
4052 "symbol `%s' at %#" PRIx64
4053 " in section `%pA' is out of range"),
4054 input_bfd, addend < 0 ? "-" : "", addend,
4055 howto->name, name, (uint64_t) rel->r_offset,
4056 input_section);
4057 bfd_set_error (bfd_error_bad_value);
4058 return false;
4059 }
4060 }
4061 else
4062 {
4063 long sindx;
4064
4065 if (bfd_is_abs_section (sec))
4066 sindx = 0;
4067 else if (sec == NULL || sec->owner == NULL)
4068 {
4069 bfd_set_error (bfd_error_bad_value);
4070 return false;
4071 }
4072 else
4073 {
4074 asection *osec;
4075
4076 /* We are turning this relocation into one
4077 against a section symbol. It would be
4078 proper to subtract the symbol's value,
4079 osec->vma, from the emitted reloc addend,
4080 but ld.so expects buggy relocs. */
4081 osec = sec->output_section;
4082 sindx = elf_section_data (osec)->dynindx;
4083 if (sindx == 0)
4084 {
4085 asection *oi = htab->elf.text_index_section;
4086 sindx = elf_section_data (oi)->dynindx;
4087 }
4088 BFD_ASSERT (sindx != 0);
4089 }
4090
4091 outrel.r_info = htab->r_info (sindx, r_type);
4092 outrel.r_addend = relocation + rel->r_addend;
4093 }
4094 }
4095
4096 if (generate_dynamic_reloc)
4097 {
4098 sreloc = elf_section_data (input_section)->sreloc;
4099
4100 if (sreloc == NULL || sreloc->contents == NULL)
4101 {
4102 r = bfd_reloc_notsupported;
4103 goto check_relocation_error;
4104 }
4105
4106 if (relative_reloc_name
4107 && htab->params->report_relative_reloc)
4108 _bfd_x86_elf_link_report_relative_reloc
4109 (info, input_section, h, sym,
4110 relative_reloc_name, &outrel);
4111
4112 elf_append_rela (output_bfd, sreloc, &outrel);
4113 }
4114
4115 /* If this reloc is against an external symbol, we do
4116 not want to fiddle with the addend. Otherwise, we
4117 need to include the symbol value so that it becomes
4118 an addend for the dynamic reloc. */
4119 if (! relocate)
4120 continue;
4121 }
4122
4123 break;
4124
4125 case R_X86_64_TLSGD:
4126 case R_X86_64_GOTPC32_TLSDESC:
4127 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
4128 case R_X86_64_TLSDESC_CALL:
4129 case R_X86_64_GOTTPOFF:
4130 case R_X86_64_CODE_4_GOTTPOFF:
4131 case R_X86_64_CODE_5_GOTTPOFF:
4132 case R_X86_64_CODE_6_GOTTPOFF:
4133 tls_type = GOT_UNKNOWN;
4134 if (h == NULL && local_got_offsets)
4135 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
4136 else if (h != NULL)
4137 tls_type = elf_x86_hash_entry (h)->tls_type;
4138
4139 r_type_tls = r_type;
4140 if (! elf_x86_64_tls_transition (info, input_bfd,
4141 input_section, contents,
4142 symtab_hdr, sym_hashes,
4143 &r_type_tls, tls_type, rel,
4144 relend, h, sym, true))
4145 return false;
4146
4147 if (r_type_tls == R_X86_64_TPOFF32)
4148 {
4149 bfd_vma roff = rel->r_offset;
4150
4151 if (roff >= input_section->size)
4152 goto corrupt_input;
4153
4154 BFD_ASSERT (! unresolved_reloc);
4155
4156 if (r_type == R_X86_64_TLSGD)
4157 {
4158 /* GD->LE transition. For 64bit, change
4159 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4160 .word 0x6666; rex64; call __tls_get_addr@PLT
4161 or
4162 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4163 .byte 0x66; rex64
4164 call *__tls_get_addr@GOTPCREL(%rip)
4165 which may be converted to
4166 addr32 call __tls_get_addr
4167 into:
4168 movq %fs:0, %rax
4169 leaq foo@tpoff(%rax), %rax
4170 For 32bit, change
4171 leaq foo@tlsgd(%rip), %rdi
4172 .word 0x6666; rex64; call __tls_get_addr@PLT
4173 or
4174 leaq foo@tlsgd(%rip), %rdi
4175 .byte 0x66; rex64
4176 call *__tls_get_addr@GOTPCREL(%rip)
4177 which may be converted to
4178 addr32 call __tls_get_addr
4179 into:
4180 movl %fs:0, %eax
4181 leaq foo@tpoff(%rax), %rax
4182 For largepic, change:
4183 leaq foo@tlsgd(%rip), %rdi
4184 movabsq $__tls_get_addr@pltoff, %rax
4185 addq %r15, %rax
4186 call *%rax
4187 into:
4188 movq %fs:0, %rax
4189 leaq foo@tpoff(%rax), %rax
4190 nopw 0x0(%rax,%rax,1) */
4191 int largepic = 0;
4192 if (ABI_64_P (output_bfd))
4193 {
4194 if (roff + 5 >= input_section->size)
4195 goto corrupt_input;
4196 if (contents[roff + 5] == 0xb8)
4197 {
4198 if (roff < 3
4199 || (roff - 3 + 22) > input_section->size)
4200 {
4201 corrupt_input:
4202 info->callbacks->fatal
4203 (_("%P: corrupt input: %pB\n"),
4204 input_bfd);
4205 return false;
4206 }
4207 memcpy (contents + roff - 3,
4208 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4209 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4210 largepic = 1;
4211 }
4212 else
4213 {
4214 if (roff < 4
4215 || (roff - 4 + 16) > input_section->size)
4216 goto corrupt_input;
4217 memcpy (contents + roff - 4,
4218 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4219 16);
4220 }
4221 }
4222 else
4223 {
4224 if (roff < 3
4225 || (roff - 3 + 15) > input_section->size)
4226 goto corrupt_input;
4227 memcpy (contents + roff - 3,
4228 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4229 15);
4230 }
4231
4232 if (roff + 8 + largepic >= input_section->size)
4233 goto corrupt_input;
4234
4235 bfd_put_32 (output_bfd,
4236 elf_x86_64_tpoff (info, relocation),
4237 contents + roff + 8 + largepic);
4238 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
4239 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
4240 rel++;
4241 wrel++;
4242 continue;
4243 }
4244 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
4245 {
4246 /* GDesc -> LE transition.
4247 It's originally something like:
4248 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
4249 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
4250
4251 Change it to:
4252 movq $x@tpoff, %rax <--- LP64 mode.
4253 rex movl $x@tpoff, %eax <--- X32 mode.
4254 */
4255
4256 unsigned int val, type;
4257
4258 if (roff < 3)
4259 goto corrupt_input;
4260 type = bfd_get_8 (input_bfd, contents + roff - 3);
4261 val = bfd_get_8 (input_bfd, contents + roff - 1);
4262 bfd_put_8 (output_bfd,
4263 (type & 0x48) | ((type >> 2) & 1),
4264 contents + roff - 3);
4265 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4266 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4267 contents + roff - 1);
4268 bfd_put_32 (output_bfd,
4269 elf_x86_64_tpoff (info, relocation),
4270 contents + roff);
4271 continue;
4272 }
4273 else if (r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC)
4274 {
4275 /* GDesc -> LE transition.
4276 It's originally something like:
4277 lea x@tlsdesc(%rip), %reg
4278
4279 Change it to:
4280 mov $x@tpoff, %reg
4281 where reg is one of r16 to r31. */
4282
4283 unsigned int val, rex2;
4284 unsigned int rex2_mask = REX_R | REX_R << 4;
4285
4286 if (roff < 4)
4287 goto corrupt_input;
4288 rex2 = bfd_get_8 (input_bfd, contents + roff - 3);
4289 val = bfd_get_8 (input_bfd, contents + roff - 1);
4290 /* Move the R bits to the B bits in REX2 payload
4291 byte. */
4292 bfd_put_8 (output_bfd,
4293 ((rex2 & ~rex2_mask)
4294 | (rex2 & rex2_mask) >> 2),
4295 contents + roff - 3);
4296 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4297 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4298 contents + roff - 1);
4299 bfd_put_32 (output_bfd,
4300 elf_x86_64_tpoff (info, relocation),
4301 contents + roff);
4302 continue;
4303 }
4304 else if (r_type == R_X86_64_TLSDESC_CALL)
4305 {
4306 /* GDesc -> LE transition.
4307 It's originally:
4308 call *(%rax) <--- LP64 mode.
4309 call *(%eax) <--- X32 mode.
4310 Turn it into:
4311 xchg %ax,%ax <-- LP64 mode.
4312 nopl (%rax) <-- X32 mode.
4313 */
4314 unsigned int prefix = 0;
4315 if (!ABI_64_P (input_bfd))
4316 {
4317 /* Check for call *x@tlscall(%eax). */
4318 if (contents[roff] == 0x67)
4319 prefix = 1;
4320 }
4321 if (prefix)
4322 {
4323 if (roff + 2 >= input_section->size)
4324 goto corrupt_input;
4325
4326 bfd_put_8 (output_bfd, 0x0f, contents + roff);
4327 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
4328 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
4329 }
4330 else
4331 {
4332 if (roff + 1 >= input_section->size)
4333 goto corrupt_input;
4334
4335 bfd_put_8 (output_bfd, 0x66, contents + roff);
4336 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4337 }
4338 continue;
4339 }
4340 else if (r_type == R_X86_64_GOTTPOFF)
4341 {
4342 /* IE->LE transition:
4343 For 64bit, originally it can be one of:
4344 movq foo@gottpoff(%rip), %reg
4345 addq foo@gottpoff(%rip), %reg
4346 We change it into:
4347 movq $foo, %reg
4348 leaq foo(%reg), %reg
4349 addq $foo, %reg.
4350 For 32bit, originally it can be one of:
4351 movq foo@gottpoff(%rip), %reg
4352 addl foo@gottpoff(%rip), %reg
4353 We change it into:
4354 movq $foo, %reg
4355 leal foo(%reg), %reg
4356 addl $foo, %reg. */
4357
4358 unsigned int val, type, reg;
4359
4360 if (roff >= 3)
4361 val = bfd_get_8 (input_bfd, contents + roff - 3);
4362 else
4363 {
4364 if (roff < 2)
4365 goto corrupt_input;
4366 val = 0;
4367 }
4368 type = bfd_get_8 (input_bfd, contents + roff - 2);
4369 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4370 reg >>= 3;
4371 if (type == 0x8b)
4372 {
4373 /* movq */
4374 if (val == 0x4c)
4375 {
4376 if (roff < 3)
4377 goto corrupt_input;
4378 bfd_put_8 (output_bfd, 0x49,
4379 contents + roff - 3);
4380 }
4381 else if (!ABI_64_P (output_bfd) && val == 0x44)
4382 {
4383 if (roff < 3)
4384 goto corrupt_input;
4385 bfd_put_8 (output_bfd, 0x41,
4386 contents + roff - 3);
4387 }
4388 bfd_put_8 (output_bfd, 0xc7,
4389 contents + roff - 2);
4390 bfd_put_8 (output_bfd, 0xc0 | reg,
4391 contents + roff - 1);
4392 }
4393 else if (reg == 4)
4394 {
4395 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4396 is special */
4397 if (val == 0x4c)
4398 {
4399 if (roff < 3)
4400 goto corrupt_input;
4401 bfd_put_8 (output_bfd, 0x49,
4402 contents + roff - 3);
4403 }
4404 else if (!ABI_64_P (output_bfd) && val == 0x44)
4405 {
4406 if (roff < 3)
4407 goto corrupt_input;
4408 bfd_put_8 (output_bfd, 0x41,
4409 contents + roff - 3);
4410 }
4411 bfd_put_8 (output_bfd, 0x81,
4412 contents + roff - 2);
4413 bfd_put_8 (output_bfd, 0xc0 | reg,
4414 contents + roff - 1);
4415 }
4416 else
4417 {
4418 /* addq/addl -> leaq/leal */
4419 if (val == 0x4c)
4420 {
4421 if (roff < 3)
4422 goto corrupt_input;
4423 bfd_put_8 (output_bfd, 0x4d,
4424 contents + roff - 3);
4425 }
4426 else if (!ABI_64_P (output_bfd) && val == 0x44)
4427 {
4428 if (roff < 3)
4429 goto corrupt_input;
4430 bfd_put_8 (output_bfd, 0x45,
4431 contents + roff - 3);
4432 }
4433 bfd_put_8 (output_bfd, 0x8d,
4434 contents + roff - 2);
4435 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4436 contents + roff - 1);
4437 }
4438 bfd_put_32 (output_bfd,
4439 elf_x86_64_tpoff (info, relocation),
4440 contents + roff);
4441 continue;
4442 }
4443 else if (r_type == R_X86_64_CODE_4_GOTTPOFF)
4444 {
4445 /* IE->LE transition:
4446 Originally it can be one of:
4447 mov foo@gottpoff(%rip), %reg
4448 add foo@gottpoff(%rip), %reg
4449 movrs foo@gottpoff(%rip), %reg
4450 We change it into:
4451 mov $foo@tpoff, %reg
4452 add $foo@tpoff, %reg
4453 where reg is one of r16 to r31, except for MOVRS, where
4454 it's not one of r8 to r31 and no REX byte is present. */
4455
4456 unsigned int rex2, type, reg;
4457 unsigned int rex2_mask = REX_R | REX_R << 4;
4458
4459 if (roff < 4)
4460 goto corrupt_input;
4461
4462 rex2 = bfd_get_8 (input_bfd, contents + roff - 3);
4463 type = bfd_get_8 (input_bfd, contents + roff - 2);
4464 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4465 reg >>= 3;
4466 /* Move the R bits to the B bits in REX2 payload
4467 byte. */
4468 if (type == 0x8b)
4469 {
4470 /* For MOVRS emit meaningless CS prefixes. */
4471 if (bfd_get_8 (input_bfd, contents + roff - 4) == 0x0f)
4472 {
4473 bfd_put_8 (output_bfd, 0x2e, contents + roff - 4);
4474 rex2 = 0x2e;
4475 rex2_mask = 0;
4476 }
4477 type = 0xc7;
4478 }
4479 else
4480 type = 0x81;
4481 bfd_put_8 (output_bfd,
4482 ((rex2 & ~rex2_mask)
4483 | (rex2 & rex2_mask) >> 2),
4484 contents + roff - 3);
4485 bfd_put_8 (output_bfd, type,
4486 contents + roff - 2);
4487 bfd_put_8 (output_bfd, 0xc0 | reg,
4488 contents + roff - 1);
4489 bfd_put_32 (output_bfd,
4490 elf_x86_64_tpoff (info, relocation),
4491 contents + roff);
4492 continue;
4493 }
4494 else if (r_type == R_X86_64_CODE_5_GOTTPOFF)
4495 {
4496 /* IE->LE transition:
4497 Originally it is
4498 movrs foo@gottpoff(%rip), %reg
4499 We change it into:
4500 mov $foo@tpoff, %reg
4501 where reg isn't one of r16 to r31, but a REX
4502 byte is present. */
4503 unsigned int rex = bfd_get_8 (input_bfd, contents + roff - 5);
4504
4505 /* Move REX.R to REX.B. */
4506 rex = (rex & ~(REX_R | REX_B))
4507 | ((rex & REX_R) / (REX_R / REX_B));
4508
4509 unsigned int reg = bfd_get_8 (input_bfd, contents + roff - 1);
4510 reg >>= 3;
4511
4512 /* Replace 0f38 by meaningless CS prefixes, shifting the REX
4513 prefix forward. */
4514 bfd_put_8 (output_bfd, 0x2e, contents + roff - 5);
4515 bfd_put_8 (output_bfd, 0x2e, contents + roff - 4);
4516 bfd_put_8 (output_bfd, rex, contents + roff - 3);
4517 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4518 bfd_put_8 (output_bfd, 0xc0 | reg, contents + roff - 1);
4519
4520 bfd_put_32 (output_bfd,
4521 elf_x86_64_tpoff (info, relocation),
4522 contents + roff);
4523 continue;
4524 }
4525 else if (r_type == R_X86_64_CODE_6_GOTTPOFF)
4526 {
4527 /* IE->LE transition:
4528 Originally it is
4529 add %reg1, foo@gottpoff(%rip), %reg2
4530 or
4531 add foo@gottpoff(%rip), %reg1, %reg2
4532 or
4533 movrs foo@gottpoff(%rip), %reg
4534 We change it into:
4535 add $foo@tpoff, %reg1, %reg2
4536 mov $foo@tpoff, %reg
4537 where reg is one of r16 to r31. */
4538 unsigned int type, reg, byte1;
4539
4540 if (roff < 6)
4541 goto corrupt_input;
4542
4543 byte1 = bfd_get_8 (input_bfd, contents + roff - 5);
4544 type = bfd_get_8 (input_bfd, contents + roff - 2);
4545 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4546 reg >>= 3;
4547
4548 if (type == 0x8b)
4549 {
4550 /* Convert MOVRS to REX2-encoded MOV. */
4551 unsigned int rex2 = 0;
4552
4553 /* Move the EVEX R bits to the REX2 B ones. */
4554 if (!(byte1 & (1 << 7)))
4555 rex2 |= REX_B;
4556 if (!(byte1 & (1 << 4)))
4557 rex2 |= REX_B << 4;
4558 /* Propagate the EVEX W bit to the REX2 one. */
4559 type = bfd_get_8 (input_bfd, contents + roff - 4);
4560 if (type & (1 << 7))
4561 rex2 |= REX_W;
4562
4563
4564 bfd_put_8 (output_bfd, 0x2e, contents + roff - 6);
4565 bfd_put_8 (output_bfd, 0x2e, contents + roff - 5);
4566 bfd_put_8 (output_bfd, 0xd5, contents + roff - 4);
4567 bfd_put_8 (output_bfd, rex2, contents + roff - 3);
4568 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4569 bfd_put_8 (output_bfd, 0xc0 | reg, contents + roff - 1);
4570 bfd_put_32 (output_bfd,
4571 elf_x86_64_tpoff (info, relocation),
4572 contents + roff);
4573 continue;
4574 }
4575
4576 byte1 = evex_move_r_to_b (byte1, false);
4577 bfd_put_8 (output_bfd, byte1, contents + roff - 5);
4578 bfd_put_8 (output_bfd, 0x81, contents + roff - 2);
4579 bfd_put_8 (output_bfd, 0xc0 | reg, contents + roff - 1);
4580 bfd_put_32 (output_bfd,
4581 elf_x86_64_tpoff (info, relocation),
4582 contents + roff);
4583 continue;
4584 }
4585 else
4586 BFD_ASSERT (false);
4587 }
4588
4589 if (htab->elf.sgot == NULL)
4590 abort ();
4591
4592 if (h != NULL)
4593 {
4594 off = h->got.offset;
4595 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
4596 }
4597 else
4598 {
4599 if (local_got_offsets == NULL)
4600 abort ();
4601
4602 off = local_got_offsets[r_symndx];
4603 offplt = local_tlsdesc_gotents[r_symndx];
4604 }
4605
4606 if ((off & 1) != 0)
4607 off &= ~1;
4608 else
4609 {
4610 Elf_Internal_Rela outrel;
4611 int dr_type, indx;
4612 asection *sreloc;
4613
4614 if (htab->elf.srelgot == NULL)
4615 abort ();
4616
4617 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4618
4619 if (GOT_TLS_GDESC_P (tls_type))
4620 {
4621 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4622 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4623 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4624 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4625 + htab->elf.sgotplt->output_offset
4626 + offplt
4627 + htab->sgotplt_jump_table_size);
4628 sreloc = htab->elf.srelplt;
4629 if (indx == 0)
4630 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
4631 else
4632 outrel.r_addend = 0;
4633 elf_append_rela (output_bfd, sreloc, &outrel);
4634 }
4635
4636 sreloc = htab->elf.srelgot;
4637
4638 outrel.r_offset = (htab->elf.sgot->output_section->vma
4639 + htab->elf.sgot->output_offset + off);
4640
4641 if (GOT_TLS_GD_P (tls_type))
4642 dr_type = R_X86_64_DTPMOD64;
4643 else if (GOT_TLS_GDESC_P (tls_type))
4644 goto dr_done;
4645 else
4646 dr_type = R_X86_64_TPOFF64;
4647
4648 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4649 outrel.r_addend = 0;
4650 if ((dr_type == R_X86_64_TPOFF64
4651 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4652 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
4653 outrel.r_info = htab->r_info (indx, dr_type);
4654
4655 elf_append_rela (output_bfd, sreloc, &outrel);
4656
4657 if (GOT_TLS_GD_P (tls_type))
4658 {
4659 if (indx == 0)
4660 {
4661 BFD_ASSERT (! unresolved_reloc);
4662 bfd_put_64 (output_bfd,
4663 relocation - _bfd_x86_elf_dtpoff_base (info),
4664 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4665 }
4666 else
4667 {
4668 bfd_put_64 (output_bfd, 0,
4669 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4670 outrel.r_info = htab->r_info (indx,
4671 R_X86_64_DTPOFF64);
4672 outrel.r_offset += GOT_ENTRY_SIZE;
4673 elf_append_rela (output_bfd, sreloc,
4674 &outrel);
4675 }
4676 }
4677
4678 dr_done:
4679 if (h != NULL)
4680 h->got.offset |= 1;
4681 else
4682 local_got_offsets[r_symndx] |= 1;
4683 }
4684
4685 if (off >= (bfd_vma) -2
4686 && ! GOT_TLS_GDESC_P (tls_type))
4687 abort ();
4688 if (r_type_tls == r_type)
4689 {
4690 if (r_type == R_X86_64_GOTPC32_TLSDESC
4691 || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC
4692 || r_type == R_X86_64_TLSDESC_CALL)
4693 relocation = htab->elf.sgotplt->output_section->vma
4694 + htab->elf.sgotplt->output_offset
4695 + offplt + htab->sgotplt_jump_table_size;
4696 else
4697 relocation = htab->elf.sgot->output_section->vma
4698 + htab->elf.sgot->output_offset + off;
4699 unresolved_reloc = false;
4700 }
4701 else
4702 {
4703 bfd_vma roff = rel->r_offset;
4704
4705 if (r_type == R_X86_64_TLSGD)
4706 {
4707 /* GD->IE transition. For 64bit, change
4708 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4709 .word 0x6666; rex64; call __tls_get_addr@PLT
4710 or
4711 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4712 .byte 0x66; rex64
4713 call *__tls_get_addr@GOTPCREL(%rip)
4714 which may be converted to
4715 addr32 call __tls_get_addr
4716 into:
4717 movq %fs:0, %rax
4718 addq foo@gottpoff(%rip), %rax
4719 For 32bit, change
4720 leaq foo@tlsgd(%rip), %rdi
4721 .word 0x6666; rex64; call __tls_get_addr@PLT
4722 or
4723 leaq foo@tlsgd(%rip), %rdi
4724 .byte 0x66; rex64;
4725 call *__tls_get_addr@GOTPCREL(%rip)
4726 which may be converted to
4727 addr32 call __tls_get_addr
4728 into:
4729 movl %fs:0, %eax
4730 addq foo@gottpoff(%rip), %rax
4731 For largepic, change:
4732 leaq foo@tlsgd(%rip), %rdi
4733 movabsq $__tls_get_addr@pltoff, %rax
4734 addq %r15, %rax
4735 call *%rax
4736 into:
4737 movq %fs:0, %rax
4738 addq foo@gottpoff(%rax), %rax
4739 nopw 0x0(%rax,%rax,1) */
4740 int largepic = 0;
4741 if (ABI_64_P (output_bfd))
4742 {
4743 if (contents[roff + 5] == 0xb8)
4744 {
4745 if (roff < 3
4746 || (roff - 3 + 22) > input_section->size)
4747 goto corrupt_input;
4748 memcpy (contents + roff - 3,
4749 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4750 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4751 largepic = 1;
4752 }
4753 else
4754 {
4755 if (roff < 4
4756 || (roff - 4 + 16) > input_section->size)
4757 goto corrupt_input;
4758 memcpy (contents + roff - 4,
4759 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4760 16);
4761 }
4762 }
4763 else
4764 {
4765 if (roff < 3
4766 || (roff - 3 + 15) > input_section->size)
4767 goto corrupt_input;
4768 memcpy (contents + roff - 3,
4769 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4770 15);
4771 }
4772
4773 relocation = (htab->elf.sgot->output_section->vma
4774 + htab->elf.sgot->output_offset + off
4775 - roff
4776 - largepic
4777 - input_section->output_section->vma
4778 - input_section->output_offset
4779 - 12);
4780 bfd_put_32 (output_bfd, relocation,
4781 contents + roff + 8 + largepic);
4782 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4783 rel++;
4784 wrel++;
4785 continue;
4786 }
4787 else if (r_type == R_X86_64_GOTPC32_TLSDESC
4788 || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC)
4789 {
4790 /* GDesc -> IE transition.
4791 It's originally something like:
4792 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
4793 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
4794
4795 Change it to:
4796 # before xchg %ax,%ax in LP64 mode.
4797 movq x@gottpoff(%rip), %rax
4798 # before nopl (%rax) in X32 mode.
4799 rex movl x@gottpoff(%rip), %eax
4800 */
4801
4802 /* Now modify the instruction as appropriate. To
4803 turn a lea into a mov in the form we use it, it
4804 suffices to change the second byte from 0x8d to
4805 0x8b. */
4806 if (roff < 2)
4807 goto corrupt_input;
4808 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4809
4810 bfd_put_32 (output_bfd,
4811 htab->elf.sgot->output_section->vma
4812 + htab->elf.sgot->output_offset + off
4813 - rel->r_offset
4814 - input_section->output_section->vma
4815 - input_section->output_offset
4816 - 4,
4817 contents + roff);
4818 continue;
4819 }
4820 else if (r_type == R_X86_64_TLSDESC_CALL)
4821 {
4822 /* GDesc -> IE transition.
4823 It's originally:
4824 call *(%rax) <--- LP64 mode.
4825 call *(%eax) <--- X32 mode.
4826
4827 Change it to:
4828 xchg %ax, %ax <-- LP64 mode.
4829 nopl (%rax) <-- X32 mode.
4830 */
4831
4832 unsigned int prefix = 0;
4833 if (!ABI_64_P (input_bfd))
4834 {
4835 /* Check for call *x@tlscall(%eax). */
4836 if (contents[roff] == 0x67)
4837 prefix = 1;
4838 }
4839 if (prefix)
4840 {
4841 bfd_put_8 (output_bfd, 0x0f, contents + roff);
4842 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
4843 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
4844 }
4845 else
4846 {
4847 bfd_put_8 (output_bfd, 0x66, contents + roff);
4848 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4849 }
4850 continue;
4851 }
4852 else
4853 BFD_ASSERT (false);
4854 }
4855 break;
4856
4857 case R_X86_64_TLSLD:
4858 if (! elf_x86_64_tls_transition (info, input_bfd,
4859 input_section, contents,
4860 symtab_hdr, sym_hashes,
4861 &r_type, GOT_UNKNOWN, rel,
4862 relend, h, sym, true))
4863 return false;
4864
4865 if (r_type != R_X86_64_TLSLD)
4866 {
4867 /* LD->LE transition:
4868 leaq foo@tlsld(%rip), %rdi
4869 call __tls_get_addr@PLT
4870 For 64bit, we change it into:
4871 .word 0x6666; .byte 0x66; movq %fs:0, %rax
4872 For 32bit, we change it into:
4873 nopl 0x0(%rax); movl %fs:0, %eax
4874 Or
4875 leaq foo@tlsld(%rip), %rdi;
4876 call *__tls_get_addr@GOTPCREL(%rip)
4877 which may be converted to
4878 addr32 call __tls_get_addr
4879 For 64bit, we change it into:
4880 .word 0x6666; .word 0x6666; movq %fs:0, %rax
4881 For 32bit, we change it into:
4882 nopw 0x0(%rax); movl %fs:0, %eax
4883 For largepic, change:
4884 leaq foo@tlsld(%rip), %rdi
4885 movabsq $__tls_get_addr@pltoff, %rax
4886 addq %rbx, %rax
4887 call *%rax
4888 into
4889 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
4890 movq %fs:0, %rax */
4891
4892 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4893 if (ABI_64_P (output_bfd))
4894 {
4895 if ((rel->r_offset + 5) >= input_section->size)
4896 goto corrupt_input;
4897 if (contents[rel->r_offset + 5] == 0xb8)
4898 {
4899 if (rel->r_offset < 3
4900 || (rel->r_offset - 3 + 22) > input_section->size)
4901 goto corrupt_input;
4902 memcpy (contents + rel->r_offset - 3,
4903 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4904 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4905 }
4906 else if (contents[rel->r_offset + 4] == 0xff
4907 || contents[rel->r_offset + 4] == 0x67)
4908 {
4909 if (rel->r_offset < 3
4910 || (rel->r_offset - 3 + 13) > input_section->size)
4911 goto corrupt_input;
4912 memcpy (contents + rel->r_offset - 3,
4913 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
4914 13);
4915
4916 }
4917 else
4918 {
4919 if (rel->r_offset < 3
4920 || (rel->r_offset - 3 + 12) > input_section->size)
4921 goto corrupt_input;
4922 memcpy (contents + rel->r_offset - 3,
4923 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4924 }
4925 }
4926 else
4927 {
4928 if ((rel->r_offset + 4) >= input_section->size)
4929 goto corrupt_input;
4930 if (contents[rel->r_offset + 4] == 0xff)
4931 {
4932 if (rel->r_offset < 3
4933 || (rel->r_offset - 3 + 13) > input_section->size)
4934 goto corrupt_input;
4935 memcpy (contents + rel->r_offset - 3,
4936 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
4937 13);
4938 }
4939 else
4940 {
4941 if (rel->r_offset < 3
4942 || (rel->r_offset - 3 + 12) > input_section->size)
4943 goto corrupt_input;
4944 memcpy (contents + rel->r_offset - 3,
4945 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4946 }
4947 }
4948 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4949 and R_X86_64_PLTOFF64. */
4950 rel++;
4951 wrel++;
4952 continue;
4953 }
4954
4955 if (htab->elf.sgot == NULL)
4956 abort ();
4957
4958 off = htab->tls_ld_or_ldm_got.offset;
4959 if (off & 1)
4960 off &= ~1;
4961 else
4962 {
4963 Elf_Internal_Rela outrel;
4964
4965 if (htab->elf.srelgot == NULL)
4966 abort ();
4967
4968 outrel.r_offset = (htab->elf.sgot->output_section->vma
4969 + htab->elf.sgot->output_offset + off);
4970
4971 bfd_put_64 (output_bfd, 0,
4972 htab->elf.sgot->contents + off);
4973 bfd_put_64 (output_bfd, 0,
4974 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4975 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4976 outrel.r_addend = 0;
4977 elf_append_rela (output_bfd, htab->elf.srelgot,
4978 &outrel);
4979 htab->tls_ld_or_ldm_got.offset |= 1;
4980 }
4981 relocation = htab->elf.sgot->output_section->vma
4982 + htab->elf.sgot->output_offset + off;
4983 unresolved_reloc = false;
4984 break;
4985
4986 case R_X86_64_DTPOFF32:
4987 if (!bfd_link_executable (info)
4988 || (input_section->flags & SEC_CODE) == 0)
4989 relocation -= _bfd_x86_elf_dtpoff_base (info);
4990 else
4991 relocation = elf_x86_64_tpoff (info, relocation);
4992 break;
4993
4994 case R_X86_64_TPOFF32:
4995 case R_X86_64_TPOFF64:
4996 BFD_ASSERT (bfd_link_executable (info));
4997 relocation = elf_x86_64_tpoff (info, relocation);
4998 break;
4999
5000 case R_X86_64_DTPOFF64:
5001 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5002 relocation -= _bfd_x86_elf_dtpoff_base (info);
5003 break;
5004
5005 default:
5006 break;
5007 }
5008
5009 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5010 because such sections are not SEC_ALLOC and thus ld.so will
5011 not process them. */
5012 if (unresolved_reloc
5013 && !((input_section->flags & SEC_DEBUGGING) != 0
5014 && h->def_dynamic)
5015 && _bfd_elf_section_offset (output_bfd, info, input_section,
5016 rel->r_offset) != (bfd_vma) -1)
5017 {
5018 switch (r_type)
5019 {
5020 case R_X86_64_32S:
5021 sec = h->root.u.def.section;
5022 if ((info->nocopyreloc || eh->def_protected)
5023 && !(h->root.u.def.section->flags & SEC_CODE))
5024 return elf_x86_64_need_pic (info, input_bfd, input_section,
5025 h, NULL, NULL, howto);
5026 /* Fall through. */
5027
5028 default:
5029 _bfd_error_handler
5030 /* xgettext:c-format */
5031 (_("%pB(%pA+%#" PRIx64 "): "
5032 "unresolvable %s relocation against symbol `%s'"),
5033 input_bfd,
5034 input_section,
5035 (uint64_t) rel->r_offset,
5036 howto->name,
5037 h->root.root.string);
5038 return false;
5039 }
5040 }
5041
5042 do_relocation:
5043 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5044 contents, rel->r_offset,
5045 relocation, rel->r_addend);
5046
5047 check_relocation_error:
5048 if (r != bfd_reloc_ok)
5049 {
5050 const char *name;
5051
5052 if (h != NULL)
5053 name = h->root.root.string;
5054 else
5055 {
5056 name = bfd_elf_string_from_elf_section (input_bfd,
5057 symtab_hdr->sh_link,
5058 sym->st_name);
5059 if (name == NULL)
5060 return false;
5061 if (*name == '\0')
5062 name = bfd_section_name (sec);
5063 }
5064
5065 if (r == bfd_reloc_overflow)
5066 {
5067 if (converted_reloc)
5068 {
5069 info->callbacks->einfo
5070 ("%X%H:", input_bfd, input_section, rel->r_offset);
5071 info->callbacks->einfo
5072 (_(" failed to convert GOTPCREL relocation against "
5073 "'%s'; relink with --no-relax\n"),
5074 name);
5075 status = false;
5076 continue;
5077 }
5078 (*info->callbacks->reloc_overflow)
5079 (info, (h ? &h->root : NULL), name, howto->name,
5080 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5081 }
5082 else
5083 {
5084 _bfd_error_handler
5085 /* xgettext:c-format */
5086 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
5087 input_bfd, input_section,
5088 (uint64_t) rel->r_offset, name, (int) r);
5089 return false;
5090 }
5091 }
5092
5093 if (wrel != rel)
5094 *wrel = *rel;
5095 }
5096
5097 if (wrel != rel)
5098 {
5099 Elf_Internal_Shdr *rel_hdr;
5100 size_t deleted = rel - wrel;
5101
5102 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5103 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5104 if (rel_hdr->sh_size == 0)
5105 {
5106 /* It is too late to remove an empty reloc section. Leave
5107 one NONE reloc.
5108 ??? What is wrong with an empty section??? */
5109 rel_hdr->sh_size = rel_hdr->sh_entsize;
5110 deleted -= 1;
5111 }
5112 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5113 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5114 input_section->reloc_count -= deleted;
5115 }
5116
5117 return status;
5118 }
5119
5120 /* Finish up dynamic symbol handling. We set the contents of various
5121 dynamic sections here. */
5122
5123 static bool
5124 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5125 struct bfd_link_info *info,
5126 struct elf_link_hash_entry *h,
5127 Elf_Internal_Sym *sym)
5128 {
5129 struct elf_x86_link_hash_table *htab;
5130 bool use_plt_second;
5131 struct elf_x86_link_hash_entry *eh;
5132 bool local_undefweak;
5133
5134 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
5135
5136 /* Use the second PLT section only if there is .plt section. */
5137 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
5138
5139 eh = (struct elf_x86_link_hash_entry *) h;
5140 if (eh->no_finish_dynamic_symbol)
5141 abort ();
5142
5143 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
5144 resolved undefined weak symbols in executable so that their
5145 references have value 0 at run-time. */
5146 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
5147
5148 if (h->plt.offset != (bfd_vma) -1)
5149 {
5150 bfd_vma plt_index;
5151 bfd_vma got_offset, plt_offset;
5152 Elf_Internal_Rela rela;
5153 bfd_byte *loc;
5154 asection *plt, *gotplt, *relplt, *resolved_plt;
5155 const struct elf_backend_data *bed;
5156 bfd_vma plt_got_pcrel_offset;
5157
5158 /* When building a static executable, use .iplt, .igot.plt and
5159 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5160 if (htab->elf.splt != NULL)
5161 {
5162 plt = htab->elf.splt;
5163 gotplt = htab->elf.sgotplt;
5164 relplt = htab->elf.srelplt;
5165 }
5166 else
5167 {
5168 plt = htab->elf.iplt;
5169 gotplt = htab->elf.igotplt;
5170 relplt = htab->elf.irelplt;
5171 }
5172
5173 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
5174
5175 /* Get the index in the procedure linkage table which
5176 corresponds to this symbol. This is the index of this symbol
5177 in all the symbols for which we are making plt entries. The
5178 first entry in the procedure linkage table is reserved.
5179
5180 Get the offset into the .got table of the entry that
5181 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5182 bytes. The first three are reserved for the dynamic linker.
5183
5184 For static executables, we don't reserve anything. */
5185
5186 if (plt == htab->elf.splt)
5187 {
5188 got_offset = (h->plt.offset / htab->plt.plt_entry_size
5189 - htab->plt.has_plt0);
5190 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5191 }
5192 else
5193 {
5194 got_offset = h->plt.offset / htab->plt.plt_entry_size;
5195 got_offset = got_offset * GOT_ENTRY_SIZE;
5196 }
5197
5198 /* Fill in the entry in the procedure linkage table. */
5199 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
5200 htab->plt.plt_entry_size);
5201 if (use_plt_second)
5202 {
5203 memcpy (htab->plt_second->contents + eh->plt_second.offset,
5204 htab->non_lazy_plt->plt_entry,
5205 htab->non_lazy_plt->plt_entry_size);
5206
5207 resolved_plt = htab->plt_second;
5208 plt_offset = eh->plt_second.offset;
5209 }
5210 else
5211 {
5212 resolved_plt = plt;
5213 plt_offset = h->plt.offset;
5214 }
5215
5216 /* Insert the relocation positions of the plt section. */
5217
5218 /* Put offset the PC-relative instruction referring to the GOT entry,
5219 subtracting the size of that instruction. */
5220 plt_got_pcrel_offset = (gotplt->output_section->vma
5221 + gotplt->output_offset
5222 + got_offset
5223 - resolved_plt->output_section->vma
5224 - resolved_plt->output_offset
5225 - plt_offset
5226 - htab->plt.plt_got_insn_size);
5227
5228 /* Check PC-relative offset overflow in PLT entry. */
5229 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5230 /* xgettext:c-format */
5231 info->callbacks->fatal (_("%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
5232 output_bfd, h->root.root.string);
5233
5234 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5235 (resolved_plt->contents + plt_offset
5236 + htab->plt.plt_got_offset));
5237
5238 /* Fill in the entry in the global offset table, initially this
5239 points to the second part of the PLT entry. Leave the entry
5240 as zero for undefined weak symbol in PIE. No PLT relocation
5241 against undefined weak symbol in PIE. */
5242 if (!local_undefweak)
5243 {
5244 if (htab->plt.has_plt0)
5245 bfd_put_64 (output_bfd, (plt->output_section->vma
5246 + plt->output_offset
5247 + h->plt.offset
5248 + htab->lazy_plt->plt_lazy_offset),
5249 gotplt->contents + got_offset);
5250
5251 /* Fill in the entry in the .rela.plt section. */
5252 rela.r_offset = (gotplt->output_section->vma
5253 + gotplt->output_offset
5254 + got_offset);
5255 if (PLT_LOCAL_IFUNC_P (info, h))
5256 {
5257 if (h->root.u.def.section == NULL)
5258 return false;
5259
5260 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
5261 h->root.root.string,
5262 h->root.u.def.section->owner);
5263
5264 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5265 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5266 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5267 rela.r_addend = (h->root.u.def.value
5268 + h->root.u.def.section->output_section->vma
5269 + h->root.u.def.section->output_offset);
5270
5271 if (htab->params->report_relative_reloc)
5272 _bfd_x86_elf_link_report_relative_reloc
5273 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
5274
5275 /* R_X86_64_IRELATIVE comes last. */
5276 plt_index = htab->next_irelative_index--;
5277 }
5278 else
5279 {
5280 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5281 if (htab->params->mark_plt)
5282 rela.r_addend = (resolved_plt->output_section->vma
5283 + plt_offset
5284 + htab->plt.plt_indirect_branch_offset);
5285 else
5286 rela.r_addend = 0;
5287 plt_index = htab->next_jump_slot_index++;
5288 }
5289
5290 /* Don't fill the second and third slots in PLT entry for
5291 static executables nor without PLT0. */
5292 if (plt == htab->elf.splt && htab->plt.has_plt0)
5293 {
5294 bfd_vma plt0_offset
5295 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
5296
5297 /* Put relocation index. */
5298 bfd_put_32 (output_bfd, plt_index,
5299 (plt->contents + h->plt.offset
5300 + htab->lazy_plt->plt_reloc_offset));
5301
5302 /* Put offset for jmp .PLT0 and check for overflow. We don't
5303 check relocation index for overflow since branch displacement
5304 will overflow first. */
5305 if (plt0_offset > 0x80000000)
5306 /* xgettext:c-format */
5307 info->callbacks->fatal (_("%pB: branch displacement overflow in PLT entry for `%s'\n"),
5308 output_bfd, h->root.root.string);
5309 bfd_put_32 (output_bfd, - plt0_offset,
5310 (plt->contents + h->plt.offset
5311 + htab->lazy_plt->plt_plt_offset));
5312 }
5313
5314 bed = get_elf_backend_data (output_bfd);
5315 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5316 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5317 }
5318 }
5319 else if (eh->plt_got.offset != (bfd_vma) -1)
5320 {
5321 bfd_vma got_offset, plt_offset;
5322 asection *plt, *got;
5323 bool got_after_plt;
5324 int32_t got_pcrel_offset;
5325
5326 /* Set the entry in the GOT procedure linkage table. */
5327 plt = htab->plt_got;
5328 got = htab->elf.sgot;
5329 got_offset = h->got.offset;
5330
5331 if (got_offset == (bfd_vma) -1
5332 || (h->type == STT_GNU_IFUNC && h->def_regular)
5333 || plt == NULL
5334 || got == NULL)
5335 abort ();
5336
5337 /* Use the non-lazy PLT entry template for the GOT PLT since they
5338 are the identical. */
5339 /* Fill in the entry in the GOT procedure linkage table. */
5340 plt_offset = eh->plt_got.offset;
5341 memcpy (plt->contents + plt_offset,
5342 htab->non_lazy_plt->plt_entry,
5343 htab->non_lazy_plt->plt_entry_size);
5344
5345 /* Put offset the PC-relative instruction referring to the GOT
5346 entry, subtracting the size of that instruction. */
5347 got_pcrel_offset = (got->output_section->vma
5348 + got->output_offset
5349 + got_offset
5350 - plt->output_section->vma
5351 - plt->output_offset
5352 - plt_offset
5353 - htab->non_lazy_plt->plt_got_insn_size);
5354
5355 /* Check PC-relative offset overflow in GOT PLT entry. */
5356 got_after_plt = got->output_section->vma > plt->output_section->vma;
5357 if ((got_after_plt && got_pcrel_offset < 0)
5358 || (!got_after_plt && got_pcrel_offset > 0))
5359 /* xgettext:c-format */
5360 info->callbacks->fatal (_("%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5361 output_bfd, h->root.root.string);
5362
5363 bfd_put_32 (output_bfd, got_pcrel_offset,
5364 (plt->contents + plt_offset
5365 + htab->non_lazy_plt->plt_got_offset));
5366 }
5367
5368 if (!local_undefweak
5369 && !h->def_regular
5370 && (h->plt.offset != (bfd_vma) -1
5371 || eh->plt_got.offset != (bfd_vma) -1))
5372 {
5373 /* Mark the symbol as undefined, rather than as defined in
5374 the .plt section. Leave the value if there were any
5375 relocations where pointer equality matters (this is a clue
5376 for the dynamic linker, to make function pointer
5377 comparisons work between an application and shared
5378 library), otherwise set it to zero. If a function is only
5379 called from a binary, there is no need to slow down
5380 shared libraries because of that. */
5381 sym->st_shndx = SHN_UNDEF;
5382 if (!h->pointer_equality_needed)
5383 sym->st_value = 0;
5384 }
5385
5386 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
5387
5388 /* Don't generate dynamic GOT relocation against undefined weak
5389 symbol in executable. */
5390 if (h->got.offset != (bfd_vma) -1
5391 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
5392 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
5393 && !local_undefweak)
5394 {
5395 Elf_Internal_Rela rela;
5396 asection *relgot = htab->elf.srelgot;
5397 const char *relative_reloc_name = NULL;
5398 bool generate_dynamic_reloc = true;
5399
5400 /* This symbol has an entry in the global offset table. Set it
5401 up. */
5402 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5403 abort ();
5404
5405 rela.r_offset = (htab->elf.sgot->output_section->vma
5406 + htab->elf.sgot->output_offset
5407 + (h->got.offset &~ (bfd_vma) 1));
5408
5409 /* If this is a static link, or it is a -Bsymbolic link and the
5410 symbol is defined locally or was forced to be local because
5411 of a version file, we just want to emit a RELATIVE reloc.
5412 The entry in the global offset table will already have been
5413 initialized in the relocate_section function. */
5414 if (h->def_regular
5415 && h->type == STT_GNU_IFUNC)
5416 {
5417 if (h->plt.offset == (bfd_vma) -1)
5418 {
5419 /* STT_GNU_IFUNC is referenced without PLT. */
5420 if (htab->elf.splt == NULL)
5421 {
5422 /* use .rel[a].iplt section to store .got relocations
5423 in static executable. */
5424 relgot = htab->elf.irelplt;
5425 }
5426 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
5427 {
5428 if (h->root.u.def.section == NULL)
5429 return false;
5430
5431 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
5432 h->root.root.string,
5433 h->root.u.def.section->owner);
5434
5435 rela.r_info = htab->r_info (0,
5436 R_X86_64_IRELATIVE);
5437 rela.r_addend = (h->root.u.def.value
5438 + h->root.u.def.section->output_section->vma
5439 + h->root.u.def.section->output_offset);
5440 relative_reloc_name = "R_X86_64_IRELATIVE";
5441 }
5442 else
5443 goto do_glob_dat;
5444 }
5445 else if (bfd_link_pic (info))
5446 {
5447 /* Generate R_X86_64_GLOB_DAT. */
5448 goto do_glob_dat;
5449 }
5450 else
5451 {
5452 asection *plt;
5453 bfd_vma plt_offset;
5454
5455 if (!h->pointer_equality_needed)
5456 abort ();
5457
5458 /* For non-shared object, we can't use .got.plt, which
5459 contains the real function addres if we need pointer
5460 equality. We load the GOT entry with the PLT entry. */
5461 if (htab->plt_second != NULL)
5462 {
5463 plt = htab->plt_second;
5464 plt_offset = eh->plt_second.offset;
5465 }
5466 else
5467 {
5468 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5469 plt_offset = h->plt.offset;
5470 }
5471 bfd_put_64 (output_bfd, (plt->output_section->vma
5472 + plt->output_offset
5473 + plt_offset),
5474 htab->elf.sgot->contents + h->got.offset);
5475 return true;
5476 }
5477 }
5478 else if (bfd_link_pic (info)
5479 && SYMBOL_REFERENCES_LOCAL_P (info, h))
5480 {
5481 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
5482 return false;
5483 BFD_ASSERT((h->got.offset & 1) != 0);
5484 if (info->enable_dt_relr)
5485 generate_dynamic_reloc = false;
5486 else
5487 {
5488 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5489 rela.r_addend = (h->root.u.def.value
5490 + h->root.u.def.section->output_section->vma
5491 + h->root.u.def.section->output_offset);
5492 relative_reloc_name = "R_X86_64_RELATIVE";
5493 }
5494 }
5495 else
5496 {
5497 BFD_ASSERT((h->got.offset & 1) == 0);
5498 do_glob_dat:
5499 bfd_put_64 (output_bfd, (bfd_vma) 0,
5500 htab->elf.sgot->contents + h->got.offset);
5501 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5502 rela.r_addend = 0;
5503 }
5504
5505 if (generate_dynamic_reloc)
5506 {
5507 /* If the relgot section has not been created, then
5508 generate an error instead of a reloc. cf PR 32638. */
5509 if (relgot == NULL || relgot->size == 0)
5510 {
5511 info->callbacks->fatal (_("%pB: Unable to generate dynamic relocs because a suitable section does not exist\n"),
5512 output_bfd);
5513 return false;
5514 }
5515
5516 if (relative_reloc_name != NULL
5517 && htab->params->report_relative_reloc)
5518 _bfd_x86_elf_link_report_relative_reloc
5519 (info, relgot, h, sym, relative_reloc_name, &rela);
5520
5521 elf_append_rela (output_bfd, relgot, &rela);
5522 }
5523 }
5524
5525 if (h->needs_copy)
5526 {
5527 Elf_Internal_Rela rela;
5528 asection *s;
5529
5530 /* This symbol needs a copy reloc. Set it up. */
5531 VERIFY_COPY_RELOC (h, htab)
5532
5533 rela.r_offset = (h->root.u.def.value
5534 + h->root.u.def.section->output_section->vma
5535 + h->root.u.def.section->output_offset);
5536 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5537 rela.r_addend = 0;
5538 if (h->root.u.def.section == htab->elf.sdynrelro)
5539 s = htab->elf.sreldynrelro;
5540 else
5541 s = htab->elf.srelbss;
5542 elf_append_rela (output_bfd, s, &rela);
5543 }
5544
5545 return true;
5546 }
5547
5548 /* Finish up local dynamic symbol handling. We set the contents of
5549 various dynamic sections here. */
5550
5551 static int
5552 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5553 {
5554 struct elf_link_hash_entry *h
5555 = (struct elf_link_hash_entry *) *slot;
5556 struct bfd_link_info *info
5557 = (struct bfd_link_info *) inf;
5558
5559 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5560 info, h, NULL);
5561 }
5562
5563 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
5564 here since undefined weak symbol may not be dynamic and may not be
5565 called for elf_x86_64_finish_dynamic_symbol. */
5566
5567 static bool
5568 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
5569 void *inf)
5570 {
5571 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
5572 struct bfd_link_info *info = (struct bfd_link_info *) inf;
5573
5574 if (h->root.type != bfd_link_hash_undefweak
5575 || h->dynindx != -1)
5576 return true;
5577
5578 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5579 info, h, NULL);
5580 }
5581
5582 /* Used to decide how to sort relocs in an optimal manner for the
5583 dynamic linker, before writing them out. */
5584
5585 static enum elf_reloc_type_class
5586 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
5587 const asection *rel_sec ATTRIBUTE_UNUSED,
5588 const Elf_Internal_Rela *rela)
5589 {
5590 bfd *abfd = info->output_bfd;
5591 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5592 struct elf_x86_link_hash_table *htab
5593 = elf_x86_hash_table (info, X86_64_ELF_DATA);
5594
5595 if (htab->elf.dynsym != NULL
5596 && htab->elf.dynsym->contents != NULL)
5597 {
5598 /* Check relocation against STT_GNU_IFUNC symbol if there are
5599 dynamic symbols. */
5600 unsigned long r_symndx = htab->r_sym (rela->r_info);
5601 if (r_symndx != STN_UNDEF)
5602 {
5603 Elf_Internal_Sym sym;
5604 if (!bed->s->swap_symbol_in (abfd,
5605 (htab->elf.dynsym->contents
5606 + r_symndx * bed->s->sizeof_sym),
5607 0, &sym))
5608 abort ();
5609
5610 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
5611 return reloc_class_ifunc;
5612 }
5613 }
5614
5615 switch ((int) ELF32_R_TYPE (rela->r_info))
5616 {
5617 case R_X86_64_IRELATIVE:
5618 return reloc_class_ifunc;
5619 case R_X86_64_RELATIVE:
5620 case R_X86_64_RELATIVE64:
5621 return reloc_class_relative;
5622 case R_X86_64_JUMP_SLOT:
5623 return reloc_class_plt;
5624 case R_X86_64_COPY:
5625 return reloc_class_copy;
5626 default:
5627 return reloc_class_normal;
5628 }
5629 }
5630
5631 /* Finish up the dynamic sections. */
5632
static bool
elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
				    struct bfd_link_info *info)
{
  struct elf_x86_link_hash_table *htab;

  /* Common x86 finishing first; it returns the hash table on success.  */
  htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
  if (htab == NULL)
    return false;

  /* Nothing more to do for a fully static link.  */
  if (! htab->elf.dynamic_sections_created)
    return true;

  if (htab->elf.splt && htab->elf.splt->size > 0)
    {
      /* A .plt that ended up in the absolute section was discarded by
	 the linker script; that cannot be finished sensibly.  */
      if (bfd_is_abs_section (htab->elf.splt->output_section))
	{
	  info->callbacks->fatal
	    (_("%P: discarded output section: `%pA'\n"),
	     htab->elf.splt);
	  return false;
	}

      elf_section_data (htab->elf.splt->output_section)
	->this_hdr.sh_entsize = htab->plt.plt_entry_size;

      if (htab->plt.has_plt0)
	{
	  /* Fill in the special first entry in the procedure linkage
	     table.  */
	  memcpy (htab->elf.splt->contents,
		  htab->lazy_plt->plt0_entry,
		  htab->lazy_plt->plt0_entry_size);
	  /* Add offset for pushq GOT+8(%rip), since the instruction
	     uses 6 bytes subtract this value.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 8
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - 6),
		      (htab->elf.splt->contents
		       + htab->lazy_plt->plt0_got1_offset));
	  /* Add offset for the PC-relative instruction accessing
	     GOT+16, subtracting the offset to the end of that
	     instruction.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 16
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->lazy_plt->plt0_got2_insn_end),
		      (htab->elf.splt->contents
		       + htab->lazy_plt->plt0_got2_offset));
	}

      /* If a TLSDESC trampoline was allocated, emit it and patch its
	 two PC-relative displacements.  */
      if (htab->elf.tlsdesc_plt)
	{
	  /* The GOT slot used by the trampoline starts out zeroed.  */
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->elf.sgot->contents + htab->elf.tlsdesc_got);

	  memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
		  htab->lazy_plt->plt_tlsdesc_entry,
		  htab->lazy_plt->plt_tlsdesc_entry_size);

	  /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
	     bytes and the instruction uses 6 bytes, subtract these
	     values.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 8
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->elf.tlsdesc_plt
		       - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
		      (htab->elf.splt->contents
		       + htab->elf.tlsdesc_plt
		       + htab->lazy_plt->plt_tlsdesc_got1_offset));
	  /* Add offset for indirect branch via GOT+TDG, where TDG
	     stands for htab->tlsdesc_got, subtracting the offset
	     to the end of that instruction.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgot->output_section->vma
		       + htab->elf.sgot->output_offset
		       + htab->elf.tlsdesc_got
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->elf.tlsdesc_plt
		       - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
		      (htab->elf.splt->contents
		       + htab->elf.tlsdesc_plt
		       + htab->lazy_plt->plt_tlsdesc_got2_offset));
	}
    }

  /* Fill PLT entries for undefined weak symbols in PIE.  */
  if (bfd_link_pie (info))
    bfd_hash_traverse (&info->hash->table,
		       elf_x86_64_pie_finish_undefweak_symbol,
		       info);

  return true;
}
5739
5740 /* Fill PLT/GOT entries and allocate dynamic relocations for local
5741 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
5742 It has to be done before elf_link_sort_relocs is called so that
5743 dynamic relocations are properly sorted. */
5744
5745 static bool
5746 elf_x86_64_output_arch_local_syms
5747 (bfd *output_bfd ATTRIBUTE_UNUSED,
5748 struct bfd_link_info *info,
5749 void *flaginfo ATTRIBUTE_UNUSED,
5750 int (*func) (void *, const char *,
5751 Elf_Internal_Sym *,
5752 asection *,
5753 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
5754 {
5755 struct elf_x86_link_hash_table *htab
5756 = elf_x86_hash_table (info, X86_64_ELF_DATA);
5757 if (htab == NULL)
5758 return false;
5759
5760 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5761 htab_traverse (htab->loc_hash_table,
5762 elf_x86_64_finish_local_dynamic_symbol,
5763 info);
5764
5765 return true;
5766 }
5767
5768 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
5769 dynamic relocations. */
5770
static long
elf_x86_64_get_synthetic_symtab (bfd *abfd,
				 long symcount ATTRIBUTE_UNUSED,
				 asymbol **syms ATTRIBUTE_UNUSED,
				 long dynsymcount,
				 asymbol **dynsyms,
				 asymbol **ret)
{
  long count, i, n;
  int j;
  bfd_byte *plt_contents;
  long relsize;
  const struct elf_x86_lazy_plt_layout *lazy_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
  const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
  const struct elf_x86_lazy_plt_layout *lazy_bnd_ibt_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_ibt_plt;
  const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
  asection *plt;
  enum elf_x86_plt_type plt_type;
  /* Candidate PLT sections to scan; a NULL name terminates the list.  */
  struct elf_x86_plt plts[] =
    {
      { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
      { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
      { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
      { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
      { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
    };

  *ret = NULL;

  /* Synthetic PLT symbols only make sense for executables and shared
     objects with dynamic symbols.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
  if (relsize <= 0)
    return -1;

  lazy_plt = &elf_x86_64_lazy_plt;
  non_lazy_plt = &elf_x86_64_non_lazy_plt;
  lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
  non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
  if (ABI_64_P (abfd))
    {
      lazy_bnd_ibt_plt = &elf_x86_64_lazy_bnd_ibt_plt;
      non_lazy_bnd_ibt_plt = &elf_x86_64_non_lazy_bnd_ibt_plt;
      lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
      non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
    }
  else
    {
      /* BND (MPX) PLT layouts exist only for the 64-bit ABI.  */
      lazy_bnd_ibt_plt = NULL;
      non_lazy_bnd_ibt_plt = NULL;
      lazy_bnd_plt = NULL;
      non_lazy_bnd_plt = NULL;
    }

  count = 0;
  for (j = 0; plts[j].name != NULL; j++)
    {
      plt = bfd_get_section_by_name (abfd, plts[j].name);
      if (plt == NULL
	  || plt->size == 0
	  || (plt->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Get the PLT section contents.  */
      if (!_bfd_elf_mmap_section_contents (abfd, plt, &plt_contents))
	break;

      /* Check what kind of PLT it is by comparing the section bytes
	 against the known entry templates.  */
      plt_type = plt_unknown;
      if (plts[j].type == plt_unknown
	  && (plt->size >= (lazy_plt->plt_entry_size
			    + lazy_plt->plt_entry_size)))
	{
	  /* Match lazy PLT first.  Need to check the first two
	     instructions.  */
	  if ((memcmp (plt_contents, lazy_plt->plt0_entry,
		       lazy_plt->plt0_got1_offset) == 0)
	      && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
			  2) == 0))
	    {
	      if (memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
			  lazy_ibt_plt->plt_entry,
			  lazy_ibt_plt->plt_got_offset) == 0)
		{
		  /* The first entry in the lazy IBT PLT is the same as
		     the lazy PLT.  */
		  plt_type = plt_lazy | plt_second;
		  lazy_plt = lazy_ibt_plt;
		}
	      else
		plt_type = plt_lazy;
	    }
	  else if (lazy_bnd_plt != NULL
		   && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
			       lazy_bnd_plt->plt0_got1_offset) == 0)
		   && (memcmp (plt_contents + 6,
			       lazy_bnd_plt->plt0_entry + 6, 3) == 0))
	    {
	      plt_type = plt_lazy | plt_second;
	      /* The first entry in the lazy BND IBT PLT is the same as
		 the lazy BND PLT.  */
	      if (memcmp (plt_contents
			  + lazy_bnd_ibt_plt->plt_entry_size,
			  lazy_bnd_ibt_plt->plt_entry,
			  lazy_bnd_ibt_plt->plt_got_offset) == 0)
		lazy_plt = lazy_bnd_ibt_plt;
	      else
		lazy_plt = lazy_bnd_plt;
	    }
	}

      if (non_lazy_plt != NULL
	  && (plt_type == plt_unknown || plt_type == plt_non_lazy)
	  && plt->size >= non_lazy_plt->plt_entry_size)
	{
	  /* Match non-lazy PLT.  */
	  if (memcmp (plt_contents, non_lazy_plt->plt_entry,
		      non_lazy_plt->plt_got_offset) == 0)
	    plt_type = plt_non_lazy;
	}

      if (plt_type == plt_unknown || plt_type == plt_second)
	{
	  if (plt->size >= non_lazy_ibt_plt->plt_entry_size
	      && (memcmp (plt_contents,
			  non_lazy_ibt_plt->plt_entry,
			  non_lazy_ibt_plt->plt_got_offset) == 0))
	    {
	      /* Match IBT PLT.  */
	      plt_type = plt_second;
	      non_lazy_plt = non_lazy_ibt_plt;
	    }
	  else if (non_lazy_bnd_plt != NULL)
	    {
	      if (plt->size >= non_lazy_bnd_plt->plt_entry_size
		  && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
			      non_lazy_bnd_plt->plt_got_offset) == 0))
		{
		  /* Match BND PLT.  */
		  plt_type = plt_second;
		  non_lazy_plt = non_lazy_bnd_plt;
		}
	      else if (plt->size >= non_lazy_bnd_ibt_plt->plt_entry_size
		       && (memcmp (plt_contents,
				   non_lazy_bnd_ibt_plt->plt_entry,
				   non_lazy_bnd_ibt_plt->plt_got_offset)
			   == 0))
		{
		  /* Match BND IBT PLT.  */
		  plt_type = plt_second;
		  non_lazy_plt = non_lazy_bnd_ibt_plt;
		}
	    }
	}

      if (plt_type == plt_unknown)
	{
	  /* Unrecognized layout: release the mapped contents and try
	     the next candidate section.  */
	  _bfd_elf_munmap_section_contents (plt, plt_contents);
	  continue;
	}

      plts[j].sec = plt;
      plts[j].type = plt_type;

      if ((plt_type & plt_lazy))
	{
	  plts[j].plt_got_offset = lazy_plt->plt_got_offset;
	  plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
	  plts[j].plt_entry_size = lazy_plt->plt_entry_size;
	  /* Skip PLT0 in lazy PLT.  */
	  i = 1;
	}
      else
	{
	  plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
	  plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
	  plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
	  i = 0;
	}

      /* Skip lazy PLT when the second PLT is used.  */
      if (plt_type == (plt_lazy | plt_second))
	plts[j].count = 0;
      else
	{
	  n = plt->size / plts[j].plt_entry_size;
	  plts[j].count = n;
	  count += n - i;
	}

      plts[j].contents = plt_contents;
    }

  return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
					    (bfd_vma) 0, plts, dynsyms,
					    ret);
}
5976
5977 /* Handle an x86-64 specific section when reading an object file. This
5978 is called when elfcode.h finds a section with an unknown type. */
5979
5980 static bool
5981 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5982 const char *name, int shindex)
5983 {
5984 if (hdr->sh_type != SHT_X86_64_UNWIND)
5985 return false;
5986
5987 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5988 return false;
5989
5990 return true;
5991 }
5992
5993 /* Hook called by the linker routine which adds symbols from an object
5994 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5995 of .bss. */
5996
5997 static bool
5998 elf_x86_64_add_symbol_hook (bfd *abfd,
5999 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6000 Elf_Internal_Sym *sym,
6001 const char **namep ATTRIBUTE_UNUSED,
6002 flagword *flagsp ATTRIBUTE_UNUSED,
6003 asection **secp,
6004 bfd_vma *valp)
6005 {
6006 asection *lcomm;
6007
6008 switch (sym->st_shndx)
6009 {
6010 case SHN_X86_64_LCOMMON:
6011 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6012 if (lcomm == NULL)
6013 {
6014 lcomm = bfd_make_section_with_flags (abfd,
6015 "LARGE_COMMON",
6016 (SEC_ALLOC
6017 | SEC_IS_COMMON
6018 | SEC_LINKER_CREATED));
6019 if (lcomm == NULL)
6020 return false;
6021 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6022 }
6023 *secp = lcomm;
6024 *valp = sym->st_size;
6025 return true;
6026 }
6027
6028 return true;
6029 }
6030
6031
6032 /* Given a BFD section, try to locate the corresponding ELF section
6033 index. */
6034
6035 static bool
6036 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6037 asection *sec, int *index_return)
6038 {
6039 if (sec == &_bfd_elf_large_com_section)
6040 {
6041 *index_return = SHN_X86_64_LCOMMON;
6042 return true;
6043 }
6044 return false;
6045 }
6046
6047 /* Process a symbol. */
6048
6049 static void
6050 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6051 asymbol *asym)
6052 {
6053 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6054
6055 switch (elfsym->internal_elf_sym.st_shndx)
6056 {
6057 case SHN_X86_64_LCOMMON:
6058 asym->section = &_bfd_elf_large_com_section;
6059 asym->value = elfsym->internal_elf_sym.st_size;
6060 /* Common symbol doesn't set BSF_GLOBAL. */
6061 asym->flags &= ~BSF_GLOBAL;
6062 break;
6063 }
6064 }
6065
6066 static bool
6067 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6068 {
6069 return (sym->st_shndx == SHN_COMMON
6070 || sym->st_shndx == SHN_X86_64_LCOMMON);
6071 }
6072
6073 static unsigned int
6074 elf_x86_64_common_section_index (asection *sec)
6075 {
6076 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6077 return SHN_COMMON;
6078 else
6079 return SHN_X86_64_LCOMMON;
6080 }
6081
6082 static asection *
6083 elf_x86_64_common_section (asection *sec)
6084 {
6085 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6086 return bfd_com_section_ptr;
6087 else
6088 return &_bfd_elf_large_com_section;
6089 }
6090
static bool
elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
			 const Elf_Internal_Sym *sym,
			 asection **psec,
			 bool newdef,
			 bool olddef,
			 bfd *oldbfd,
			 const asection *oldsec)
{
  /* A normal common symbol and a large common symbol result in a
     normal common symbol.  We turn the large common symbol into a
     normal one.  */
  if (!olddef
      && h->root.type == bfd_link_hash_common
      && !newdef
      && bfd_is_com_section (*psec)
      && oldsec != *psec)
    {
      if (sym->st_shndx == SHN_COMMON
	  && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
	{
	  /* Old symbol is large common, new one is normal common:
	     demote the existing entry into a plain COMMON section.  */
	  h->root.u.c.p->section
	    = bfd_make_section_old_way (oldbfd, "COMMON");
	  h->root.u.c.p->section->flags = SEC_ALLOC;
	}
      else if (sym->st_shndx == SHN_X86_64_LCOMMON
	       && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
	/* Old symbol is normal common, new one is large common: keep
	   the merged symbol in the normal common section.  */
	*psec = bfd_com_section_ptr;
    }

  return true;
}
6123
6124 static bool
6125 elf_x86_64_section_flags (const Elf_Internal_Shdr *hdr)
6126 {
6127 if ((hdr->sh_flags & SHF_X86_64_LARGE) != 0)
6128 hdr->bfd_section->flags |= SEC_ELF_LARGE;
6129
6130 return true;
6131 }
6132
6133 static bool
6134 elf_x86_64_fake_sections (bfd *abfd ATTRIBUTE_UNUSED,
6135 Elf_Internal_Shdr *hdr, asection *sec)
6136 {
6137 if (sec->flags & SEC_ELF_LARGE)
6138 hdr->sh_flags |= SHF_X86_64_LARGE;
6139
6140 return true;
6141 }
6142
6143 static bool
6144 elf_x86_64_copy_private_section_data (bfd *ibfd, asection *isec,
6145 bfd *obfd, asection *osec,
6146 struct bfd_link_info *link_info)
6147 {
6148 if (!_bfd_elf_copy_private_section_data (ibfd, isec, obfd, osec, link_info))
6149 return false;
6150
6151 /* objcopy --set-section-flags without "large" drops SHF_X86_64_LARGE. */
6152 if (link_info == NULL && ibfd != obfd)
6153 elf_section_flags (osec) &= ~SHF_X86_64_LARGE;
6154
6155 return true;
6156 }
6157
6158 static int
6159 elf_x86_64_additional_program_headers (bfd *abfd,
6160 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6161 {
6162 asection *s;
6163 int count = 0;
6164
6165 /* Check to see if we need a large readonly segment. */
6166 s = bfd_get_section_by_name (abfd, ".lrodata");
6167 if (s && (s->flags & SEC_LOAD))
6168 count++;
6169
6170 /* Check to see if we need a large data segment. Since .lbss sections
6171 is placed right after the .bss section, there should be no need for
6172 a large data segment just because of .lbss. */
6173 s = bfd_get_section_by_name (abfd, ".ldata");
6174 if (s && (s->flags & SEC_LOAD))
6175 count++;
6176
6177 return count;
6178 }
6179
6180 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6181
6182 static bool
6183 elf_x86_64_relocs_compatible (const bfd_target *input,
6184 const bfd_target *output)
6185 {
6186 return ((xvec_get_elf_backend_data (input)->s->elfclass
6187 == xvec_get_elf_backend_data (output)->s->elfclass)
6188 && _bfd_elf_relocs_compatible (input, output));
6189 }
6190
6191 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
6192 with GNU properties if found. Otherwise, return NULL. */
6193
6194 static bfd *
6195 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
6196 {
6197 struct elf_x86_init_table init_table;
6198 const struct elf_backend_data *bed;
6199 struct elf_x86_link_hash_table *htab;
6200
6201 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
6202 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
6203 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
6204 != (int) R_X86_64_GNU_VTINHERIT)
6205 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
6206 != (int) R_X86_64_GNU_VTENTRY))
6207 abort ();
6208
6209 /* This is unused for x86-64. */
6210 init_table.plt0_pad_byte = 0x90;
6211
6212 bed = get_elf_backend_data (info->output_bfd);
6213 htab = elf_x86_hash_table (info, bed->target_id);
6214 if (!htab)
6215 abort ();
6216
6217 init_table.lazy_plt = &elf_x86_64_lazy_plt;
6218 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
6219
6220 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
6221 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
6222
6223 if (ABI_64_P (info->output_bfd))
6224 {
6225 init_table.sframe_lazy_plt = &elf_x86_64_sframe_plt;
6226 init_table.sframe_non_lazy_plt = &elf_x86_64_sframe_non_lazy_plt;
6227 init_table.sframe_lazy_ibt_plt = &elf_x86_64_sframe_ibt_plt;
6228 init_table.sframe_non_lazy_ibt_plt = &elf_x86_64_sframe_non_lazy_ibt_plt;
6229 }
6230 else
6231 {
6232 /* SFrame is not supported for non AMD64. */
6233 init_table.sframe_lazy_plt = NULL;
6234 init_table.sframe_non_lazy_plt = NULL;
6235 }
6236
6237 if (ABI_64_P (info->output_bfd))
6238 {
6239 init_table.r_info = elf64_r_info;
6240 init_table.r_sym = elf64_r_sym;
6241 }
6242 else
6243 {
6244 init_table.r_info = elf32_r_info;
6245 init_table.r_sym = elf32_r_sym;
6246 }
6247
6248 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
6249 }
6250
6251 static void
6252 elf_x86_64_add_glibc_version_dependency
6253 (struct elf_find_verdep_info *rinfo)
6254 {
6255 unsigned int i = 0;
6256 const char *version[3] = { NULL, NULL, NULL };
6257 struct elf_x86_link_hash_table *htab;
6258
6259 if (rinfo->info->enable_dt_relr)
6260 {
6261 version[i] = "GLIBC_ABI_DT_RELR";
6262 i++;
6263 }
6264
6265 htab = elf_x86_hash_table (rinfo->info, X86_64_ELF_DATA);
6266 if (htab != NULL && htab->params->mark_plt)
6267 {
6268 version[i] = "GLIBC_2.36";
6269 i++;
6270 }
6271
6272 if (i != 0)
6273 _bfd_elf_link_add_glibc_version_dependency (rinfo, version);
6274 }
6275
/* Backend table of special sections.  All entries carry the
   SHF_X86_64_LARGE flag, i.e. the large-data sections (.lbss, .ldata,
   .lrodata) and their .gnu.linkonce.l* counterparts used by the
   medium/large code models.  The -2 field follows the
   bfd_elf_special_section convention used elsewhere in BFD for
   prefix-matched names — TODO confirm against elf-bfd.h.  */
static const struct bfd_elf_special_section
elf_x86_64_special_sections[]=
{
  { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
  { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
  { NULL, 0, 0, 0, 0 }
};
6287
/* Primary target vector: little-endian ELF64 x86-64 (Linux etc.).  */
#define TARGET_LITTLE_SYM		    x86_64_elf64_vec
#define TARGET_LITTLE_NAME		    "elf64-x86-64"
#define ELF_ARCH			    bfd_arch_i386
#define ELF_TARGET_ID			    X86_64_ELF_DATA
#define ELF_MACHINE_CODE		    EM_X86_64
#define ELF_MAXPAGESIZE			    0x1000
#define ELF_COMMONPAGESIZE		    0x1000

/* Generic backend capabilities and layout parameters.  */
#define elf_backend_can_gc_sections	    1
#define elf_backend_can_refcount	    1
#define elf_backend_want_got_plt	    1
#define elf_backend_plt_readonly	    1
#define elf_backend_want_plt_sym	    0
/* Reserved GOT entries: GOT[0] (dynamic section address) plus the two
   slots the dynamic linker fills in.  */
#define elf_backend_got_header_size	    (GOT_ENTRY_SIZE*3)
#define elf_backend_rela_normal		    1
#define elf_backend_plt_alignment	    4
#define elf_backend_caches_rawsize	    1
#define elf_backend_dtrel_excludes_plt	    1
#define elf_backend_want_dynrelro	    1

#define elf_info_to_howto		    elf_x86_64_info_to_howto

/* BFD-level entry points.  */
#define bfd_elf64_bfd_copy_private_section_data \
  elf_x86_64_copy_private_section_data
#define bfd_elf64_bfd_reloc_type_lookup	    elf_x86_64_reloc_type_lookup
#define bfd_elf64_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup

/* ELF backend hooks implemented earlier in this file (or shared with
   the common x86 code in elfxx-x86.c).  */
#define elf_backend_relocs_compatible	    elf_x86_64_relocs_compatible
#define elf_backend_early_size_sections	    elf_x86_64_early_size_sections
#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol   elf_x86_64_finish_dynamic_symbol
#define elf_backend_output_arch_local_syms  elf_x86_64_output_arch_local_syms
#define elf_backend_grok_prstatus	    elf_x86_64_grok_prstatus
#define elf_backend_grok_psinfo		    elf_x86_64_grok_psinfo
#ifdef CORE_HEADER
#define elf_backend_write_core_note	    elf_x86_64_write_core_note
#endif
#define elf_backend_reloc_type_class	    elf_x86_64_reloc_type_class
#define elf_backend_relocate_section	    elf_x86_64_relocate_section
#define elf_backend_init_index_section	    _bfd_elf_init_1_index_section
#define elf_backend_object_p		    elf64_x86_64_elf_object_p
#define bfd_elf64_get_synthetic_symtab	    elf_x86_64_get_synthetic_symtab

#define elf_backend_section_from_shdr \
	elf_x86_64_section_from_shdr

/* Hooks for the large-common-section (SHF_X86_64_LARGE) support.  */
#define elf_backend_section_from_bfd_section \
  elf_x86_64_elf_section_from_bfd_section
#define elf_backend_add_symbol_hook \
  elf_x86_64_add_symbol_hook
#define elf_backend_symbol_processing \
  elf_x86_64_symbol_processing
#define elf_backend_common_section_index \
  elf_x86_64_common_section_index
#define elf_backend_common_section \
  elf_x86_64_common_section
#define elf_backend_common_definition \
  elf_x86_64_common_definition
#define elf_backend_merge_symbol \
  elf_x86_64_merge_symbol
#define elf_backend_special_sections \
  elf_x86_64_special_sections
#define elf_backend_section_flags	    elf_x86_64_section_flags
#define elf_backend_fake_sections	    elf_x86_64_fake_sections
#define elf_backend_additional_program_headers \
  elf_x86_64_additional_program_headers
#define elf_backend_setup_gnu_properties \
  elf_x86_64_link_setup_gnu_properties
#define elf_backend_hide_symbol \
  _bfd_x86_elf_hide_symbol
#define elf_backend_add_glibc_version_dependency \
  elf_x86_64_add_glibc_version_dependency

#undef	elf64_bed
#define elf64_bed elf64_x86_64_bed

/* Instantiate the default elf64-x86-64 target vector.  */
#include "elf64-target.h"
6367
/* CloudABI support.  Same backend as the default vector, but with the
   CloudABI OS/ABI byte in the ELF header.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_cloudabi_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-cloudabi"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_CLOUDABI

#undef	elf64_bed
#define elf64_bed elf64_x86_64_cloudabi_bed

/* Instantiate the CloudABI target vector.  */
#include "elf64-target.h"
6382
/* FreeBSD support.  Identical to the default vector except for the
   FreeBSD OS/ABI byte in the ELF header.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_fbsd_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef	elf64_bed
#define elf64_bed elf64_x86_64_fbsd_bed

/* Instantiate the FreeBSD target vector.  */
#include "elf64-target.h"
6397
/* Solaris 2 support.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_sol2_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-sol2"

#undef	ELF_TARGET_OS
#define	ELF_TARGET_OS			    is_solaris

/* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
   objects won't be recognized.  */
#undef	ELF_OSABI

#undef  elf64_bed
#define elf64_bed			    elf64_x86_64_sol2_bed

/* The 64-bit static TLS arena size is rounded to the nearest 16-byte
   boundary.  */
#undef  elf_backend_static_tls_alignment
#define elf_backend_static_tls_alignment    16

/* The Solaris 2 ABI requires a plt symbol on all platforms.

   Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
   File, p.63.  */
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	    1

/* Solaris wants string-table sections tagged SHF_STRINGS.  */
#undef  elf_backend_strtab_flags
#define elf_backend_strtab_flags	SHF_STRINGS
6429
/* Hook used when copying a Solaris-specific section header from input
   IBFD/ISECTION to output OBFD/OSECTION.  Currently a stub: returning
   false reports that the sh_info/sh_link fields were not copied.  */
static bool
elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
						  bfd *obfd ATTRIBUTE_UNUSED,
						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
{
  /* PR 19938: FIXME: Need to add code for setting the sh_info
     and sh_link fields of Solaris specific section types.  */
  return false;
}
6440
/* Register the Solaris section-field copy hook and instantiate the
   Solaris 2 target vector.  */
#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields

#include "elf64-target.h"
6445
/* Restore defaults.  Undo the Solaris-specific overrides before the
   next target vector is defined.  */
#undef	ELF_OSABI
#undef	elf_backend_static_tls_alignment
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef  elf_backend_strtab_flags
#undef  elf_backend_copy_special_section_fields
6453
/* 32bit x86-64 (x32) support: EM_X86_64 machine code with ELF32
   containers; shares relocation handling with the 64-bit vectors.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf32_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf32-x86-64"
#undef	elf32_bed
#define	elf32_bed			    elf32_x86_64_bed

#undef ELF_ARCH
#define ELF_ARCH			    bfd_arch_i386

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_X86_64

#undef	ELF_TARGET_OS
#undef	ELF_OSABI

/* Reuse the 64-bit implementations for the BFD-level entry points.  */
#define bfd_elf32_bfd_copy_private_section_data \
  elf_x86_64_copy_private_section_data
#define bfd_elf32_bfd_reloc_type_lookup	\
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

/* Object recognition and remote-memory/size handling must use the
   ELF32 variants.  */
#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

/* Instantiate the x32 target vector.  */
#include "elf32-target.h"
6494