/* 32-bit ELF support for ARM
   Copyright (C) 1998-2015 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
/* Return the name of the relocation section that pairs with section
   NAME: ".rel" NAME when the target uses REL relocations, ".rela" NAME
   otherwise.  HTAB is the bfd's elf32_arm_link_hash_table; NAME must be
   a string literal (the names are joined by literal concatenation).
   Fix: stripped the pasted cvsweb line-number residue that made these
   lines uncompilable.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
/* Return the on-disk size of one relocation entry for this bfd:
   an Elf32_External_Rel when the target uses REL relocations, an
   Elf32_External_Rela otherwise.  HTAB is the bfd's
   elf32_arm_link_hash_table.
   Fix: stripped the pasted cvsweb line-number residue that made these
   lines uncompilable.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
44
/* Return the function used to swap a relocation entry in from the
   file: the Rel variant when the target uses REL relocations, the
   Rela variant otherwise.  HTAB is the bfd's
   elf32_arm_link_hash_table.
   Fix: stripped the pasted cvsweb line-number residue that made these
   lines uncompilable.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
51
/* Return the function used to swap a relocation entry out to the
   file: the Rel variant when the target uses REL relocations, the
   Rela variant otherwise.  HTAB is the bfd's
   elf32_arm_link_hash_table.
   Fix: stripped the pasted cvsweb line-number residue that made these
   lines uncompilable.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
58
/* Relocation howto lookup hooks: the RELA hook is unused (0); REL
   entries are decoded by elf32_arm_info_to_howto.
   Fix: stripped the pasted cvsweb line-number residue that made these
   lines uncompilable.  */
#define elf_info_to_howto		0
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

/* ELF header e_ident ABI version and OS/ABI values for this target.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
64
/* The Adjusted Place, as defined by AAELF: clears the bottom two bits,
   i.e. rounds X down to a 4-byte (word) boundary.
   Fix: stripped the pasted cvsweb line-number residue that made these
   lines uncompilable.  */
#define Pa(X)  ((X) & 0xfffffffc)
67
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
70 asection *sec,
71 bfd_byte *contents);
72
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 };
1693
/* Relocation types 160 onwards.  Only R_ARM_IRELATIVE (160) is defined,
   so this table holds a single entry; it is indexed by
   r_type - R_ARM_IRELATIVE in elf32_arm_howto_from_type.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1711
/* Extended, currently unused, relocation types 249-252
   (R_ARM_RREL32 .. R_ARM_RBASE).  All four entries are placeholders:
   zero size, zero bitsize and zero masks, so they apply no bits.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1771
1772 static reloc_howto_type *
1773 elf32_arm_howto_from_type (unsigned int r_type)
1774 {
1775 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1776 return &elf32_arm_howto_table_1[r_type];
1777
1778 if (r_type == R_ARM_IRELATIVE)
1779 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1780
1781 if (r_type >= R_ARM_RREL32
1782 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1783 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1784
1785 return NULL;
1786 }
1787
1788 static void
1789 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1790 Elf_Internal_Rela * elf_reloc)
1791 {
1792 unsigned int r_type;
1793
1794 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1795 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1796 }
1797
/* One entry of the BFD-to-ELF relocation mapping table: pairs a BFD
   relocation code with the corresponding R_ARM_* number.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* BFD-internal reloc code.  */
  unsigned char elf_reloc_val;			/* ELF R_ARM_* number.  */
};
1803
1804 /* All entries in this list must also be present in elf32_arm_howto_table. */
1805 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1806 {
1807 {BFD_RELOC_NONE, R_ARM_NONE},
1808 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1809 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1810 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1811 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1812 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1813 {BFD_RELOC_32, R_ARM_ABS32},
1814 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1815 {BFD_RELOC_8, R_ARM_ABS8},
1816 {BFD_RELOC_16, R_ARM_ABS16},
1817 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1818 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1825 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1826 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1827 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1828 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1829 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1830 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1831 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1832 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1833 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1834 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1835 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1836 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1837 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1838 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1839 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1840 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1841 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1842 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1843 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1845 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1846 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1847 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1848 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1849 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1850 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1851 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1852 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1853 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1854 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1855 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1856 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1857 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1858 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1859 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1860 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1861 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1862 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1863 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1864 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1865 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1866 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1867 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1868 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1869 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1870 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1871 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1872 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1873 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1874 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1875 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1876 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1877 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1878 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1879 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1880 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1881 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1882 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1883 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1884 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1885 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1886 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1887 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1888 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1889 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1890 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1891 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1892 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1893 };
1894
1895 static reloc_howto_type *
1896 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1897 bfd_reloc_code_real_type code)
1898 {
1899 unsigned int i;
1900
1901 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1902 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1903 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1904
1905 return NULL;
1906 }
1907
1908 static reloc_howto_type *
1909 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1910 const char *r_name)
1911 {
1912 unsigned int i;
1913
1914 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1915 if (elf32_arm_howto_table_1[i].name != NULL
1916 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1917 return &elf32_arm_howto_table_1[i];
1918
1919 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1920 if (elf32_arm_howto_table_2[i].name != NULL
1921 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1922 return &elf32_arm_howto_table_2[i];
1923
1924 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1925 if (elf32_arm_howto_table_3[i].name != NULL
1926 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1927 return &elf32_arm_howto_table_3[i];
1928
1929 return NULL;
1930 }
1931
1932 /* Support for core dump NOTE sections. */
1933
/* Parse an NT_PRSTATUS core-dump note and create a ".reg/<pid>"
   pseudosection covering the general-purpose register block.
   Returns FALSE for note sizes this backend does not recognize.
   The magic offsets below index into the Linux/ARM struct
   elf_prstatus layout.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg: 18 32-bit registers starting at byte 72.  */
      offset = 72;
      size = 72;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
1963
1964 static bfd_boolean
1965 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1966 {
1967 switch (note->descsz)
1968 {
1969 default:
1970 return FALSE;
1971
1972 case 124: /* Linux/ARM elf_prpsinfo. */
1973 elf_tdata (abfd)->core->pid
1974 = bfd_get_32 (abfd, note->descdata + 12);
1975 elf_tdata (abfd)->core->program
1976 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1977 elf_tdata (abfd)->core->command
1978 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1979 }
1980
1981 /* Note that for some reason, a spurious space is tacked
1982 onto the end of the args in some (at least one anyway)
1983 implementations, so strip it off if it exists. */
1984 {
1985 char *command = elf_tdata (abfd)->core->command;
1986 int n = strlen (command);
1987
1988 if (0 < n && command[n - 1] == ' ')
1989 command[n - 1] = '\0';
1990 }
1991
1992 return TRUE;
1993 }
1994
/* Write a Linux/ARM core-dump note of NOTE_TYPE into BUF (growing it
   via elfcore_write_note).  The variadic arguments depend on the note
   type: for NT_PRPSINFO, (const char *fname, const char *psargs); for
   NT_PRSTATUS, (long pid, int cursig, const void *gregs).  Returns
   NULL for unsupported note types.  The data layouts and offsets
   mirror those decoded in the grok functions above.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* sizeof (struct elf_prpsinfo).  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname and pr_psargs are fixed-width fields that need not
	   be NUL-terminated, so strncpy's lack of guaranteed
	   termination is intentional here.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* sizeof (struct elf_prstatus).  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	/* pr_reg: caller must supply at least 72 bytes of registers.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2042
/* BFD target vector symbols and names for the two ARM byte orders.  */
#define TARGET_LITTLE_SYM               arm_elf32_le_vec
#define TARGET_LITTLE_NAME              "elf32-littlearm"
#define TARGET_BIG_SYM                  arm_elf32_be_vec
#define TARGET_BIG_NAME                 "elf32-bigarm"

/* Hook the core-dump note readers/writer defined above into the
   ELF backend.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Host types used to hold ARM (32-bit) and Thumb (16-bit) instruction
   images.  Note that `unsigned long' may be wider than 32 bits on
   some hosts.  */
typedef unsigned long int		insn32;
typedef unsigned short int		insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))
2061
/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

/* Veneer section/entry names for the VFP11 erratum workaround.  */
#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

/* Veneer section/entry names for the STM32L4XX erratum workaround.  */
#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

/* Glue section/entry names used to fix up ARMv4 BX instructions
   (cf. R_ARM_V4BX).  */
#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

/* Naming template for the local symbols of long-branch stubs.  */
#define STUB_ENTRY_NAME   "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2086
/* ARM-mode trampoline used for TLS calls.  NOTE(review): the register
   contract (r0/lr set up by the caller) is inferred from the
   instructions below -- confirm against the code that emits it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Veneer that reaches the lazy TLS-descriptor resolver through the
   GOT.  The two trailing data words are GOT-relative offsets that are
   patched at link time, as described in the comments.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
		       + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2106
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  The #NN immediates are filled in when each entry is laid
   out; the fourth word is padding so every entry is four words.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  The final word is patched with the GOT-relative
   displacement.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str	 lr, [sp, #-4]! */
  0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
  0xe08fe00e,		/* add	 lr, pc, lr	*/
  0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
  0x00000000,		/* &GOT[0] - .		*/
};

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add   ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,           /* add   ip, pc, #0xN0000000 */
  0xe28cc600,		/* add   ip, ip, #0xNN00000  */
  0xe28cca00,		/* add   ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!   */
};

/* Whether the "long" PLT entry format is in use; set when the user
   explicitly asks for it.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2168
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
  0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  The movw/movt immediates (#0xNNNN) are filled in
   when the entry is laid out.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw	   ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
  0xf8dc44fc,		/* add	   ip, pc	  */
  0xbf00f000		/* ldr.w   pc, [ip]	  */
			/* nop			  */
};
2195
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  The final word is patched with the
   address of the GOT.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,	        /* str    ip,[sp,#-8]!			*/
  0xe59fc000,         	/* ldr    ip,[pc]			*/
  0xe59cf008,         	/* ldr    pc,[ip,#8]			*/
  0x00000000,         	/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.
   Each entry is two fragments: a direct jump through the GOT slot,
   then a fall-back branch to the PLT header carrying the reloc
   index for lazy resolution.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,         	/* ldr	  ip,[pc]			*/
  0xe59cf000,         	/* ldr	  pc,[ip]			*/
  0x00000000,         	/* .long  @got				*/
  0xe59fc000,         	/* ldr	  ip,[pc]			*/
  0xea000000,         	/* b	  _PLT				*/
  0x00000000,         	/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.  The GOT is
   addressed relative to r9 here.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,         	/* ldr	  ip,[pc]			*/
  0xe79cf009,         	/* ldr	  pc,[ip,r9]			*/
  0x00000000,         	/* .long  @got				*/
  0xe59fc000,         	/* ldr	  ip,[pc]			*/
  0xe599f008,         	/* ldr	  pc,[r9,#8]			*/
  0x00000000,         	/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};
2227
/* An initial stub used if the PLT entry is referenced from Thumb code.
   "bx pc" switches to ARM mode and falls through into the ARM PLT
   entry that follows.  Size is two 16-bit instructions = 4 bytes.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces (Symbian).  Each entry is simply a literal-pool
   load of the dynamically relocated target address.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,         	/* ldr   pc, [pc, #-4] */
  0x00000000,         	/* dcd   R_ARM_GLOB_DAT(X) */
};
2243
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  NaCl requires 16-byte instruction bundles and the
   address-masking (bic) sandboxing sequence before indirect jumps.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};
/* Byte offset of the .Lplt_tail label above (12th word) within the
   PLT header; the "b .Lplt_tail" in each entry branches here.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
2282
/* Maximum forward/backward reach of the various branch encodings,
   including the pipeline offset (PC reads as insn+8 in ARM state,
   insn+4 in Thumb state).  ARM B/BL: 24-bit signed word offset;
   Thumb-1 BL: 22-bit; Thumb-2 B.W/BL: 24-bit; Thumb-2 conditional
   B<c>.W: 20-bit.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23)) << 2) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2291
/* Kind of each element in a stub template: a 16-bit Thumb insn, a
   32-bit Thumb insn, a 32-bit ARM insn, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Helpers for building stub templates.  The fourth field is the
   reloc addend (or, for THUMB16_BCOND_INSN, a flag -- see below).  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: the instruction or data word, its
   kind, the relocation to apply to it (if any), and the addend for
   that relocation.  */
typedef struct
{
  bfd_vma	      data;
  enum stub_insn_type type;
  unsigned int	      r_type;
  int		      reloc_addend;
}  insn_sequence;
2317
/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  "bx ip" performs the required mode switch.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub. Used on M-profile architectures,
   which cannot execute ARM code; stays entirely in Thumb state.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  THUMB16_INSN (0xbf00),	     /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
};

/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};
2397
/* V4T ARM -> Thumb long branch stub, PIC.  (The bx performs the
   mode switch; cf. the non-PIC v4t_arm_thumb stub above.)  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop  */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
};

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
};

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2461
/* NaCl ARM -> ARM long branch stub.  Padded with nop/bkpt and two
   zero words so the stub fills whole NaCl bundles; the bic masks the
   target before the sandboxed indirect branch.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),		/* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)		/* b original_branch_dest.  */
};
2521
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in elf32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
2548
/* One entry per long/short branch stub defined above.  This is an
   X-macro: DEF_STUBS is expanded twice with different definitions of
   DEF_STUB, once to build the stub-type enum and once to build the
   parallel template table, keeping the two in lockstep.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)

/* First expansion: one enumerator per stub.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  /* Note the first a8_veneer type.  */
  arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
};
#undef DEF_STUB

/* A stub template and its length in insn_sequence entries.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Second expansion: the template table, indexed by
   enum elf32_arm_stub_type (entry 0 corresponds to arm_stub_none).  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2594
/* Hash table entry describing one generated stub: where it lives,
   where it jumps, and which template it is built from.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Offset to apply to relocation referencing target_value.  */
  bfd_vma target_addend;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2642
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  VMA is the address of a mapping symbol; TYPE is its
   class character ('a' = ARM code, 't' = Thumb code, 'd' = data --
   TODO confirm against the mapping-symbol reader).  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;

/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* Linked list of VFP11 erratum records.  Each record is either a
   branch site (u.b: the veneer it branches to and the offending VFP
   insn) or a veneer (u.v: back-pointer to the branch and a unique
   id), discriminated by TYPE.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2684
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Linked list of STM32L4XX erratum records: branch sites (u.b) and
   veneers (u.v), discriminated by TYPE; same scheme as the VFP11
   erratum list above.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;

/* Kinds of edit applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2734
/* ARM-specific per-section data, extending the generic ELF section
   data (which must be the first member so the cast in
   elf32_arm_section_data below is valid).  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;	/* Entries used in MAP.  */
  unsigned int mapsize;		/* Entries allocated in MAP.  */
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Access a section's ARM-specific data; only valid for sections whose
   elf_section_data was allocated as _arm_elf_section_data.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2768
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;		/* BFD containing the branch.  */
  asection *section;		/* Section containing the branch.  */
  bfd_vma offset;		/* Offset of the branch within SECTION.  */
  bfd_vma addend;
  unsigned long orig_insn;	/* The original branch instruction.  */
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;			/* Address of the branch itself.  */
  bfd_vma destination;		/* Address the branch targets.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  bfd_boolean non_a8_stub;	/* TRUE if an ordinary (non-A8) stub
				   already covers this branch.  */
};

/* The size of the thread control block, in bytes.  */
#define TCB_SIZE	8
2803
/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};

/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* ARM-specific per-BFD object data.  The generic tdata must come
   first so elf_arm_tdata's cast below is valid.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
2863
/* Convert a bfd to its ARM-specific tdata view; only valid for BFDs
   created via elf32_arm_mkobject below.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* TRUE if BFD is an ELF object carrying ARM backend data.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)

/* Allocate ARM-specific object data for ABFD.  */
static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
2887
/* Cast a generic ELF hash entry to the ARM-specific variant.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Bitmask values describing how a symbol's GOT entries are used.
   GD and GDESC may be set simultaneously; see GOT_TLS_GD_ANY_P.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  /* Bitmask of the GOT_* values above.  */
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Padding of the bitfield word; currently unused.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
2926
/* Traverse an arm ELF linker hash table.  The callback FUNC is cast,
   so it may take an elf32_arm_link_hash_entry pointer directly.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL if the hash table was not created by this backend.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up STRING in the stub hash table, optionally creating/copying.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the TLS-descriptor jump table: one 4-byte slot per
   R_ARM_TLS_DESC entry allocated so far.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
2956
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of the section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  A refcount while sizing,
     then an offset once sections have been laid out.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3117
/* Count the trailing zero bits of MASK, i.e. the bit index of its
   lowest set bit.  The portable fallback returns the bit width of
   MASK when MASK is zero; the builtin's result is undefined there.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int index;

  for (index = 0; index < 8 * sizeof (mask); index++)
    {
      if (mask & 0x1)
	break;
      mask >>= 1;
    }
  return index;
#endif
}
3135
#ifndef __NetBSD__
/* Return the number of bits set in MASK (population count).  NetBSD
   provides its own popcount, hence the guard.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int count = 0;

  for (; mask != 0; mask >>= 1)
    count += mask & 0x1;
  return count;
#endif
}
#endif
3155
/* Create an entry in an ARM ELF linker hash table.  ENTRY may be a
   pre-allocated entry from a subclass, or NULL to allocate one here;
   TABLE and STRING are passed through to the superclass constructor.
   Returns the initialised entry, or NULL on allocation failure.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialise the ARM-specific portion of the entry.  -1 means
	 "no offset assigned yet" for the GOT fields.  */
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;
    }

  return (struct bfd_hash_entry *) ret;
}
3195
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  Idempotent: the presence of the got-refcounts array marks
   everything as already allocated.  Returns FALSE only on allocation
   failure.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* sh_info of the symtab header is the number of local symbols;
	 reserve one slot per local symbol in each of four arrays.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* Carve the single zeroed allocation into four parallel arrays;
	 the char-sized tls_type array comes last so the wider-typed
	 arrays preceding it stay naturally aligned.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3230
3231 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3232 to input bfd ABFD. Create the information if it doesn't already exist.
3233 Return null if an allocation fails. */
3234
3235 static struct arm_local_iplt_info *
3236 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3237 {
3238 struct arm_local_iplt_info **ptr;
3239
3240 if (!elf32_arm_allocate_local_sym_info (abfd))
3241 return NULL;
3242
3243 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3244 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3245 if (*ptr == NULL)
3246 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3247 return *ptr;
3248 }
3249
3250 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3251 in ABFD's symbol table. If the symbol is global, H points to its
3252 hash table entry, otherwise H is null.
3253
3254 Return true if the symbol does have PLT information. When returning
3255 true, point *ROOT_PLT at the target-independent reference count/offset
3256 union and *ARM_PLT at the ARM-specific information. */
3257
3258 static bfd_boolean
3259 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3260 unsigned long r_symndx, union gotplt_union **root_plt,
3261 struct arm_plt_info **arm_plt)
3262 {
3263 struct arm_local_iplt_info *local_iplt;
3264
3265 if (h != NULL)
3266 {
3267 *root_plt = &h->root.plt;
3268 *arm_plt = &h->plt;
3269 return TRUE;
3270 }
3271
3272 if (elf32_arm_local_iplt (abfd) == NULL)
3273 return FALSE;
3274
3275 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3276 if (local_iplt == NULL)
3277 return FALSE;
3278
3279 *root_plt = &local_iplt->root;
3280 *arm_plt = &local_iplt->arm;
3281 return TRUE;
3282 }
3283
3284 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3285 before it. */
3286
3287 static bfd_boolean
3288 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3289 struct arm_plt_info *arm_plt)
3290 {
3291 struct elf32_arm_link_hash_table *htab;
3292
3293 htab = elf32_arm_hash_table (info);
3294 return (arm_plt->thumb_refcount != 0
3295 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3296 }
3297
/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      struct arm_local_iplt_info *local_iplt;

      /* For ifunc symbols the relocs hang off the per-symbol .iplt
	 information, which is created here on demand.  */
      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      /* The list is kept on the section the symbol is defined in.  */
      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
3331
/* Initialize an entry in the stub hash table.  ENTRY may be NULL, in
   which case space for it is allocated from TABLE; STRING is passed
   through to the generic constructor.  Returns NULL on allocation
   failure.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields to "no stub yet" defaults.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->target_addend = 0;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = 0;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3374
3375 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3376 shortcuts to them in our hash table. */
3377
3378 static bfd_boolean
3379 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3380 {
3381 struct elf32_arm_link_hash_table *htab;
3382
3383 htab = elf32_arm_hash_table (info);
3384 if (htab == NULL)
3385 return FALSE;
3386
3387 /* BPABI objects never have a GOT, or associated sections. */
3388 if (htab->symbian_p)
3389 return TRUE;
3390
3391 if (! _bfd_elf_create_got_section (dynobj, info))
3392 return FALSE;
3393
3394 return TRUE;
3395 }
3396
/* Create the .iplt, .rel(a).iplt and .igot.plt sections.  Each section
   is only created if the corresponding root shortcut is still NULL, so
   the function is safe to call more than once.  Returns FALSE if any
   section cannot be created or aligned.
   NOTE(review): assumes elf32_arm_hash_table (info) and its dynobj are
   non-NULL — callers must have set up the dynamic object first.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  /* .iplt holds code, aligned like the regular PLT.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* The relocation section for .iplt (.rel.iplt or .rela.iplt).  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* GOT entries backing the .iplt stubs.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3444
3445 /* Determine if we're dealing with a Thumb only architecture. */
3446
3447 static bfd_boolean
3448 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3449 {
3450 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3451 Tag_CPU_arch);
3452 int profile;
3453
3454 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3455 return TRUE;
3456
3457 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3458 return FALSE;
3459
3460 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3461 Tag_CPU_arch_profile);
3462
3463 return profile == 'M';
3464 }
3465
3466 /* Determine if we're dealing with a Thumb-2 object. */
3467
3468 static bfd_boolean
3469 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3470 {
3471 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3472 Tag_CPU_arch);
3473 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3474 }
3475
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also chooses the PLT header/entry sizes for VxWorks and
   Thumb-only targets.  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* The GOT must exist before the generic dynamic sections.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  /* .rel(a).bss is only needed for non-PIC links.  */
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses its own PLT templates; shared links have no PLT
	 header.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* All required shortcuts must now be in place.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3544
/* Copy the extra info we tack onto an elf_link_hash_entry.  DIR is the
   direct (surviving) symbol, IND the indirect one being folded into it;
   dynamic reloc lists, PLT refcounts and TLS type are merged from IND
   into DIR before the generic copy runs.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    /* Same section: fold counts and unlink P.  */
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append DIR's old list after the remaining IND entries.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take IND's TLS type if DIR has no GOT references of
	 its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3611
3612 /* Destroy an ARM elf linker hash table. */
3613
3614 static void
3615 elf32_arm_link_hash_table_free (bfd *obfd)
3616 {
3617 struct elf32_arm_link_hash_table *ret
3618 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3619
3620 bfd_hash_table_free (&ret->stub_hash_table);
3621 _bfd_elf_link_hash_table_free (obfd);
3622 }
3623
/* Create an ARM elf linker hash table for ABFD.  Returns the generic
   hash table pointer, or NULL on allocation/initialisation failure
   (any partially built table is freed before returning).  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Defaults; many of these are overridden by the target-specific
     hash-table-create wrappers and command-line options.  */
  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* This also frees RET, which was linked into ABFD above.  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3667
3668 /* Determine what kind of NOPs are available. */
3669
3670 static bfd_boolean
3671 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3672 {
3673 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3674 Tag_CPU_arch);
3675 return arch == TAG_CPU_ARCH_V6T2
3676 || arch == TAG_CPU_ARCH_V6K
3677 || arch == TAG_CPU_ARCH_V7
3678 || arch == TAG_CPU_ARCH_V7E_M;
3679 }
3680
3681 static bfd_boolean
3682 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3683 {
3684 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3685 Tag_CPU_arch);
3686 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3687 || arch == TAG_CPU_ARCH_V7E_M);
3688 }
3689
3690 static bfd_boolean
3691 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3692 {
3693 switch (stub_type)
3694 {
3695 case arm_stub_long_branch_thumb_only:
3696 case arm_stub_long_branch_v4t_thumb_arm:
3697 case arm_stub_short_branch_v4t_thumb_arm:
3698 case arm_stub_long_branch_v4t_thumb_arm_pic:
3699 case arm_stub_long_branch_v4t_thumb_tls_pic:
3700 case arm_stub_long_branch_thumb_only_pic:
3701 return TRUE;
3702 case arm_stub_none:
3703 BFD_FAIL ();
3704 return FALSE;
3705 break;
3706 default:
3707 return FALSE;
3708 }
3709 }
3710
/* Determine the type of stub needed, if any, for a call.

   INFO is the link; INPUT_SEC/REL locate the branch in INPUT_BFD;
   ST_TYPE and *ACTUAL_BRANCH_TYPE describe the target symbol, with
   HASH its hash entry (NULL for local symbols); DESTINATION is the
   resolved target address, SYM_SEC the target's section and NAME its
   printable name (used for interworking warnings).

   Returns arm_stub_none when no stub is required; otherwise returns
   the stub type and records the possibly-adjusted destination mode in
   *ACTUAL_BRANCH_TYPE.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  int thumb2;
  int thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;

  /* ST_BRANCH_LONG means a stub decision has already been made.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);

  thumb2 = using_thumb2 (globals);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
				 &root_plt, &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub. We don't take this extra distance into account
	     here, because if a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry
	     because it avoids spreading offset corrections in several
	     places.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;
	  branch_type = ST_BRANCH_TO_ARM;
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl). A stub is needed in this case,
	   but only if this call is not through a PLT entry. Indeed,
	   PLT stubs handle mode switching already.
      */
      if ((!thumb2
	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above. Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stub.  */
		    ? arm_stub_long_branch_thumb_only_pic
		    /* non-PIC stub.  */
		    : arm_stub_long_branch_thumb_only;
		}
	    }
	  else
	    {
	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  (*_bfd_error_handler)
		    (_("%B(%s): warning: interworking not enabled.\n"
		       "  first occurrence: %B: Thumb call to ARM"),
		     sym_sec->owner, input_bfd, name);
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      (*_bfd_error_handler)
		(_("%B(%s): warning: interworking not enabled.\n"
		   "  first occurrence: %B: ARM call to Thumb"),
		 sym_sec->owner, input_bfd, name);
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
3976
3977 /* Build a name for an entry in the stub hash table. */
3978
3979 static char *
3980 elf32_arm_stub_name (const asection *input_section,
3981 const asection *sym_sec,
3982 const struct elf32_arm_link_hash_entry *hash,
3983 const Elf_Internal_Rela *rel,
3984 enum elf32_arm_stub_type stub_type)
3985 {
3986 char *stub_name;
3987 bfd_size_type len;
3988
3989 if (hash)
3990 {
3991 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3992 stub_name = (char *) bfd_malloc (len);
3993 if (stub_name != NULL)
3994 sprintf (stub_name, "%08x_%s+%x_%d",
3995 input_section->id & 0xffffffff,
3996 hash->root.root.root.string,
3997 (int) rel->r_addend & 0xffffffff,
3998 (int) stub_type);
3999 }
4000 else
4001 {
4002 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4003 stub_name = (char *) bfd_malloc (len);
4004 if (stub_name != NULL)
4005 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4006 input_section->id & 0xffffffff,
4007 sym_sec->id & 0xffffffff,
4008 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4009 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4010 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4011 (int) rel->r_addend & 0xffffffff,
4012 (int) stub_type);
4013 }
4014
4015 return stub_name;
4016 }
4017
4018 /* Look up an entry in the stub hash. Stub entries are cached because
4019 creating the stub name takes a bit of time. */
4020
4021 static struct elf32_arm_stub_hash_entry *
4022 elf32_arm_get_stub_entry (const asection *input_section,
4023 const asection *sym_sec,
4024 struct elf_link_hash_entry *hash,
4025 const Elf_Internal_Rela *rel,
4026 struct elf32_arm_link_hash_table *htab,
4027 enum elf32_arm_stub_type stub_type)
4028 {
4029 struct elf32_arm_stub_hash_entry *stub_entry;
4030 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4031 const asection *id_sec;
4032
4033 if ((input_section->flags & SEC_CODE) == 0)
4034 return NULL;
4035
4036 /* If this input section is part of a group of sections sharing one
4037 stub section, then use the id of the first section in the group.
4038 Stub names need to include a section id, as there may well be
4039 more than one stub used to reach say, printf, and we need to
4040 distinguish between them. */
4041 id_sec = htab->stub_group[input_section->id].link_sec;
4042
4043 if (h != NULL && h->stub_cache != NULL
4044 && h->stub_cache->h == h
4045 && h->stub_cache->id_sec == id_sec
4046 && h->stub_cache->stub_type == stub_type)
4047 {
4048 stub_entry = h->stub_cache;
4049 }
4050 else
4051 {
4052 char *stub_name;
4053
4054 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4055 if (stub_name == NULL)
4056 return NULL;
4057
4058 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4059 stub_name, FALSE, FALSE);
4060 if (h != NULL)
4061 h->stub_cache = stub_entry;
4062
4063 free (stub_name);
4064 }
4065
4066 return stub_entry;
4067 }
4068
4069 /* Find or create a stub section. Returns a pointer to the stub section, and
4070 the section to which the stub section will be attached (in *LINK_SEC_P).
4071 LINK_SEC_P may be NULL. */
4072
4073 static asection *
4074 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4075 struct elf32_arm_link_hash_table *htab)
4076 {
4077 asection *link_sec;
4078 asection *stub_sec;
4079
4080 link_sec = htab->stub_group[section->id].link_sec;
4081 BFD_ASSERT (link_sec != NULL);
4082 stub_sec = htab->stub_group[section->id].stub_sec;
4083
4084 if (stub_sec == NULL)
4085 {
4086 stub_sec = htab->stub_group[link_sec->id].stub_sec;
4087 if (stub_sec == NULL)
4088 {
4089 size_t namelen;
4090 bfd_size_type len;
4091 char *s_name;
4092
4093 namelen = strlen (link_sec->name);
4094 len = namelen + sizeof (STUB_SUFFIX);
4095 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4096 if (s_name == NULL)
4097 return NULL;
4098
4099 memcpy (s_name, link_sec->name, namelen);
4100 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4101 stub_sec = (*htab->add_stub_section) (s_name, link_sec,
4102 htab->nacl_p ? 4 : 3);
4103 if (stub_sec == NULL)
4104 return NULL;
4105 htab->stub_group[link_sec->id].stub_sec = stub_sec;
4106 }
4107 htab->stub_group[section->id].stub_sec = stub_sec;
4108 }
4109
4110 if (link_sec_p)
4111 *link_sec_p = link_sec;
4112
4113 return stub_sec;
4114 }
4115
4116 /* Add a new stub entry to the stub hash. Not all fields of the new
4117 stub entry are initialised. */
4118
4119 static struct elf32_arm_stub_hash_entry *
4120 elf32_arm_add_stub (const char *stub_name,
4121 asection *section,
4122 struct elf32_arm_link_hash_table *htab)
4123 {
4124 asection *link_sec;
4125 asection *stub_sec;
4126 struct elf32_arm_stub_hash_entry *stub_entry;
4127
4128 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
4129 if (stub_sec == NULL)
4130 return NULL;
4131
4132 /* Enter this entry into the linker stub hash table. */
4133 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4134 TRUE, FALSE);
4135 if (stub_entry == NULL)
4136 {
4137 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4138 section->owner,
4139 stub_name);
4140 return NULL;
4141 }
4142
4143 stub_entry->stub_sec = stub_sec;
4144 stub_entry->stub_offset = 0;
4145 stub_entry->id_sec = link_sec;
4146
4147 return stub_entry;
4148 }
4149
4150 /* Store an Arm insn into an output section not processed by
4151 elf32_arm_write_section. */
4152
4153 static void
4154 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4155 bfd * output_bfd, bfd_vma val, void * ptr)
4156 {
4157 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4158 bfd_putl32 (val, ptr);
4159 else
4160 bfd_putb32 (val, ptr);
4161 }
4162
4163 /* Store a 16-bit Thumb insn into an output section not processed by
4164 elf32_arm_write_section. */
4165
4166 static void
4167 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4168 bfd * output_bfd, bfd_vma val, void * ptr)
4169 {
4170 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4171 bfd_putl16 (val, ptr);
4172 else
4173 bfd_putb16 (val, ptr);
4174 }
4175
4176 /* Store a Thumb2 insn into an output section not processed by
4177 elf32_arm_write_section. */
4178
4179 static void
4180 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4181 bfd * output_bfd, bfd_vma val, void * ptr)
4182 {
4183 /* T2 instructions are 16-bit streamed. */
4184 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4185 {
4186 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4187 bfd_putl16 ((val & 0xffff), ptr + 2);
4188 }
4189 else
4190 {
4191 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4192 bfd_putb16 ((val & 0xffff), ptr + 2);
4193 }
4194 }
4195
4196 /* If it's possible to change R_TYPE to a more efficient access
4197 model, return the new reloc type. */
4198
4199 static unsigned
4200 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4201 struct elf_link_hash_entry *h)
4202 {
4203 int is_local = (h == NULL);
4204
4205 if (bfd_link_pic (info)
4206 || (h && h->root.type == bfd_link_hash_undefweak))
4207 return r_type;
4208
4209 /* We do not support relaxations for Old TLS models. */
4210 switch (r_type)
4211 {
4212 case R_ARM_TLS_GOTDESC:
4213 case R_ARM_TLS_CALL:
4214 case R_ARM_THM_TLS_CALL:
4215 case R_ARM_TLS_DESCSEQ:
4216 case R_ARM_THM_TLS_DESCSEQ:
4217 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4218 }
4219
4220 return r_type;
4221 }
4222
4223 static bfd_reloc_status_type elf32_arm_final_link_relocate
4224 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4225 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4226 const char *, unsigned char, enum arm_st_branch_type,
4227 struct elf_link_hash_entry *, bfd_boolean *, char **);
4228
/* Return the alignment, in bytes, required for the start of a stub of
   the given type: 2 for the Cortex-A8 erratum veneers (Thumb code),
   4 for the generic long/short branch stubs, 16 for the NaCl stubs
   (NOTE(review): presumably matching NaCl bundle alignment -- the
   templates are outside this view, confirm).  Aborts on an unknown
   stub type.  */
static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* Cortex-A8 branch veneers.  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    /* Ordinary interworking/long-branch stubs.  */
    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_a8_veneer_blx:
      return 4;

    /* Native Client stubs.  */
    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4264
4265 static bfd_boolean
4266 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4267 void * in_arg)
4268 {
4269 #define MAXRELOCS 3
4270 struct elf32_arm_stub_hash_entry *stub_entry;
4271 struct elf32_arm_link_hash_table *globals;
4272 struct bfd_link_info *info;
4273 asection *stub_sec;
4274 bfd *stub_bfd;
4275 bfd_byte *loc;
4276 bfd_vma sym_value;
4277 int template_size;
4278 int size;
4279 const insn_sequence *template_sequence;
4280 int i;
4281 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4282 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4283 int nrelocs = 0;
4284
4285 /* Massage our args to the form they really have. */
4286 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4287 info = (struct bfd_link_info *) in_arg;
4288
4289 globals = elf32_arm_hash_table (info);
4290 if (globals == NULL)
4291 return FALSE;
4292
4293 stub_sec = stub_entry->stub_sec;
4294
4295 if ((globals->fix_cortex_a8 < 0)
4296 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4297 /* We have to do less-strictly-aligned fixes last. */
4298 return TRUE;
4299
4300 /* Make a note of the offset within the stubs for this entry. */
4301 stub_entry->stub_offset = stub_sec->size;
4302 loc = stub_sec->contents + stub_entry->stub_offset;
4303
4304 stub_bfd = stub_sec->owner;
4305
4306 /* This is the address of the stub destination. */
4307 sym_value = (stub_entry->target_value
4308 + stub_entry->target_section->output_offset
4309 + stub_entry->target_section->output_section->vma);
4310
4311 template_sequence = stub_entry->stub_template;
4312 template_size = stub_entry->stub_template_size;
4313
4314 size = 0;
4315 for (i = 0; i < template_size; i++)
4316 {
4317 switch (template_sequence[i].type)
4318 {
4319 case THUMB16_TYPE:
4320 {
4321 bfd_vma data = (bfd_vma) template_sequence[i].data;
4322 if (template_sequence[i].reloc_addend != 0)
4323 {
4324 /* We've borrowed the reloc_addend field to mean we should
4325 insert a condition code into this (Thumb-1 branch)
4326 instruction. See THUMB16_BCOND_INSN. */
4327 BFD_ASSERT ((data & 0xff00) == 0xd000);
4328 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4329 }
4330 bfd_put_16 (stub_bfd, data, loc + size);
4331 size += 2;
4332 }
4333 break;
4334
4335 case THUMB32_TYPE:
4336 bfd_put_16 (stub_bfd,
4337 (template_sequence[i].data >> 16) & 0xffff,
4338 loc + size);
4339 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4340 loc + size + 2);
4341 if (template_sequence[i].r_type != R_ARM_NONE)
4342 {
4343 stub_reloc_idx[nrelocs] = i;
4344 stub_reloc_offset[nrelocs++] = size;
4345 }
4346 size += 4;
4347 break;
4348
4349 case ARM_TYPE:
4350 bfd_put_32 (stub_bfd, template_sequence[i].data,
4351 loc + size);
4352 /* Handle cases where the target is encoded within the
4353 instruction. */
4354 if (template_sequence[i].r_type == R_ARM_JUMP24)
4355 {
4356 stub_reloc_idx[nrelocs] = i;
4357 stub_reloc_offset[nrelocs++] = size;
4358 }
4359 size += 4;
4360 break;
4361
4362 case DATA_TYPE:
4363 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4364 stub_reloc_idx[nrelocs] = i;
4365 stub_reloc_offset[nrelocs++] = size;
4366 size += 4;
4367 break;
4368
4369 default:
4370 BFD_FAIL ();
4371 return FALSE;
4372 }
4373 }
4374
4375 stub_sec->size += size;
4376
4377 /* Stub size has already been computed in arm_size_one_stub. Check
4378 consistency. */
4379 BFD_ASSERT (size == stub_entry->stub_size);
4380
4381 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4382 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4383 sym_value |= 1;
4384
4385 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4386 in each stub. */
4387 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4388
4389 for (i = 0; i < nrelocs; i++)
4390 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4391 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4392 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4393 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4394 {
4395 Elf_Internal_Rela rel;
4396 bfd_boolean unresolved_reloc;
4397 char *error_message;
4398 enum arm_st_branch_type branch_type
4399 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4400 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4401 bfd_vma points_to = sym_value + stub_entry->target_addend;
4402
4403 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4404 rel.r_info = ELF32_R_INFO (0,
4405 template_sequence[stub_reloc_idx[i]].r_type);
4406 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4407
4408 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4409 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4410 template should refer back to the instruction after the original
4411 branch. */
4412 points_to = sym_value;
4413
4414 /* There may be unintended consequences if this is not true. */
4415 BFD_ASSERT (stub_entry->h == NULL);
4416
4417 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4418 properly. We should probably use this function unconditionally,
4419 rather than only for certain relocations listed in the enclosing
4420 conditional, for the sake of consistency. */
4421 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4422 (template_sequence[stub_reloc_idx[i]].r_type),
4423 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4424 points_to, info, stub_entry->target_section, "", STT_FUNC,
4425 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4426 &unresolved_reloc, &error_message);
4427 }
4428 else
4429 {
4430 Elf_Internal_Rela rel;
4431 bfd_boolean unresolved_reloc;
4432 char *error_message;
4433 bfd_vma points_to = sym_value + stub_entry->target_addend
4434 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4435
4436 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4437 rel.r_info = ELF32_R_INFO (0,
4438 template_sequence[stub_reloc_idx[i]].r_type);
4439 rel.r_addend = 0;
4440
4441 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4442 (template_sequence[stub_reloc_idx[i]].r_type),
4443 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4444 points_to, info, stub_entry->target_section, "", STT_FUNC,
4445 stub_entry->branch_type,
4446 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4447 &error_message);
4448 }
4449
4450 return TRUE;
4451 #undef MAXRELOCS
4452 }
4453
4454 /* Calculate the template, template size and instruction size for a stub.
4455 Return value is the instruction size. */
4456
4457 static unsigned int
4458 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4459 const insn_sequence **stub_template,
4460 int *stub_template_size)
4461 {
4462 const insn_sequence *template_sequence = NULL;
4463 int template_size = 0, i;
4464 unsigned int size;
4465
4466 template_sequence = stub_definitions[stub_type].template_sequence;
4467 if (stub_template)
4468 *stub_template = template_sequence;
4469
4470 template_size = stub_definitions[stub_type].template_size;
4471 if (stub_template_size)
4472 *stub_template_size = template_size;
4473
4474 size = 0;
4475 for (i = 0; i < template_size; i++)
4476 {
4477 switch (template_sequence[i].type)
4478 {
4479 case THUMB16_TYPE:
4480 size += 2;
4481 break;
4482
4483 case ARM_TYPE:
4484 case THUMB32_TYPE:
4485 case DATA_TYPE:
4486 size += 4;
4487 break;
4488
4489 default:
4490 BFD_FAIL ();
4491 return 0;
4492 }
4493 }
4494
4495 return size;
4496 }
4497
4498 /* As above, but don't actually build the stub. Just bump offset so
4499 we know stub section sizes. */
4500
4501 static bfd_boolean
4502 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4503 void *in_arg ATTRIBUTE_UNUSED)
4504 {
4505 struct elf32_arm_stub_hash_entry *stub_entry;
4506 const insn_sequence *template_sequence;
4507 int template_size, size;
4508
4509 /* Massage our args to the form they really have. */
4510 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4511
4512 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4513 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4514
4515 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4516 &template_size);
4517
4518 stub_entry->stub_size = size;
4519 stub_entry->stub_template = template_sequence;
4520 stub_entry->stub_template_size = template_size;
4521
4522 size = (size + 7) & ~7;
4523 stub_entry->stub_sec->size += size;
4524
4525 return TRUE;
4526 }
4527
4528 /* External entry points for sizing and building linker stubs. */
4529
4530 /* Set up various things so that we can make a list of input sections
4531 for each output section included in the link. Returns -1 on error,
4532 0 when no stubs will be needed, and 1 on success. */
4533
4534 int
4535 elf32_arm_setup_section_lists (bfd *output_bfd,
4536 struct bfd_link_info *info)
4537 {
4538 bfd *input_bfd;
4539 unsigned int bfd_count;
4540 unsigned int top_id, top_index;
4541 asection *section;
4542 asection **input_list, **list;
4543 bfd_size_type amt;
4544 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4545
4546 if (htab == NULL)
4547 return 0;
4548 if (! is_elf_hash_table (htab))
4549 return 0;
4550
4551 /* Count the number of input BFDs and find the top input section id. */
4552 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4553 input_bfd != NULL;
4554 input_bfd = input_bfd->link.next)
4555 {
4556 bfd_count += 1;
4557 for (section = input_bfd->sections;
4558 section != NULL;
4559 section = section->next)
4560 {
4561 if (top_id < section->id)
4562 top_id = section->id;
4563 }
4564 }
4565 htab->bfd_count = bfd_count;
4566
4567 amt = sizeof (struct map_stub) * (top_id + 1);
4568 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4569 if (htab->stub_group == NULL)
4570 return -1;
4571 htab->top_id = top_id;
4572
4573 /* We can't use output_bfd->section_count here to find the top output
4574 section index as some sections may have been removed, and
4575 _bfd_strip_section_from_output doesn't renumber the indices. */
4576 for (section = output_bfd->sections, top_index = 0;
4577 section != NULL;
4578 section = section->next)
4579 {
4580 if (top_index < section->index)
4581 top_index = section->index;
4582 }
4583
4584 htab->top_index = top_index;
4585 amt = sizeof (asection *) * (top_index + 1);
4586 input_list = (asection **) bfd_malloc (amt);
4587 htab->input_list = input_list;
4588 if (input_list == NULL)
4589 return -1;
4590
4591 /* For sections we aren't interested in, mark their entries with a
4592 value we can check later. */
4593 list = input_list + top_index;
4594 do
4595 *list = bfd_abs_section_ptr;
4596 while (list-- != input_list);
4597
4598 for (section = output_bfd->sections;
4599 section != NULL;
4600 section = section->next)
4601 {
4602 if ((section->flags & SEC_CODE) != 0)
4603 input_list[section->index] = NULL;
4604 }
4605
4606 return 1;
4607 }
4608
4609 /* The linker repeatedly calls this function for each input section,
4610 in the order that input sections are linked into output sections.
4611 Build lists of input sections to determine groupings between which
4612 we may insert linker stubs. */
4613
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  /* Ignore output sections beyond the range recorded by
     elf32_arm_setup_section_lists.  */
  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* *LIST == bfd_abs_section_ptr marks an output section we are
	 not interested in (see elf32_arm_setup_section_lists); also
	 skip non-code input sections.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  PREV_SEC stays defined here; it is
	     consumed and #undef'd by group_sections below.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
4638
4639 /* See whether we can group stub sections together. Grouping stub
4640 sections may result in fewer stubs. More importantly, we need to
4641 put all .init* and .fini* stubs at the end of the .init or
4642 .fini output sections respectively, because glibc splits the
4643 _init and _fini functions into multiple parts. Putting a stub in
4644 the middle of a function is not a good idea. */
4645
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  /* One list of input sections per output section; built by
     elf32_arm_next_input_section via the PREV_SEC macro defined
     there.  */
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks an output section with no code.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      /* After the reversal the same per-section pointer holds the
	 NEXT link, hence the alias.  */
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group while successive sections stay within
	     stub_group_size bytes of the group start; CURR ends up as
	     the last member and hosts the group's stub section.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4739
4740 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4741 erratum fix. */
4742
4743 static int
4744 a8_reloc_compare (const void *a, const void *b)
4745 {
4746 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4747 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4748
4749 if (ra->from < rb->from)
4750 return -1;
4751 else if (ra->from > rb->from)
4752 return 1;
4753 else
4754 return 0;
4755 }
4756
4757 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4758 const char *, char **);
4759
4760 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4761 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4762 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4763 otherwise. */
4764
4765 static bfd_boolean
4766 cortex_a8_erratum_scan (bfd *input_bfd,
4767 struct bfd_link_info *info,
4768 struct a8_erratum_fix **a8_fixes_p,
4769 unsigned int *num_a8_fixes_p,
4770 unsigned int *a8_fix_table_size_p,
4771 struct a8_erratum_reloc *a8_relocs,
4772 unsigned int num_a8_relocs,
4773 unsigned prev_num_a8_fixes,
4774 bfd_boolean *stub_changed_p)
4775 {
4776 asection *section;
4777 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4778 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4779 unsigned int num_a8_fixes = *num_a8_fixes_p;
4780 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4781
4782 if (htab == NULL)
4783 return FALSE;
4784
4785 for (section = input_bfd->sections;
4786 section != NULL;
4787 section = section->next)
4788 {
4789 bfd_byte *contents = NULL;
4790 struct _arm_elf_section_data *sec_data;
4791 unsigned int span;
4792 bfd_vma base_vma;
4793
4794 if (elf_section_type (section) != SHT_PROGBITS
4795 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4796 || (section->flags & SEC_EXCLUDE) != 0
4797 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4798 || (section->output_section == bfd_abs_section_ptr))
4799 continue;
4800
4801 base_vma = section->output_section->vma + section->output_offset;
4802
4803 if (elf_section_data (section)->this_hdr.contents != NULL)
4804 contents = elf_section_data (section)->this_hdr.contents;
4805 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4806 return TRUE;
4807
4808 sec_data = elf32_arm_section_data (section);
4809
4810 for (span = 0; span < sec_data->mapcount; span++)
4811 {
4812 unsigned int span_start = sec_data->map[span].vma;
4813 unsigned int span_end = (span == sec_data->mapcount - 1)
4814 ? section->size : sec_data->map[span + 1].vma;
4815 unsigned int i;
4816 char span_type = sec_data->map[span].type;
4817 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4818
4819 if (span_type != 't')
4820 continue;
4821
4822 /* Span is entirely within a single 4KB region: skip scanning. */
4823 if (((base_vma + span_start) & ~0xfff)
4824 == ((base_vma + span_end) & ~0xfff))
4825 continue;
4826
4827 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4828
4829 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4830 * The branch target is in the same 4KB region as the
4831 first half of the branch.
4832 * The instruction before the branch is a 32-bit
4833 length non-branch instruction. */
4834 for (i = span_start; i < span_end;)
4835 {
4836 unsigned int insn = bfd_getl16 (&contents[i]);
4837 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4838 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4839
4840 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4841 insn_32bit = TRUE;
4842
4843 if (insn_32bit)
4844 {
4845 /* Load the rest of the insn (in manual-friendly order). */
4846 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4847
4848 /* Encoding T4: B<c>.W. */
4849 is_b = (insn & 0xf800d000) == 0xf0009000;
4850 /* Encoding T1: BL<c>.W. */
4851 is_bl = (insn & 0xf800d000) == 0xf000d000;
4852 /* Encoding T2: BLX<c>.W. */
4853 is_blx = (insn & 0xf800d000) == 0xf000c000;
4854 /* Encoding T3: B<c>.W (not permitted in IT block). */
4855 is_bcc = (insn & 0xf800d000) == 0xf0008000
4856 && (insn & 0x07f00000) != 0x03800000;
4857 }
4858
4859 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4860
4861 if (((base_vma + i) & 0xfff) == 0xffe
4862 && insn_32bit
4863 && is_32bit_branch
4864 && last_was_32bit
4865 && ! last_was_branch)
4866 {
4867 bfd_signed_vma offset = 0;
4868 bfd_boolean force_target_arm = FALSE;
4869 bfd_boolean force_target_thumb = FALSE;
4870 bfd_vma target;
4871 enum elf32_arm_stub_type stub_type = arm_stub_none;
4872 struct a8_erratum_reloc key, *found;
4873 bfd_boolean use_plt = FALSE;
4874
4875 key.from = base_vma + i;
4876 found = (struct a8_erratum_reloc *)
4877 bsearch (&key, a8_relocs, num_a8_relocs,
4878 sizeof (struct a8_erratum_reloc),
4879 &a8_reloc_compare);
4880
4881 if (found)
4882 {
4883 char *error_message = NULL;
4884 struct elf_link_hash_entry *entry;
4885
4886 /* We don't care about the error returned from this
4887 function, only if there is glue or not. */
4888 entry = find_thumb_glue (info, found->sym_name,
4889 &error_message);
4890
4891 if (entry)
4892 found->non_a8_stub = TRUE;
4893
4894 /* Keep a simpler condition, for the sake of clarity. */
4895 if (htab->root.splt != NULL && found->hash != NULL
4896 && found->hash->root.plt.offset != (bfd_vma) -1)
4897 use_plt = TRUE;
4898
4899 if (found->r_type == R_ARM_THM_CALL)
4900 {
4901 if (found->branch_type == ST_BRANCH_TO_ARM
4902 || use_plt)
4903 force_target_arm = TRUE;
4904 else
4905 force_target_thumb = TRUE;
4906 }
4907 }
4908
4909 /* Check if we have an offending branch instruction. */
4910
4911 if (found && found->non_a8_stub)
4912 /* We've already made a stub for this instruction, e.g.
4913 it's a long branch or a Thumb->ARM stub. Assume that
4914 stub will suffice to work around the A8 erratum (see
4915 setting of always_after_branch above). */
4916 ;
4917 else if (is_bcc)
4918 {
4919 offset = (insn & 0x7ff) << 1;
4920 offset |= (insn & 0x3f0000) >> 4;
4921 offset |= (insn & 0x2000) ? 0x40000 : 0;
4922 offset |= (insn & 0x800) ? 0x80000 : 0;
4923 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4924 if (offset & 0x100000)
4925 offset |= ~ ((bfd_signed_vma) 0xfffff);
4926 stub_type = arm_stub_a8_veneer_b_cond;
4927 }
4928 else if (is_b || is_bl || is_blx)
4929 {
4930 int s = (insn & 0x4000000) != 0;
4931 int j1 = (insn & 0x2000) != 0;
4932 int j2 = (insn & 0x800) != 0;
4933 int i1 = !(j1 ^ s);
4934 int i2 = !(j2 ^ s);
4935
4936 offset = (insn & 0x7ff) << 1;
4937 offset |= (insn & 0x3ff0000) >> 4;
4938 offset |= i2 << 22;
4939 offset |= i1 << 23;
4940 offset |= s << 24;
4941 if (offset & 0x1000000)
4942 offset |= ~ ((bfd_signed_vma) 0xffffff);
4943
4944 if (is_blx)
4945 offset &= ~ ((bfd_signed_vma) 3);
4946
4947 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4948 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4949 }
4950
4951 if (stub_type != arm_stub_none)
4952 {
4953 bfd_vma pc_for_insn = base_vma + i + 4;
4954
4955 /* The original instruction is a BL, but the target is
4956 an ARM instruction. If we were not making a stub,
4957 the BL would have been converted to a BLX. Use the
4958 BLX stub instead in that case. */
4959 if (htab->use_blx && force_target_arm
4960 && stub_type == arm_stub_a8_veneer_bl)
4961 {
4962 stub_type = arm_stub_a8_veneer_blx;
4963 is_blx = TRUE;
4964 is_bl = FALSE;
4965 }
4966 /* Conversely, if the original instruction was
4967 BLX but the target is Thumb mode, use the BL
4968 stub. */
4969 else if (force_target_thumb
4970 && stub_type == arm_stub_a8_veneer_blx)
4971 {
4972 stub_type = arm_stub_a8_veneer_bl;
4973 is_blx = FALSE;
4974 is_bl = TRUE;
4975 }
4976
4977 if (is_blx)
4978 pc_for_insn &= ~ ((bfd_vma) 3);
4979
4980 /* If we found a relocation, use the proper destination,
4981 not the offset in the (unrelocated) instruction.
4982 Note this is always done if we switched the stub type
4983 above. */
4984 if (found)
4985 offset =
4986 (bfd_signed_vma) (found->destination - pc_for_insn);
4987
4988 /* If the stub will use a Thumb-mode branch to a
4989 PLT target, redirect it to the preceding Thumb
4990 entry point. */
4991 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4992 offset -= PLT_THUMB_STUB_SIZE;
4993
4994 target = pc_for_insn + offset;
4995
4996 /* The BLX stub is ARM-mode code. Adjust the offset to
4997 take the different PC value (+8 instead of +4) into
4998 account. */
4999 if (stub_type == arm_stub_a8_veneer_blx)
5000 offset += 4;
5001
5002 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5003 {
5004 char *stub_name = NULL;
5005
5006 if (num_a8_fixes == a8_fix_table_size)
5007 {
5008 a8_fix_table_size *= 2;
5009 a8_fixes = (struct a8_erratum_fix *)
5010 bfd_realloc (a8_fixes,
5011 sizeof (struct a8_erratum_fix)
5012 * a8_fix_table_size);
5013 }
5014
5015 if (num_a8_fixes < prev_num_a8_fixes)
5016 {
5017 /* If we're doing a subsequent scan,
5018 check if we've found the same fix as
5019 before, and try and reuse the stub
5020 name. */
5021 stub_name = a8_fixes[num_a8_fixes].stub_name;
5022 if ((a8_fixes[num_a8_fixes].section != section)
5023 || (a8_fixes[num_a8_fixes].offset != i))
5024 {
5025 free (stub_name);
5026 stub_name = NULL;
5027 *stub_changed_p = TRUE;
5028 }
5029 }
5030
5031 if (!stub_name)
5032 {
5033 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5034 if (stub_name != NULL)
5035 sprintf (stub_name, "%x:%x", section->id, i);
5036 }
5037
5038 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5039 a8_fixes[num_a8_fixes].section = section;
5040 a8_fixes[num_a8_fixes].offset = i;
5041 a8_fixes[num_a8_fixes].addend = offset;
5042 a8_fixes[num_a8_fixes].orig_insn = insn;
5043 a8_fixes[num_a8_fixes].stub_name = stub_name;
5044 a8_fixes[num_a8_fixes].stub_type = stub_type;
5045 a8_fixes[num_a8_fixes].branch_type =
5046 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5047
5048 num_a8_fixes++;
5049 }
5050 }
5051 }
5052
5053 i += insn_32bit ? 4 : 2;
5054 last_was_32bit = insn_32bit;
5055 last_was_branch = is_32bit_branch;
5056 }
5057 }
5058
5059 if (elf_section_data (section)->this_hdr.contents == NULL)
5060 free (contents);
5061 }
5062
5063 *a8_fixes_p = a8_fixes;
5064 *num_a8_fixes_p = num_a8_fixes;
5065 *a8_fix_table_size_p = a8_fix_table_size;
5066
5067 return FALSE;
5068 }
5069
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.  Iterates until the set of required stubs converges;
   also collects candidate branches for the Cortex-A8 erratum fix when
   that workaround is enabled.  Returns TRUE on success.  */

bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_size_type stub_group_size;
  bfd_boolean stubs_always_after_branch;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return FALSE;

  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  /* Fixed-point iteration: keep scanning and resizing until no new
     stubs (or A8 fixes) appear in a pass.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      bfd_boolean stub_changed = FALSE;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd))
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  enum elf32_arm_stub_type stub_type;
		  struct elf32_arm_stub_hash_entry *stub_entry;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  char *stub_name;
		  const asection *id_sec;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		    error_ret_free_internal:
		      /* Only free relocs we read ourselves, not ones
			 cached on the section.  */
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		      goto error_ret_free_local;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == elf32_arm_tls_transition
			       (info, r_type, &hash->root)
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type = ARM_SYM_BRANCH_TYPE (sym);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type = hash->root.target_internal;
		      sym_name = hash->root.root.root.string;
		    }

		  /* do { } while (0) so "break" can bail out of stub
		     creation while still falling through to the
		     Cortex-A8 candidate recording below.  */
		  do
		    {
		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* Support for grouping stub sections.  */
		      id_sec = htab->stub_group[section->id].link_sec;

		      /* Get the name of this stub.  */
		      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
						       irela, stub_type);
		      if (!stub_name)
			goto error_ret_free_internal;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      created_stub = TRUE;

		      stub_entry = arm_stub_hash_lookup
			(&htab->stub_hash_table, stub_name,
			 FALSE, FALSE);
		      if (stub_entry != NULL)
			{
			  /* The proper stub has already been created.  */
			  free (stub_name);
			  stub_entry->target_value = sym_value;
			  break;
			}

		      stub_entry = elf32_arm_add_stub (stub_name, section,
						       htab);
		      if (stub_entry == NULL)
			{
			  free (stub_name);
			  goto error_ret_free_internal;
			}

		      stub_entry->target_value = sym_value;
		      stub_entry->target_section = sym_sec;
		      stub_entry->stub_type = stub_type;
		      stub_entry->h = hash;
		      stub_entry->branch_type = branch_type;

		      if (sym_name == NULL)
			sym_name = "unnamed";
		      stub_entry->output_name = (char *)
			bfd_alloc (htab->stub_bfd,
				   sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				   + strlen (sym_name));
		      if (stub_entry->output_name == NULL)
			{
			  free (stub_name);
			  goto error_ret_free_internal;
			}

		      /* For historical reasons, use the existing names for
			 ARM-to-Thumb and Thumb-to-ARM stubs.  */
		      if ((r_type == (unsigned int) R_ARM_THM_CALL
			   || r_type == (unsigned int) R_ARM_THM_JUMP24
			   || r_type == (unsigned int) R_ARM_THM_JUMP19)
			  && branch_type == ST_BRANCH_TO_ARM)
			sprintf (stub_entry->output_name,
				 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
		      else if ((r_type == (unsigned int) R_ARM_CALL
				|| r_type == (unsigned int) R_ARM_JUMP24)
			       && branch_type == ST_BRANCH_TO_THUMB)
			sprintf (stub_entry->output_name,
				 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
		      else
			sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
				 sym_name);

		      stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				bfd_realloc (a8_relocs,
					     sizeof (struct a8_erratum_reloc)
					     * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }
	}

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = TRUE;

      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab);

	    if (stub_sec == NULL)
	      goto error_ret_free_local;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
				     section->owner,
				     stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = 0;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].offset;
	  stub_entry->target_addend = a8_fixes[i].addend;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return TRUE;

 error_ret_free_local:
  return FALSE;
}
5644
5645 /* Build all the stubs associated with the current output file. The
5646 stubs are kept in a hash table attached to the main linker hash
5647 table. We also set up the .plt entries for statically linked PIC
5648 functions here. This function is called via arm_elf_finish in the
5649 linker. */
5650
5651 bfd_boolean
5652 elf32_arm_build_stubs (struct bfd_link_info *info)
5653 {
5654 asection *stub_sec;
5655 struct bfd_hash_table *table;
5656 struct elf32_arm_link_hash_table *htab;
5657
5658 htab = elf32_arm_hash_table (info);
5659 if (htab == NULL)
5660 return FALSE;
5661
5662 for (stub_sec = htab->stub_bfd->sections;
5663 stub_sec != NULL;
5664 stub_sec = stub_sec->next)
5665 {
5666 bfd_size_type size;
5667
5668 /* Ignore non-stub sections. */
5669 if (!strstr (stub_sec->name, STUB_SUFFIX))
5670 continue;
5671
5672 /* Allocate memory to hold the linker stubs. */
5673 size = stub_sec->size;
5674 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5675 if (stub_sec->contents == NULL && size != 0)
5676 return FALSE;
5677 stub_sec->size = 0;
5678 }
5679
5680 /* Build the stubs as directed by the stub hash table. */
5681 table = &htab->stub_hash_table;
5682 bfd_hash_traverse (table, arm_build_one_stub, info);
5683 if (htab->fix_cortex_a8)
5684 {
5685 /* Place the cortex a8 stubs last. */
5686 htab->fix_cortex_a8 = -1;
5687 bfd_hash_traverse (table, arm_build_one_stub, info);
5688 }
5689
5690 return TRUE;
5691 }
5692
5693 /* Locate the Thumb encoded calling stub for NAME. */
5694
5695 static struct elf_link_hash_entry *
5696 find_thumb_glue (struct bfd_link_info *link_info,
5697 const char *name,
5698 char **error_message)
5699 {
5700 char *tmp_name;
5701 struct elf_link_hash_entry *hash;
5702 struct elf32_arm_link_hash_table *hash_table;
5703
5704 /* We need a pointer to the armelf specific hash table. */
5705 hash_table = elf32_arm_hash_table (link_info);
5706 if (hash_table == NULL)
5707 return NULL;
5708
5709 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5710 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5711
5712 BFD_ASSERT (tmp_name);
5713
5714 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5715
5716 hash = elf_link_hash_lookup
5717 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5718
5719 if (hash == NULL
5720 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5721 tmp_name, name) == -1)
5722 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5723
5724 free (tmp_name);
5725
5726 return hash;
5727 }
5728
5729 /* Locate the ARM encoded calling stub for NAME. */
5730
5731 static struct elf_link_hash_entry *
5732 find_arm_glue (struct bfd_link_info *link_info,
5733 const char *name,
5734 char **error_message)
5735 {
5736 char *tmp_name;
5737 struct elf_link_hash_entry *myh;
5738 struct elf32_arm_link_hash_table *hash_table;
5739
5740 /* We need a pointer to the elfarm specific hash table. */
5741 hash_table = elf32_arm_hash_table (link_info);
5742 if (hash_table == NULL)
5743 return NULL;
5744
5745 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5746 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5747
5748 BFD_ASSERT (tmp_name);
5749
5750 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5751
5752 myh = elf_link_hash_lookup
5753 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5754
5755 if (myh == NULL
5756 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5757 tmp_name, name) == -1)
5758 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5759
5760 free (tmp_name);
5761
5762 return myh;
5763 }
5764
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
     ldr r12, __func_addr
     bx  r12
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
     ldr pc, __func_addr
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
     ldr r12, __func_offset
     add r12, r12, pc
     bx  r12
   __func_offset:
     .word func - .  */

/* Three words: ldr, bx, literal.  The literal's low bit is set so the
   final BX enters Thumb state.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

/* v5T can load straight into PC, needing only two words.  */
#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

/* PIC variant: ldr, add pc, bx, offset word.  */
#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM

     .thumb                             .thumb
     .align 2                           .align 2
  __func_from_thumb:                 __func_from_thumb:
     bx pc                              push {r6, lr}
     nop                                ldr  r6, __func_addr
     .arm                               mov  lr, pc
     b func                             bx   r6
					.arm
				     ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				     __func_addr:
					.word	func  */

/* Two Thumb halfwords (bx pc; nop) followed by an ARM branch.  */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

/* Sizes of the erratum veneers emitted elsewhere in this file.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: tst/moveq/bx — three words.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
5833
5834 #ifndef ELFARM_NABI_C_INCLUDED
5835 static void
5836 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5837 {
5838 asection * s;
5839 bfd_byte * contents;
5840
5841 if (size == 0)
5842 {
5843 /* Do not include empty glue sections in the output. */
5844 if (abfd != NULL)
5845 {
5846 s = bfd_get_linker_section (abfd, name);
5847 if (s != NULL)
5848 s->flags |= SEC_EXCLUDE;
5849 }
5850 return;
5851 }
5852
5853 BFD_ASSERT (abfd != NULL);
5854
5855 s = bfd_get_linker_section (abfd, name);
5856 BFD_ASSERT (s != NULL);
5857
5858 contents = (bfd_byte *) bfd_alloc (abfd, size);
5859
5860 BFD_ASSERT (s->size == size);
5861 s->contents = contents;
5862 }
5863
5864 bfd_boolean
5865 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5866 {
5867 struct elf32_arm_link_hash_table * globals;
5868
5869 globals = elf32_arm_hash_table (info);
5870 BFD_ASSERT (globals != NULL);
5871
5872 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5873 globals->arm_glue_size,
5874 ARM2THUMB_GLUE_SECTION_NAME);
5875
5876 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5877 globals->thumb_glue_size,
5878 THUMB2ARM_GLUE_SECTION_NAME);
5879
5880 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5881 globals->vfp11_erratum_glue_size,
5882 VFP11_ERRATUM_VENEER_SECTION_NAME);
5883
5884 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5885 globals->stm32l4xx_erratum_glue_size,
5886 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
5887
5888 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5889 globals->bx_glue_size,
5890 ARM_BX_GLUE_SECTION_NAME);
5891
5892 return TRUE;
5893 }
5894
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   returns the symbol identifying the stub.

   Idempotent per target symbol H: if the glue symbol already exists the
   existing entry is returned and no new space is reserved.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the mangled glue-symbol name for NAME.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* The hash table copied the name, so the temporary can go.  */
  free (tmp_name);

  /* Pick the glue variant (and hence its size): PIC glue for shared or
     relocatable-executable links, the short v5 variant when BLX is
     available, otherwise the full static sequence.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for this stub in the glue section.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
5967
/* Allocate space for ARMv4 BX veneers.

   On targets that lack the BX instruction, "BX <reg>" must be routed
   through a veneer.  One veneer is shared per register: REG indexes the
   bx_glue_offset cache in the hash table, and BX PC (reg 15) never needs
   a veneer.  A local function symbol is defined for the veneer in the
   glue-owner bfd, and space is reserved in ARM_BX_GLUE_SECTION_NAME.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  ARM_BX_GLUE_ENTRY_NAME contains a %d that is
     replaced by REG; REG <= 14 here (two digits at most), which is no
     longer than the "%d" it replaces, so strlen (fmt) + 1 bytes suffice.  */
  tmp_name = (char *)
      bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  /* The veneer symbol must not already exist.  */
  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer's entry point at the current end of the glue.  */
  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Reserve space and cache the offset.  Bit 1 is OR-ed in as a marker;
     presumably it flags that the veneer exists but has not been emitted
     yet -- TODO confirm against the code that writes the veneers out.  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
6024
6025
6026 /* Add an entry to the code/data map for section SEC. */
6027
6028 static void
6029 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6030 {
6031 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6032 unsigned int newidx;
6033
6034 if (sec_data->map == NULL)
6035 {
6036 sec_data->map = (elf32_arm_section_map *)
6037 bfd_malloc (sizeof (elf32_arm_section_map));
6038 sec_data->mapcount = 0;
6039 sec_data->mapsize = 1;
6040 }
6041
6042 newidx = sec_data->mapcount++;
6043
6044 if (sec_data->mapcount > sec_data->mapsize)
6045 {
6046 sec_data->mapsize *= 2;
6047 sec_data->map = (elf32_arm_section_map *)
6048 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6049 * sizeof (elf32_arm_section_map));
6050 }
6051
6052 if (sec_data->map)
6053 {
6054 sec_data->map[newidx].vma = vma;
6055 sec_data->map[newidx].type = type;
6056 }
6057 }
6058
6059
6060 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6061 veneers are handled for now. */
6062
6063 static bfd_vma
6064 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6065 elf32_vfp11_erratum_list *branch,
6066 bfd *branch_bfd,
6067 asection *branch_sec,
6068 unsigned int offset)
6069 {
6070 asection *s;
6071 struct elf32_arm_link_hash_table *hash_table;
6072 char *tmp_name;
6073 struct elf_link_hash_entry *myh;
6074 struct bfd_link_hash_entry *bh;
6075 bfd_vma val;
6076 struct _arm_elf_section_data *sec_data;
6077 elf32_vfp11_erratum_list *newerr;
6078
6079 hash_table = elf32_arm_hash_table (link_info);
6080 BFD_ASSERT (hash_table != NULL);
6081 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6082
6083 s = bfd_get_linker_section
6084 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6085
6086 sec_data = elf32_arm_section_data (s);
6087
6088 BFD_ASSERT (s != NULL);
6089
6090 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6091 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6092
6093 BFD_ASSERT (tmp_name);
6094
6095 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6096 hash_table->num_vfp11_fixes);
6097
6098 myh = elf_link_hash_lookup
6099 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6100
6101 BFD_ASSERT (myh == NULL);
6102
6103 bh = NULL;
6104 val = hash_table->vfp11_erratum_glue_size;
6105 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6106 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6107 NULL, TRUE, FALSE, &bh);
6108
6109 myh = (struct elf_link_hash_entry *) bh;
6110 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6111 myh->forced_local = 1;
6112
6113 /* Link veneer back to calling location. */
6114 sec_data->erratumcount += 1;
6115 newerr = (elf32_vfp11_erratum_list *)
6116 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6117
6118 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6119 newerr->vma = -1;
6120 newerr->u.v.branch = branch;
6121 newerr->u.v.id = hash_table->num_vfp11_fixes;
6122 branch->u.b.veneer = newerr;
6123
6124 newerr->next = sec_data->erratumlist;
6125 sec_data->erratumlist = newerr;
6126
6127 /* A symbol for the return from the veneer. */
6128 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6129 hash_table->num_vfp11_fixes);
6130
6131 myh = elf_link_hash_lookup
6132 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6133
6134 if (myh != NULL)
6135 abort ();
6136
6137 bh = NULL;
6138 val = offset + 4;
6139 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6140 branch_sec, val, NULL, TRUE, FALSE, &bh);
6141
6142 myh = (struct elf_link_hash_entry *) bh;
6143 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6144 myh->forced_local = 1;
6145
6146 free (tmp_name);
6147
6148 /* Generate a mapping symbol for the veneer section, and explicitly add an
6149 entry for that symbol to the code/data map for the section. */
6150 if (hash_table->vfp11_erratum_glue_size == 0)
6151 {
6152 bh = NULL;
6153 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6154 ever requires this erratum fix. */
6155 _bfd_generic_link_add_one_symbol (link_info,
6156 hash_table->bfd_of_glue_owner, "$a",
6157 BSF_LOCAL, s, 0, NULL,
6158 TRUE, FALSE, &bh);
6159
6160 myh = (struct elf_link_hash_entry *) bh;
6161 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6162 myh->forced_local = 1;
6163
6164 /* The elf32_arm_init_maps function only cares about symbols from input
6165 BFDs. We must make a note of this generated mapping symbol
6166 ourselves so that code byteswapping works properly in
6167 elf32_arm_write_section. */
6168 elf32_arm_section_map_add (s, 'a', 0);
6169 }
6170
6171 s->size += VFP11_ERRATUM_VENEER_SIZE;
6172 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6173 hash_table->num_vfp11_fixes++;
6174
6175 /* The offset of the veneer. */
6176 return val;
6177 }
6178
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.

   BRANCH describes the offending load-multiple; a veneer entry symbol is
   defined at the current end of the STM32L4XX glue section and a return
   symbol at OFFSET + 4 within BRANCH_SEC.  VENEER_SIZE is the number of
   bytes reserved for this veneer (unlike the fixed-size VFP11 veneers).
   Returns the value of the last symbol defined, i.e. OFFSET + 4 (the
   return address inside BRANCH_SEC).  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* +10 leaves room for the formatted fix number.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  /* The veneer symbol must not already exist.  */
  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer's entry symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
6297
6298 #define ARM_GLUE_SECTION_FLAGS \
6299 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6300 | SEC_READONLY | SEC_LINKER_CREATED)
6301
6302 /* Create a fake section for use by the ARM backend of the linker. */
6303
6304 static bfd_boolean
6305 arm_make_glue_section (bfd * abfd, const char * name)
6306 {
6307 asection * sec;
6308
6309 sec = bfd_get_linker_section (abfd, name);
6310 if (sec != NULL)
6311 /* Already made. */
6312 return TRUE;
6313
6314 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6315
6316 if (sec == NULL
6317 || !bfd_set_section_alignment (abfd, sec, 2))
6318 return FALSE;
6319
6320 /* Set the gc mark to prevent the section from being removed by garbage
6321 collection, despite the fact that no relocs refer to this section. */
6322 sec->gc_mark = 1;
6323
6324 return TRUE;
6325 }
6326
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  /* Sets a file-scope flag; presumably consulted when PLT entries are
     laid out elsewhere in this file to select the long PLT format.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
6335
6336 /* Add the glue sections to ABFD. This function is called from the
6337 linker scripts in ld/emultempl/{armelf}.em. */
6338
6339 bfd_boolean
6340 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6341 struct bfd_link_info *info)
6342 {
6343 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6344 bfd_boolean dostm32l4xx = globals
6345 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6346 bfd_boolean addglue;
6347
6348 /* If we are only performing a partial
6349 link do not bother adding the glue. */
6350 if (bfd_link_relocatable (info))
6351 return TRUE;
6352
6353 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6354 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6355 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6356 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6357
6358 if (!dostm32l4xx)
6359 return addglue;
6360
6361 return addglue
6362 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6363 }
6364
6365 /* Select a BFD to be used to hold the sections used by the glue code.
6366 This function is called from the linker scripts in ld/emultempl/
6367 {armelf/pe}.em. */
6368
6369 bfd_boolean
6370 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6371 {
6372 struct elf32_arm_link_hash_table *globals;
6373
6374 /* If we are only performing a partial link
6375 do not bother getting a bfd to hold the glue. */
6376 if (bfd_link_relocatable (info))
6377 return TRUE;
6378
6379 /* Make sure we don't attach the glue sections to a dynamic object. */
6380 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6381
6382 globals = elf32_arm_hash_table (info);
6383 BFD_ASSERT (globals != NULL);
6384
6385 if (globals->bfd_of_glue_owner != NULL)
6386 return TRUE;
6387
6388 /* Save the bfd for later use. */
6389 globals->bfd_of_glue_owner = abfd;
6390
6391 return TRUE;
6392 }
6393
6394 static void
6395 check_use_blx (struct elf32_arm_link_hash_table *globals)
6396 {
6397 int cpu_arch;
6398
6399 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6400 Tag_CPU_arch);
6401
6402 if (globals->fix_arm1176)
6403 {
6404 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6405 globals->use_blx = 1;
6406 }
6407 else
6408 {
6409 if (cpu_arch > TAG_CPU_ARCH_V4T)
6410 globals->use_blx = 1;
6411 }
6412 }
6413
/* Scan the relocations of ABFD before section sizes are fixed, recording
   any ARM->Thumb interworking glue (for R_ARM_PC24 calls to Thumb
   targets) and ARMv4 BX veneers (for R_ARM_V4BX when fix_v4bx >= 2) that
   the final link will need.  Returns FALSE on error (BE8 requested for a
   little-endian bfd, or unreadable relocs/contents).  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  /* Byte-swapping code (BE8) only makes sense for big-endian input.  */
  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX register operand is the low nibble of the insn.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (h->target_internal == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release per-section buffers unless they are owned by the bfd's
	 cache, in which case they must survive this function.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6573 #endif
6574
6575
6576 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6577
6578 void
6579 bfd_elf32_arm_init_maps (bfd *abfd)
6580 {
6581 Elf_Internal_Sym *isymbuf;
6582 Elf_Internal_Shdr *hdr;
6583 unsigned int i, localsyms;
6584
6585 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6586 if (! is_arm_elf (abfd))
6587 return;
6588
6589 if ((abfd->flags & DYNAMIC) != 0)
6590 return;
6591
6592 hdr = & elf_symtab_hdr (abfd);
6593 localsyms = hdr->sh_info;
6594
6595 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6596 should contain the number of local symbols, which should come before any
6597 global symbols. Mapping symbols are always local. */
6598 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6599 NULL);
6600
6601 /* No internal symbols read? Skip this BFD. */
6602 if (isymbuf == NULL)
6603 return;
6604
6605 for (i = 0; i < localsyms; i++)
6606 {
6607 Elf_Internal_Sym *isym = &isymbuf[i];
6608 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6609 const char *name;
6610
6611 if (sec != NULL
6612 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6613 {
6614 name = bfd_elf_string_from_elf_section (abfd,
6615 hdr->sh_link, isym->st_name);
6616
6617 if (bfd_is_arm_special_symbol_name (name,
6618 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6619 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6620 }
6621 }
6622 }
6623
6624
6625 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6626 say what they wanted. */
6627
6628 void
6629 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6630 {
6631 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6632 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6633
6634 if (globals == NULL)
6635 return;
6636
6637 if (globals->fix_cortex_a8 == -1)
6638 {
6639 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6640 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6641 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6642 || out_attr[Tag_CPU_arch_profile].i == 0))
6643 globals->fix_cortex_a8 = 1;
6644 else
6645 globals->fix_cortex_a8 = 0;
6646 }
6647 }
6648
6649
6650 void
6651 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6652 {
6653 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6654 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6655
6656 if (globals == NULL)
6657 return;
6658 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6659 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6660 {
6661 switch (globals->vfp11_fix)
6662 {
6663 case BFD_ARM_VFP11_FIX_DEFAULT:
6664 case BFD_ARM_VFP11_FIX_NONE:
6665 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6666 break;
6667
6668 default:
6669 /* Give a warning, but do as the user requests anyway. */
6670 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6671 "workaround is not necessary for target architecture"), obfd);
6672 }
6673 }
6674 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6675 /* For earlier architectures, we might need the workaround, but do not
6676 enable it by default. If users is running with broken hardware, they
6677 must enable the erratum fix explicitly. */
6678 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6679 }
6680
6681 void
6682 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6683 {
6684 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6685 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6686
6687 if (globals == NULL)
6688 return;
6689
6690 /* We assume only Cortex-M4 may require the fix. */
6691 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
6692 || out_attr[Tag_CPU_arch_profile].i != 'M')
6693 {
6694 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
6695 /* Give a warning, but do as the user requests anyway. */
6696 (*_bfd_error_handler)
6697 (_("%B: warning: selected STM32L4XX erratum "
6698 "workaround is not necessary for target architecture"), obfd);
6699 }
6700 }
6701
/* Classification of VFP11 instructions by execution pipeline, as assigned
   by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline (also fmul/fadd/fcmp etc.).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Not a recognized VFP instruction.  */
};
6709
6710 /* Return a VFP register number. This is encoded as RX:X for single-precision
6711 registers, or X:RX for double-precision registers, where RX is the group of
6712 four bits in the instruction encoding and X is the single extension bit.
6713 RX and X fields are specified using their lowest (starting) bit. The return
6714 value is:
6715
6716 0...31: single-precision registers s0...s31
6717 32...63: double-precision registers d0...d31.
6718
6719 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6720 encounter VFP3 instructions, so we allow the full range for DP registers. */
6721
6722 static unsigned int
6723 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6724 unsigned int x)
6725 {
6726 if (is_double)
6727 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6728 else
6729 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6730 }
6731
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno ().  Single-precision registers set one bit;
   double-precision registers d0-d15 set the two bits of their
   overlapping SP pair.  d16-d31 are ignored.  Unsigned constants are
   used in the shifts: the old `3 << 30' for d15 overflowed a signed
   int, which is undefined behaviour (C11 6.5.7).  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1u << reg;
  else if (reg < 48)
    *wmask |= 3u << ((reg - 32) * 2);
}
6743
6744 /* Return TRUE if WMASK overwrites anything in REGS. */
6745
6746 static bfd_boolean
6747 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6748 {
6749 int i;
6750
6751 for (i = 0; i < numregs; i++)
6752 {
6753 unsigned int reg = regs[i];
6754
6755 if (reg < 32 && (wmask & (1 << reg)) != 0)
6756 return TRUE;
6757
6758 reg -= 32;
6759
6760 if (reg >= 16)
6761 continue;
6762
6763 if ((wmask & (3 << (reg * 2))) != 0)
6764 return TRUE;
6765 }
6766
6767 return FALSE;
6768 }
6769
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   INSN is the instruction word.  *DESTMASK accumulates the registers
   written (encoded as by bfd_arm_vfp11_write_mask).  For data-processing
   instructions, REGS[0 .. *NUMREGS-1] receive the input register numbers.
   Returns the pipeline the instruction executes on, or VFP11_BAD if it is
   not a recognized VFP instruction.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 == 0xb select the double-precision operand encoding.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the scattered p, q, r, s opcode bits into one selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Multiply-accumulate: Fd is both an input and the output.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-input operation: Fn and Fm read, Fd written.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* The extension opcode lives in the Fn field plus one bit.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear: the VFP registers are the destination, so record
	 them as written.  In the SP case two consecutive registers are
	 written.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Load-multiple: the immediate field holds the transfer
	       length in words; halve it for doubles.  Every register in
	       the range is written.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
6961
6962
6963 static int elf32_arm_compare_mapping (const void * a, const void * b);
6964
6965
6966 /* Look for potentially-troublesome code sequences which might trigger the
6967 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6968 (available from ARM) for details of the erratum. A short version is
6969 described in ld.texinfo. */
6970
6971 bfd_boolean
6972 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6973 {
6974 asection *sec;
6975 bfd_byte *contents = NULL;
6976 int state = 0;
6977 int regs[3], numregs = 0;
6978 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6979 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6980
6981 if (globals == NULL)
6982 return FALSE;
6983
6984 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6985 The states transition as follows:
6986
6987 0 -> 1 (vector) or 0 -> 2 (scalar)
6988 A VFP FMAC-pipeline instruction has been seen. Fill
6989 regs[0]..regs[numregs-1] with its input operands. Remember this
6990 instruction in 'first_fmac'.
6991
6992 1 -> 2
6993 Any instruction, except for a VFP instruction which overwrites
6994 regs[*].
6995
6996 1 -> 3 [ -> 0 ] or
6997 2 -> 3 [ -> 0 ]
6998 A VFP instruction has been seen which overwrites any of regs[*].
6999 We must make a veneer! Reset state to 0 before examining next
7000 instruction.
7001
7002 2 -> 0
7003 If we fail to match anything in state 2, reset to state 0 and reset
7004 the instruction pointer to the instruction after 'first_fmac'.
7005
7006 If the VFP11 vector mode is in use, there must be at least two unrelated
7007 instructions between anti-dependent VFP11 instructions to properly avoid
7008 triggering the erratum, hence the use of the extra state 1. */
7009
7010 /* If we are only performing a partial link do not bother
7011 to construct any glue. */
7012 if (bfd_link_relocatable (link_info))
7013 return TRUE;
7014
7015 /* Skip if this bfd does not correspond to an ELF image. */
7016 if (! is_arm_elf (abfd))
7017 return TRUE;
7018
7019 /* We should have chosen a fix type by the time we get here. */
7020 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7021
7022 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7023 return TRUE;
7024
7025 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7026 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7027 return TRUE;
7028
7029 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7030 {
7031 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7032 struct _arm_elf_section_data *sec_data;
7033
7034 /* If we don't have executable progbits, we're not interested in this
7035 section. Also skip if section is to be excluded. */
7036 if (elf_section_type (sec) != SHT_PROGBITS
7037 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7038 || (sec->flags & SEC_EXCLUDE) != 0
7039 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7040 || sec->output_section == bfd_abs_section_ptr
7041 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7042 continue;
7043
7044 sec_data = elf32_arm_section_data (sec);
7045
7046 if (sec_data->mapcount == 0)
7047 continue;
7048
7049 if (elf_section_data (sec)->this_hdr.contents != NULL)
7050 contents = elf_section_data (sec)->this_hdr.contents;
7051 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7052 goto error_return;
7053
7054 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7055 elf32_arm_compare_mapping);
7056
7057 for (span = 0; span < sec_data->mapcount; span++)
7058 {
7059 unsigned int span_start = sec_data->map[span].vma;
7060 unsigned int span_end = (span == sec_data->mapcount - 1)
7061 ? sec->size : sec_data->map[span + 1].vma;
7062 char span_type = sec_data->map[span].type;
7063
7064 /* FIXME: Only ARM mode is supported at present. We may need to
7065 support Thumb-2 mode also at some point. */
7066 if (span_type != 'a')
7067 continue;
7068
7069 for (i = span_start; i < span_end;)
7070 {
7071 unsigned int next_i = i + 4;
7072 unsigned int insn = bfd_big_endian (abfd)
7073 ? (contents[i] << 24)
7074 | (contents[i + 1] << 16)
7075 | (contents[i + 2] << 8)
7076 | contents[i + 3]
7077 : (contents[i + 3] << 24)
7078 | (contents[i + 2] << 16)
7079 | (contents[i + 1] << 8)
7080 | contents[i];
7081 unsigned int writemask = 0;
7082 enum bfd_arm_vfp11_pipe vpipe;
7083
7084 switch (state)
7085 {
7086 case 0:
7087 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7088 &numregs);
7089 /* I'm assuming the VFP11 erratum can trigger with denorm
7090 operands on either the FMAC or the DS pipeline. This might
7091 lead to slightly overenthusiastic veneer insertion. */
7092 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7093 {
7094 state = use_vector ? 1 : 2;
7095 first_fmac = i;
7096 veneer_of_insn = insn;
7097 }
7098 break;
7099
7100 case 1:
7101 {
7102 int other_regs[3], other_numregs;
7103 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7104 other_regs,
7105 &other_numregs);
7106 if (vpipe != VFP11_BAD
7107 && bfd_arm_vfp11_antidependency (writemask, regs,
7108 numregs))
7109 state = 3;
7110 else
7111 state = 2;
7112 }
7113 break;
7114
7115 case 2:
7116 {
7117 int other_regs[3], other_numregs;
7118 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7119 other_regs,
7120 &other_numregs);
7121 if (vpipe != VFP11_BAD
7122 && bfd_arm_vfp11_antidependency (writemask, regs,
7123 numregs))
7124 state = 3;
7125 else
7126 {
7127 state = 0;
7128 next_i = first_fmac + 4;
7129 }
7130 }
7131 break;
7132
7133 case 3:
7134 abort (); /* Should be unreachable. */
7135 }
7136
7137 if (state == 3)
7138 {
7139 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7140 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7141
7142 elf32_arm_section_data (sec)->erratumcount += 1;
7143
7144 newerr->u.b.vfp_insn = veneer_of_insn;
7145
7146 switch (span_type)
7147 {
7148 case 'a':
7149 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7150 break;
7151
7152 default:
7153 abort ();
7154 }
7155
7156 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7157 first_fmac);
7158
7159 newerr->vma = -1;
7160
7161 newerr->next = sec_data->erratumlist;
7162 sec_data->erratumlist = newerr;
7163
7164 state = 0;
7165 }
7166
7167 i = next_i;
7168 }
7169 }
7170
7171 if (contents != NULL
7172 && elf_section_data (sec)->this_hdr.contents != contents)
7173 free (contents);
7174 contents = NULL;
7175 }
7176
7177 return TRUE;
7178
7179 error_return:
7180 if (contents != NULL
7181 && elf_section_data (sec)->this_hdr.contents != contents)
7182 free (contents);
7183
7184 return FALSE;
7185 }
7186
7187 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7188 after sections have been laid out, using specially-named symbols. */
7189
7190 void
7191 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7192 struct bfd_link_info *link_info)
7193 {
7194 asection *sec;
7195 struct elf32_arm_link_hash_table *globals;
7196 char *tmp_name;
7197
7198 if (bfd_link_relocatable (link_info))
7199 return;
7200
7201 /* Skip if this bfd does not correspond to an ELF image. */
7202 if (! is_arm_elf (abfd))
7203 return;
7204
7205 globals = elf32_arm_hash_table (link_info);
7206 if (globals == NULL)
7207 return;
7208
7209 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7210 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7211
7212 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7213 {
7214 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7215 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7216
7217 for (; errnode != NULL; errnode = errnode->next)
7218 {
7219 struct elf_link_hash_entry *myh;
7220 bfd_vma vma;
7221
7222 switch (errnode->type)
7223 {
7224 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7225 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7226 /* Find veneer symbol. */
7227 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7228 errnode->u.b.veneer->u.v.id);
7229
7230 myh = elf_link_hash_lookup
7231 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7232
7233 if (myh == NULL)
7234 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7235 "`%s'"), abfd, tmp_name);
7236
7237 vma = myh->root.u.def.section->output_section->vma
7238 + myh->root.u.def.section->output_offset
7239 + myh->root.u.def.value;
7240
7241 errnode->u.b.veneer->vma = vma;
7242 break;
7243
7244 case VFP11_ERRATUM_ARM_VENEER:
7245 case VFP11_ERRATUM_THUMB_VENEER:
7246 /* Find return location. */
7247 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7248 errnode->u.v.id);
7249
7250 myh = elf_link_hash_lookup
7251 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7252
7253 if (myh == NULL)
7254 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7255 "`%s'"), abfd, tmp_name);
7256
7257 vma = myh->root.u.def.section->output_section->vma
7258 + myh->root.u.def.section->output_offset
7259 + myh->root.u.def.value;
7260
7261 errnode->u.v.branch->vma = vma;
7262 break;
7263
7264 default:
7265 abort ();
7266 }
7267 }
7268 }
7269
7270 free (tmp_name);
7271 }
7272
7273 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7274 return locations after sections have been laid out, using
7275 specially-named symbols. */
7276
7277 void
7278 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7279 struct bfd_link_info *link_info)
7280 {
7281 asection *sec;
7282 struct elf32_arm_link_hash_table *globals;
7283 char *tmp_name;
7284
7285 if (bfd_link_relocatable (link_info))
7286 return;
7287
7288 /* Skip if this bfd does not correspond to an ELF image. */
7289 if (! is_arm_elf (abfd))
7290 return;
7291
7292 globals = elf32_arm_hash_table (link_info);
7293 if (globals == NULL)
7294 return;
7295
7296 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7297 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7298
7299 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7300 {
7301 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7302 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7303
7304 for (; errnode != NULL; errnode = errnode->next)
7305 {
7306 struct elf_link_hash_entry *myh;
7307 bfd_vma vma;
7308
7309 switch (errnode->type)
7310 {
7311 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7312 /* Find veneer symbol. */
7313 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7314 errnode->u.b.veneer->u.v.id);
7315
7316 myh = elf_link_hash_lookup
7317 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7318
7319 if (myh == NULL)
7320 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7321 "`%s'"), abfd, tmp_name);
7322
7323 vma = myh->root.u.def.section->output_section->vma
7324 + myh->root.u.def.section->output_offset
7325 + myh->root.u.def.value;
7326
7327 errnode->u.b.veneer->vma = vma;
7328 break;
7329
7330 case STM32L4XX_ERRATUM_VENEER:
7331 /* Find return location. */
7332 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7333 errnode->u.v.id);
7334
7335 myh = elf_link_hash_lookup
7336 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7337
7338 if (myh == NULL)
7339 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7340 "`%s'"), abfd, tmp_name);
7341
7342 vma = myh->root.u.def.section->output_section->vma
7343 + myh->root.u.def.section->output_offset
7344 + myh->root.u.def.value;
7345
7346 errnode->u.v.branch->vma = vma;
7347 break;
7348
7349 default:
7350 abort ();
7351 }
7352 }
7353 }
7354
7355 free (tmp_name);
7356 }
7357
7358 static inline bfd_boolean
7359 is_thumb2_ldmia (const insn32 insn)
7360 {
7361 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7362 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7363 return (insn & 0xffd02000) == 0xe8900000;
7364 }
7365
7366 static inline bfd_boolean
7367 is_thumb2_ldmdb (const insn32 insn)
7368 {
7369 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7370 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7371 return (insn & 0xffd02000) == 0xe9100000;
7372 }
7373
7374 static inline bfd_boolean
7375 is_thumb2_vldm (const insn32 insn)
7376 {
7377 /* A6.5 Extension register load or store instruction
7378 A7.7.229
7379 We look only for the 32-bit registers case since the DP (64-bit
7380 registers) are not supported for STM32L4XX
7381 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7382 <list> is consecutive 32-bit registers
7383 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7384 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7385 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7386 return
7387 ((insn & 0xfe100f00) == 0xec100a00)
7388 && /* (IA without !). */
7389 (((((insn << 7) >> 28) & 0xd) == 0x4)
7390 /* (IA with !), includes VPOP (when reg number is SP). */
7391 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7392 /* (DB with !). */
7393 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7394 }
7395
7396 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7397 VLDM opcode and:
7398 - computes the number and the mode of memory accesses
7399 - decides if the replacement should be done:
7400 . replaces only if > 8-word accesses
7401 . or (testing purposes only) replaces all accesses. */
7402
7403 static bfd_boolean
7404 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7405 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7406 {
7407 int nb_regs = 0;
7408
7409 /* The field encoding the register list is the same for both LDMIA
7410 and LDMDB encodings. */
7411 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7412 nb_regs = popcount (insn & 0x0000ffff);
7413 else if (is_thumb2_vldm (insn))
7414 nb_regs = (insn & 0xff);
7415
7416 /* DEFAULT mode accounts for the real bug condition situation,
7417 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7418 return
7419 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_regs > 8 :
7420 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7421 }
7422
7423 /* Look for potentially-troublesome code sequences which might trigger
7424 the STM STM32L4XX erratum. */
7425
bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols: no code spans to examine.  */
      if (sec_data->mapcount == 0)
	continue;

      /* Reuse cached section contents when available; otherwise read
	 (and later free) our own copy.  */
      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort mapping symbols by address so each span's end is simply
	 the next entry's start.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  /* Number of instructions still controlled by the current IT
	     block; zero when outside any IT block.  */
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		 last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		 IT block, it will be controlled naturally by IT
		 with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		 tells that we are not on the last item of an IT
		 block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm) &&
		      stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  (*_bfd_error_handler)
			    /* Note - overlong line used here to allow for translation.  */
			    (_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
			       "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
			     abfd, sec, (long)i);
			}
		      else
			{
			  /* Record a new erratum workaround entry on this
			     section; its vma is filled in later by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
		    ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      /* Advance past a 32-bit or 16-bit instruction.  */
	      i += insn_32bit ? 4 : 2;
	    }
	}

      /* Free contents only if we allocated them ourselves above.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
7616
7617 /* Set target relocation values needed during linking. */
7618
7619 void
7620 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7621 struct bfd_link_info *link_info,
7622 int target1_is_rel,
7623 char * target2_type,
7624 int fix_v4bx,
7625 int use_blx,
7626 bfd_arm_vfp11_fix vfp11_fix,
7627 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7628 int no_enum_warn, int no_wchar_warn,
7629 int pic_veneer, int fix_cortex_a8,
7630 int fix_arm1176)
7631 {
7632 struct elf32_arm_link_hash_table *globals;
7633
7634 globals = elf32_arm_hash_table (link_info);
7635 if (globals == NULL)
7636 return;
7637
7638 globals->target1_is_rel = target1_is_rel;
7639 if (strcmp (target2_type, "rel") == 0)
7640 globals->target2_reloc = R_ARM_REL32;
7641 else if (strcmp (target2_type, "abs") == 0)
7642 globals->target2_reloc = R_ARM_ABS32;
7643 else if (strcmp (target2_type, "got-rel") == 0)
7644 globals->target2_reloc = R_ARM_GOT_PREL;
7645 else
7646 {
7647 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7648 target2_type);
7649 }
7650 globals->fix_v4bx = fix_v4bx;
7651 globals->use_blx |= use_blx;
7652 globals->vfp11_fix = vfp11_fix;
7653 globals->stm32l4xx_fix = stm32l4xx_fix;
7654 globals->pic_veneer = pic_veneer;
7655 globals->fix_cortex_a8 = fix_cortex_a8;
7656 globals->fix_arm1176 = fix_arm1176;
7657
7658 BFD_ASSERT (is_arm_elf (output_bfd));
7659 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7660 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7661 }
7662
7663 /* Replace the target offset of a Thumb bl or b.w instruction. */
7664
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;	/* First halfword of the instruction.  */
  bfd_vma lower;	/* Second halfword of the instruction.  */
  int reloc_sign;

  /* Thumb branch targets are halfword-aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  /* S bit: set for a backward (negative) offset.  */
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: keep the opcode bits, insert offset bits 21..12
     (imm10) and the sign bit S at bit 10.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: keep the opcode bits — including bit 12, which is
     left untouched by the 0x2fff mask and distinguishes bl from b.w —
     and insert offset bits 11..1 (imm11) plus J1/J2, each computed as
     NOT(offset bit) XOR S per the Thumb-2 branch encoding.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
7687
7688 /* Thumb code calling an ARM function. */
7689
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* The glue symbol's value is the offset of this stub within the
     Thumb-to-ARM glue section.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 set in the glue symbol's value marks a stub that has not
     been written out yet; emit it now and clear the marker.  */
  if ((my_offset & 0x01) == 0x01)
    {
      /* Refuse to build the stub if the callee's object was not
	 compiled with interworking enabled.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* The stub sequence: bx pc / nop (Thumb), then an ARM branch to
	 the real target.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
7788
7789 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7790
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* The glue symbol's value is the offset of the stub within the glue
     section S.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 set in the glue symbol's value marks a stub that has not
     been written out yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      /* Warn (but carry on, unlike the Thumb-to-ARM case) when the
	 callee's object was not compiled with interworking enabled.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      /* Clear the 'not yet written' marker.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  The low bit is set to
	     mark the destination as Thumb code.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* With blx available a two-word stub suffices: an ldr plus
	     the literal target address.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
7883
7884 /* Arm code calling a Thumb function. */
7885
static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Make sure the glue stub for this target exists and is populated.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch; its
     24-bit offset field is recomputed below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Insert the word offset to the stub into the branch instruction.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
7940
7941 /* Populate Arm stub for an exported Thumb function. */
7942
static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  /* Called via elf_link_hash_traverse; INF carries the link info.  */
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the exported Thumb function.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  /* Always continue the traversal.  */
  return TRUE;
}
7984
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
7986
7987 static bfd_vma
7988 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7989 {
7990 bfd_byte *p;
7991 bfd_vma glue_addr;
7992 asection *s;
7993 struct elf32_arm_link_hash_table *globals;
7994
7995 globals = elf32_arm_hash_table (info);
7996 BFD_ASSERT (globals != NULL);
7997 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7998
7999 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8000 ARM_BX_GLUE_SECTION_NAME);
8001 BFD_ASSERT (s != NULL);
8002 BFD_ASSERT (s->contents != NULL);
8003 BFD_ASSERT (s->output_section != NULL);
8004
8005 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
8006
8007 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
8008
8009 if ((globals->bx_glue_offset[reg] & 1) == 0)
8010 {
8011 p = s->contents + glue_addr;
8012 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
8013 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
8014 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
8015 globals->bx_glue_offset[reg] |= 1;
8016 }
8017
8018 return glue_addr + s->output_section->vma + s->output_offset;
8019 }
8020
8021 /* Generate Arm stubs for exported Thumb symbols. */
8022 static void
8023 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8024 struct bfd_link_info *link_info)
8025 {
8026 struct elf32_arm_link_hash_table * globals;
8027
8028 if (link_info == NULL)
8029 /* Ignore this if we are not called by the ELF backend linker. */
8030 return;
8031
8032 globals = elf32_arm_hash_table (link_info);
8033 if (globals == NULL)
8034 return;
8035
8036 /* If blx is available then exported Thumb symbols are OK and there is
8037 nothing to do. */
8038 if (globals->use_blx)
8039 return;
8040
8041 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8042 link_info);
8043 }
8044
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
8047
8048 static void
8049 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8050 bfd_size_type count)
8051 {
8052 struct elf32_arm_link_hash_table *htab;
8053
8054 htab = elf32_arm_hash_table (info);
8055 BFD_ASSERT (htab->root.dynamic_sections_created);
8056 if (sreloc == NULL)
8057 abort ();
8058 sreloc->size += RELOC_SIZE (htab) * count;
8059 }
8060
8061 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8062 dynamic, the relocations should go in SRELOC, otherwise they should
8063 go in the special .rel.iplt section. */
8064
8065 static void
8066 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8067 bfd_size_type count)
8068 {
8069 struct elf32_arm_link_hash_table *htab;
8070
8071 htab = elf32_arm_hash_table (info);
8072 if (!htab->root.dynamic_sections_created)
8073 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8074 else
8075 {
8076 BFD_ASSERT (sreloc != NULL);
8077 sreloc->size += RELOC_SIZE (htab) * count;
8078 }
8079 }
8080
8081 /* Add relocation REL to the end of relocation section SRELOC. */
8082
8083 static void
8084 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8085 asection *sreloc, Elf_Internal_Rela *rel)
8086 {
8087 bfd_byte *loc;
8088 struct elf32_arm_link_hash_table *htab;
8089
8090 htab = elf32_arm_hash_table (info);
8091 if (!htab->root.dynamic_sections_created
8092 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8093 sreloc = htab->root.irelplt;
8094 if (sreloc == NULL)
8095 abort ();
8096 loc = sreloc->contents;
8097 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8098 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8099 abort ();
8100 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8101 }
8102
8103 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8104 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8105 to .plt. */
8106
static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The recorded offset is where the ARM code proper starts, i.e.
     just past the stub, which is why the stub size is added first.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	/* NOTE(review): the 8 * num_tls_desc discount appears to
	   account for TLS descriptor slots (two words each) that share
	   .got.plt but have no PLT entry -- confirm against the TLS
	   descriptor allocation code.  */
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
8164
8165 static bfd_vma
8166 arm_movw_immediate (bfd_vma value)
8167 {
8168 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8169 }
8170
8171 static bfd_vma
8172 arm_movt_immediate (bfd_vma value)
8173 {
8174 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8175 }
8176
8177 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8178 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8179 Otherwise, DYNINDX is the index of the symbol in the dynamic
8180 symbol table and SYM_VALUE is undefined.
8181
8182 ROOT_PLT points to the offset of the PLT entry from the start of its
8183 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8184 bookkeeping information.
8185
8186 Returns FALSE if there was a problem. */
8187
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      /* The second word is data, patched at run time via the
	 R_ARM_GLOB_DAT reloc emitted just below, so it is written with
	 bfd_put_32 rather than put_arm_insn.  */
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  The low bit is used as an
	 already-populated flag, so mask it off here.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared PLT entry.  Words 2 and 5 are data (the
	     GOT offset of the entry and the offset of this entry's
	     slot in .rel.plt respectively); the rest are
	     instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT entry.  Word 2 holds the absolute
	     GOT address, word 4 a branch back towards the PLT header
	     (a negative pc-relative word displacement, adjusted for
	     the pc+8 pipeline offset), word 5 the .rel.plt slot
	     offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  /* Each PLT entry owns two consecutive slots in
	     .rela.plt.unloaded; index past the header's pair.  */
	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit in a 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* The GOT displacement is materialised with a MOVW/MOVT
	     pair; the last word is the branch to the common tail.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The displacement bits are
	     scattered across the two halfwords of each Thumb-2
	     instruction, hence the shifts below.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  /* The Thumb stub, if any, sits immediately before the ARM
	     entry (PTR points just past it).  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* Short form: the displacement is split into three
		 8/8/12-bit chunks, so it must fit in 28 bits.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* Long form: four chunks cover the full 32-bit
		 displacement.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  /* Emit the relocation built above: IRELATIVE relocs are appended
     (their section keeps a running count), whereas JUMP_SLOT relocs
     occupy fixed slots indexed by PLT position.  */
  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8508
8509 /* Some relocations map to different relocations depending on the
8510 target. Return the real relocation. */
8511
8512 static int
8513 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8514 int r_type)
8515 {
8516 switch (r_type)
8517 {
8518 case R_ARM_TARGET1:
8519 if (globals->target1_is_rel)
8520 return R_ARM_REL32;
8521 else
8522 return R_ARM_ABS32;
8523
8524 case R_ARM_TARGET2:
8525 return globals->target2_reloc;
8526
8527 default:
8528 return r_type;
8529 }
8530 }
8531
8532 /* Return the base VMA address which should be subtracted from real addresses
8533 when resolving @dtpoff relocation.
8534 This is PT_TLS segment p_vaddr. */
8535
8536 static bfd_vma
8537 dtpoff_base (struct bfd_link_info *info)
8538 {
8539 /* If tls_sec is NULL, we should have signalled an error already. */
8540 if (elf_hash_table (info)->tls_sec == NULL)
8541 return 0;
8542 return elf_hash_table (info)->tls_sec->vma;
8543 }
8544
8545 /* Return the relocation value for @tpoff relocation
8546 if STT_TLS virtual address is ADDRESS. */
8547
8548 static bfd_vma
8549 tpoff (struct bfd_link_info *info, bfd_vma address)
8550 {
8551 struct elf_link_hash_table *htab = elf_hash_table (info);
8552 bfd_vma base;
8553
8554 /* If tls_sec is NULL, we should have signalled an error already. */
8555 if (htab->tls_sec == NULL)
8556 return 0;
8557 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8558 return address - htab->tls_sec->vma + base;
8559 }
8560
8561 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8562 VALUE is the relocation value. */
8563
8564 static bfd_reloc_status_type
8565 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8566 {
8567 if (value > 0xfff)
8568 return bfd_reloc_overflow;
8569
8570 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8571 bfd_put_32 (abfd, value, data);
8572 return bfd_reloc_ok;
8573 }
8574
8575 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8576 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8577 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8578
8579 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8580 is to then call final_link_relocate. Return other values in the
8581 case of error.
8582
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.   */
8586
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      if (is_local)
	insn = 0;
      else
	{
	  /* The stored word's low bit distinguishes a Thumb target;
	     adjust the bias accordingly (the -5/-8 values correspond
	     to the different pc-read offsets of the two states).  */
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller still needs to apply the final relocation.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Recognise each instruction of the descriptor
	 sequence by its opcode pattern and rewrite it in place.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewriting as the Thumb case above, using the
	 ARM encodings.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  The replacement occupies two Thumb
	 halfwords, written separately below.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (arch_has_thumb2_nop (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
8719
8720 /* For a given value of n, calculate the value of G_n as required to
8721 deal with group relocations. We return it in the form of an
8722 encoded constant-and-rotation, together with the final residual. If n is
8723 specified as less than zero, then final_residual is filled with the
8724 input value and no further action is performed. */
8725
8726 static bfd_vma
8727 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
8728 {
8729 int current_n;
8730 bfd_vma g_n;
8731 bfd_vma encoded_g_n = 0;
8732 bfd_vma residual = value; /* Also known as Y_n. */
8733
8734 for (current_n = 0; current_n <= n; current_n++)
8735 {
8736 int shift;
8737
8738 /* Calculate which part of the value to mask. */
8739 if (residual == 0)
8740 shift = 0;
8741 else
8742 {
8743 int msb;
8744
8745 /* Determine the most significant bit in the residual and
8746 align the resulting value to a 2-bit boundary. */
8747 for (msb = 30; msb >= 0; msb -= 2)
8748 if (residual & (3 << msb))
8749 break;
8750
8751 /* The desired shift is now (msb - 6), or zero, whichever
8752 is the greater. */
8753 shift = msb - 6;
8754 if (shift < 0)
8755 shift = 0;
8756 }
8757
8758 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
8759 g_n = residual & (0xff << shift);
8760 encoded_g_n = (g_n >> shift)
8761 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
8762
8763 /* Calculate the residual for the next time around. */
8764 residual &= ~g_n;
8765 }
8766
8767 *final_residual = residual;
8768
8769 return encoded_g_n;
8770 }
8771
8772 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8773 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8774
8775 static int
8776 identify_add_or_sub (bfd_vma insn)
8777 {
8778 int opcode = insn & 0x1e00000;
8779
8780 if (opcode == 1 << 23) /* ADD */
8781 return 1;
8782
8783 if (opcode == 1 << 22) /* SUB */
8784 return -1;
8785
8786 return 0;
8787 }
8788
8789 /* Perform a relocation as part of a final link. */
8790
8791 static bfd_reloc_status_type
8792 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8793 bfd * input_bfd,
8794 bfd * output_bfd,
8795 asection * input_section,
8796 bfd_byte * contents,
8797 Elf_Internal_Rela * rel,
8798 bfd_vma value,
8799 struct bfd_link_info * info,
8800 asection * sym_sec,
8801 const char * sym_name,
8802 unsigned char st_type,
8803 enum arm_st_branch_type branch_type,
8804 struct elf_link_hash_entry * h,
8805 bfd_boolean * unresolved_reloc_p,
8806 char ** error_message)
8807 {
8808 unsigned long r_type = howto->type;
8809 unsigned long r_symndx;
8810 bfd_byte * hit_data = contents + rel->r_offset;
8811 bfd_vma * local_got_offsets;
8812 bfd_vma * local_tlsdesc_gotents;
8813 asection * sgot;
8814 asection * splt;
8815 asection * sreloc = NULL;
8816 asection * srelgot;
8817 bfd_vma addend;
8818 bfd_signed_vma signed_addend;
8819 unsigned char dynreloc_st_type;
8820 bfd_vma dynreloc_value;
8821 struct elf32_arm_link_hash_table * globals;
8822 struct elf32_arm_link_hash_entry *eh;
8823 union gotplt_union *root_plt;
8824 struct arm_plt_info *arm_plt;
8825 bfd_vma plt_offset;
8826 bfd_vma gotplt_offset;
8827 bfd_boolean has_iplt_entry;
8828
8829 globals = elf32_arm_hash_table (info);
8830 if (globals == NULL)
8831 return bfd_reloc_notsupported;
8832
8833 BFD_ASSERT (is_arm_elf (input_bfd));
8834
8835 /* Some relocation types map to different relocations depending on the
8836 target. We pick the right one here. */
8837 r_type = arm_real_reloc_type (globals, r_type);
8838
8839 /* It is possible to have linker relaxations on some TLS access
8840 models. Update our information here. */
8841 r_type = elf32_arm_tls_transition (info, r_type, h);
8842
8843 if (r_type != howto->type)
8844 howto = elf32_arm_howto_from_type (r_type);
8845
8846 eh = (struct elf32_arm_link_hash_entry *) h;
8847 sgot = globals->root.sgot;
8848 local_got_offsets = elf_local_got_offsets (input_bfd);
8849 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8850
8851 if (globals->root.dynamic_sections_created)
8852 srelgot = globals->root.srelgot;
8853 else
8854 srelgot = NULL;
8855
8856 r_symndx = ELF32_R_SYM (rel->r_info);
8857
8858 if (globals->use_rel)
8859 {
8860 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8861
8862 if (addend & ((howto->src_mask + 1) >> 1))
8863 {
8864 signed_addend = -1;
8865 signed_addend &= ~ howto->src_mask;
8866 signed_addend |= addend;
8867 }
8868 else
8869 signed_addend = addend;
8870 }
8871 else
8872 addend = signed_addend = rel->r_addend;
8873
8874 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
8875 are resolving a function call relocation. */
8876 if (using_thumb_only (globals)
8877 && (r_type == R_ARM_THM_CALL
8878 || r_type == R_ARM_THM_JUMP24)
8879 && branch_type == ST_BRANCH_TO_ARM)
8880 branch_type = ST_BRANCH_TO_THUMB;
8881
8882 /* Record the symbol information that should be used in dynamic
8883 relocations. */
8884 dynreloc_st_type = st_type;
8885 dynreloc_value = value;
8886 if (branch_type == ST_BRANCH_TO_THUMB)
8887 dynreloc_value |= 1;
8888
8889 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8890 VALUE appropriately for relocations that we resolve at link time. */
8891 has_iplt_entry = FALSE;
8892 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8893 && root_plt->offset != (bfd_vma) -1)
8894 {
8895 plt_offset = root_plt->offset;
8896 gotplt_offset = arm_plt->got_offset;
8897
8898 if (h == NULL || eh->is_iplt)
8899 {
8900 has_iplt_entry = TRUE;
8901 splt = globals->root.iplt;
8902
8903 /* Populate .iplt entries here, because not all of them will
8904 be seen by finish_dynamic_symbol. The lower bit is set if
8905 we have already populated the entry. */
8906 if (plt_offset & 1)
8907 plt_offset--;
8908 else
8909 {
8910 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8911 -1, dynreloc_value))
8912 root_plt->offset |= 1;
8913 else
8914 return bfd_reloc_notsupported;
8915 }
8916
8917 /* Static relocations always resolve to the .iplt entry. */
8918 st_type = STT_FUNC;
8919 value = (splt->output_section->vma
8920 + splt->output_offset
8921 + plt_offset);
8922 branch_type = ST_BRANCH_TO_ARM;
8923
8924 /* If there are non-call relocations that resolve to the .iplt
8925 entry, then all dynamic ones must too. */
8926 if (arm_plt->noncall_refcount != 0)
8927 {
8928 dynreloc_st_type = st_type;
8929 dynreloc_value = value;
8930 }
8931 }
8932 else
8933 /* We populate the .plt entry in finish_dynamic_symbol. */
8934 splt = globals->root.splt;
8935 }
8936 else
8937 {
8938 splt = NULL;
8939 plt_offset = (bfd_vma) -1;
8940 gotplt_offset = (bfd_vma) -1;
8941 }
8942
8943 switch (r_type)
8944 {
8945 case R_ARM_NONE:
8946 /* We don't need to find a value for this symbol. It's just a
8947 marker. */
8948 *unresolved_reloc_p = FALSE;
8949 return bfd_reloc_ok;
8950
8951 case R_ARM_ABS12:
8952 if (!globals->vxworks_p)
8953 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8954
8955 case R_ARM_PC24:
8956 case R_ARM_ABS32:
8957 case R_ARM_ABS32_NOI:
8958 case R_ARM_REL32:
8959 case R_ARM_REL32_NOI:
8960 case R_ARM_CALL:
8961 case R_ARM_JUMP24:
8962 case R_ARM_XPC25:
8963 case R_ARM_PREL31:
8964 case R_ARM_PLT32:
8965 /* Handle relocations which should use the PLT entry. ABS32/REL32
8966 will use the symbol's value, which may point to a PLT entry, but we
8967 don't need to handle that here. If we created a PLT entry, all
8968 branches in this object should go to it, except if the PLT is too
8969 far away, in which case a long branch stub should be inserted. */
8970 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8971 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8972 && r_type != R_ARM_CALL
8973 && r_type != R_ARM_JUMP24
8974 && r_type != R_ARM_PLT32)
8975 && plt_offset != (bfd_vma) -1)
8976 {
8977 /* If we've created a .plt section, and assigned a PLT entry
8978 to this function, it must either be a STT_GNU_IFUNC reference
8979 or not be known to bind locally. In other cases, we should
8980 have cleared the PLT entry by now. */
8981 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8982
8983 value = (splt->output_section->vma
8984 + splt->output_offset
8985 + plt_offset);
8986 *unresolved_reloc_p = FALSE;
8987 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8988 contents, rel->r_offset, value,
8989 rel->r_addend);
8990 }
8991
8992 /* When generating a shared object or relocatable executable, these
8993 relocations are copied into the output file to be resolved at
8994 run time. */
8995 if ((bfd_link_pic (info)
8996 || globals->root.is_relocatable_executable)
8997 && (input_section->flags & SEC_ALLOC)
8998 && !(globals->vxworks_p
8999 && strcmp (input_section->output_section->name,
9000 ".tls_vars") == 0)
9001 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9002 || !SYMBOL_CALLS_LOCAL (info, h))
9003 && !(input_bfd == globals->stub_bfd
9004 && strstr (input_section->name, STUB_SUFFIX))
9005 && (h == NULL
9006 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9007 || h->root.type != bfd_link_hash_undefweak)
9008 && r_type != R_ARM_PC24
9009 && r_type != R_ARM_CALL
9010 && r_type != R_ARM_JUMP24
9011 && r_type != R_ARM_PREL31
9012 && r_type != R_ARM_PLT32)
9013 {
9014 Elf_Internal_Rela outrel;
9015 bfd_boolean skip, relocate;
9016
9017 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9018 && !h->def_regular)
9019 {
9020 char *v = _("shared object");
9021
9022 if (bfd_link_executable (info))
9023 v = _("PIE executable");
9024
9025 (*_bfd_error_handler)
9026 (_("%B: relocation %s against external or undefined symbol `%s'"
9027 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9028 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9029 return bfd_reloc_notsupported;
9030 }
9031
9032 *unresolved_reloc_p = FALSE;
9033
9034 if (sreloc == NULL && globals->root.dynamic_sections_created)
9035 {
9036 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9037 ! globals->use_rel);
9038
9039 if (sreloc == NULL)
9040 return bfd_reloc_notsupported;
9041 }
9042
9043 skip = FALSE;
9044 relocate = FALSE;
9045
9046 outrel.r_addend = addend;
9047 outrel.r_offset =
9048 _bfd_elf_section_offset (output_bfd, info, input_section,
9049 rel->r_offset);
9050 if (outrel.r_offset == (bfd_vma) -1)
9051 skip = TRUE;
9052 else if (outrel.r_offset == (bfd_vma) -2)
9053 skip = TRUE, relocate = TRUE;
9054 outrel.r_offset += (input_section->output_section->vma
9055 + input_section->output_offset);
9056
9057 if (skip)
9058 memset (&outrel, 0, sizeof outrel);
9059 else if (h != NULL
9060 && h->dynindx != -1
9061 && (!bfd_link_pic (info)
9062 || !SYMBOLIC_BIND (info, h)
9063 || !h->def_regular))
9064 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9065 else
9066 {
9067 int symbol;
9068
9069 /* This symbol is local, or marked to become local. */
9070 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9071 if (globals->symbian_p)
9072 {
9073 asection *osec;
9074
9075 /* On Symbian OS, the data segment and text segement
9076 can be relocated independently. Therefore, we
9077 must indicate the segment to which this
9078 relocation is relative. The BPABI allows us to
9079 use any symbol in the right segment; we just use
9080 the section symbol as it is convenient. (We
9081 cannot use the symbol given by "h" directly as it
9082 will not appear in the dynamic symbol table.)
9083
9084 Note that the dynamic linker ignores the section
9085 symbol value, so we don't subtract osec->vma
9086 from the emitted reloc addend. */
9087 if (sym_sec)
9088 osec = sym_sec->output_section;
9089 else
9090 osec = input_section->output_section;
9091 symbol = elf_section_data (osec)->dynindx;
9092 if (symbol == 0)
9093 {
9094 struct elf_link_hash_table *htab = elf_hash_table (info);
9095
9096 if ((osec->flags & SEC_READONLY) == 0
9097 && htab->data_index_section != NULL)
9098 osec = htab->data_index_section;
9099 else
9100 osec = htab->text_index_section;
9101 symbol = elf_section_data (osec)->dynindx;
9102 }
9103 BFD_ASSERT (symbol != 0);
9104 }
9105 else
9106 /* On SVR4-ish systems, the dynamic loader cannot
9107 relocate the text and data segments independently,
9108 so the symbol does not matter. */
9109 symbol = 0;
9110 if (dynreloc_st_type == STT_GNU_IFUNC)
9111 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9112 to the .iplt entry. Instead, every non-call reference
9113 must use an R_ARM_IRELATIVE relocation to obtain the
9114 correct run-time address. */
9115 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9116 else
9117 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9118 if (globals->use_rel)
9119 relocate = TRUE;
9120 else
9121 outrel.r_addend += dynreloc_value;
9122 }
9123
9124 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9125
9126 /* If this reloc is against an external symbol, we do not want to
9127 fiddle with the addend. Otherwise, we need to include the symbol
9128 value so that it becomes an addend for the dynamic reloc. */
9129 if (! relocate)
9130 return bfd_reloc_ok;
9131
9132 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9133 contents, rel->r_offset,
9134 dynreloc_value, (bfd_vma) 0);
9135 }
9136 else switch (r_type)
9137 {
9138 case R_ARM_ABS12:
9139 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9140
9141 case R_ARM_XPC25: /* Arm BLX instruction. */
9142 case R_ARM_CALL:
9143 case R_ARM_JUMP24:
9144 case R_ARM_PC24: /* Arm B/BL instruction. */
9145 case R_ARM_PLT32:
9146 {
9147 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9148
9149 if (r_type == R_ARM_XPC25)
9150 {
9151 /* Check for Arm calling Arm function. */
9152 /* FIXME: Should we translate the instruction into a BL
9153 instruction instead ? */
9154 if (branch_type != ST_BRANCH_TO_THUMB)
9155 (*_bfd_error_handler)
9156 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9157 input_bfd,
9158 h ? h->root.root.string : "(local)");
9159 }
9160 else if (r_type == R_ARM_PC24)
9161 {
9162 /* Check for Arm calling Thumb function. */
9163 if (branch_type == ST_BRANCH_TO_THUMB)
9164 {
9165 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9166 output_bfd, input_section,
9167 hit_data, sym_sec, rel->r_offset,
9168 signed_addend, value,
9169 error_message))
9170 return bfd_reloc_ok;
9171 else
9172 return bfd_reloc_dangerous;
9173 }
9174 }
9175
9176 /* Check if a stub has to be inserted because the
9177 destination is too far or we are changing mode. */
9178 if ( r_type == R_ARM_CALL
9179 || r_type == R_ARM_JUMP24
9180 || r_type == R_ARM_PLT32)
9181 {
9182 enum elf32_arm_stub_type stub_type = arm_stub_none;
9183 struct elf32_arm_link_hash_entry *hash;
9184
9185 hash = (struct elf32_arm_link_hash_entry *) h;
9186 stub_type = arm_type_of_stub (info, input_section, rel,
9187 st_type, &branch_type,
9188 hash, value, sym_sec,
9189 input_bfd, sym_name);
9190
9191 if (stub_type != arm_stub_none)
9192 {
9193 /* The target is out of reach, so redirect the
9194 branch to the local stub for this function. */
9195 stub_entry = elf32_arm_get_stub_entry (input_section,
9196 sym_sec, h,
9197 rel, globals,
9198 stub_type);
9199 {
9200 if (stub_entry != NULL)
9201 value = (stub_entry->stub_offset
9202 + stub_entry->stub_sec->output_offset
9203 + stub_entry->stub_sec->output_section->vma);
9204
9205 if (plt_offset != (bfd_vma) -1)
9206 *unresolved_reloc_p = FALSE;
9207 }
9208 }
9209 else
9210 {
9211 /* If the call goes through a PLT entry, make sure to
9212 check distance to the right destination address. */
9213 if (plt_offset != (bfd_vma) -1)
9214 {
9215 value = (splt->output_section->vma
9216 + splt->output_offset
9217 + plt_offset);
9218 *unresolved_reloc_p = FALSE;
9219 /* The PLT entry is in ARM mode, regardless of the
9220 target function. */
9221 branch_type = ST_BRANCH_TO_ARM;
9222 }
9223 }
9224 }
9225
9226 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9227 where:
9228 S is the address of the symbol in the relocation.
9229 P is address of the instruction being relocated.
9230 A is the addend (extracted from the instruction) in bytes.
9231
9232 S is held in 'value'.
9233 P is the base address of the section containing the
9234 instruction plus the offset of the reloc into that
9235 section, ie:
9236 (input_section->output_section->vma +
9237 input_section->output_offset +
9238 rel->r_offset).
9239 A is the addend, converted into bytes, ie:
9240 (signed_addend * 4)
9241
9242 Note: None of these operations have knowledge of the pipeline
9243 size of the processor, thus it is up to the assembler to
9244 encode this information into the addend. */
9245 value -= (input_section->output_section->vma
9246 + input_section->output_offset);
9247 value -= rel->r_offset;
9248 if (globals->use_rel)
9249 value += (signed_addend << howto->size);
9250 else
9251 /* RELA addends do not have to be adjusted by howto->size. */
9252 value += signed_addend;
9253
9254 signed_addend = value;
9255 signed_addend >>= howto->rightshift;
9256
9257 /* A branch to an undefined weak symbol is turned into a jump to
9258 the next instruction unless a PLT entry will be created.
9259 Do the same for local undefined symbols (but not for STN_UNDEF).
9260 The jump to the next instruction is optimized as a NOP depending
9261 on the architecture. */
9262 if (h ? (h->root.type == bfd_link_hash_undefweak
9263 && plt_offset == (bfd_vma) -1)
9264 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9265 {
9266 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9267
9268 if (arch_has_arm_nop (globals))
9269 value |= 0x0320f000;
9270 else
9271 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9272 }
9273 else
9274 {
9275 /* Perform a signed range check. */
9276 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9277 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9278 return bfd_reloc_overflow;
9279
9280 addend = (value & 2);
9281
9282 value = (signed_addend & howto->dst_mask)
9283 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9284
9285 if (r_type == R_ARM_CALL)
9286 {
9287 /* Set the H bit in the BLX instruction. */
9288 if (branch_type == ST_BRANCH_TO_THUMB)
9289 {
9290 if (addend)
9291 value |= (1 << 24);
9292 else
9293 value &= ~(bfd_vma)(1 << 24);
9294 }
9295
9296 /* Select the correct instruction (BL or BLX). */
9297 /* Only if we are not handling a BL to a stub. In this
9298 case, mode switching is performed by the stub. */
9299 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9300 value |= (1 << 28);
9301 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9302 {
9303 value &= ~(bfd_vma)(1 << 28);
9304 value |= (1 << 24);
9305 }
9306 }
9307 }
9308 }
9309 break;
9310
9311 case R_ARM_ABS32:
9312 value += addend;
9313 if (branch_type == ST_BRANCH_TO_THUMB)
9314 value |= 1;
9315 break;
9316
9317 case R_ARM_ABS32_NOI:
9318 value += addend;
9319 break;
9320
9321 case R_ARM_REL32:
9322 value += addend;
9323 if (branch_type == ST_BRANCH_TO_THUMB)
9324 value |= 1;
9325 value -= (input_section->output_section->vma
9326 + input_section->output_offset + rel->r_offset);
9327 break;
9328
9329 case R_ARM_REL32_NOI:
9330 value += addend;
9331 value -= (input_section->output_section->vma
9332 + input_section->output_offset + rel->r_offset);
9333 break;
9334
9335 case R_ARM_PREL31:
9336 value -= (input_section->output_section->vma
9337 + input_section->output_offset + rel->r_offset);
9338 value += signed_addend;
9339 if (! h || h->root.type != bfd_link_hash_undefweak)
9340 {
9341 /* Check for overflow. */
9342 if ((value ^ (value >> 1)) & (1 << 30))
9343 return bfd_reloc_overflow;
9344 }
9345 value &= 0x7fffffff;
9346 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9347 if (branch_type == ST_BRANCH_TO_THUMB)
9348 value |= 1;
9349 break;
9350 }
9351
9352 bfd_put_32 (input_bfd, value, hit_data);
9353 return bfd_reloc_ok;
9354
9355 case R_ARM_ABS8:
9356 /* PR 16202: Refetch the addend using the correct size. */
9357 if (globals->use_rel)
9358 addend = bfd_get_8 (input_bfd, hit_data);
9359 value += addend;
9360
9361 /* There is no way to tell whether the user intended to use a signed or
9362 unsigned addend. When checking for overflow we accept either,
9363 as specified by the AAELF. */
9364 if ((long) value > 0xff || (long) value < -0x80)
9365 return bfd_reloc_overflow;
9366
9367 bfd_put_8 (input_bfd, value, hit_data);
9368 return bfd_reloc_ok;
9369
9370 case R_ARM_ABS16:
9371 /* PR 16202: Refetch the addend using the correct size. */
9372 if (globals->use_rel)
9373 addend = bfd_get_16 (input_bfd, hit_data);
9374 value += addend;
9375
9376 /* See comment for R_ARM_ABS8. */
9377 if ((long) value > 0xffff || (long) value < -0x8000)
9378 return bfd_reloc_overflow;
9379
9380 bfd_put_16 (input_bfd, value, hit_data);
9381 return bfd_reloc_ok;
9382
9383 case R_ARM_THM_ABS5:
9384 /* Support ldr and str instructions for the thumb. */
9385 if (globals->use_rel)
9386 {
9387 /* Need to refetch addend. */
9388 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9389 /* ??? Need to determine shift amount from operand size. */
9390 addend >>= howto->rightshift;
9391 }
9392 value += addend;
9393
9394 /* ??? Isn't value unsigned? */
9395 if ((long) value > 0x1f || (long) value < -0x10)
9396 return bfd_reloc_overflow;
9397
9398 /* ??? Value needs to be properly shifted into place first. */
9399 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9400 bfd_put_16 (input_bfd, value, hit_data);
9401 return bfd_reloc_ok;
9402
9403 case R_ARM_THM_ALU_PREL_11_0:
9404 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9405 {
9406 bfd_vma insn;
9407 bfd_signed_vma relocation;
9408
9409 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9410 | bfd_get_16 (input_bfd, hit_data + 2);
9411
9412 if (globals->use_rel)
9413 {
9414 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9415 | ((insn & (1 << 26)) >> 15);
9416 if (insn & 0xf00000)
9417 signed_addend = -signed_addend;
9418 }
9419
9420 relocation = value + signed_addend;
9421 relocation -= Pa (input_section->output_section->vma
9422 + input_section->output_offset
9423 + rel->r_offset);
9424
9425 value = relocation;
9426
9427 if (value >= 0x1000)
9428 return bfd_reloc_overflow;
9429
9430 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9431 | ((value & 0x700) << 4)
9432 | ((value & 0x800) << 15);
9433 if (relocation < 0)
9434 insn |= 0xa00000;
9435
9436 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9437 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9438
9439 return bfd_reloc_ok;
9440 }
9441
9442 case R_ARM_THM_PC8:
9443 /* PR 10073: This reloc is not generated by the GNU toolchain,
9444 but it is supported for compatibility with third party libraries
9445 generated by other compilers, specifically the ARM/IAR. */
9446 {
9447 bfd_vma insn;
9448 bfd_signed_vma relocation;
9449
9450 insn = bfd_get_16 (input_bfd, hit_data);
9451
9452 if (globals->use_rel)
9453 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9454
9455 relocation = value + addend;
9456 relocation -= Pa (input_section->output_section->vma
9457 + input_section->output_offset
9458 + rel->r_offset);
9459
9460 value = relocation;
9461
9462 /* We do not check for overflow of this reloc. Although strictly
9463 speaking this is incorrect, it appears to be necessary in order
9464 to work with IAR generated relocs. Since GCC and GAS do not
9465 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9466 a problem for them. */
9467 value &= 0x3fc;
9468
9469 insn = (insn & 0xff00) | (value >> 2);
9470
9471 bfd_put_16 (input_bfd, insn, hit_data);
9472
9473 return bfd_reloc_ok;
9474 }
9475
9476 case R_ARM_THM_PC12:
9477 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9478 {
9479 bfd_vma insn;
9480 bfd_signed_vma relocation;
9481
9482 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9483 | bfd_get_16 (input_bfd, hit_data + 2);
9484
9485 if (globals->use_rel)
9486 {
9487 signed_addend = insn & 0xfff;
9488 if (!(insn & (1 << 23)))
9489 signed_addend = -signed_addend;
9490 }
9491
9492 relocation = value + signed_addend;
9493 relocation -= Pa (input_section->output_section->vma
9494 + input_section->output_offset
9495 + rel->r_offset);
9496
9497 value = relocation;
9498
9499 if (value >= 0x1000)
9500 return bfd_reloc_overflow;
9501
9502 insn = (insn & 0xff7ff000) | value;
9503 if (relocation >= 0)
9504 insn |= (1 << 23);
9505
9506 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9507 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9508
9509 return bfd_reloc_ok;
9510 }
9511
9512 case R_ARM_THM_XPC22:
9513 case R_ARM_THM_CALL:
9514 case R_ARM_THM_JUMP24:
9515 /* Thumb BL (branch long instruction). */
9516 {
9517 bfd_vma relocation;
9518 bfd_vma reloc_sign;
9519 bfd_boolean overflow = FALSE;
9520 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9521 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9522 bfd_signed_vma reloc_signed_max;
9523 bfd_signed_vma reloc_signed_min;
9524 bfd_vma check;
9525 bfd_signed_vma signed_check;
9526 int bitsize;
9527 const int thumb2 = using_thumb2 (globals);
9528
9529 /* A branch to an undefined weak symbol is turned into a jump to
9530 the next instruction unless a PLT entry will be created.
9531 The jump to the next instruction is optimized as a NOP.W for
9532 Thumb-2 enabled architectures. */
9533 if (h && h->root.type == bfd_link_hash_undefweak
9534 && plt_offset == (bfd_vma) -1)
9535 {
9536 if (arch_has_thumb2_nop (globals))
9537 {
9538 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9539 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9540 }
9541 else
9542 {
9543 bfd_put_16 (input_bfd, 0xe000, hit_data);
9544 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9545 }
9546 return bfd_reloc_ok;
9547 }
9548
9549 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9550 with Thumb-1) involving the J1 and J2 bits. */
9551 if (globals->use_rel)
9552 {
9553 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9554 bfd_vma upper = upper_insn & 0x3ff;
9555 bfd_vma lower = lower_insn & 0x7ff;
9556 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9557 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9558 bfd_vma i1 = j1 ^ s ? 0 : 1;
9559 bfd_vma i2 = j2 ^ s ? 0 : 1;
9560
9561 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9562 /* Sign extend. */
9563 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9564
9565 signed_addend = addend;
9566 }
9567
9568 if (r_type == R_ARM_THM_XPC22)
9569 {
9570 /* Check for Thumb to Thumb call. */
9571 /* FIXME: Should we translate the instruction into a BL
9572 instruction instead ? */
9573 if (branch_type == ST_BRANCH_TO_THUMB)
9574 (*_bfd_error_handler)
9575 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9576 input_bfd,
9577 h ? h->root.root.string : "(local)");
9578 }
9579 else
9580 {
9581 /* If it is not a call to Thumb, assume call to Arm.
9582 If it is a call relative to a section name, then it is not a
9583 function call at all, but rather a long jump. Calls through
9584 the PLT do not require stubs. */
9585 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9586 {
9587 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9588 {
9589 /* Convert BL to BLX. */
9590 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9591 }
9592 else if (( r_type != R_ARM_THM_CALL)
9593 && (r_type != R_ARM_THM_JUMP24))
9594 {
9595 if (elf32_thumb_to_arm_stub
9596 (info, sym_name, input_bfd, output_bfd, input_section,
9597 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9598 error_message))
9599 return bfd_reloc_ok;
9600 else
9601 return bfd_reloc_dangerous;
9602 }
9603 }
9604 else if (branch_type == ST_BRANCH_TO_THUMB
9605 && globals->use_blx
9606 && r_type == R_ARM_THM_CALL)
9607 {
9608 /* Make sure this is a BL. */
9609 lower_insn |= 0x1800;
9610 }
9611 }
9612
9613 enum elf32_arm_stub_type stub_type = arm_stub_none;
9614 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9615 {
9616 /* Check if a stub has to be inserted because the destination
9617 is too far. */
9618 struct elf32_arm_stub_hash_entry *stub_entry;
9619 struct elf32_arm_link_hash_entry *hash;
9620
9621 hash = (struct elf32_arm_link_hash_entry *) h;
9622
9623 stub_type = arm_type_of_stub (info, input_section, rel,
9624 st_type, &branch_type,
9625 hash, value, sym_sec,
9626 input_bfd, sym_name);
9627
9628 if (stub_type != arm_stub_none)
9629 {
9630 /* The target is out of reach or we are changing modes, so
9631 redirect the branch to the local stub for this
9632 function. */
9633 stub_entry = elf32_arm_get_stub_entry (input_section,
9634 sym_sec, h,
9635 rel, globals,
9636 stub_type);
9637 if (stub_entry != NULL)
9638 {
9639 value = (stub_entry->stub_offset
9640 + stub_entry->stub_sec->output_offset
9641 + stub_entry->stub_sec->output_section->vma);
9642
9643 if (plt_offset != (bfd_vma) -1)
9644 *unresolved_reloc_p = FALSE;
9645 }
9646
9647 /* If this call becomes a call to Arm, force BLX. */
9648 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9649 {
9650 if ((stub_entry
9651 && !arm_stub_is_thumb (stub_entry->stub_type))
9652 || branch_type != ST_BRANCH_TO_THUMB)
9653 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9654 }
9655 }
9656 }
9657
9658 /* Handle calls via the PLT. */
9659 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9660 {
9661 value = (splt->output_section->vma
9662 + splt->output_offset
9663 + plt_offset);
9664
9665 if (globals->use_blx
9666 && r_type == R_ARM_THM_CALL
9667 && ! using_thumb_only (globals))
9668 {
9669 /* If the Thumb BLX instruction is available, convert
9670 the BL to a BLX instruction to call the ARM-mode
9671 PLT entry. */
9672 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9673 branch_type = ST_BRANCH_TO_ARM;
9674 }
9675 else
9676 {
9677 if (! using_thumb_only (globals))
9678 /* Target the Thumb stub before the ARM PLT entry. */
9679 value -= PLT_THUMB_STUB_SIZE;
9680 branch_type = ST_BRANCH_TO_THUMB;
9681 }
9682 *unresolved_reloc_p = FALSE;
9683 }
9684
9685 relocation = value + signed_addend;
9686
9687 relocation -= (input_section->output_section->vma
9688 + input_section->output_offset
9689 + rel->r_offset);
9690
9691 check = relocation >> howto->rightshift;
9692
9693 /* If this is a signed value, the rightshift just dropped
9694 leading 1 bits (assuming twos complement). */
9695 if ((bfd_signed_vma) relocation >= 0)
9696 signed_check = check;
9697 else
9698 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
9699
9700 /* Calculate the permissible maximum and minimum values for
9701 this relocation according to whether we're relocating for
9702 Thumb-2 or not. */
9703 bitsize = howto->bitsize;
9704 if (!thumb2)
9705 bitsize -= 2;
9706 reloc_signed_max = (1 << (bitsize - 1)) - 1;
9707 reloc_signed_min = ~reloc_signed_max;
9708
9709 /* Assumes two's complement. */
9710 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9711 overflow = TRUE;
9712
9713 if ((lower_insn & 0x5000) == 0x4000)
9714 /* For a BLX instruction, make sure that the relocation is rounded up
9715 to a word boundary. This follows the semantics of the instruction
9716 which specifies that bit 1 of the target address will come from bit
9717 1 of the base address. */
9718 relocation = (relocation + 2) & ~ 3;
9719
9720 /* Put RELOCATION back into the insn. Assumes two's complement.
9721 We use the Thumb-2 encoding, which is safe even if dealing with
9722 a Thumb-1 instruction by virtue of our overflow check above. */
9723 reloc_sign = (signed_check < 0) ? 1 : 0;
9724 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
9725 | ((relocation >> 12) & 0x3ff)
9726 | (reloc_sign << 10);
9727 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
9728 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
9729 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
9730 | ((relocation >> 1) & 0x7ff);
9731
9732 /* Put the relocated value back in the object file: */
9733 bfd_put_16 (input_bfd, upper_insn, hit_data);
9734 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9735
9736 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9737 }
9738 break;
9739
9740 case R_ARM_THM_JUMP19:
9741 /* Thumb32 conditional branch instruction. */
9742 {
9743 bfd_vma relocation;
9744 bfd_boolean overflow = FALSE;
9745 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9746 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9747 bfd_signed_vma reloc_signed_max = 0xffffe;
9748 bfd_signed_vma reloc_signed_min = -0x100000;
9749 bfd_signed_vma signed_check;
9750 enum elf32_arm_stub_type stub_type = arm_stub_none;
9751 struct elf32_arm_stub_hash_entry *stub_entry;
9752 struct elf32_arm_link_hash_entry *hash;
9753
9754 /* Need to refetch the addend, reconstruct the top three bits,
9755 and squish the two 11 bit pieces together. */
9756 if (globals->use_rel)
9757 {
9758 bfd_vma S = (upper_insn & 0x0400) >> 10;
9759 bfd_vma upper = (upper_insn & 0x003f);
9760 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
9761 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
9762 bfd_vma lower = (lower_insn & 0x07ff);
9763
9764 upper |= J1 << 6;
9765 upper |= J2 << 7;
9766 upper |= (!S) << 8;
9767 upper -= 0x0100; /* Sign extend. */
9768
9769 addend = (upper << 12) | (lower << 1);
9770 signed_addend = addend;
9771 }
9772
9773 /* Handle calls via the PLT. */
9774 if (plt_offset != (bfd_vma) -1)
9775 {
9776 value = (splt->output_section->vma
9777 + splt->output_offset
9778 + plt_offset);
9779 /* Target the Thumb stub before the ARM PLT entry. */
9780 value -= PLT_THUMB_STUB_SIZE;
9781 *unresolved_reloc_p = FALSE;
9782 }
9783
9784 hash = (struct elf32_arm_link_hash_entry *)h;
9785
9786 stub_type = arm_type_of_stub (info, input_section, rel,
9787 st_type, &branch_type,
9788 hash, value, sym_sec,
9789 input_bfd, sym_name);
9790 if (stub_type != arm_stub_none)
9791 {
9792 stub_entry = elf32_arm_get_stub_entry (input_section,
9793 sym_sec, h,
9794 rel, globals,
9795 stub_type);
9796 if (stub_entry != NULL)
9797 {
9798 value = (stub_entry->stub_offset
9799 + stub_entry->stub_sec->output_offset
9800 + stub_entry->stub_sec->output_section->vma);
9801 }
9802 }
9803
9804 relocation = value + signed_addend;
9805 relocation -= (input_section->output_section->vma
9806 + input_section->output_offset
9807 + rel->r_offset);
9808 signed_check = (bfd_signed_vma) relocation;
9809
9810 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9811 overflow = TRUE;
9812
9813 /* Put RELOCATION back into the insn. */
9814 {
9815 bfd_vma S = (relocation & 0x00100000) >> 20;
9816 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9817 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9818 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9819 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9820
9821 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9822 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9823 }
9824
9825 /* Put the relocated value back in the object file: */
9826 bfd_put_16 (input_bfd, upper_insn, hit_data);
9827 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9828
9829 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9830 }
9831
9832 case R_ARM_THM_JUMP11:
9833 case R_ARM_THM_JUMP8:
9834 case R_ARM_THM_JUMP6:
9835 /* Thumb B (branch) instruction. */
9836 {
9837 bfd_signed_vma relocation;
9838 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9839 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9840 bfd_signed_vma signed_check;
9841
9842 /* CZB cannot jump backward. */
9843 if (r_type == R_ARM_THM_JUMP6)
9844 reloc_signed_min = 0;
9845
9846 if (globals->use_rel)
9847 {
9848 /* Need to refetch addend. */
9849 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9850 if (addend & ((howto->src_mask + 1) >> 1))
9851 {
9852 signed_addend = -1;
9853 signed_addend &= ~ howto->src_mask;
9854 signed_addend |= addend;
9855 }
9856 else
9857 signed_addend = addend;
9858 /* The value in the insn has been right shifted. We need to
9859 undo this, so that we can perform the address calculation
9860 in terms of bytes. */
9861 signed_addend <<= howto->rightshift;
9862 }
9863 relocation = value + signed_addend;
9864
9865 relocation -= (input_section->output_section->vma
9866 + input_section->output_offset
9867 + rel->r_offset);
9868
9869 relocation >>= howto->rightshift;
9870 signed_check = relocation;
9871
9872 if (r_type == R_ARM_THM_JUMP6)
9873 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9874 else
9875 relocation &= howto->dst_mask;
9876 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9877
9878 bfd_put_16 (input_bfd, relocation, hit_data);
9879
9880 /* Assumes two's complement. */
9881 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9882 return bfd_reloc_overflow;
9883
9884 return bfd_reloc_ok;
9885 }
9886
9887 case R_ARM_ALU_PCREL7_0:
9888 case R_ARM_ALU_PCREL15_8:
9889 case R_ARM_ALU_PCREL23_15:
9890 {
9891 bfd_vma insn;
9892 bfd_vma relocation;
9893
9894 insn = bfd_get_32 (input_bfd, hit_data);
9895 if (globals->use_rel)
9896 {
9897 /* Extract the addend. */
9898 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9899 signed_addend = addend;
9900 }
9901 relocation = value + signed_addend;
9902
9903 relocation -= (input_section->output_section->vma
9904 + input_section->output_offset
9905 + rel->r_offset);
9906 insn = (insn & ~0xfff)
9907 | ((howto->bitpos << 7) & 0xf00)
9908 | ((relocation >> howto->bitpos) & 0xff);
9909 bfd_put_32 (input_bfd, value, hit_data);
9910 }
9911 return bfd_reloc_ok;
9912
9913 case R_ARM_GNU_VTINHERIT:
9914 case R_ARM_GNU_VTENTRY:
9915 return bfd_reloc_ok;
9916
9917 case R_ARM_GOTOFF32:
9918 /* Relocation is relative to the start of the
9919 global offset table. */
9920
9921 BFD_ASSERT (sgot != NULL);
9922 if (sgot == NULL)
9923 return bfd_reloc_notsupported;
9924
9925 /* If we are addressing a Thumb function, we need to adjust the
9926 address by one, so that attempts to call the function pointer will
9927 correctly interpret it as Thumb code. */
9928 if (branch_type == ST_BRANCH_TO_THUMB)
9929 value += 1;
9930
9931 /* Note that sgot->output_offset is not involved in this
9932 calculation. We always want the start of .got. If we
9933 define _GLOBAL_OFFSET_TABLE in a different way, as is
9934 permitted by the ABI, we might have to change this
9935 calculation. */
9936 value -= sgot->output_section->vma;
9937 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9938 contents, rel->r_offset, value,
9939 rel->r_addend);
9940
9941 case R_ARM_GOTPC:
9942 /* Use global offset table as symbol value. */
9943 BFD_ASSERT (sgot != NULL);
9944
9945 if (sgot == NULL)
9946 return bfd_reloc_notsupported;
9947
9948 *unresolved_reloc_p = FALSE;
9949 value = sgot->output_section->vma;
9950 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9951 contents, rel->r_offset, value,
9952 rel->r_addend);
9953
9954 case R_ARM_GOT32:
9955 case R_ARM_GOT_PREL:
9956 /* Relocation is to the entry for this symbol in the
9957 global offset table. */
9958 if (sgot == NULL)
9959 return bfd_reloc_notsupported;
9960
9961 if (dynreloc_st_type == STT_GNU_IFUNC
9962 && plt_offset != (bfd_vma) -1
9963 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9964 {
9965 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9966 symbol, and the relocation resolves directly to the runtime
9967 target rather than to the .iplt entry. This means that any
9968 .got entry would be the same value as the .igot.plt entry,
9969 so there's no point creating both. */
9970 sgot = globals->root.igotplt;
9971 value = sgot->output_offset + gotplt_offset;
9972 }
9973 else if (h != NULL)
9974 {
9975 bfd_vma off;
9976
9977 off = h->got.offset;
9978 BFD_ASSERT (off != (bfd_vma) -1);
9979 if ((off & 1) != 0)
9980 {
9981 /* We have already processed one GOT relocation against
9982 this symbol. */
9983 off &= ~1;
9984 if (globals->root.dynamic_sections_created
9985 && !SYMBOL_REFERENCES_LOCAL (info, h))
9986 *unresolved_reloc_p = FALSE;
9987 }
9988 else
9989 {
9990 Elf_Internal_Rela outrel;
9991
9992 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
9993 {
9994 /* If the symbol doesn't resolve locally in a static
9995 object, we have an undefined reference. If the
9996 symbol doesn't resolve locally in a dynamic object,
9997 it should be resolved by the dynamic linker. */
9998 if (globals->root.dynamic_sections_created)
9999 {
10000 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
10001 *unresolved_reloc_p = FALSE;
10002 }
10003 else
10004 outrel.r_info = 0;
10005 outrel.r_addend = 0;
10006 }
10007 else
10008 {
10009 if (dynreloc_st_type == STT_GNU_IFUNC)
10010 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10011 else if (bfd_link_pic (info) &&
10012 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10013 || h->root.type != bfd_link_hash_undefweak))
10014 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10015 else
10016 outrel.r_info = 0;
10017 outrel.r_addend = dynreloc_value;
10018 }
10019
10020 /* The GOT entry is initialized to zero by default.
10021 See if we should install a different value. */
10022 if (outrel.r_addend != 0
10023 && (outrel.r_info == 0 || globals->use_rel))
10024 {
10025 bfd_put_32 (output_bfd, outrel.r_addend,
10026 sgot->contents + off);
10027 outrel.r_addend = 0;
10028 }
10029
10030 if (outrel.r_info != 0)
10031 {
10032 outrel.r_offset = (sgot->output_section->vma
10033 + sgot->output_offset
10034 + off);
10035 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10036 }
10037 h->got.offset |= 1;
10038 }
10039 value = sgot->output_offset + off;
10040 }
10041 else
10042 {
10043 bfd_vma off;
10044
10045 BFD_ASSERT (local_got_offsets != NULL &&
10046 local_got_offsets[r_symndx] != (bfd_vma) -1);
10047
10048 off = local_got_offsets[r_symndx];
10049
10050 /* The offset must always be a multiple of 4. We use the
10051 least significant bit to record whether we have already
10052 generated the necessary reloc. */
10053 if ((off & 1) != 0)
10054 off &= ~1;
10055 else
10056 {
10057 if (globals->use_rel)
10058 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
10059
10060 if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
10061 {
10062 Elf_Internal_Rela outrel;
10063
10064 outrel.r_addend = addend + dynreloc_value;
10065 outrel.r_offset = (sgot->output_section->vma
10066 + sgot->output_offset
10067 + off);
10068 if (dynreloc_st_type == STT_GNU_IFUNC)
10069 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10070 else
10071 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10072 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10073 }
10074
10075 local_got_offsets[r_symndx] |= 1;
10076 }
10077
10078 value = sgot->output_offset + off;
10079 }
10080 if (r_type != R_ARM_GOT32)
10081 value += sgot->output_section->vma;
10082
10083 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10084 contents, rel->r_offset, value,
10085 rel->r_addend);
10086
10087 case R_ARM_TLS_LDO32:
10088 value = value - dtpoff_base (info);
10089
10090 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10091 contents, rel->r_offset, value,
10092 rel->r_addend);
10093
10094 case R_ARM_TLS_LDM32:
10095 {
10096 bfd_vma off;
10097
10098 if (sgot == NULL)
10099 abort ();
10100
10101 off = globals->tls_ldm_got.offset;
10102
10103 if ((off & 1) != 0)
10104 off &= ~1;
10105 else
10106 {
10107 /* If we don't know the module number, create a relocation
10108 for it. */
10109 if (bfd_link_pic (info))
10110 {
10111 Elf_Internal_Rela outrel;
10112
10113 if (srelgot == NULL)
10114 abort ();
10115
10116 outrel.r_addend = 0;
10117 outrel.r_offset = (sgot->output_section->vma
10118 + sgot->output_offset + off);
10119 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
10120
10121 if (globals->use_rel)
10122 bfd_put_32 (output_bfd, outrel.r_addend,
10123 sgot->contents + off);
10124
10125 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10126 }
10127 else
10128 bfd_put_32 (output_bfd, 1, sgot->contents + off);
10129
10130 globals->tls_ldm_got.offset |= 1;
10131 }
10132
10133 value = sgot->output_section->vma + sgot->output_offset + off
10134 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
10135
10136 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10137 contents, rel->r_offset, value,
10138 rel->r_addend);
10139 }
10140
10141 case R_ARM_TLS_CALL:
10142 case R_ARM_THM_TLS_CALL:
10143 case R_ARM_TLS_GD32:
10144 case R_ARM_TLS_IE32:
10145 case R_ARM_TLS_GOTDESC:
10146 case R_ARM_TLS_DESCSEQ:
10147 case R_ARM_THM_TLS_DESCSEQ:
10148 {
10149 bfd_vma off, offplt;
10150 int indx = 0;
10151 char tls_type;
10152
10153 BFD_ASSERT (sgot != NULL);
10154
10155 if (h != NULL)
10156 {
10157 bfd_boolean dyn;
10158 dyn = globals->root.dynamic_sections_created;
10159 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
10160 bfd_link_pic (info),
10161 h)
10162 && (!bfd_link_pic (info)
10163 || !SYMBOL_REFERENCES_LOCAL (info, h)))
10164 {
10165 *unresolved_reloc_p = FALSE;
10166 indx = h->dynindx;
10167 }
10168 off = h->got.offset;
10169 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
10170 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
10171 }
10172 else
10173 {
10174 BFD_ASSERT (local_got_offsets != NULL);
10175 off = local_got_offsets[r_symndx];
10176 offplt = local_tlsdesc_gotents[r_symndx];
10177 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
10178 }
10179
10180 /* Linker relaxations happens from one of the
10181 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10182 if (ELF32_R_TYPE(rel->r_info) != r_type)
10183 tls_type = GOT_TLS_IE;
10184
10185 BFD_ASSERT (tls_type != GOT_UNKNOWN);
10186
10187 if ((off & 1) != 0)
10188 off &= ~1;
10189 else
10190 {
10191 bfd_boolean need_relocs = FALSE;
10192 Elf_Internal_Rela outrel;
10193 int cur_off = off;
10194
10195 /* The GOT entries have not been initialized yet. Do it
10196 now, and emit any relocations. If both an IE GOT and a
10197 GD GOT are necessary, we emit the GD first. */
10198
10199 if ((bfd_link_pic (info) || indx != 0)
10200 && (h == NULL
10201 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10202 || h->root.type != bfd_link_hash_undefweak))
10203 {
10204 need_relocs = TRUE;
10205 BFD_ASSERT (srelgot != NULL);
10206 }
10207
10208 if (tls_type & GOT_TLS_GDESC)
10209 {
10210 bfd_byte *loc;
10211
10212 /* We should have relaxed, unless this is an undefined
10213 weak symbol. */
10214 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
10215 || bfd_link_pic (info));
10216 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
10217 <= globals->root.sgotplt->size);
10218
10219 outrel.r_addend = 0;
10220 outrel.r_offset = (globals->root.sgotplt->output_section->vma
10221 + globals->root.sgotplt->output_offset
10222 + offplt
10223 + globals->sgotplt_jump_table_size);
10224
10225 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
10226 sreloc = globals->root.srelplt;
10227 loc = sreloc->contents;
10228 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
10229 BFD_ASSERT (loc + RELOC_SIZE (globals)
10230 <= sreloc->contents + sreloc->size);
10231
10232 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
10233
10234 /* For globals, the first word in the relocation gets
10235 the relocation index and the top bit set, or zero,
10236 if we're binding now. For locals, it gets the
10237 symbol's offset in the tls section. */
10238 bfd_put_32 (output_bfd,
10239 !h ? value - elf_hash_table (info)->tls_sec->vma
10240 : info->flags & DF_BIND_NOW ? 0
10241 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
10242 globals->root.sgotplt->contents + offplt
10243 + globals->sgotplt_jump_table_size);
10244
10245 /* Second word in the relocation is always zero. */
10246 bfd_put_32 (output_bfd, 0,
10247 globals->root.sgotplt->contents + offplt
10248 + globals->sgotplt_jump_table_size + 4);
10249 }
10250 if (tls_type & GOT_TLS_GD)
10251 {
10252 if (need_relocs)
10253 {
10254 outrel.r_addend = 0;
10255 outrel.r_offset = (sgot->output_section->vma
10256 + sgot->output_offset
10257 + cur_off);
10258 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
10259
10260 if (globals->use_rel)
10261 bfd_put_32 (output_bfd, outrel.r_addend,
10262 sgot->contents + cur_off);
10263
10264 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10265
10266 if (indx == 0)
10267 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10268 sgot->contents + cur_off + 4);
10269 else
10270 {
10271 outrel.r_addend = 0;
10272 outrel.r_info = ELF32_R_INFO (indx,
10273 R_ARM_TLS_DTPOFF32);
10274 outrel.r_offset += 4;
10275
10276 if (globals->use_rel)
10277 bfd_put_32 (output_bfd, outrel.r_addend,
10278 sgot->contents + cur_off + 4);
10279
10280 elf32_arm_add_dynreloc (output_bfd, info,
10281 srelgot, &outrel);
10282 }
10283 }
10284 else
10285 {
10286 /* If we are not emitting relocations for a
10287 general dynamic reference, then we must be in a
10288 static link or an executable link with the
10289 symbol binding locally. Mark it as belonging
10290 to module 1, the executable. */
10291 bfd_put_32 (output_bfd, 1,
10292 sgot->contents + cur_off);
10293 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10294 sgot->contents + cur_off + 4);
10295 }
10296
10297 cur_off += 8;
10298 }
10299
10300 if (tls_type & GOT_TLS_IE)
10301 {
10302 if (need_relocs)
10303 {
10304 if (indx == 0)
10305 outrel.r_addend = value - dtpoff_base (info);
10306 else
10307 outrel.r_addend = 0;
10308 outrel.r_offset = (sgot->output_section->vma
10309 + sgot->output_offset
10310 + cur_off);
10311 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
10312
10313 if (globals->use_rel)
10314 bfd_put_32 (output_bfd, outrel.r_addend,
10315 sgot->contents + cur_off);
10316
10317 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10318 }
10319 else
10320 bfd_put_32 (output_bfd, tpoff (info, value),
10321 sgot->contents + cur_off);
10322 cur_off += 4;
10323 }
10324
10325 if (h != NULL)
10326 h->got.offset |= 1;
10327 else
10328 local_got_offsets[r_symndx] |= 1;
10329 }
10330
10331 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
10332 off += 8;
10333 else if (tls_type & GOT_TLS_GDESC)
10334 off = offplt;
10335
10336 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
10337 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
10338 {
10339 bfd_signed_vma offset;
10340 /* TLS stubs are arm mode. The original symbol is a
10341 data object, so branch_type is bogus. */
10342 branch_type = ST_BRANCH_TO_ARM;
10343 enum elf32_arm_stub_type stub_type
10344 = arm_type_of_stub (info, input_section, rel,
10345 st_type, &branch_type,
10346 (struct elf32_arm_link_hash_entry *)h,
10347 globals->tls_trampoline, globals->root.splt,
10348 input_bfd, sym_name);
10349
10350 if (stub_type != arm_stub_none)
10351 {
10352 struct elf32_arm_stub_hash_entry *stub_entry
10353 = elf32_arm_get_stub_entry
10354 (input_section, globals->root.splt, 0, rel,
10355 globals, stub_type);
10356 offset = (stub_entry->stub_offset
10357 + stub_entry->stub_sec->output_offset
10358 + stub_entry->stub_sec->output_section->vma);
10359 }
10360 else
10361 offset = (globals->root.splt->output_section->vma
10362 + globals->root.splt->output_offset
10363 + globals->tls_trampoline);
10364
10365 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
10366 {
10367 unsigned long inst;
10368
10369 offset -= (input_section->output_section->vma
10370 + input_section->output_offset
10371 + rel->r_offset + 8);
10372
10373 inst = offset >> 2;
10374 inst &= 0x00ffffff;
10375 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
10376 }
10377 else
10378 {
10379 /* Thumb blx encodes the offset in a complicated
10380 fashion. */
10381 unsigned upper_insn, lower_insn;
10382 unsigned neg;
10383
10384 offset -= (input_section->output_section->vma
10385 + input_section->output_offset
10386 + rel->r_offset + 4);
10387
10388 if (stub_type != arm_stub_none
10389 && arm_stub_is_thumb (stub_type))
10390 {
10391 lower_insn = 0xd000;
10392 }
10393 else
10394 {
10395 lower_insn = 0xc000;
10396 /* Round up the offset to a word boundary. */
10397 offset = (offset + 2) & ~2;
10398 }
10399
10400 neg = offset < 0;
10401 upper_insn = (0xf000
10402 | ((offset >> 12) & 0x3ff)
10403 | (neg << 10));
10404 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
10405 | (((!((offset >> 22) & 1)) ^ neg) << 11)
10406 | ((offset >> 1) & 0x7ff);
10407 bfd_put_16 (input_bfd, upper_insn, hit_data);
10408 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10409 return bfd_reloc_ok;
10410 }
10411 }
10412 /* These relocations needs special care, as besides the fact
10413 they point somewhere in .gotplt, the addend must be
10414 adjusted accordingly depending on the type of instruction
10415 we refer to. */
10416 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
10417 {
10418 unsigned long data, insn;
10419 unsigned thumb;
10420
10421 data = bfd_get_32 (input_bfd, hit_data);
10422 thumb = data & 1;
10423 data &= ~1u;
10424
10425 if (thumb)
10426 {
10427 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
10428 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10429 insn = (insn << 16)
10430 | bfd_get_16 (input_bfd,
10431 contents + rel->r_offset - data + 2);
10432 if ((insn & 0xf800c000) == 0xf000c000)
10433 /* bl/blx */
10434 value = -6;
10435 else if ((insn & 0xffffff00) == 0x4400)
10436 /* add */
10437 value = -5;
10438 else
10439 {
10440 (*_bfd_error_handler)
10441 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10442 input_bfd, input_section,
10443 (unsigned long)rel->r_offset, insn);
10444 return bfd_reloc_notsupported;
10445 }
10446 }
10447 else
10448 {
10449 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
10450
10451 switch (insn >> 24)
10452 {
10453 case 0xeb: /* bl */
10454 case 0xfa: /* blx */
10455 value = -4;
10456 break;
10457
10458 case 0xe0: /* add */
10459 value = -8;
10460 break;
10461
10462 default:
10463 (*_bfd_error_handler)
10464 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10465 input_bfd, input_section,
10466 (unsigned long)rel->r_offset, insn);
10467 return bfd_reloc_notsupported;
10468 }
10469 }
10470
10471 value += ((globals->root.sgotplt->output_section->vma
10472 + globals->root.sgotplt->output_offset + off)
10473 - (input_section->output_section->vma
10474 + input_section->output_offset
10475 + rel->r_offset)
10476 + globals->sgotplt_jump_table_size);
10477 }
10478 else
10479 value = ((globals->root.sgot->output_section->vma
10480 + globals->root.sgot->output_offset + off)
10481 - (input_section->output_section->vma
10482 + input_section->output_offset + rel->r_offset));
10483
10484 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10485 contents, rel->r_offset, value,
10486 rel->r_addend);
10487 }
10488
10489 case R_ARM_TLS_LE32:
10490 if (bfd_link_dll (info))
10491 {
10492 (*_bfd_error_handler)
10493 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10494 input_bfd, input_section,
10495 (long) rel->r_offset, howto->name);
10496 return bfd_reloc_notsupported;
10497 }
10498 else
10499 value = tpoff (info, value);
10500
10501 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10502 contents, rel->r_offset, value,
10503 rel->r_addend);
10504
10505 case R_ARM_V4BX:
10506 if (globals->fix_v4bx)
10507 {
10508 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10509
10510 /* Ensure that we have a BX instruction. */
10511 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
10512
10513 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
10514 {
10515 /* Branch to veneer. */
10516 bfd_vma glue_addr;
10517 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
10518 glue_addr -= input_section->output_section->vma
10519 + input_section->output_offset
10520 + rel->r_offset + 8;
10521 insn = (insn & 0xf0000000) | 0x0a000000
10522 | ((glue_addr >> 2) & 0x00ffffff);
10523 }
10524 else
10525 {
10526 /* Preserve Rm (lowest four bits) and the condition code
10527 (highest four bits). Other bits encode MOV PC,Rm. */
10528 insn = (insn & 0xf000000f) | 0x01a0f000;
10529 }
10530
10531 bfd_put_32 (input_bfd, insn, hit_data);
10532 }
10533 return bfd_reloc_ok;
10534
10535 case R_ARM_MOVW_ABS_NC:
10536 case R_ARM_MOVT_ABS:
10537 case R_ARM_MOVW_PREL_NC:
10538 case R_ARM_MOVT_PREL:
10539 /* Until we properly support segment-base-relative addressing then
10540 we assume the segment base to be zero, as for the group relocations.
10541 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10542 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10543 case R_ARM_MOVW_BREL_NC:
10544 case R_ARM_MOVW_BREL:
10545 case R_ARM_MOVT_BREL:
10546 {
10547 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10548
10549 if (globals->use_rel)
10550 {
10551 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10552 signed_addend = (addend ^ 0x8000) - 0x8000;
10553 }
10554
10555 value += signed_addend;
10556
10557 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
10558 value -= (input_section->output_section->vma
10559 + input_section->output_offset + rel->r_offset);
10560
10561 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
10562 return bfd_reloc_overflow;
10563
10564 if (branch_type == ST_BRANCH_TO_THUMB)
10565 value |= 1;
10566
10567 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
10568 || r_type == R_ARM_MOVT_BREL)
10569 value >>= 16;
10570
10571 insn &= 0xfff0f000;
10572 insn |= value & 0xfff;
10573 insn |= (value & 0xf000) << 4;
10574 bfd_put_32 (input_bfd, insn, hit_data);
10575 }
10576 return bfd_reloc_ok;
10577
10578 case R_ARM_THM_MOVW_ABS_NC:
10579 case R_ARM_THM_MOVT_ABS:
10580 case R_ARM_THM_MOVW_PREL_NC:
10581 case R_ARM_THM_MOVT_PREL:
10582 /* Until we properly support segment-base-relative addressing then
10583 we assume the segment base to be zero, as for the above relocations.
10584 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10585 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10586 as R_ARM_THM_MOVT_ABS. */
10587 case R_ARM_THM_MOVW_BREL_NC:
10588 case R_ARM_THM_MOVW_BREL:
10589 case R_ARM_THM_MOVT_BREL:
10590 {
10591 bfd_vma insn;
10592
10593 insn = bfd_get_16 (input_bfd, hit_data) << 16;
10594 insn |= bfd_get_16 (input_bfd, hit_data + 2);
10595
10596 if (globals->use_rel)
10597 {
10598 addend = ((insn >> 4) & 0xf000)
10599 | ((insn >> 15) & 0x0800)
10600 | ((insn >> 4) & 0x0700)
10601 | (insn & 0x00ff);
10602 signed_addend = (addend ^ 0x8000) - 0x8000;
10603 }
10604
10605 value += signed_addend;
10606
10607 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
10608 value -= (input_section->output_section->vma
10609 + input_section->output_offset + rel->r_offset);
10610
10611 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
10612 return bfd_reloc_overflow;
10613
10614 if (branch_type == ST_BRANCH_TO_THUMB)
10615 value |= 1;
10616
10617 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
10618 || r_type == R_ARM_THM_MOVT_BREL)
10619 value >>= 16;
10620
10621 insn &= 0xfbf08f00;
10622 insn |= (value & 0xf000) << 4;
10623 insn |= (value & 0x0800) << 15;
10624 insn |= (value & 0x0700) << 4;
10625 insn |= (value & 0x00ff);
10626
10627 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10628 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10629 }
10630 return bfd_reloc_ok;
10631
10632 case R_ARM_ALU_PC_G0_NC:
10633 case R_ARM_ALU_PC_G1_NC:
10634 case R_ARM_ALU_PC_G0:
10635 case R_ARM_ALU_PC_G1:
10636 case R_ARM_ALU_PC_G2:
10637 case R_ARM_ALU_SB_G0_NC:
10638 case R_ARM_ALU_SB_G1_NC:
10639 case R_ARM_ALU_SB_G0:
10640 case R_ARM_ALU_SB_G1:
10641 case R_ARM_ALU_SB_G2:
10642 {
10643 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10644 bfd_vma pc = input_section->output_section->vma
10645 + input_section->output_offset + rel->r_offset;
10646 /* sb is the origin of the *segment* containing the symbol. */
10647 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10648 bfd_vma residual;
10649 bfd_vma g_n;
10650 bfd_signed_vma signed_value;
10651 int group = 0;
10652
10653 /* Determine which group of bits to select. */
10654 switch (r_type)
10655 {
10656 case R_ARM_ALU_PC_G0_NC:
10657 case R_ARM_ALU_PC_G0:
10658 case R_ARM_ALU_SB_G0_NC:
10659 case R_ARM_ALU_SB_G0:
10660 group = 0;
10661 break;
10662
10663 case R_ARM_ALU_PC_G1_NC:
10664 case R_ARM_ALU_PC_G1:
10665 case R_ARM_ALU_SB_G1_NC:
10666 case R_ARM_ALU_SB_G1:
10667 group = 1;
10668 break;
10669
10670 case R_ARM_ALU_PC_G2:
10671 case R_ARM_ALU_SB_G2:
10672 group = 2;
10673 break;
10674
10675 default:
10676 abort ();
10677 }
10678
10679 /* If REL, extract the addend from the insn. If RELA, it will
10680 have already been fetched for us. */
10681 if (globals->use_rel)
10682 {
10683 int negative;
10684 bfd_vma constant = insn & 0xff;
10685 bfd_vma rotation = (insn & 0xf00) >> 8;
10686
10687 if (rotation == 0)
10688 signed_addend = constant;
10689 else
10690 {
10691 /* Compensate for the fact that in the instruction, the
10692 rotation is stored in multiples of 2 bits. */
10693 rotation *= 2;
10694
10695 /* Rotate "constant" right by "rotation" bits. */
10696 signed_addend = (constant >> rotation) |
10697 (constant << (8 * sizeof (bfd_vma) - rotation));
10698 }
10699
10700 /* Determine if the instruction is an ADD or a SUB.
10701 (For REL, this determines the sign of the addend.) */
10702 negative = identify_add_or_sub (insn);
10703 if (negative == 0)
10704 {
10705 (*_bfd_error_handler)
10706 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10707 input_bfd, input_section,
10708 (long) rel->r_offset, howto->name);
10709 return bfd_reloc_overflow;
10710 }
10711
10712 signed_addend *= negative;
10713 }
10714
10715 /* Compute the value (X) to go in the place. */
10716 if (r_type == R_ARM_ALU_PC_G0_NC
10717 || r_type == R_ARM_ALU_PC_G1_NC
10718 || r_type == R_ARM_ALU_PC_G0
10719 || r_type == R_ARM_ALU_PC_G1
10720 || r_type == R_ARM_ALU_PC_G2)
10721 /* PC relative. */
10722 signed_value = value - pc + signed_addend;
10723 else
10724 /* Section base relative. */
10725 signed_value = value - sb + signed_addend;
10726
10727 /* If the target symbol is a Thumb function, then set the
10728 Thumb bit in the address. */
10729 if (branch_type == ST_BRANCH_TO_THUMB)
10730 signed_value |= 1;
10731
10732 /* Calculate the value of the relevant G_n, in encoded
10733 constant-with-rotation format. */
10734 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10735 group, &residual);
10736
10737 /* Check for overflow if required. */
10738 if ((r_type == R_ARM_ALU_PC_G0
10739 || r_type == R_ARM_ALU_PC_G1
10740 || r_type == R_ARM_ALU_PC_G2
10741 || r_type == R_ARM_ALU_SB_G0
10742 || r_type == R_ARM_ALU_SB_G1
10743 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
10744 {
10745 (*_bfd_error_handler)
10746 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10747 input_bfd, input_section,
10748 (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
10749 howto->name);
10750 return bfd_reloc_overflow;
10751 }
10752
10753 /* Mask out the value and the ADD/SUB part of the opcode; take care
10754 not to destroy the S bit. */
10755 insn &= 0xff1ff000;
10756
10757 /* Set the opcode according to whether the value to go in the
10758 place is negative. */
10759 if (signed_value < 0)
10760 insn |= 1 << 22;
10761 else
10762 insn |= 1 << 23;
10763
10764 /* Encode the offset. */
10765 insn |= g_n;
10766
10767 bfd_put_32 (input_bfd, insn, hit_data);
10768 }
10769 return bfd_reloc_ok;
10770
10771 case R_ARM_LDR_PC_G0:
10772 case R_ARM_LDR_PC_G1:
10773 case R_ARM_LDR_PC_G2:
10774 case R_ARM_LDR_SB_G0:
10775 case R_ARM_LDR_SB_G1:
10776 case R_ARM_LDR_SB_G2:
10777 {
10778 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10779 bfd_vma pc = input_section->output_section->vma
10780 + input_section->output_offset + rel->r_offset;
10781 /* sb is the origin of the *segment* containing the symbol. */
10782 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10783 bfd_vma residual;
10784 bfd_signed_vma signed_value;
10785 int group = 0;
10786
10787 /* Determine which groups of bits to calculate. */
10788 switch (r_type)
10789 {
10790 case R_ARM_LDR_PC_G0:
10791 case R_ARM_LDR_SB_G0:
10792 group = 0;
10793 break;
10794
10795 case R_ARM_LDR_PC_G1:
10796 case R_ARM_LDR_SB_G1:
10797 group = 1;
10798 break;
10799
10800 case R_ARM_LDR_PC_G2:
10801 case R_ARM_LDR_SB_G2:
10802 group = 2;
10803 break;
10804
10805 default:
10806 abort ();
10807 }
10808
10809 /* If REL, extract the addend from the insn. If RELA, it will
10810 have already been fetched for us. */
10811 if (globals->use_rel)
10812 {
10813 int negative = (insn & (1 << 23)) ? 1 : -1;
10814 signed_addend = negative * (insn & 0xfff);
10815 }
10816
10817 /* Compute the value (X) to go in the place. */
10818 if (r_type == R_ARM_LDR_PC_G0
10819 || r_type == R_ARM_LDR_PC_G1
10820 || r_type == R_ARM_LDR_PC_G2)
10821 /* PC relative. */
10822 signed_value = value - pc + signed_addend;
10823 else
10824 /* Section base relative. */
10825 signed_value = value - sb + signed_addend;
10826
10827 /* Calculate the value of the relevant G_{n-1} to obtain
10828 the residual at that stage. */
10829 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10830 group - 1, &residual);
10831
10832 /* Check for overflow. */
10833 if (residual >= 0x1000)
10834 {
10835 (*_bfd_error_handler)
10836 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10837 input_bfd, input_section,
10838 (long) rel->r_offset, labs (signed_value), howto->name);
10839 return bfd_reloc_overflow;
10840 }
10841
10842 /* Mask out the value and U bit. */
10843 insn &= 0xff7ff000;
10844
10845 /* Set the U bit if the value to go in the place is non-negative. */
10846 if (signed_value >= 0)
10847 insn |= 1 << 23;
10848
10849 /* Encode the offset. */
10850 insn |= residual;
10851
10852 bfd_put_32 (input_bfd, insn, hit_data);
10853 }
10854 return bfd_reloc_ok;
10855
10856 case R_ARM_LDRS_PC_G0:
10857 case R_ARM_LDRS_PC_G1:
10858 case R_ARM_LDRS_PC_G2:
10859 case R_ARM_LDRS_SB_G0:
10860 case R_ARM_LDRS_SB_G1:
10861 case R_ARM_LDRS_SB_G2:
10862 {
10863 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10864 bfd_vma pc = input_section->output_section->vma
10865 + input_section->output_offset + rel->r_offset;
10866 /* sb is the origin of the *segment* containing the symbol. */
10867 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10868 bfd_vma residual;
10869 bfd_signed_vma signed_value;
10870 int group = 0;
10871
10872 /* Determine which groups of bits to calculate. */
10873 switch (r_type)
10874 {
10875 case R_ARM_LDRS_PC_G0:
10876 case R_ARM_LDRS_SB_G0:
10877 group = 0;
10878 break;
10879
10880 case R_ARM_LDRS_PC_G1:
10881 case R_ARM_LDRS_SB_G1:
10882 group = 1;
10883 break;
10884
10885 case R_ARM_LDRS_PC_G2:
10886 case R_ARM_LDRS_SB_G2:
10887 group = 2;
10888 break;
10889
10890 default:
10891 abort ();
10892 }
10893
10894 /* If REL, extract the addend from the insn. If RELA, it will
10895 have already been fetched for us. */
10896 if (globals->use_rel)
10897 {
10898 int negative = (insn & (1 << 23)) ? 1 : -1;
10899 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10900 }
10901
10902 /* Compute the value (X) to go in the place. */
10903 if (r_type == R_ARM_LDRS_PC_G0
10904 || r_type == R_ARM_LDRS_PC_G1
10905 || r_type == R_ARM_LDRS_PC_G2)
10906 /* PC relative. */
10907 signed_value = value - pc + signed_addend;
10908 else
10909 /* Section base relative. */
10910 signed_value = value - sb + signed_addend;
10911
10912 /* Calculate the value of the relevant G_{n-1} to obtain
10913 the residual at that stage. */
10914 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10915 group - 1, &residual);
10916
10917 /* Check for overflow. */
10918 if (residual >= 0x100)
10919 {
10920 (*_bfd_error_handler)
10921 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10922 input_bfd, input_section,
10923 (long) rel->r_offset, labs (signed_value), howto->name);
10924 return bfd_reloc_overflow;
10925 }
10926
10927 /* Mask out the value and U bit. */
10928 insn &= 0xff7ff0f0;
10929
10930 /* Set the U bit if the value to go in the place is non-negative. */
10931 if (signed_value >= 0)
10932 insn |= 1 << 23;
10933
10934 /* Encode the offset. */
10935 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10936
10937 bfd_put_32 (input_bfd, insn, hit_data);
10938 }
10939 return bfd_reloc_ok;
10940
10941 case R_ARM_LDC_PC_G0:
10942 case R_ARM_LDC_PC_G1:
10943 case R_ARM_LDC_PC_G2:
10944 case R_ARM_LDC_SB_G0:
10945 case R_ARM_LDC_SB_G1:
10946 case R_ARM_LDC_SB_G2:
10947 {
10948 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10949 bfd_vma pc = input_section->output_section->vma
10950 + input_section->output_offset + rel->r_offset;
10951 /* sb is the origin of the *segment* containing the symbol. */
10952 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10953 bfd_vma residual;
10954 bfd_signed_vma signed_value;
10955 int group = 0;
10956
10957 /* Determine which groups of bits to calculate. */
10958 switch (r_type)
10959 {
10960 case R_ARM_LDC_PC_G0:
10961 case R_ARM_LDC_SB_G0:
10962 group = 0;
10963 break;
10964
10965 case R_ARM_LDC_PC_G1:
10966 case R_ARM_LDC_SB_G1:
10967 group = 1;
10968 break;
10969
10970 case R_ARM_LDC_PC_G2:
10971 case R_ARM_LDC_SB_G2:
10972 group = 2;
10973 break;
10974
10975 default:
10976 abort ();
10977 }
10978
10979 /* If REL, extract the addend from the insn. If RELA, it will
10980 have already been fetched for us. */
10981 if (globals->use_rel)
10982 {
10983 int negative = (insn & (1 << 23)) ? 1 : -1;
10984 signed_addend = negative * ((insn & 0xff) << 2);
10985 }
10986
10987 /* Compute the value (X) to go in the place. */
10988 if (r_type == R_ARM_LDC_PC_G0
10989 || r_type == R_ARM_LDC_PC_G1
10990 || r_type == R_ARM_LDC_PC_G2)
10991 /* PC relative. */
10992 signed_value = value - pc + signed_addend;
10993 else
10994 /* Section base relative. */
10995 signed_value = value - sb + signed_addend;
10996
10997 /* Calculate the value of the relevant G_{n-1} to obtain
10998 the residual at that stage. */
10999 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11000 group - 1, &residual);
11001
11002 /* Check for overflow. (The absolute value to go in the place must be
11003 divisible by four and, after having been divided by four, must
11004 fit in eight bits.) */
11005 if ((residual & 0x3) != 0 || residual >= 0x400)
11006 {
11007 (*_bfd_error_handler)
11008 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11009 input_bfd, input_section,
11010 (long) rel->r_offset, labs (signed_value), howto->name);
11011 return bfd_reloc_overflow;
11012 }
11013
11014 /* Mask out the value and U bit. */
11015 insn &= 0xff7fff00;
11016
11017 /* Set the U bit if the value to go in the place is non-negative. */
11018 if (signed_value >= 0)
11019 insn |= 1 << 23;
11020
11021 /* Encode the offset. */
11022 insn |= residual >> 2;
11023
11024 bfd_put_32 (input_bfd, insn, hit_data);
11025 }
11026 return bfd_reloc_ok;
11027
11028 default:
11029 return bfd_reloc_notsupported;
11030 }
11031 }
11032
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.

   Used on REL-format objects, where the addend is stored in the place
   being relocated: the existing addend is extracted from the
   instruction at ADDRESS, INCREMENT is added to it, and the result is
   written back into the instruction's addend field.  */
static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      /* Thumb BL/B.W: the branch offset is split across two 16-bit
	 halfwords, 11 bits in each, and is stored in units of
	 halfwords (hence the << 1 when reconstructing it and the
	 >> 1 when re-encoding).  */
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reconstruct the byte offset from the two 11-bit fields.
	 NOTE(review): unlike the generic path below, this value is
	 not sign-extended before INCREMENT is added — presumably the
	 callers only use increments that keep the result in range;
	 confirm against the call sites.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;			/* Back to halfword units.  */

      /* Re-split the addend into the two instruction halves,
	 preserving the opcode bits (0xf800) of each half.  */
      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction: mask out the
	 addend field and manually sign-extend it from the top bit
	 of src_mask.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* Branch offsets are stored scaled down; convert the stored
	     addend back to bytes (shift left by the howto size) before
	     adding the byte INCREMENT, then drop the low bits that the
	     field does not store.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      /* Splice the updated addend back into the instruction,
	 leaving the non-addend bits untouched.  */
      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
11106
/* Nonzero if R_TYPE is any ARM TLS relocation, covering both the
   traditional GD/LD/IE/LE model and the GNU descriptor-based dialect
   (see IS_ARM_TLS_GNU_RELOC below).  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect
   (TLS-descriptor sequences and their call markers).  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11125
11126 /* Relocate an ARM ELF section. */
11127
11128 static bfd_boolean
11129 elf32_arm_relocate_section (bfd * output_bfd,
11130 struct bfd_link_info * info,
11131 bfd * input_bfd,
11132 asection * input_section,
11133 bfd_byte * contents,
11134 Elf_Internal_Rela * relocs,
11135 Elf_Internal_Sym * local_syms,
11136 asection ** local_sections)
11137 {
11138 Elf_Internal_Shdr *symtab_hdr;
11139 struct elf_link_hash_entry **sym_hashes;
11140 Elf_Internal_Rela *rel;
11141 Elf_Internal_Rela *relend;
11142 const char *name;
11143 struct elf32_arm_link_hash_table * globals;
11144
11145 globals = elf32_arm_hash_table (info);
11146 if (globals == NULL)
11147 return FALSE;
11148
11149 symtab_hdr = & elf_symtab_hdr (input_bfd);
11150 sym_hashes = elf_sym_hashes (input_bfd);
11151
11152 rel = relocs;
11153 relend = relocs + input_section->reloc_count;
11154 for (; rel < relend; rel++)
11155 {
11156 int r_type;
11157 reloc_howto_type * howto;
11158 unsigned long r_symndx;
11159 Elf_Internal_Sym * sym;
11160 asection * sec;
11161 struct elf_link_hash_entry * h;
11162 bfd_vma relocation;
11163 bfd_reloc_status_type r;
11164 arelent bfd_reloc;
11165 char sym_type;
11166 bfd_boolean unresolved_reloc = FALSE;
11167 char *error_message = NULL;
11168
11169 r_symndx = ELF32_R_SYM (rel->r_info);
11170 r_type = ELF32_R_TYPE (rel->r_info);
11171 r_type = arm_real_reloc_type (globals, r_type);
11172
11173 if ( r_type == R_ARM_GNU_VTENTRY
11174 || r_type == R_ARM_GNU_VTINHERIT)
11175 continue;
11176
11177 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
11178 howto = bfd_reloc.howto;
11179
11180 h = NULL;
11181 sym = NULL;
11182 sec = NULL;
11183
11184 if (r_symndx < symtab_hdr->sh_info)
11185 {
11186 sym = local_syms + r_symndx;
11187 sym_type = ELF32_ST_TYPE (sym->st_info);
11188 sec = local_sections[r_symndx];
11189
11190 /* An object file might have a reference to a local
11191 undefined symbol. This is a daft object file, but we
11192 should at least do something about it. V4BX & NONE
11193 relocations do not use the symbol and are explicitly
11194 allowed to use the undefined symbol, so allow those.
11195 Likewise for relocations against STN_UNDEF. */
11196 if (r_type != R_ARM_V4BX
11197 && r_type != R_ARM_NONE
11198 && r_symndx != STN_UNDEF
11199 && bfd_is_und_section (sec)
11200 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
11201 {
11202 if (!info->callbacks->undefined_symbol
11203 (info, bfd_elf_string_from_elf_section
11204 (input_bfd, symtab_hdr->sh_link, sym->st_name),
11205 input_bfd, input_section,
11206 rel->r_offset, TRUE))
11207 return FALSE;
11208 }
11209
11210 if (globals->use_rel)
11211 {
11212 relocation = (sec->output_section->vma
11213 + sec->output_offset
11214 + sym->st_value);
11215 if (!bfd_link_relocatable (info)
11216 && (sec->flags & SEC_MERGE)
11217 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11218 {
11219 asection *msec;
11220 bfd_vma addend, value;
11221
11222 switch (r_type)
11223 {
11224 case R_ARM_MOVW_ABS_NC:
11225 case R_ARM_MOVT_ABS:
11226 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11227 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
11228 addend = (addend ^ 0x8000) - 0x8000;
11229 break;
11230
11231 case R_ARM_THM_MOVW_ABS_NC:
11232 case R_ARM_THM_MOVT_ABS:
11233 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
11234 << 16;
11235 value |= bfd_get_16 (input_bfd,
11236 contents + rel->r_offset + 2);
11237 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
11238 | ((value & 0x04000000) >> 15);
11239 addend = (addend ^ 0x8000) - 0x8000;
11240 break;
11241
11242 default:
11243 if (howto->rightshift
11244 || (howto->src_mask & (howto->src_mask + 1)))
11245 {
11246 (*_bfd_error_handler)
11247 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11248 input_bfd, input_section,
11249 (long) rel->r_offset, howto->name);
11250 return FALSE;
11251 }
11252
11253 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11254
11255 /* Get the (signed) value from the instruction. */
11256 addend = value & howto->src_mask;
11257 if (addend & ((howto->src_mask + 1) >> 1))
11258 {
11259 bfd_signed_vma mask;
11260
11261 mask = -1;
11262 mask &= ~ howto->src_mask;
11263 addend |= mask;
11264 }
11265 break;
11266 }
11267
11268 msec = sec;
11269 addend =
11270 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
11271 - relocation;
11272 addend += msec->output_section->vma + msec->output_offset;
11273
11274 /* Cases here must match those in the preceding
11275 switch statement. */
11276 switch (r_type)
11277 {
11278 case R_ARM_MOVW_ABS_NC:
11279 case R_ARM_MOVT_ABS:
11280 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
11281 | (addend & 0xfff);
11282 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11283 break;
11284
11285 case R_ARM_THM_MOVW_ABS_NC:
11286 case R_ARM_THM_MOVT_ABS:
11287 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
11288 | (addend & 0xff) | ((addend & 0x0800) << 15);
11289 bfd_put_16 (input_bfd, value >> 16,
11290 contents + rel->r_offset);
11291 bfd_put_16 (input_bfd, value,
11292 contents + rel->r_offset + 2);
11293 break;
11294
11295 default:
11296 value = (value & ~ howto->dst_mask)
11297 | (addend & howto->dst_mask);
11298 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11299 break;
11300 }
11301 }
11302 }
11303 else
11304 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
11305 }
11306 else
11307 {
11308 bfd_boolean warned, ignored;
11309
11310 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
11311 r_symndx, symtab_hdr, sym_hashes,
11312 h, sec, relocation,
11313 unresolved_reloc, warned, ignored);
11314
11315 sym_type = h->type;
11316 }
11317
11318 if (sec != NULL && discarded_section (sec))
11319 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
11320 rel, 1, relend, howto, 0, contents);
11321
11322 if (bfd_link_relocatable (info))
11323 {
11324 /* This is a relocatable link. We don't have to change
11325 anything, unless the reloc is against a section symbol,
11326 in which case we have to adjust according to where the
11327 section symbol winds up in the output section. */
11328 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11329 {
11330 if (globals->use_rel)
11331 arm_add_to_rel (input_bfd, contents + rel->r_offset,
11332 howto, (bfd_signed_vma) sec->output_offset);
11333 else
11334 rel->r_addend += sec->output_offset;
11335 }
11336 continue;
11337 }
11338
11339 if (h != NULL)
11340 name = h->root.root.string;
11341 else
11342 {
11343 name = (bfd_elf_string_from_elf_section
11344 (input_bfd, symtab_hdr->sh_link, sym->st_name));
11345 if (name == NULL || *name == '\0')
11346 name = bfd_section_name (input_bfd, sec);
11347 }
11348
11349 if (r_symndx != STN_UNDEF
11350 && r_type != R_ARM_NONE
11351 && (h == NULL
11352 || h->root.type == bfd_link_hash_defined
11353 || h->root.type == bfd_link_hash_defweak)
11354 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
11355 {
11356 (*_bfd_error_handler)
11357 ((sym_type == STT_TLS
11358 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11359 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11360 input_bfd,
11361 input_section,
11362 (long) rel->r_offset,
11363 howto->name,
11364 name);
11365 }
11366
11367 /* We call elf32_arm_final_link_relocate unless we're completely
11368 done, i.e., the relaxation produced the final output we want,
11369 and we won't let anybody mess with it. Also, we have to do
11370 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11371 both in relaxed and non-relaxed cases. */
11372 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
11373 || (IS_ARM_TLS_GNU_RELOC (r_type)
11374 && !((h ? elf32_arm_hash_entry (h)->tls_type :
11375 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
11376 & GOT_TLS_GDESC)))
11377 {
11378 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
11379 contents, rel, h == NULL);
11380 /* This may have been marked unresolved because it came from
11381 a shared library. But we've just dealt with that. */
11382 unresolved_reloc = 0;
11383 }
11384 else
11385 r = bfd_reloc_continue;
11386
11387 if (r == bfd_reloc_continue)
11388 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
11389 input_section, contents, rel,
11390 relocation, info, sec, name, sym_type,
11391 (h ? h->target_internal
11392 : ARM_SYM_BRANCH_TYPE (sym)), h,
11393 &unresolved_reloc, &error_message);
11394
11395 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11396 because such sections are not SEC_ALLOC and thus ld.so will
11397 not process them. */
11398 if (unresolved_reloc
11399 && !((input_section->flags & SEC_DEBUGGING) != 0
11400 && h->def_dynamic)
11401 && _bfd_elf_section_offset (output_bfd, info, input_section,
11402 rel->r_offset) != (bfd_vma) -1)
11403 {
11404 (*_bfd_error_handler)
11405 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11406 input_bfd,
11407 input_section,
11408 (long) rel->r_offset,
11409 howto->name,
11410 h->root.root.string);
11411 return FALSE;
11412 }
11413
11414 if (r != bfd_reloc_ok)
11415 {
11416 switch (r)
11417 {
11418 case bfd_reloc_overflow:
11419 /* If the overflowing reloc was to an undefined symbol,
11420 we have already printed one error message and there
11421 is no point complaining again. */
11422 if ((! h ||
11423 h->root.type != bfd_link_hash_undefined)
11424 && (!((*info->callbacks->reloc_overflow)
11425 (info, (h ? &h->root : NULL), name, howto->name,
11426 (bfd_vma) 0, input_bfd, input_section,
11427 rel->r_offset))))
11428 return FALSE;
11429 break;
11430
11431 case bfd_reloc_undefined:
11432 if (!((*info->callbacks->undefined_symbol)
11433 (info, name, input_bfd, input_section,
11434 rel->r_offset, TRUE)))
11435 return FALSE;
11436 break;
11437
11438 case bfd_reloc_outofrange:
11439 error_message = _("out of range");
11440 goto common_error;
11441
11442 case bfd_reloc_notsupported:
11443 error_message = _("unsupported relocation");
11444 goto common_error;
11445
11446 case bfd_reloc_dangerous:
11447 /* error_message should already be set. */
11448 goto common_error;
11449
11450 default:
11451 error_message = _("unknown error");
11452 /* Fall through. */
11453
11454 common_error:
11455 BFD_ASSERT (error_message != NULL);
11456 if (!((*info->callbacks->reloc_dangerous)
11457 (info, error_message, input_bfd, input_section,
11458 rel->r_offset)))
11459 return FALSE;
11460 break;
11461 }
11462 }
11463 }
11464
11465 return TRUE;
11466 }
11467
11468 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11469 adds the edit to the start of the list. (The list must be built in order of
11470 ascending TINDEX: the function's callers are primarily responsible for
11471 maintaining that condition). */
11472
11473 static void
11474 add_unwind_table_edit (arm_unwind_table_edit **head,
11475 arm_unwind_table_edit **tail,
11476 arm_unwind_edit_type type,
11477 asection *linked_section,
11478 unsigned int tindex)
11479 {
11480 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11481 xmalloc (sizeof (arm_unwind_table_edit));
11482
11483 new_edit->type = type;
11484 new_edit->linked_section = linked_section;
11485 new_edit->index = tindex;
11486
11487 if (tindex > 0)
11488 {
11489 new_edit->next = NULL;
11490
11491 if (*tail)
11492 (*tail)->next = new_edit;
11493
11494 (*tail) = new_edit;
11495
11496 if (!*head)
11497 (*head) = new_edit;
11498 }
11499 else
11500 {
11501 new_edit->next = *head;
11502
11503 if (!*tail)
11504 *tail = new_edit;
11505
11506 *head = new_edit;
11507 }
11508 }
11509
11510 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11511
/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
11513 static void
11514 adjust_exidx_size(asection *exidx_sec, int adjust)
11515 {
11516 asection *out_sec;
11517
11518 if (!exidx_sec->rawsize)
11519 exidx_sec->rawsize = exidx_sec->size;
11520
11521 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11522 out_sec = exidx_sec->output_section;
11523 /* Adjust size of output section. */
11524 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11525 }
11526
11527 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11528 static void
11529 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11530 {
11531 struct _arm_elf_section_data *exidx_arm_data;
11532
11533 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11534 add_unwind_table_edit (
11535 &exidx_arm_data->u.exidx.unwind_edit_list,
11536 &exidx_arm_data->u.exidx.unwind_edit_tail,
11537 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11538
11539 adjust_exidx_size(exidx_sec, 8);
11540 }
11541
11542 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11543 made to those tables, such that:
11544
11545 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11546 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11547 codes which have been inlined into the index).
11548
11549 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11550
11551 The edits are applied when the tables are written
11552 (in elf32_arm_write_section). */
11553
bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  /* A non-empty text section follows one that had unwind info:
	     terminate the previous table with a CANTUNWIND entry.  */
	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      /* Scan the 8-byte table entries.  The second word of each entry
	 classifies it: 1 is EXIDX_CANTUNWIND, high bit set means inlined
	 unwind opcodes, anything else refers to an .ARM.extab entry.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide)
	    {
	      /* Record the entry (by table index) for deletion later, in
		 elf32_arm_write_section.  */
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (last_exidx_sec && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
11713
11714 static bfd_boolean
11715 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
11716 bfd *ibfd, const char *name)
11717 {
11718 asection *sec, *osec;
11719
11720 sec = bfd_get_linker_section (ibfd, name);
11721 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
11722 return TRUE;
11723
11724 osec = sec->output_section;
11725 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
11726 return TRUE;
11727
11728 if (! bfd_set_section_contents (obfd, osec, sec->contents,
11729 sec->output_offset, sec->size))
11730 return FALSE;
11731
11732 return TRUE;
11733 }
11734
11735 static bfd_boolean
11736 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
11737 {
11738 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
11739 asection *sec, *osec;
11740
11741 if (globals == NULL)
11742 return FALSE;
11743
11744 /* Invoke the regular ELF backend linker to do all the work. */
11745 if (!bfd_elf_final_link (abfd, info))
11746 return FALSE;
11747
11748 /* Process stub sections (eg BE8 encoding, ...). */
11749 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
11750 unsigned int i;
11751 for (i=0; i<htab->top_id; i++)
11752 {
11753 sec = htab->stub_group[i].stub_sec;
11754 /* Only process it once, in its link_sec slot. */
11755 if (sec && i == htab->stub_group[i].link_sec->id)
11756 {
11757 osec = sec->output_section;
11758 elf32_arm_write_section (abfd, info, sec, sec->contents);
11759 if (! bfd_set_section_contents (abfd, osec, sec->contents,
11760 sec->output_offset, sec->size))
11761 return FALSE;
11762 }
11763 }
11764
11765 /* Write out any glue sections now that we have created all the
11766 stubs. */
11767 if (globals->bfd_of_glue_owner != NULL)
11768 {
11769 if (! elf32_arm_output_glue_section (info, abfd,
11770 globals->bfd_of_glue_owner,
11771 ARM2THUMB_GLUE_SECTION_NAME))
11772 return FALSE;
11773
11774 if (! elf32_arm_output_glue_section (info, abfd,
11775 globals->bfd_of_glue_owner,
11776 THUMB2ARM_GLUE_SECTION_NAME))
11777 return FALSE;
11778
11779 if (! elf32_arm_output_glue_section (info, abfd,
11780 globals->bfd_of_glue_owner,
11781 VFP11_ERRATUM_VENEER_SECTION_NAME))
11782 return FALSE;
11783
11784 if (! elf32_arm_output_glue_section (info, abfd,
11785 globals->bfd_of_glue_owner,
11786 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
11787 return FALSE;
11788
11789 if (! elf32_arm_output_glue_section (info, abfd,
11790 globals->bfd_of_glue_owner,
11791 ARM_BX_GLUE_SECTION_NAME))
11792 return FALSE;
11793 }
11794
11795 return TRUE;
11796 }
11797
11798 /* Return a best guess for the machine number based on the attributes. */
11799
11800 static unsigned int
11801 bfd_arm_get_mach_from_attributes (bfd * abfd)
11802 {
11803 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
11804
11805 switch (arch)
11806 {
11807 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
11808 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
11809 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
11810
11811 case TAG_CPU_ARCH_V5TE:
11812 {
11813 char * name;
11814
11815 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
11816 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
11817
11818 if (name)
11819 {
11820 if (strcmp (name, "IWMMXT2") == 0)
11821 return bfd_mach_arm_iWMMXt2;
11822
11823 if (strcmp (name, "IWMMXT") == 0)
11824 return bfd_mach_arm_iWMMXt;
11825
11826 if (strcmp (name, "XSCALE") == 0)
11827 {
11828 int wmmx;
11829
11830 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
11831 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
11832 switch (wmmx)
11833 {
11834 case 1: return bfd_mach_arm_iWMMXt;
11835 case 2: return bfd_mach_arm_iWMMXt2;
11836 default: return bfd_mach_arm_XScale;
11837 }
11838 }
11839 }
11840
11841 return bfd_mach_arm_5TE;
11842 }
11843
11844 default:
11845 return bfd_mach_arm_unknown;
11846 }
11847 }
11848
11849 /* Set the right machine number. */
11850
11851 static bfd_boolean
11852 elf32_arm_object_p (bfd *abfd)
11853 {
11854 unsigned int mach;
11855
11856 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11857
11858 if (mach == bfd_mach_arm_unknown)
11859 {
11860 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11861 mach = bfd_mach_arm_ep9312;
11862 else
11863 mach = bfd_arm_get_mach_from_attributes (abfd);
11864 }
11865
11866 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11867 return TRUE;
11868 }
11869
11870 /* Function to keep ARM specific flags in the ELF header. */
11871
11872 static bfd_boolean
11873 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11874 {
11875 if (elf_flags_init (abfd)
11876 && elf_elfheader (abfd)->e_flags != flags)
11877 {
11878 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11879 {
11880 if (flags & EF_ARM_INTERWORK)
11881 (*_bfd_error_handler)
11882 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11883 abfd);
11884 else
11885 _bfd_error_handler
11886 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11887 abfd);
11888 }
11889 }
11890 else
11891 {
11892 elf_elfheader (abfd)->e_flags = flags;
11893 elf_flags_init (abfd) = TRUE;
11894 }
11895
11896 return TRUE;
11897 }
11898
11899 /* Copy backend specific data from one object module to another. */
11900
11901 static bfd_boolean
11902 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11903 {
11904 flagword in_flags;
11905 flagword out_flags;
11906
11907 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11908 return TRUE;
11909
11910 in_flags = elf_elfheader (ibfd)->e_flags;
11911 out_flags = elf_elfheader (obfd)->e_flags;
11912
11913 if (elf_flags_init (obfd)
11914 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11915 && in_flags != out_flags)
11916 {
11917 /* Cannot mix APCS26 and APCS32 code. */
11918 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11919 return FALSE;
11920
11921 /* Cannot mix float APCS and non-float APCS code. */
11922 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11923 return FALSE;
11924
11925 /* If the src and dest have different interworking flags
11926 then turn off the interworking bit. */
11927 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11928 {
11929 if (out_flags & EF_ARM_INTERWORK)
11930 _bfd_error_handler
11931 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11932 obfd, ibfd);
11933
11934 in_flags &= ~EF_ARM_INTERWORK;
11935 }
11936
11937 /* Likewise for PIC, though don't warn for this case. */
11938 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11939 in_flags &= ~EF_ARM_PIC;
11940 }
11941
11942 elf_elfheader (obfd)->e_flags = in_flags;
11943 elf_flags_init (obfd) = TRUE;
11944
11945 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
11946 }
11947
11948 /* Values for Tag_ABI_PCS_R9_use. */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary variable register (v6).  */
  AEABI_R9_SB,		/* R9 used as the static base (SB) register.  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used by the code at all.  */
};
11956
11957 /* Values for Tag_ABI_PCS_RW_data. */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW static data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW static data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW static data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No permanent RW static data used.  */
};
11965
11966 /* Values for Tag_ABI_enum_size. */
enum
{
  AEABI_enum_unused,		/* Enums not used, or size irrelevant.  */
  AEABI_enum_short,		/* Enums are as small as possible.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits wide.  */
  AEABI_enum_forced_wide	/* Enums are 32 bits, forced by the producer.  */
};
11974
11975 /* Determine whether an object attribute tag takes an integer, a
11976 string or both. */
11977
11978 static int
11979 elf32_arm_obj_attrs_arg_type (int tag)
11980 {
11981 if (tag == Tag_compatibility)
11982 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11983 else if (tag == Tag_nodefaults)
11984 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11985 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11986 return ATTR_TYPE_FLAG_STR_VAL;
11987 else if (tag < 32)
11988 return ATTR_TYPE_FLAG_INT_VAL;
11989 else
11990 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11991 }
11992
11993 /* The ABI defines that Tag_conformance should be emitted first, and that
11994 Tag_nodefaults should be second (if either is defined). This sets those
11995 two positions, and bumps up the position of all the remaining tags to
11996 compensate. */
11997 static int
11998 elf32_arm_obj_attrs_order (int num)
11999 {
12000 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12001 return Tag_conformance;
12002 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12003 return Tag_nodefaults;
12004 if ((num - 2) < Tag_nodefaults)
12005 return num - 2;
12006 if ((num - 1) < Tag_conformance)
12007 return num - 1;
12008 return num;
12009 }
12010
12011 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12012 static bfd_boolean
12013 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12014 {
12015 if ((tag & 127) < 64)
12016 {
12017 _bfd_error_handler
12018 (_("%B: Unknown mandatory EABI object attribute %d"),
12019 abfd, tag);
12020 bfd_set_error (bfd_error_bad_value);
12021 return FALSE;
12022 }
12023 else
12024 {
12025 _bfd_error_handler
12026 (_("Warning: %B: Unknown EABI object attribute %d"),
12027 abfd, tag);
12028 return TRUE;
12029 }
12030 }
12031
12032 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12033 Returns -1 if no architecture could be read. */
12034
12035 static int
12036 get_secondary_compatible_arch (bfd *abfd)
12037 {
12038 obj_attribute *attr =
12039 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12040
12041 /* Note: the tag and its argument below are uleb128 values, though
12042 currently-defined values fit in one byte for each. */
12043 if (attr->s
12044 && attr->s[0] == Tag_CPU_arch
12045 && (attr->s[1] & 128) != 128
12046 && attr->s[2] == 0)
12047 return attr->s[1];
12048
12049 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12050 return -1;
12051 }
12052
12053 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12054 The tag is removed if ARCH is -1. */
12055
12056 static void
12057 set_secondary_compatible_arch (bfd *abfd, int arch)
12058 {
12059 obj_attribute *attr =
12060 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12061
12062 if (arch == -1)
12063 {
12064 attr->s = NULL;
12065 return;
12066 }
12067
12068 /* Note: the tag and its argument below are uleb128 values, though
12069 currently-defined values fit in one byte for each. */
12070 if (!attr->s)
12071 attr->s = (char *) bfd_alloc (abfd, 3);
12072 attr->s[0] = Tag_CPU_arch;
12073 attr->s[1] = arch;
12074 attr->s[2] = '\0';
12075 }
12076
12077 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12078 into account. */
12079
static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vXX table below gives the Tag_CPU_arch value that results from
     combining architecture vXX with the architecture named in each row's
     comment; -1 marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V4T),    /* V4T.  */
      T(V5T),    /* V5T.  */
      T(V5TE),   /* V5TE.  */
      T(V5TEJ),  /* V5TEJ.  */
      T(V6),     /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V6T2),   /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M),   /* V6_M.  */
      T(V6S_M),  /* V6S_M.  */
      T(V7E_M),  /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V4T_PLUS_V6_M)  /* V4T plus V6_M.  */
    };
  /* Indexed by (higher tag - T(V6T2)); rows above in the same order.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* Here tagh >= T(V6T2), so the comb[] index is non-negative.  */
  result = comb[tagh - T(V6T2)][tagl];

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
12273
12274 /* Query attributes object to see if integer divide instructions may be
12275 present in an object. */
12276 static bfd_boolean
12277 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12278 {
12279 int arch = attr[Tag_CPU_arch].i;
12280 int profile = attr[Tag_CPU_arch_profile].i;
12281
12282 switch (attr[Tag_DIV_use].i)
12283 {
12284 case 0:
12285 /* Integer divide allowed if instruction contained in archetecture. */
12286 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12287 return TRUE;
12288 else if (arch >= TAG_CPU_ARCH_V7E_M)
12289 return TRUE;
12290 else
12291 return FALSE;
12292
12293 case 1:
12294 /* Integer divide explicitly prohibited. */
12295 return FALSE;
12296
12297 default:
12298 /* Unrecognised case - treat as allowing divide everywhere. */
12299 case 2:
12300 /* Integer divide allowed in ARM state. */
12301 return TRUE;
12302 }
12303 }
12304
12305 /* Query attributes object to see if integer divide instructions are
12306 forbidden to be in the object. This is not the inverse of
12307 elf32_arm_attributes_accept_div. */
12308 static bfd_boolean
12309 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12310 {
12311 return attr[Tag_DIV_use].i == 1;
12312 }
12313
12314 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12315 are conflicting attributes. */
12316
12317 static bfd_boolean
12318 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12319 {
12320 obj_attribute *in_attr;
12321 obj_attribute *out_attr;
12322 /* Some tags have 0 = don't care, 1 = strong requirement,
12323 2 = weak requirement. */
12324 static const int order_021[3] = {0, 2, 1};
12325 int i;
12326 bfd_boolean result = TRUE;
12327 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12328
12329 /* Skip the linker stubs file. This preserves previous behavior
12330 of accepting unknown attributes in the first input file - but
12331 is that a bug? */
12332 if (ibfd->flags & BFD_LINKER_CREATED)
12333 return TRUE;
12334
12335 /* Skip any input that hasn't attribute section.
12336 This enables to link object files without attribute section with
12337 any others. */
12338 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12339 return TRUE;
12340
12341 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12342 {
12343 /* This is the first object. Copy the attributes. */
12344 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12345
12346 out_attr = elf_known_obj_attributes_proc (obfd);
12347
12348 /* Use the Tag_null value to indicate the attributes have been
12349 initialized. */
12350 out_attr[0].i = 1;
12351
12352 /* We do not output objects with Tag_MPextension_use_legacy - we move
12353 the attribute's value to Tag_MPextension_use. */
12354 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12355 {
12356 if (out_attr[Tag_MPextension_use].i != 0
12357 && out_attr[Tag_MPextension_use_legacy].i
12358 != out_attr[Tag_MPextension_use].i)
12359 {
12360 _bfd_error_handler
12361 (_("Error: %B has both the current and legacy "
12362 "Tag_MPextension_use attributes"), ibfd);
12363 result = FALSE;
12364 }
12365
12366 out_attr[Tag_MPextension_use] =
12367 out_attr[Tag_MPextension_use_legacy];
12368 out_attr[Tag_MPextension_use_legacy].type = 0;
12369 out_attr[Tag_MPextension_use_legacy].i = 0;
12370 }
12371
12372 return result;
12373 }
12374
12375 in_attr = elf_known_obj_attributes_proc (ibfd);
12376 out_attr = elf_known_obj_attributes_proc (obfd);
12377 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12378 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12379 {
12380 /* Ignore mismatches if the object doesn't use floating point or is
12381 floating point ABI independent. */
12382 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12383 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12384 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12385 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12386 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12387 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12388 {
12389 _bfd_error_handler
12390 (_("error: %B uses VFP register arguments, %B does not"),
12391 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12392 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12393 result = FALSE;
12394 }
12395 }
12396
12397 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12398 {
12399 /* Merge this attribute with existing attributes. */
12400 switch (i)
12401 {
12402 case Tag_CPU_raw_name:
12403 case Tag_CPU_name:
12404 /* These are merged after Tag_CPU_arch. */
12405 break;
12406
12407 case Tag_ABI_optimization_goals:
12408 case Tag_ABI_FP_optimization_goals:
12409 /* Use the first value seen. */
12410 break;
12411
12412 case Tag_CPU_arch:
12413 {
12414 int secondary_compat = -1, secondary_compat_out = -1;
12415 unsigned int saved_out_attr = out_attr[i].i;
12416 int arch_attr;
12417 static const char *name_table[] =
12418 {
12419 /* These aren't real CPU names, but we can't guess
12420 that from the architecture version alone. */
12421 "Pre v4",
12422 "ARM v4",
12423 "ARM v4T",
12424 "ARM v5T",
12425 "ARM v5TE",
12426 "ARM v5TEJ",
12427 "ARM v6",
12428 "ARM v6KZ",
12429 "ARM v6T2",
12430 "ARM v6K",
12431 "ARM v7",
12432 "ARM v6-M",
12433 "ARM v6S-M",
12434 "ARM v8"
12435 };
12436
12437 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12438 secondary_compat = get_secondary_compatible_arch (ibfd);
12439 secondary_compat_out = get_secondary_compatible_arch (obfd);
12440 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12441 &secondary_compat_out,
12442 in_attr[i].i,
12443 secondary_compat);
12444
12445 /* Return with error if failed to merge. */
12446 if (arch_attr == -1)
12447 return FALSE;
12448
12449 out_attr[i].i = arch_attr;
12450
12451 set_secondary_compatible_arch (obfd, secondary_compat_out);
12452
12453 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12454 if (out_attr[i].i == saved_out_attr)
12455 ; /* Leave the names alone. */
12456 else if (out_attr[i].i == in_attr[i].i)
12457 {
12458 /* The output architecture has been changed to match the
12459 input architecture. Use the input names. */
12460 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12461 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12462 : NULL;
12463 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12464 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12465 : NULL;
12466 }
12467 else
12468 {
12469 out_attr[Tag_CPU_name].s = NULL;
12470 out_attr[Tag_CPU_raw_name].s = NULL;
12471 }
12472
12473 /* If we still don't have a value for Tag_CPU_name,
12474 make one up now. Tag_CPU_raw_name remains blank. */
12475 if (out_attr[Tag_CPU_name].s == NULL
12476 && out_attr[i].i < ARRAY_SIZE (name_table))
12477 out_attr[Tag_CPU_name].s =
12478 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12479 }
12480 break;
12481
12482 case Tag_ARM_ISA_use:
12483 case Tag_THUMB_ISA_use:
12484 case Tag_WMMX_arch:
12485 case Tag_Advanced_SIMD_arch:
12486 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12487 case Tag_ABI_FP_rounding:
12488 case Tag_ABI_FP_exceptions:
12489 case Tag_ABI_FP_user_exceptions:
12490 case Tag_ABI_FP_number_model:
12491 case Tag_FP_HP_extension:
12492 case Tag_CPU_unaligned_access:
12493 case Tag_T2EE_use:
12494 case Tag_MPextension_use:
12495 /* Use the largest value specified. */
12496 if (in_attr[i].i > out_attr[i].i)
12497 out_attr[i].i = in_attr[i].i;
12498 break;
12499
12500 case Tag_ABI_align_preserved:
12501 case Tag_ABI_PCS_RO_data:
12502 /* Use the smallest value specified. */
12503 if (in_attr[i].i < out_attr[i].i)
12504 out_attr[i].i = in_attr[i].i;
12505 break;
12506
12507 case Tag_ABI_align_needed:
12508 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12509 && (in_attr[Tag_ABI_align_preserved].i == 0
12510 || out_attr[Tag_ABI_align_preserved].i == 0))
12511 {
12512 /* This error message should be enabled once all non-conformant
12513 binaries in the toolchain have had the attributes set
12514 properly.
12515 _bfd_error_handler
12516 (_("error: %B: 8-byte data alignment conflicts with %B"),
12517 obfd, ibfd);
12518 result = FALSE; */
12519 }
12520 /* Fall through. */
12521 case Tag_ABI_FP_denormal:
12522 case Tag_ABI_PCS_GOT_use:
12523 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12524 value if greater than 2 (for future-proofing). */
12525 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12526 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12527 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12528 out_attr[i].i = in_attr[i].i;
12529 break;
12530
12531 case Tag_Virtualization_use:
12532 /* The virtualization tag effectively stores two bits of
12533 information: the intended use of TrustZone (in bit 0), and the
12534 intended use of Virtualization (in bit 1). */
12535 if (out_attr[i].i == 0)
12536 out_attr[i].i = in_attr[i].i;
12537 else if (in_attr[i].i != 0
12538 && in_attr[i].i != out_attr[i].i)
12539 {
12540 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12541 out_attr[i].i = 3;
12542 else
12543 {
12544 _bfd_error_handler
12545 (_("error: %B: unable to merge virtualization attributes "
12546 "with %B"),
12547 obfd, ibfd);
12548 result = FALSE;
12549 }
12550 }
12551 break;
12552
12553 case Tag_CPU_arch_profile:
12554 if (out_attr[i].i != in_attr[i].i)
12555 {
12556 /* 0 will merge with anything.
12557 'A' and 'S' merge to 'A'.
12558 'R' and 'S' merge to 'R'.
12559 'M' and 'A|R|S' is an error. */
12560 if (out_attr[i].i == 0
12561 || (out_attr[i].i == 'S'
12562 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12563 out_attr[i].i = in_attr[i].i;
12564 else if (in_attr[i].i == 0
12565 || (in_attr[i].i == 'S'
12566 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12567 ; /* Do nothing. */
12568 else
12569 {
12570 _bfd_error_handler
12571 (_("error: %B: Conflicting architecture profiles %c/%c"),
12572 ibfd,
12573 in_attr[i].i ? in_attr[i].i : '0',
12574 out_attr[i].i ? out_attr[i].i : '0');
12575 result = FALSE;
12576 }
12577 }
12578 break;
12579 case Tag_FP_arch:
12580 {
12581 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12582 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12583 when it's 0. It might mean absence of FP hardware if
12584 Tag_FP_arch is zero. */
12585
12586 #define VFP_VERSION_COUNT 9
12587 static const struct
12588 {
12589 int ver;
12590 int regs;
12591 } vfp_versions[VFP_VERSION_COUNT] =
12592 {
12593 {0, 0},
12594 {1, 16},
12595 {2, 16},
12596 {3, 32},
12597 {3, 16},
12598 {4, 32},
12599 {4, 16},
12600 {8, 32},
12601 {8, 16}
12602 };
12603 int ver;
12604 int regs;
12605 int newval;
12606
12607 /* If the output has no requirement about FP hardware,
12608 follow the requirement of the input. */
12609 if (out_attr[i].i == 0)
12610 {
12611 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
12612 out_attr[i].i = in_attr[i].i;
12613 out_attr[Tag_ABI_HardFP_use].i
12614 = in_attr[Tag_ABI_HardFP_use].i;
12615 break;
12616 }
12617 /* If the input has no requirement about FP hardware, do
12618 nothing. */
12619 else if (in_attr[i].i == 0)
12620 {
12621 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
12622 break;
12623 }
12624
12625 /* Both the input and the output have nonzero Tag_FP_arch.
12626 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
12627
12628 /* If both the input and the output have zero Tag_ABI_HardFP_use,
12629 do nothing. */
12630 if (in_attr[Tag_ABI_HardFP_use].i == 0
12631 && out_attr[Tag_ABI_HardFP_use].i == 0)
12632 ;
12633 /* If the input and the output have different Tag_ABI_HardFP_use,
12634 the combination of them is 0 (implied by Tag_FP_arch). */
12635 else if (in_attr[Tag_ABI_HardFP_use].i
12636 != out_attr[Tag_ABI_HardFP_use].i)
12637 out_attr[Tag_ABI_HardFP_use].i = 0;
12638
12639 /* Now we can handle Tag_FP_arch. */
12640
12641 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
12642 pick the biggest. */
12643 if (in_attr[i].i >= VFP_VERSION_COUNT
12644 && in_attr[i].i > out_attr[i].i)
12645 {
12646 out_attr[i] = in_attr[i];
12647 break;
12648 }
12649 /* The output uses the superset of input features
12650 (ISA version) and registers. */
12651 ver = vfp_versions[in_attr[i].i].ver;
12652 if (ver < vfp_versions[out_attr[i].i].ver)
12653 ver = vfp_versions[out_attr[i].i].ver;
12654 regs = vfp_versions[in_attr[i].i].regs;
12655 if (regs < vfp_versions[out_attr[i].i].regs)
12656 regs = vfp_versions[out_attr[i].i].regs;
12657 /* This assumes all possible supersets are also a valid
12658 options. */
12659 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
12660 {
12661 if (regs == vfp_versions[newval].regs
12662 && ver == vfp_versions[newval].ver)
12663 break;
12664 }
12665 out_attr[i].i = newval;
12666 }
12667 break;
12668 case Tag_PCS_config:
12669 if (out_attr[i].i == 0)
12670 out_attr[i].i = in_attr[i].i;
12671 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
12672 {
12673 /* It's sometimes ok to mix different configs, so this is only
12674 a warning. */
12675 _bfd_error_handler
12676 (_("Warning: %B: Conflicting platform configuration"), ibfd);
12677 }
12678 break;
12679 case Tag_ABI_PCS_R9_use:
12680 if (in_attr[i].i != out_attr[i].i
12681 && out_attr[i].i != AEABI_R9_unused
12682 && in_attr[i].i != AEABI_R9_unused)
12683 {
12684 _bfd_error_handler
12685 (_("error: %B: Conflicting use of R9"), ibfd);
12686 result = FALSE;
12687 }
12688 if (out_attr[i].i == AEABI_R9_unused)
12689 out_attr[i].i = in_attr[i].i;
12690 break;
12691 case Tag_ABI_PCS_RW_data:
12692 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
12693 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
12694 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
12695 {
12696 _bfd_error_handler
12697 (_("error: %B: SB relative addressing conflicts with use of R9"),
12698 ibfd);
12699 result = FALSE;
12700 }
12701 /* Use the smallest value specified. */
12702 if (in_attr[i].i < out_attr[i].i)
12703 out_attr[i].i = in_attr[i].i;
12704 break;
12705 case Tag_ABI_PCS_wchar_t:
12706 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
12707 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
12708 {
12709 _bfd_error_handler
12710 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12711 ibfd, in_attr[i].i, out_attr[i].i);
12712 }
12713 else if (in_attr[i].i && !out_attr[i].i)
12714 out_attr[i].i = in_attr[i].i;
12715 break;
12716 case Tag_ABI_enum_size:
12717 if (in_attr[i].i != AEABI_enum_unused)
12718 {
12719 if (out_attr[i].i == AEABI_enum_unused
12720 || out_attr[i].i == AEABI_enum_forced_wide)
12721 {
12722 /* The existing object is compatible with anything.
12723 Use whatever requirements the new object has. */
12724 out_attr[i].i = in_attr[i].i;
12725 }
12726 else if (in_attr[i].i != AEABI_enum_forced_wide
12727 && out_attr[i].i != in_attr[i].i
12728 && !elf_arm_tdata (obfd)->no_enum_size_warning)
12729 {
12730 static const char *aeabi_enum_names[] =
12731 { "", "variable-size", "32-bit", "" };
12732 const char *in_name =
12733 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12734 ? aeabi_enum_names[in_attr[i].i]
12735 : "<unknown>";
12736 const char *out_name =
12737 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12738 ? aeabi_enum_names[out_attr[i].i]
12739 : "<unknown>";
12740 _bfd_error_handler
12741 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12742 ibfd, in_name, out_name);
12743 }
12744 }
12745 break;
12746 case Tag_ABI_VFP_args:
12747 /* Aready done. */
12748 break;
12749 case Tag_ABI_WMMX_args:
12750 if (in_attr[i].i != out_attr[i].i)
12751 {
12752 _bfd_error_handler
12753 (_("error: %B uses iWMMXt register arguments, %B does not"),
12754 ibfd, obfd);
12755 result = FALSE;
12756 }
12757 break;
12758 case Tag_compatibility:
12759 /* Merged in target-independent code. */
12760 break;
12761 case Tag_ABI_HardFP_use:
12762 /* This is handled along with Tag_FP_arch. */
12763 break;
12764 case Tag_ABI_FP_16bit_format:
12765 if (in_attr[i].i != 0 && out_attr[i].i != 0)
12766 {
12767 if (in_attr[i].i != out_attr[i].i)
12768 {
12769 _bfd_error_handler
12770 (_("error: fp16 format mismatch between %B and %B"),
12771 ibfd, obfd);
12772 result = FALSE;
12773 }
12774 }
12775 if (in_attr[i].i != 0)
12776 out_attr[i].i = in_attr[i].i;
12777 break;
12778
12779 case Tag_DIV_use:
12780 /* A value of zero on input means that the divide instruction may
12781 be used if available in the base architecture as specified via
12782 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
12783 the user did not want divide instructions. A value of 2
12784 explicitly means that divide instructions were allowed in ARM
12785 and Thumb state. */
12786 if (in_attr[i].i == out_attr[i].i)
12787 /* Do nothing. */ ;
12788 else if (elf32_arm_attributes_forbid_div (in_attr)
12789 && !elf32_arm_attributes_accept_div (out_attr))
12790 out_attr[i].i = 1;
12791 else if (elf32_arm_attributes_forbid_div (out_attr)
12792 && elf32_arm_attributes_accept_div (in_attr))
12793 out_attr[i].i = in_attr[i].i;
12794 else if (in_attr[i].i == 2)
12795 out_attr[i].i = in_attr[i].i;
12796 break;
12797
12798 case Tag_MPextension_use_legacy:
12799 /* We don't output objects with Tag_MPextension_use_legacy - we
12800 move the value to Tag_MPextension_use. */
12801 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
12802 {
12803 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
12804 {
12805 _bfd_error_handler
12806 (_("%B has has both the current and legacy "
12807 "Tag_MPextension_use attributes"),
12808 ibfd);
12809 result = FALSE;
12810 }
12811 }
12812
12813 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
12814 out_attr[Tag_MPextension_use] = in_attr[i];
12815
12816 break;
12817
12818 case Tag_nodefaults:
12819 /* This tag is set if it exists, but the value is unused (and is
12820 typically zero). We don't actually need to do anything here -
12821 the merge happens automatically when the type flags are merged
12822 below. */
12823 break;
12824 case Tag_also_compatible_with:
12825 /* Already done in Tag_CPU_arch. */
12826 break;
12827 case Tag_conformance:
12828 /* Keep the attribute if it matches. Throw it away otherwise.
12829 No attribute means no claim to conform. */
12830 if (!in_attr[i].s || !out_attr[i].s
12831 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
12832 out_attr[i].s = NULL;
12833 break;
12834
12835 default:
12836 result
12837 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
12838 }
12839
12840 /* If out_attr was copied from in_attr then it won't have a type yet. */
12841 if (in_attr[i].type && !out_attr[i].type)
12842 out_attr[i].type = in_attr[i].type;
12843 }
12844
12845 /* Merge Tag_compatibility attributes and any common GNU ones. */
12846 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
12847 return FALSE;
12848
12849 /* Check for any attributes not known on ARM. */
12850 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
12851
12852 return result;
12853 }
12854
12855
/* Return TRUE if the two EABI versions are compatible.  */
12857
12858 static bfd_boolean
12859 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12860 {
12861 /* v4 and v5 are the same spec before and after it was released,
12862 so allow mixing them. */
12863 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12864 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12865 return TRUE;
12866
12867 return (iver == over);
12868 }
12869
12870 /* Merge backend specific data from an object file to the output
12871 object file when linking. */
12872
12873 static bfd_boolean
12874 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12875
12876 /* Display the flags field. */
12877
static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* The meaning of the remaining e_flags bits depends on which EABI
     version the object claims.  Each case clears the bits it has
     decoded so that leftovers can be flagged as unrecognised below.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* The BE8/LE8 bits are shared with version 5; decode them at
	 the label below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      /* Flag bits common to the version 4 and version 5 EABI.  */
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  /* Anything still set was not decoded by the cases above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13015
13016 static int
13017 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13018 {
13019 switch (ELF_ST_TYPE (elf_sym->st_info))
13020 {
13021 case STT_ARM_TFUNC:
13022 return ELF_ST_TYPE (elf_sym->st_info);
13023
13024 case STT_ARM_16BIT:
13025 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13026 This allows us to distinguish between data used by Thumb instructions
13027 and non-data (which is probably code) inside Thumb regions of an
13028 executable. */
13029 if (type != STT_OBJECT && type != STT_TLS)
13030 return ELF_ST_TYPE (elf_sym->st_info);
13031 break;
13032
13033 default:
13034 break;
13035 }
13036
13037 return type;
13038 }
13039
13040 static asection *
13041 elf32_arm_gc_mark_hook (asection *sec,
13042 struct bfd_link_info *info,
13043 Elf_Internal_Rela *rel,
13044 struct elf_link_hash_entry *h,
13045 Elf_Internal_Sym *sym)
13046 {
13047 if (h != NULL)
13048 switch (ELF32_R_TYPE (rel->r_info))
13049 {
13050 case R_ARM_GNU_VTINHERIT:
13051 case R_ARM_GNU_VTENTRY:
13052 return NULL;
13053 }
13054
13055 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13056 }
13057
13058 /* Update the got entry reference counts for the section being removed. */
13059
static bfd_boolean
elf32_arm_gc_sweep_hook (bfd *                     abfd,
			 struct bfd_link_info *    info,
			 asection *                sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  /* NOTE(review): presumably refreshes the decision on whether BLX is
     usable, which feeds the Thumb PLT refcounting below -- confirm.  */
  check_use_blx (globals);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      /* Resolve the symbol the relocation refers to, following any
	 indirect/warning links to the real hash entry.  Local symbols
	 (r_symndx below sh_info) leave H as NULL.  */
      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      /* Classify the relocation: is it a call, might it have required
	 a dynamic relocation, might it have required a local PLT
	 target?  The categories mirror elf32_arm_check_relocs.  */
      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* Undo the GOT refcount taken when the reloc was scanned;
	     guard against going below zero.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      /* PC-relative relocs against local symbols were treated
		 as calls; everything else may have needed a dynamic
		 relocation.  */
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  /* Undo the per-flavour counters taken at check_relocs time.  */
	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Find the dynamic-relocation list this reloc was recorded
	     on: the hash entry's for global symbols, the per-local-sym
	     list otherwise.  */
	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  /* Unlink the record for SEC from the list.  */
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13249
13250 /* Look through the relocs for a section during the first phase. */
13251
13252 static bfd_boolean
13253 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
13254 asection *sec, const Elf_Internal_Rela *relocs)
13255 {
13256 Elf_Internal_Shdr *symtab_hdr;
13257 struct elf_link_hash_entry **sym_hashes;
13258 const Elf_Internal_Rela *rel;
13259 const Elf_Internal_Rela *rel_end;
13260 bfd *dynobj;
13261 asection *sreloc;
13262 struct elf32_arm_link_hash_table *htab;
13263 bfd_boolean call_reloc_p;
13264 bfd_boolean may_become_dynamic_p;
13265 bfd_boolean may_need_local_target_p;
13266 unsigned long nsyms;
13267
13268 if (bfd_link_relocatable (info))
13269 return TRUE;
13270
13271 BFD_ASSERT (is_arm_elf (abfd));
13272
13273 htab = elf32_arm_hash_table (info);
13274 if (htab == NULL)
13275 return FALSE;
13276
13277 sreloc = NULL;
13278
13279 /* Create dynamic sections for relocatable executables so that we can
13280 copy relocations. */
13281 if (htab->root.is_relocatable_executable
13282 && ! htab->root.dynamic_sections_created)
13283 {
13284 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
13285 return FALSE;
13286 }
13287
13288 if (htab->root.dynobj == NULL)
13289 htab->root.dynobj = abfd;
13290 if (!create_ifunc_sections (info))
13291 return FALSE;
13292
13293 dynobj = htab->root.dynobj;
13294
13295 symtab_hdr = & elf_symtab_hdr (abfd);
13296 sym_hashes = elf_sym_hashes (abfd);
13297 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
13298
13299 rel_end = relocs + sec->reloc_count;
13300 for (rel = relocs; rel < rel_end; rel++)
13301 {
13302 Elf_Internal_Sym *isym;
13303 struct elf_link_hash_entry *h;
13304 struct elf32_arm_link_hash_entry *eh;
13305 unsigned long r_symndx;
13306 int r_type;
13307
13308 r_symndx = ELF32_R_SYM (rel->r_info);
13309 r_type = ELF32_R_TYPE (rel->r_info);
13310 r_type = arm_real_reloc_type (htab, r_type);
13311
13312 if (r_symndx >= nsyms
13313 /* PR 9934: It is possible to have relocations that do not
13314 refer to symbols, thus it is also possible to have an
13315 object file containing relocations but no symbol table. */
13316 && (r_symndx > STN_UNDEF || nsyms > 0))
13317 {
13318 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
13319 r_symndx);
13320 return FALSE;
13321 }
13322
13323 h = NULL;
13324 isym = NULL;
13325 if (nsyms > 0)
13326 {
13327 if (r_symndx < symtab_hdr->sh_info)
13328 {
13329 /* A local symbol. */
13330 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
13331 abfd, r_symndx);
13332 if (isym == NULL)
13333 return FALSE;
13334 }
13335 else
13336 {
13337 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13338 while (h->root.type == bfd_link_hash_indirect
13339 || h->root.type == bfd_link_hash_warning)
13340 h = (struct elf_link_hash_entry *) h->root.u.i.link;
13341
13342 /* PR15323, ref flags aren't set for references in the
13343 same object. */
13344 h->root.non_ir_ref = 1;
13345 }
13346 }
13347
13348 eh = (struct elf32_arm_link_hash_entry *) h;
13349
13350 call_reloc_p = FALSE;
13351 may_become_dynamic_p = FALSE;
13352 may_need_local_target_p = FALSE;
13353
13354 /* Could be done earlier, if h were already available. */
13355 r_type = elf32_arm_tls_transition (info, r_type, h);
13356 switch (r_type)
13357 {
13358 case R_ARM_GOT32:
13359 case R_ARM_GOT_PREL:
13360 case R_ARM_TLS_GD32:
13361 case R_ARM_TLS_IE32:
13362 case R_ARM_TLS_GOTDESC:
13363 case R_ARM_TLS_DESCSEQ:
13364 case R_ARM_THM_TLS_DESCSEQ:
13365 case R_ARM_TLS_CALL:
13366 case R_ARM_THM_TLS_CALL:
13367 /* This symbol requires a global offset table entry. */
13368 {
13369 int tls_type, old_tls_type;
13370
13371 switch (r_type)
13372 {
13373 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
13374
13375 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
13376
13377 case R_ARM_TLS_GOTDESC:
13378 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
13379 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
13380 tls_type = GOT_TLS_GDESC; break;
13381
13382 default: tls_type = GOT_NORMAL; break;
13383 }
13384
13385 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
13386 info->flags |= DF_STATIC_TLS;
13387
13388 if (h != NULL)
13389 {
13390 h->got.refcount++;
13391 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
13392 }
13393 else
13394 {
13395 /* This is a global offset table entry for a local symbol. */
13396 if (!elf32_arm_allocate_local_sym_info (abfd))
13397 return FALSE;
13398 elf_local_got_refcounts (abfd)[r_symndx] += 1;
13399 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
13400 }
13401
13402 /* If a variable is accessed with both tls methods, two
13403 slots may be created. */
13404 if (GOT_TLS_GD_ANY_P (old_tls_type)
13405 && GOT_TLS_GD_ANY_P (tls_type))
13406 tls_type |= old_tls_type;
13407
13408 /* We will already have issued an error message if there
13409 is a TLS/non-TLS mismatch, based on the symbol
13410 type. So just combine any TLS types needed. */
13411 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
13412 && tls_type != GOT_NORMAL)
13413 tls_type |= old_tls_type;
13414
13415 /* If the symbol is accessed in both IE and GDESC
13416 method, we're able to relax. Turn off the GDESC flag,
13417 without messing up with any other kind of tls types
13418 that may be involved. */
13419 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
13420 tls_type &= ~GOT_TLS_GDESC;
13421
13422 if (old_tls_type != tls_type)
13423 {
13424 if (h != NULL)
13425 elf32_arm_hash_entry (h)->tls_type = tls_type;
13426 else
13427 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
13428 }
13429 }
13430 /* Fall through. */
13431
13432 case R_ARM_TLS_LDM32:
13433 if (r_type == R_ARM_TLS_LDM32)
13434 htab->tls_ldm_got.refcount++;
13435 /* Fall through. */
13436
13437 case R_ARM_GOTOFF32:
13438 case R_ARM_GOTPC:
13439 if (htab->root.sgot == NULL
13440 && !create_got_section (htab->root.dynobj, info))
13441 return FALSE;
13442 break;
13443
13444 case R_ARM_PC24:
13445 case R_ARM_PLT32:
13446 case R_ARM_CALL:
13447 case R_ARM_JUMP24:
13448 case R_ARM_PREL31:
13449 case R_ARM_THM_CALL:
13450 case R_ARM_THM_JUMP24:
13451 case R_ARM_THM_JUMP19:
13452 call_reloc_p = TRUE;
13453 may_need_local_target_p = TRUE;
13454 break;
13455
13456 case R_ARM_ABS12:
13457 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13458 ldr __GOTT_INDEX__ offsets. */
13459 if (!htab->vxworks_p)
13460 {
13461 may_need_local_target_p = TRUE;
13462 break;
13463 }
13464 /* Fall through. */
13465
13466 case R_ARM_MOVW_ABS_NC:
13467 case R_ARM_MOVT_ABS:
13468 case R_ARM_THM_MOVW_ABS_NC:
13469 case R_ARM_THM_MOVT_ABS:
13470 if (bfd_link_pic (info))
13471 {
13472 (*_bfd_error_handler)
13473 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13474 abfd, elf32_arm_howto_table_1[r_type].name,
13475 (h) ? h->root.root.string : "a local symbol");
13476 bfd_set_error (bfd_error_bad_value);
13477 return FALSE;
13478 }
13479
13480 /* Fall through. */
13481 case R_ARM_ABS32:
13482 case R_ARM_ABS32_NOI:
13483 if (h != NULL && bfd_link_executable (info))
13484 {
13485 h->pointer_equality_needed = 1;
13486 }
13487 /* Fall through. */
13488 case R_ARM_REL32:
13489 case R_ARM_REL32_NOI:
13490 case R_ARM_MOVW_PREL_NC:
13491 case R_ARM_MOVT_PREL:
13492 case R_ARM_THM_MOVW_PREL_NC:
13493 case R_ARM_THM_MOVT_PREL:
13494
13495 /* Should the interworking branches be listed here? */
13496 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
13497 && (sec->flags & SEC_ALLOC) != 0)
13498 {
13499 if (h == NULL
13500 && elf32_arm_howto_from_type (r_type)->pc_relative)
13501 {
13502 /* In shared libraries and relocatable executables,
13503 we treat local relative references as calls;
13504 see the related SYMBOL_CALLS_LOCAL code in
13505 allocate_dynrelocs. */
13506 call_reloc_p = TRUE;
13507 may_need_local_target_p = TRUE;
13508 }
13509 else
13510 /* We are creating a shared library or relocatable
13511 executable, and this is a reloc against a global symbol,
13512 or a non-PC-relative reloc against a local symbol.
13513 We may need to copy the reloc into the output. */
13514 may_become_dynamic_p = TRUE;
13515 }
13516 else
13517 may_need_local_target_p = TRUE;
13518 break;
13519
13520 /* This relocation describes the C++ object vtable hierarchy.
13521 Reconstruct it for later use during GC. */
13522 case R_ARM_GNU_VTINHERIT:
13523 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
13524 return FALSE;
13525 break;
13526
13527 /* This relocation describes which C++ vtable entries are actually
13528 used. Record for later use during GC. */
13529 case R_ARM_GNU_VTENTRY:
13530 BFD_ASSERT (h != NULL);
13531 if (h != NULL
13532 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
13533 return FALSE;
13534 break;
13535 }
13536
13537 if (h != NULL)
13538 {
13539 if (call_reloc_p)
13540 /* We may need a .plt entry if the function this reloc
13541 refers to is in a different object, regardless of the
13542 symbol's type. We can't tell for sure yet, because
13543 something later might force the symbol local. */
13544 h->needs_plt = 1;
13545 else if (may_need_local_target_p)
13546 /* If this reloc is in a read-only section, we might
13547 need a copy reloc. We can't check reliably at this
13548 stage whether the section is read-only, as input
13549 sections have not yet been mapped to output sections.
13550 Tentatively set the flag for now, and correct in
13551 adjust_dynamic_symbol. */
13552 h->non_got_ref = 1;
13553 }
13554
13555 if (may_need_local_target_p
13556 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
13557 {
13558 union gotplt_union *root_plt;
13559 struct arm_plt_info *arm_plt;
13560 struct arm_local_iplt_info *local_iplt;
13561
13562 if (h != NULL)
13563 {
13564 root_plt = &h->plt;
13565 arm_plt = &eh->plt;
13566 }
13567 else
13568 {
13569 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
13570 if (local_iplt == NULL)
13571 return FALSE;
13572 root_plt = &local_iplt->root;
13573 arm_plt = &local_iplt->arm;
13574 }
13575
13576 /* If the symbol is a function that doesn't bind locally,
13577 this relocation will need a PLT entry. */
13578 if (root_plt->refcount != -1)
13579 root_plt->refcount += 1;
13580
13581 if (!call_reloc_p)
13582 arm_plt->noncall_refcount++;
13583
13584 /* It's too early to use htab->use_blx here, so we have to
13585 record possible blx references separately from
13586 relocs that definitely need a thumb stub. */
13587
13588 if (r_type == R_ARM_THM_CALL)
13589 arm_plt->maybe_thumb_refcount += 1;
13590
13591 if (r_type == R_ARM_THM_JUMP24
13592 || r_type == R_ARM_THM_JUMP19)
13593 arm_plt->thumb_refcount += 1;
13594 }
13595
13596 if (may_become_dynamic_p)
13597 {
13598 struct elf_dyn_relocs *p, **head;
13599
13600 /* Create a reloc section in dynobj. */
13601 if (sreloc == NULL)
13602 {
13603 sreloc = _bfd_elf_make_dynamic_reloc_section
13604 (sec, dynobj, 2, abfd, ! htab->use_rel);
13605
13606 if (sreloc == NULL)
13607 return FALSE;
13608
13609 /* BPABI objects never have dynamic relocations mapped. */
13610 if (htab->symbian_p)
13611 {
13612 flagword flags;
13613
13614 flags = bfd_get_section_flags (dynobj, sreloc);
13615 flags &= ~(SEC_LOAD | SEC_ALLOC);
13616 bfd_set_section_flags (dynobj, sreloc, flags);
13617 }
13618 }
13619
13620 /* If this is a global symbol, count the number of
13621 relocations we need for this symbol. */
13622 if (h != NULL)
13623 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
13624 else
13625 {
13626 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
13627 if (head == NULL)
13628 return FALSE;
13629 }
13630
13631 p = *head;
13632 if (p == NULL || p->sec != sec)
13633 {
13634 bfd_size_type amt = sizeof *p;
13635
13636 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
13637 if (p == NULL)
13638 return FALSE;
13639 p->next = *head;
13640 *head = p;
13641 p->sec = sec;
13642 p->count = 0;
13643 p->pc_count = 0;
13644 }
13645
13646 if (elf32_arm_howto_from_type (r_type)->pc_relative)
13647 p->pc_count += 1;
13648 p->count += 1;
13649 }
13650 }
13651
13652 return TRUE;
13653 }
13654
13655 /* Unwinding tables are not referenced directly. This pass marks them as
13656 required if the corresponding code section is marked. */
13657
13658 static bfd_boolean
13659 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
13660 elf_gc_mark_hook_fn gc_mark_hook)
13661 {
13662 bfd *sub;
13663 Elf_Internal_Shdr **elf_shdrp;
13664 bfd_boolean again;
13665
13666 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
13667
13668 /* Marking EH data may cause additional code sections to be marked,
13669 requiring multiple passes. */
13670 again = TRUE;
13671 while (again)
13672 {
13673 again = FALSE;
13674 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
13675 {
13676 asection *o;
13677
13678 if (! is_arm_elf (sub))
13679 continue;
13680
13681 elf_shdrp = elf_elfsections (sub);
13682 for (o = sub->sections; o != NULL; o = o->next)
13683 {
13684 Elf_Internal_Shdr *hdr;
13685
13686 hdr = &elf_section_data (o)->this_hdr;
13687 if (hdr->sh_type == SHT_ARM_EXIDX
13688 && hdr->sh_link
13689 && hdr->sh_link < elf_numsections (sub)
13690 && !o->gc_mark
13691 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
13692 {
13693 again = TRUE;
13694 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
13695 return FALSE;
13696 }
13697 }
13698 }
13699 }
13700
13701 return TRUE;
13702 }
13703
13704 /* Treat mapping symbols as special target symbols. */
13705
13706 static bfd_boolean
13707 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
13708 {
13709 return bfd_is_arm_special_symbol_name (sym->name,
13710 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
13711 }
13712
13713 /* This is a copy of elf_find_function() from elf.c except that
13714 ARM mapping symbols are ignored when looking for function names
13715 and STT_ARM_TFUNC is considered to a function type. */
13716
13717 static bfd_boolean
13718 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
13719 asymbol ** symbols,
13720 asection * section,
13721 bfd_vma offset,
13722 const char ** filename_ptr,
13723 const char ** functionname_ptr)
13724 {
13725 const char * filename = NULL;
13726 asymbol * func = NULL;
13727 bfd_vma low_func = 0;
13728 asymbol ** p;
13729
13730 for (p = symbols; *p != NULL; p++)
13731 {
13732 elf_symbol_type *q;
13733
13734 q = (elf_symbol_type *) *p;
13735
13736 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
13737 {
13738 default:
13739 break;
13740 case STT_FILE:
13741 filename = bfd_asymbol_name (&q->symbol);
13742 break;
13743 case STT_FUNC:
13744 case STT_ARM_TFUNC:
13745 case STT_NOTYPE:
13746 /* Skip mapping symbols. */
13747 if ((q->symbol.flags & BSF_LOCAL)
13748 && bfd_is_arm_special_symbol_name (q->symbol.name,
13749 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
13750 continue;
13751 /* Fall through. */
13752 if (bfd_get_section (&q->symbol) == section
13753 && q->symbol.value >= low_func
13754 && q->symbol.value <= offset)
13755 {
13756 func = (asymbol *) q;
13757 low_func = q->symbol.value;
13758 }
13759 break;
13760 }
13761 }
13762
13763 if (func == NULL)
13764 return FALSE;
13765
13766 if (filename_ptr)
13767 *filename_ptr = filename;
13768 if (functionname_ptr)
13769 *functionname_ptr = bfd_asymbol_name (func);
13770
13771 return TRUE;
13772 }
13773
13774
13775 /* Find the nearest line to a particular section and offset, for error
13776 reporting. This code is a duplicate of the code in elf.c, except
13777 that it uses arm_elf_find_function. */
13778
13779 static bfd_boolean
13780 elf32_arm_find_nearest_line (bfd * abfd,
13781 asymbol ** symbols,
13782 asection * section,
13783 bfd_vma offset,
13784 const char ** filename_ptr,
13785 const char ** functionname_ptr,
13786 unsigned int * line_ptr,
13787 unsigned int * discriminator_ptr)
13788 {
13789 bfd_boolean found = FALSE;
13790
13791 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
13792 filename_ptr, functionname_ptr,
13793 line_ptr, discriminator_ptr,
13794 dwarf_debug_sections, 0,
13795 & elf_tdata (abfd)->dwarf2_find_line_info))
13796 {
13797 if (!*functionname_ptr)
13798 arm_elf_find_function (abfd, symbols, section, offset,
13799 *filename_ptr ? NULL : filename_ptr,
13800 functionname_ptr);
13801
13802 return TRUE;
13803 }
13804
13805 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
13806 uses DWARF1. */
13807
13808 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
13809 & found, filename_ptr,
13810 functionname_ptr, line_ptr,
13811 & elf_tdata (abfd)->line_info))
13812 return FALSE;
13813
13814 if (found && (*functionname_ptr || *line_ptr))
13815 return TRUE;
13816
13817 if (symbols == NULL)
13818 return FALSE;
13819
13820 if (! arm_elf_find_function (abfd, symbols, section, offset,
13821 filename_ptr, functionname_ptr))
13822 return FALSE;
13823
13824 *line_ptr = 0;
13825 return TRUE;
13826 }
13827
13828 static bfd_boolean
13829 elf32_arm_find_inliner_info (bfd * abfd,
13830 const char ** filename_ptr,
13831 const char ** functionname_ptr,
13832 unsigned int * line_ptr)
13833 {
13834 bfd_boolean found;
13835 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
13836 functionname_ptr, line_ptr,
13837 & elf_tdata (abfd)->dwarf2_find_line_info);
13838 return found;
13839 }
13840
13841 /* Adjust a symbol defined by a dynamic object and referenced by a
13842 regular object. The current definition is in some section of the
13843 dynamic object, but we're not including those sections. We have to
13844 change the definition to something the rest of the link can
13845 understand. */
13846
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  Returns FALSE only if the hash table is unavailable.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.
	     plt.offset of -1 means "no PLT entry"; the ARM-specific
	     refcounts are zeroed so later passes allocate nothing.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  (non_got_ref was set tentatively in check_relocs.)  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
     copy the initial value out of the dynamic object and into the
     runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  Symbols in non-ALLOC
     sections or with zero size need no copy reloc.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  /* Reserve the .dynbss space itself and point H at it.  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
13969
13970 /* Allocate space in .plt, .got and associated reloc sections for
13971 dynamic relocs. */
13972
13973 static bfd_boolean
13974 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13975 {
13976 struct bfd_link_info *info;
13977 struct elf32_arm_link_hash_table *htab;
13978 struct elf32_arm_link_hash_entry *eh;
13979 struct elf_dyn_relocs *p;
13980
13981 if (h->root.type == bfd_link_hash_indirect)
13982 return TRUE;
13983
13984 eh = (struct elf32_arm_link_hash_entry *) h;
13985
13986 info = (struct bfd_link_info *) inf;
13987 htab = elf32_arm_hash_table (info);
13988 if (htab == NULL)
13989 return FALSE;
13990
13991 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13992 && h->plt.refcount > 0)
13993 {
13994 /* Make sure this symbol is output as a dynamic symbol.
13995 Undefined weak syms won't yet be marked as dynamic. */
13996 if (h->dynindx == -1
13997 && !h->forced_local)
13998 {
13999 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14000 return FALSE;
14001 }
14002
14003 /* If the call in the PLT entry binds locally, the associated
14004 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14005 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14006 than the .plt section. */
14007 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
14008 {
14009 eh->is_iplt = 1;
14010 if (eh->plt.noncall_refcount == 0
14011 && SYMBOL_REFERENCES_LOCAL (info, h))
14012 /* All non-call references can be resolved directly.
14013 This means that they can (and in some cases, must)
14014 resolve directly to the run-time target, rather than
14015 to the PLT. That in turns means that any .got entry
14016 would be equal to the .igot.plt entry, so there's
14017 no point having both. */
14018 h->got.refcount = 0;
14019 }
14020
14021 if (bfd_link_pic (info)
14022 || eh->is_iplt
14023 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
14024 {
14025 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
14026
14027 /* If this symbol is not defined in a regular file, and we are
14028 not generating a shared library, then set the symbol to this
14029 location in the .plt. This is required to make function
14030 pointers compare as equal between the normal executable and
14031 the shared library. */
14032 if (! bfd_link_pic (info)
14033 && !h->def_regular)
14034 {
14035 h->root.u.def.section = htab->root.splt;
14036 h->root.u.def.value = h->plt.offset;
14037
14038 /* Make sure the function is not marked as Thumb, in case
14039 it is the target of an ABS32 relocation, which will
14040 point to the PLT entry. */
14041 h->target_internal = ST_BRANCH_TO_ARM;
14042 }
14043
14044 /* VxWorks executables have a second set of relocations for
14045 each PLT entry. They go in a separate relocation section,
14046 which is processed by the kernel loader. */
14047 if (htab->vxworks_p && !bfd_link_pic (info))
14048 {
14049 /* There is a relocation for the initial PLT entry:
14050 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14051 if (h->plt.offset == htab->plt_header_size)
14052 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
14053
14054 /* There are two extra relocations for each subsequent
14055 PLT entry: an R_ARM_32 relocation for the GOT entry,
14056 and an R_ARM_32 relocation for the PLT entry. */
14057 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
14058 }
14059 }
14060 else
14061 {
14062 h->plt.offset = (bfd_vma) -1;
14063 h->needs_plt = 0;
14064 }
14065 }
14066 else
14067 {
14068 h->plt.offset = (bfd_vma) -1;
14069 h->needs_plt = 0;
14070 }
14071
14072 eh = (struct elf32_arm_link_hash_entry *) h;
14073 eh->tlsdesc_got = (bfd_vma) -1;
14074
14075 if (h->got.refcount > 0)
14076 {
14077 asection *s;
14078 bfd_boolean dyn;
14079 int tls_type = elf32_arm_hash_entry (h)->tls_type;
14080 int indx;
14081
14082 /* Make sure this symbol is output as a dynamic symbol.
14083 Undefined weak syms won't yet be marked as dynamic. */
14084 if (h->dynindx == -1
14085 && !h->forced_local)
14086 {
14087 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14088 return FALSE;
14089 }
14090
14091 if (!htab->symbian_p)
14092 {
14093 s = htab->root.sgot;
14094 h->got.offset = s->size;
14095
14096 if (tls_type == GOT_UNKNOWN)
14097 abort ();
14098
14099 if (tls_type == GOT_NORMAL)
14100 /* Non-TLS symbols need one GOT slot. */
14101 s->size += 4;
14102 else
14103 {
14104 if (tls_type & GOT_TLS_GDESC)
14105 {
14106 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14107 eh->tlsdesc_got
14108 = (htab->root.sgotplt->size
14109 - elf32_arm_compute_jump_table_size (htab));
14110 htab->root.sgotplt->size += 8;
14111 h->got.offset = (bfd_vma) -2;
14112 /* plt.got_offset needs to know there's a TLS_DESC
14113 reloc in the middle of .got.plt. */
14114 htab->num_tls_desc++;
14115 }
14116
14117 if (tls_type & GOT_TLS_GD)
14118 {
14119 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14120 the symbol is both GD and GDESC, got.offset may
14121 have been overwritten. */
14122 h->got.offset = s->size;
14123 s->size += 8;
14124 }
14125
14126 if (tls_type & GOT_TLS_IE)
14127 /* R_ARM_TLS_IE32 needs one GOT slot. */
14128 s->size += 4;
14129 }
14130
14131 dyn = htab->root.dynamic_sections_created;
14132
14133 indx = 0;
14134 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
14135 bfd_link_pic (info),
14136 h)
14137 && (!bfd_link_pic (info)
14138 || !SYMBOL_REFERENCES_LOCAL (info, h)))
14139 indx = h->dynindx;
14140
14141 if (tls_type != GOT_NORMAL
14142 && (bfd_link_pic (info) || indx != 0)
14143 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14144 || h->root.type != bfd_link_hash_undefweak))
14145 {
14146 if (tls_type & GOT_TLS_IE)
14147 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14148
14149 if (tls_type & GOT_TLS_GD)
14150 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14151
14152 if (tls_type & GOT_TLS_GDESC)
14153 {
14154 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
14155 /* GDESC needs a trampoline to jump to. */
14156 htab->tls_trampoline = -1;
14157 }
14158
14159 /* Only GD needs it. GDESC just emits one relocation per
14160 2 entries. */
14161 if ((tls_type & GOT_TLS_GD) && indx != 0)
14162 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14163 }
14164 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
14165 {
14166 if (htab->root.dynamic_sections_created)
14167 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14168 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14169 }
14170 else if (h->type == STT_GNU_IFUNC
14171 && eh->plt.noncall_refcount == 0)
14172 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14173 they all resolve dynamically instead. Reserve room for the
14174 GOT entry's R_ARM_IRELATIVE relocation. */
14175 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
14176 else if (bfd_link_pic (info)
14177 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14178 || h->root.type != bfd_link_hash_undefweak))
14179 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14180 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14181 }
14182 }
14183 else
14184 h->got.offset = (bfd_vma) -1;
14185
14186 /* Allocate stubs for exported Thumb functions on v4t. */
14187 if (!htab->use_blx && h->dynindx != -1
14188 && h->def_regular
14189 && h->target_internal == ST_BRANCH_TO_THUMB
14190 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
14191 {
14192 struct elf_link_hash_entry * th;
14193 struct bfd_link_hash_entry * bh;
14194 struct elf_link_hash_entry * myh;
14195 char name[1024];
14196 asection *s;
14197 bh = NULL;
14198 /* Create a new symbol to regist the real location of the function. */
14199 s = h->root.u.def.section;
14200 sprintf (name, "__real_%s", h->root.root.string);
14201 _bfd_generic_link_add_one_symbol (info, s->owner,
14202 name, BSF_GLOBAL, s,
14203 h->root.u.def.value,
14204 NULL, TRUE, FALSE, &bh);
14205
14206 myh = (struct elf_link_hash_entry *) bh;
14207 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14208 myh->forced_local = 1;
14209 myh->target_internal = ST_BRANCH_TO_THUMB;
14210 eh->export_glue = myh;
14211 th = record_arm_to_thumb_glue (info, h);
14212 /* Point the symbol at the stub. */
14213 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
14214 h->target_internal = ST_BRANCH_TO_ARM;
14215 h->root.u.def.section = th->root.u.def.section;
14216 h->root.u.def.value = th->root.u.def.value & ~1;
14217 }
14218
14219 if (eh->dyn_relocs == NULL)
14220 return TRUE;
14221
14222 /* In the shared -Bsymbolic case, discard space allocated for
14223 dynamic pc-relative relocs against symbols which turn out to be
14224 defined in regular objects. For the normal shared case, discard
14225 space for pc-relative relocs that have become local due to symbol
14226 visibility changes. */
14227
14228 if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
14229 {
14230 /* Relocs that use pc_count are PC-relative forms, which will appear
14231 on something like ".long foo - ." or "movw REG, foo - .". We want
14232 calls to protected symbols to resolve directly to the function
14233 rather than going via the plt. If people want function pointer
14234 comparisons to work as expected then they should avoid writing
14235 assembly like ".long foo - .". */
14236 if (SYMBOL_CALLS_LOCAL (info, h))
14237 {
14238 struct elf_dyn_relocs **pp;
14239
14240 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14241 {
14242 p->count -= p->pc_count;
14243 p->pc_count = 0;
14244 if (p->count == 0)
14245 *pp = p->next;
14246 else
14247 pp = &p->next;
14248 }
14249 }
14250
14251 if (htab->vxworks_p)
14252 {
14253 struct elf_dyn_relocs **pp;
14254
14255 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14256 {
14257 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
14258 *pp = p->next;
14259 else
14260 pp = &p->next;
14261 }
14262 }
14263
14264 /* Also discard relocs on undefined weak syms with non-default
14265 visibility. */
14266 if (eh->dyn_relocs != NULL
14267 && h->root.type == bfd_link_hash_undefweak)
14268 {
14269 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
14270 eh->dyn_relocs = NULL;
14271
14272 /* Make sure undefined weak symbols are output as a dynamic
14273 symbol in PIEs. */
14274 else if (h->dynindx == -1
14275 && !h->forced_local)
14276 {
14277 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14278 return FALSE;
14279 }
14280 }
14281
14282 else if (htab->root.is_relocatable_executable && h->dynindx == -1
14283 && h->root.type == bfd_link_hash_new)
14284 {
14285 /* Output absolute symbols so that we can create relocations
14286 against them. For normal symbols we output a relocation
14287 against the section that contains them. */
14288 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14289 return FALSE;
14290 }
14291
14292 }
14293 else
14294 {
14295 /* For the non-shared case, discard space for relocs against
14296 symbols which turn out to need copy relocs or are not
14297 dynamic. */
14298
14299 if (!h->non_got_ref
14300 && ((h->def_dynamic
14301 && !h->def_regular)
14302 || (htab->root.dynamic_sections_created
14303 && (h->root.type == bfd_link_hash_undefweak
14304 || h->root.type == bfd_link_hash_undefined))))
14305 {
14306 /* Make sure this symbol is output as a dynamic symbol.
14307 Undefined weak syms won't yet be marked as dynamic. */
14308 if (h->dynindx == -1
14309 && !h->forced_local)
14310 {
14311 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14312 return FALSE;
14313 }
14314
14315 /* If that succeeded, we know we'll be keeping all the
14316 relocs. */
14317 if (h->dynindx != -1)
14318 goto keep;
14319 }
14320
14321 eh->dyn_relocs = NULL;
14322
14323 keep: ;
14324 }
14325
14326 /* Finally, allocate space. */
14327 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14328 {
14329 asection *sreloc = elf_section_data (p->sec)->sreloc;
14330 if (h->type == STT_GNU_IFUNC
14331 && eh->plt.noncall_refcount == 0
14332 && SYMBOL_REFERENCES_LOCAL (info, h))
14333 elf32_arm_allocate_irelocs (info, sreloc, p->count);
14334 else
14335 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
14336 }
14337
14338 return TRUE;
14339 }
14340
14341 /* Find any dynamic relocs that apply to read-only sections. */
14342
14343 static bfd_boolean
14344 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14345 {
14346 struct elf32_arm_link_hash_entry * eh;
14347 struct elf_dyn_relocs * p;
14348
14349 eh = (struct elf32_arm_link_hash_entry *) h;
14350 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14351 {
14352 asection *s = p->sec;
14353
14354 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14355 {
14356 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14357
14358 if (info->warn_shared_textrel)
14359 (*_bfd_error_handler)
14360 (_("warning: dynamic relocation to `%s' in readonly section `%s'"),
14361 h->root.root.string, s->name);
14362 info->flags |= DF_TEXTREL;
14363
14364 /* Not an error, just cut short the traversal. */
14365 return FALSE;
14366 }
14367 }
14368 return TRUE;
14369 }
14370
14371 void
14372 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14373 int byteswap_code)
14374 {
14375 struct elf32_arm_link_hash_table *globals;
14376
14377 globals = elf32_arm_hash_table (info);
14378 if (globals == NULL)
14379 return;
14380
14381 globals->byteswap_code = byteswap_code;
14382 }
14383
/* Set the sizes of the dynamic sections.  Runs after check_relocs and
   adjust_dynamic_symbol have recorded what each symbol needs; here we
   turn those requirements into concrete section sizes, allocate the
   section contents, and add the .dynamic entries whose values are
   filled in later by elf32_arm_finish_dynamic_sections.  Returns
   FALSE on error.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;		/* Does the output need a PLT?  */
  bfd_boolean relocs;		/* Any reloc sections besides .rel(a).plt?  */
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  /* NOTE(review): output_bfd is marked ATTRIBUTE_UNUSED but is in
     fact referenced below (the DYNAMIC flag test and the VxWorks
     dynamic entries); the annotation is stale, though harmless.  */

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol arrays in parallel: GOT refcount,
	 iplt info, TLS type and TLS descriptor GOT entry.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      /* The TLS trampoline lives in .plt; make sure the PLT header
	 exists before appending the trampoline entry.  */
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
14780
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  Returns FALSE only if defining the
   symbol fails.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  /* Nothing to do for ld -r.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Find or create the hash entry for _TLS_MODULE_BASE_.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define it as a local symbol at offset 0 of the TLS
	     segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  tlsbase->type = STT_TLS;
	  /* NOTE(review): TYPE is set on the looked-up entry before
	     TLSBASE is redirected to BH; both should refer to the same
	     "_TLS_MODULE_BASE_" entry — confirm before refactoring.  */
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  /* Keep the symbol out of the dynamic symbol table.  */
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
14823
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  H is the symbol being finished; SYM is its
   dynamic symbol table entry, which may be adjusted in place.
   Returns FALSE on error.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  /* A regular PLT entry requires a dynamic symbol index.  */
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  sym->st_target_internal = ST_BRANCH_TO_ARM;
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      /* Emit an R_ARM_COPY against the symbol's output address.  */
      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
14912
14913 static void
14914 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14915 void *contents,
14916 const unsigned long *template, unsigned count)
14917 {
14918 unsigned ix;
14919
14920 for (ix = 0; ix != count; ix++)
14921 {
14922 unsigned long insn = template[ix];
14923
14924 /* Emit mov pc,rx if bx is not permitted. */
14925 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14926 insn = (insn & 0xf000000f) | 0x01a0f000;
14927 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14928 }
14929 }
14930
14931 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
14932 other variants, NaCl needs this entry in a static executable's
14933 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
14934 zero. For .iplt really only the last bundle is useful, and .iplt
14935 could have a shorter first entry, with each individual PLT entry's
14936 relative branch calculated differently so it targets the last
14937 bundle instead of the instruction before it (labelled .Lplt_tail
14938 above). But it's simpler to keep the size and layout of PLT0
14939 consistent with the dynamic case, at the cost of some dead code at
14940 the start of .iplt and the one dead store to the stack at the start
14941 of .Lplt_tail. */
14942 static void
14943 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14944 asection *plt, bfd_vma got_displacement)
14945 {
14946 unsigned int i;
14947
14948 put_arm_insn (htab, output_bfd,
14949 elf32_arm_nacl_plt0_entry[0]
14950 | arm_movw_immediate (got_displacement),
14951 plt->contents + 0);
14952 put_arm_insn (htab, output_bfd,
14953 elf32_arm_nacl_plt0_entry[1]
14954 | arm_movt_immediate (got_displacement),
14955 plt->contents + 4);
14956
14957 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14958 put_arm_insn (htab, output_bfd,
14959 elf32_arm_nacl_plt0_entry[i],
14960 plt->contents + (i * 4));
14961 }
14962
/* Finish up the dynamic sections.  Now that final addresses are
   known, patch the .dynamic tag values in place, write the PLT
   header, the TLS descriptor trampoline and the reserved GOT words,
   and fix up the VxWorks .rel(a).plt.unloaded relocations.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;		/* NB: aliases htab->root.sgotplt (.got.plt).  */
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Swap each .dynamic entry in, patch its value, swap it out.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags only need patching (to file offsets) on
	       Symbian/BPABI targets; see get_vma_if_bpabi below.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = ".got";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_section_by_name (output_bfd, name);
	      if (s == NULL)
		{
		  /* PR ld/14397: Issue an error message if a required section is missing.  */
		  (*_bfd_error_handler)
		    (_("error: required section '%s' not found in the linker script"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->vma;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->filepos;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relcoation section, since relocations sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    /* d_val starts at 0, so "- 1" wraps to the
			       maximum ufile_ptr value and lets the first
			       match win; thereafter this keeps the lowest
			       file offset seen.  */
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  /* NOTE(review): GOT_ADDRESS is based on SGOT, which above
	     aliases .got.plt, while GOTPLT_ADDRESS is based on
	     htab->root.sgot (.got) — the names appear swapped relative
	     to the sections used; confirm against the trampoline's
	     expectations before touching this.  */
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* The two PC-relative data words that follow the six
	     trampoline instructions.  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry has two unloaded relocs here: first against
	     the GOT symbol, then against the PLT symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* Word 0 gets the address of .dynamic (or 0 when there is
	     none); words 1 and 2 are zeroed here (conventionally
	     reserved for the dynamic linker).  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
15335
15336 static void
15337 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15338 {
15339 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15340 struct elf32_arm_link_hash_table *globals;
15341
15342 i_ehdrp = elf_elfheader (abfd);
15343
15344 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15345 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15346 else
15347 _bfd_elf_post_process_headers (abfd, link_info);
15348 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15349
15350 if (link_info)
15351 {
15352 globals = elf32_arm_hash_table (link_info);
15353 if (globals != NULL && globals->byteswap_code)
15354 i_ehdrp->e_flags |= EF_ARM_BE8;
15355 }
15356
15357 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15358 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15359 {
15360 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15361 if (abi == AEABI_VFP_args_vfp)
15362 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15363 else
15364 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15365 }
15366 }
15367
15368 static enum elf_reloc_type_class
15369 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15370 const asection *rel_sec ATTRIBUTE_UNUSED,
15371 const Elf_Internal_Rela *rela)
15372 {
15373 switch ((int) ELF32_R_TYPE (rela->r_info))
15374 {
15375 case R_ARM_RELATIVE:
15376 return reloc_class_relative;
15377 case R_ARM_JUMP_SLOT:
15378 return reloc_class_plt;
15379 case R_ARM_COPY:
15380 return reloc_class_copy;
15381 case R_ARM_IRELATIVE:
15382 return reloc_class_ifunc;
15383 default:
15384 return reloc_class_normal;
15385 }
15386 }
15387
/* Final write hook for ABFD: refresh the architecture-describing note
   in the ARM_NOTE_SECTION section (if present) so it matches the BFD's
   final machine setting.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15393
15394 /* Return TRUE if this is an unwinding table entry. */
15395
15396 static bfd_boolean
15397 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15398 {
15399 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15400 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15401 }
15402
15403
15404 /* Set the type and flags for an ARM section. We do this by
15405 the section name, which is a hack, but ought to work. */
15406
15407 static bfd_boolean
15408 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15409 {
15410 const char * name;
15411
15412 name = bfd_get_section_name (abfd, sec);
15413
15414 if (is_arm_elf_unwind_section_name (abfd, name))
15415 {
15416 hdr->sh_type = SHT_ARM_EXIDX;
15417 hdr->sh_flags |= SHF_LINK_ORDER;
15418 }
15419 return TRUE;
15420 }
15421
15422 /* Handle an ARM specific section when reading an object file. This is
15423 called when bfd_section_from_shdr finds a section with an unknown
15424 type. */
15425
15426 static bfd_boolean
15427 elf32_arm_section_from_shdr (bfd *abfd,
15428 Elf_Internal_Shdr * hdr,
15429 const char *name,
15430 int shindex)
15431 {
15432 /* There ought to be a place to keep ELF backend specific flags, but
15433 at the moment there isn't one. We just keep track of the
15434 sections by their name, instead. Fortunately, the ABI gives
15435 names for all the ARM specific sections, so we will probably get
15436 away with this. */
15437 switch (hdr->sh_type)
15438 {
15439 case SHT_ARM_EXIDX:
15440 case SHT_ARM_PREEMPTMAP:
15441 case SHT_ARM_ATTRIBUTES:
15442 break;
15443
15444 default:
15445 return FALSE;
15446 }
15447
15448 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15449 return FALSE;
15450
15451 return TRUE;
15452 }
15453
15454 static _arm_elf_section_data *
15455 get_arm_elf_section_data (asection * sec)
15456 {
15457 if (sec && sec->owner && is_arm_elf (sec->owner))
15458 return elf32_arm_section_data (sec);
15459 else
15460 return NULL;
15461 }
15462
/* Context handed around while emitting linker-generated local symbols
   (mapping symbols and stub names).  */
typedef struct
{
  void *flaginfo;		/* Opaque data forwarded to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index for SEC.  */
  /* Callback that actually writes out one symbol; returns 1 on
     success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
15472
/* Kinds of ARM ELF mapping symbol: $a marks ARM code, $t Thumb code,
   $d literal data.  Values are used to index the mapping-symbol name
   table.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
15479
15480
15481 /* Output a single mapping symbol. */
15482
15483 static bfd_boolean
15484 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15485 enum map_symbol_type type,
15486 bfd_vma offset)
15487 {
15488 static const char *names[3] = {"$a", "$t", "$d"};
15489 Elf_Internal_Sym sym;
15490
15491 sym.st_value = osi->sec->output_section->vma
15492 + osi->sec->output_offset
15493 + offset;
15494 sym.st_size = 0;
15495 sym.st_other = 0;
15496 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15497 sym.st_shndx = osi->sec_shndx;
15498 sym.st_target_internal = 0;
15499 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15500 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15501 }
15502
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   Each target flavour (SymbianOS, VxWorks, NaCl, Thumb-only, default)
   lays its PLT entries out differently, hence the per-flavour symbol
   placement below.  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* An offset of -1 means no PLT entry was allocated.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      /* .iplt has no header.  */
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Bit 0 of the offset flags a Thumb entry; mask it off to obtain the
     entry's byte offset.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      /* SymbianOS: one code word followed by one data word.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks entries interleave code and data words.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl entries are code only.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      /* Thumb-only targets use Thumb-encoded PLT entries.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      /* A Thumb thunk, when needed, sits 4 bytes before the ARM
	 entry.  */
      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
15593
15594 /* Output mapping symbols for PLT entries associated with H. */
15595
15596 static bfd_boolean
15597 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
15598 {
15599 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
15600 struct elf32_arm_link_hash_entry *eh;
15601
15602 if (h->root.type == bfd_link_hash_indirect)
15603 return TRUE;
15604
15605 if (h->root.type == bfd_link_hash_warning)
15606 /* When warning symbols are created, they **replace** the "real"
15607 entry in the hash table, thus we never get to see the real
15608 symbol in a hash traversal. So look at it now. */
15609 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15610
15611 eh = (struct elf32_arm_link_hash_entry *) h;
15612 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
15613 &h->plt, &eh->plt);
15614 }
15615
15616 /* Output a single local symbol for a generated stub. */
15617
15618 static bfd_boolean
15619 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
15620 bfd_vma offset, bfd_vma size)
15621 {
15622 Elf_Internal_Sym sym;
15623
15624 sym.st_value = osi->sec->output_section->vma
15625 + osi->sec->output_offset
15626 + offset;
15627 sym.st_size = size;
15628 sym.st_other = 0;
15629 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
15630 sym.st_shndx = osi->sec_shndx;
15631 sym.st_target_internal = 0;
15632 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
15633 }
15634
15635 static bfd_boolean
15636 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
15637 void * in_arg)
15638 {
15639 struct elf32_arm_stub_hash_entry *stub_entry;
15640 asection *stub_sec;
15641 bfd_vma addr;
15642 char *stub_name;
15643 output_arch_syminfo *osi;
15644 const insn_sequence *template_sequence;
15645 enum stub_insn_type prev_type;
15646 int size;
15647 int i;
15648 enum map_symbol_type sym_type;
15649
15650 /* Massage our args to the form they really have. */
15651 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15652 osi = (output_arch_syminfo *) in_arg;
15653
15654 stub_sec = stub_entry->stub_sec;
15655
15656 /* Ensure this stub is attached to the current section being
15657 processed. */
15658 if (stub_sec != osi->sec)
15659 return TRUE;
15660
15661 addr = (bfd_vma) stub_entry->stub_offset;
15662 stub_name = stub_entry->output_name;
15663
15664 template_sequence = stub_entry->stub_template;
15665 switch (template_sequence[0].type)
15666 {
15667 case ARM_TYPE:
15668 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
15669 return FALSE;
15670 break;
15671 case THUMB16_TYPE:
15672 case THUMB32_TYPE:
15673 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
15674 stub_entry->stub_size))
15675 return FALSE;
15676 break;
15677 default:
15678 BFD_FAIL ();
15679 return 0;
15680 }
15681
15682 prev_type = DATA_TYPE;
15683 size = 0;
15684 for (i = 0; i < stub_entry->stub_template_size; i++)
15685 {
15686 switch (template_sequence[i].type)
15687 {
15688 case ARM_TYPE:
15689 sym_type = ARM_MAP_ARM;
15690 break;
15691
15692 case THUMB16_TYPE:
15693 case THUMB32_TYPE:
15694 sym_type = ARM_MAP_THUMB;
15695 break;
15696
15697 case DATA_TYPE:
15698 sym_type = ARM_MAP_DATA;
15699 break;
15700
15701 default:
15702 BFD_FAIL ();
15703 return FALSE;
15704 }
15705
15706 if (template_sequence[i].type != prev_type)
15707 {
15708 prev_type = template_sequence[i].type;
15709 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
15710 return FALSE;
15711 }
15712
15713 switch (template_sequence[i].type)
15714 {
15715 case ARM_TYPE:
15716 case THUMB32_TYPE:
15717 size += 4;
15718 break;
15719
15720 case THUMB16_TYPE:
15721 size += 2;
15722 break;
15723
15724 case DATA_TYPE:
15725 size += 4;
15726 break;
15727
15728 default:
15729 BFD_FAIL ();
15730 return FALSE;
15731 }
15732 }
15733
15734 return TRUE;
15735 }
15736
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   OUTPUT_BFD is the final output; FLAGINFO and FUNC are the opaque
   data and callback used to emit each symbol.  Covers: data-only input
   sections with no map, ARM<->Thumb and BX glue, long-branch stub
   sections, the PLT/iPLT (header and entries), and the TLS
   trampolines.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Skip linker-created bfds and bfds without symbols.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Only non-empty, mapped-into-a-code-or-alloc-output,
	       non-linker-created sections with an empty map qualify.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  Each veneer is code followed by a 4-byte
     address word at its end.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  Each veneer starts with Thumb code and switches
     to ARM code 4 bytes in.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  The whole section is ARM code.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  /* Thumb-only PLT header: Thumb code, a data word at 12, then
	     Thumb code again at 16.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Emit map symbols for global PLT entries, then for the local
	 iplt entries recorded per input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
15971
15972 /* Allocate target specific section data. */
15973
15974 static bfd_boolean
15975 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15976 {
15977 if (!sec->used_by_bfd)
15978 {
15979 _arm_elf_section_data *sdata;
15980 bfd_size_type amt = sizeof (*sdata);
15981
15982 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15983 if (sdata == NULL)
15984 return FALSE;
15985 sec->used_by_bfd = sdata;
15986 }
15987
15988 return _bfd_elf_new_section_hook (abfd, sec);
15989 }
15990
15991
15992 /* Used to order a list of mapping symbols by address. */
15993
15994 static int
15995 elf32_arm_compare_mapping (const void * a, const void * b)
15996 {
15997 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15998 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15999
16000 if (amap->vma > bmap->vma)
16001 return 1;
16002 else if (amap->vma < bmap->vma)
16003 return -1;
16004 else if (amap->type > bmap->type)
16005 /* Ensure results do not depend on the host qsort for objects with
16006 multiple mapping symbols at the same address by sorting on type
16007 after vma. */
16008 return 1;
16009 else if (amap->type < bmap->type)
16010 return -1;
16011 else
16012 return 0;
16013 }
16014
16015 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16016
16017 static unsigned long
16018 offset_prel31 (unsigned long addr, bfd_vma offset)
16019 {
16020 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16021 }
16022
16023 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16024 relocations. */
16025
16026 static void
16027 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16028 {
16029 unsigned long first_word = bfd_get_32 (output_bfd, from);
16030 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16031
16032 /* High bit of first word is supposed to be zero. */
16033 if ((first_word & 0x80000000ul) == 0)
16034 first_word = offset_prel31 (first_word, offset);
16035
16036 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16037 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16038 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16039 second_word = offset_prel31 (second_word, offset);
16040
16041 bfd_put_32 (output_bfd, first_word, to);
16042 bfd_put_32 (output_bfd, second_word, to + 4);
16043 }
16044
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose contents are being patched.  */
  bfd_byte *contents;		/* In-memory copy of that section's bytes.  */
};
16052
16053
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  GEN_ENTRY is a stub hash entry;
   IN_ARG is an a8_branch_to_stub_data naming the section being
   written.  Rewrites the veneered Thumb branch in place with a branch
   to the veneer.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int target;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only handle Cortex-A8 veneers attached to the section currently
     being written.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* Absolute addresses of the instruction being veneered and of the
     veneer itself.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->target_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* BLX targets are 4-byte aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  target = stub_entry->target_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Build the 32-bit Thumb branch (B.W / BLX / BL encoding T4) that
     replaces the original instruction.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two Thumb halfwords of the replacement branch.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);

  return TRUE;
}
16159
16160 /* Beginning of stm32l4xx work-around. */
16161
16162 /* Functions encoding instructions necessary for the emission of the
16163 fix-stm32l4xx-629360.
16164 Encoding is extracted from the
16165 ARM (C) Architecture Reference Manual
16166 ARMv7-A and ARMv7-R edition
16167 ARM DDI 0406C.b (ID072512). */
16168
16169 static inline bfd_vma
16170 create_instruction_branch_absolute (int branch_offset)
16171 {
16172 /* A8.8.18 B (A8-334)
16173 B target_address (Encoding T4). */
16174 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16175 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16176 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16177
16178 int s = ((branch_offset & 0x1000000) >> 24);
16179 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16180 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16181
16182 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16183 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16184
16185 bfd_vma patched_inst = 0xf0009000
16186 | s << 26 /* S. */
16187 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16188 | j1 << 13 /* J1. */
16189 | j2 << 11 /* J2. */
16190 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16191
16192 return patched_inst;
16193 }
16194
16195 static inline bfd_vma
16196 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16197 {
16198 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16199 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16200 bfd_vma patched_inst = 0xe8900000
16201 | (/*W=*/wback << 21)
16202 | (base_reg << 16)
16203 | (reg_mask & 0x0000ffff);
16204
16205 return patched_inst;
16206 }
16207
16208 static inline bfd_vma
16209 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16210 {
16211 /* A8.8.60 LDMDB/LDMEA (A8-402)
16212 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16213 bfd_vma patched_inst = 0xe9100000
16214 | (/*W=*/wback << 21)
16215 | (base_reg << 16)
16216 | (reg_mask & 0x0000ffff);
16217
16218 return patched_inst;
16219 }
16220
16221 static inline bfd_vma
16222 create_instruction_mov (int target_reg, int source_reg)
16223 {
16224 /* A8.8.103 MOV (register) (A8-486)
16225 MOV Rd, Rm (Encoding T1). */
16226 bfd_vma patched_inst = 0x4600
16227 | (target_reg & 0x7)
16228 | ((target_reg & 0x8) >> 3) << 7
16229 | (source_reg << 3);
16230
16231 return patched_inst;
16232 }
16233
16234 static inline bfd_vma
16235 create_instruction_sub (int target_reg, int source_reg, int value)
16236 {
16237 /* A8.8.221 SUB (immediate) (A8-708)
16238 SUB Rd, Rn, #value (Encoding T3). */
16239 bfd_vma patched_inst = 0xf1a00000
16240 | (target_reg << 8)
16241 | (source_reg << 16)
16242 | (/*S=*/0 << 20)
16243 | ((value & 0x800) >> 11) << 26
16244 | ((value & 0x700) >> 8) << 12
16245 | (value & 0x0ff);
16246
16247 return patched_inst;
16248 }
16249
16250 static inline bfd_vma
16251 create_instruction_vldmia (int base_reg, int wback, int num_regs,
16252 int first_reg)
16253 {
16254 /* A8.8.332 VLDM (A8-922)
16255 VLMD{MODE} Rn{!}, {list} (Encoding T2). */
16256 bfd_vma patched_inst = 0xec900a00
16257 | (/*W=*/wback << 21)
16258 | (base_reg << 16)
16259 | (num_regs & 0x000000ff)
16260 | (((unsigned)first_reg>>1) & 0x0000000f) << 12
16261 | (first_reg & 0x00000001) << 22;
16262
16263 return patched_inst;
16264 }
16265
16266 static inline bfd_vma
16267 create_instruction_vldmdb (int base_reg, int num_regs, int first_reg)
16268 {
16269 /* A8.8.332 VLDM (A8-922)
16270 VLMD{MODE} Rn!, {} (Encoding T2). */
16271 bfd_vma patched_inst = 0xed300a00
16272 | (base_reg << 16)
16273 | (num_regs & 0x000000ff)
16274 | (((unsigned)first_reg>>1) & 0x0000000f) << 12
16275 | (first_reg & 0x00000001) << 22;
16276
16277 return patched_inst;
16278 }
16279
16280 static inline bfd_vma
16281 create_instruction_udf_w (int value)
16282 {
16283 /* A8.8.247 UDF (A8-758)
16284 Undefined (Encoding T2). */
16285 bfd_vma patched_inst = 0xf7f0a000
16286 | (value & 0x00000fff)
16287 | (value & 0x000f0000) << 16;
16288
16289 return patched_inst;
16290 }
16291
16292 static inline bfd_vma
16293 create_instruction_udf (int value)
16294 {
16295 /* A8.8.247 UDF (A8-758)
16296 Undefined (Encoding T1). */
16297 bfd_vma patched_inst = 0xde00
16298 | (value & 0xff);
16299
16300 return patched_inst;
16301 }
16302
16303 /* Functions writing an instruction in memory, returning the next
16304 memory position to write to. */
16305
16306 static inline bfd_byte *
16307 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16308 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16309 {
16310 put_thumb2_insn (htab, output_bfd, insn, pt);
16311 return pt + 4;
16312 }
16313
16314 static inline bfd_byte *
16315 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16316 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16317 {
16318 put_thumb_insn (htab, output_bfd, insn, pt);
16319 return pt + 2;
16320 }
16321
16322 /* Function filling up a region in memory with T1 and T2 UDFs taking
16323 care of alignment. */
16324
16325 static bfd_byte *
16326 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16327 bfd * output_bfd,
16328 const bfd_byte * const base_stub_contents,
16329 bfd_byte * const from_stub_contents,
16330 const bfd_byte * const end_stub_contents)
16331 {
16332 bfd_byte *current_stub_contents = from_stub_contents;
16333
16334 /* Fill the remaining of the stub with deterministic contents : UDF
16335 instructions.
16336 Check if realignment is needed on modulo 4 frontier using T1, to
16337 further use T2. */
16338 if ((current_stub_contents < end_stub_contents)
16339 && !((current_stub_contents - base_stub_contents) % 2)
16340 && ((current_stub_contents - base_stub_contents) % 4))
16341 current_stub_contents =
16342 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16343 create_instruction_udf (0));
16344
16345 for (; current_stub_contents < end_stub_contents;)
16346 current_stub_contents =
16347 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16348 create_instruction_udf_w (0));
16349
16350 return current_stub_contents;
16351 }
16352
16353 /* Functions writing the stream of instructions equivalent to the
16354 derived sequence for ldmia, ldmdb, vldm respectively. */
16355
/* Emit into BASE_STUB_CONTENTS the veneer replacing the Thumb-2 LDMIA
   instruction INITIAL_INSN (found at INITIAL_INSN_ADDR).  When the
   original loads 8 registers or fewer it is copied verbatim; otherwise
   it is split into two LDMs of at most 7 registers each, followed
   (unless PC is loaded) by a branch back to the instruction after the
   original.  Unused veneer space is padded with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the T2 LDMIA encoding: W (writeback) bit 21, Rn in bits
     19:16, register list in bits 15:0.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  If the LDM loads PC, no branch back is
	 needed -- control never falls through.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Invariants guaranteed by the architecture's encoding rules for a
     valid T2 LDMIA.  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  Without
	 writeback, so that Ri itself is overwritten by the load and the
	 original base value is not needed afterwards.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
16500
/* Emit into BASE_STUB_CONTENTS the veneer replacing the Thumb-2 LDMDB
   instruction INITIAL_INSN (found at INITIAL_INSN_ADDR).  Like the
   LDMIA variant above, wide register lists (> 8 registers) are split
   into two LDMs of at most 7 registers; the decrement-before semantics
   force several distinct cases depending on writeback, whether PC is
   loaded, and whether the base register is in the list.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the T2 LDMDB encoding: W (writeback) bit 21, Rn in bits
     19:16, register list in bits 15:0.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Invariants guaranteed by the architecture's encoding rules for a
     valid T2 LDMDB.  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  One case per combination of
     wback/restore_pc/restore_rn; the invalid combination
     (wback && restore_rn) is rejected at the end.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  Ri itself is reloaded here,
	 restoring its original value.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  Decrement-before loads the
	 high registers first.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  Convert the DB form into an
	 equivalent IA sequence starting at the lowest address.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loads PC; no branch back is
	 emitted for this case.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers).  Performs the writeback update
	 up front.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
16746
/* Emit into BASE_STUB_CONTENTS the veneer replacing the Thumb-2 VLDM
   instruction INITIAL_INSN (found at INITIAL_INSN_ADDR).  Wide
   transfers (more than 8 registers) are cut into 8-register chunks;
   an IA form without writeback additionally gets a compensating SUB on
   the base register.  The veneer ends with a branch back to the
   instruction after the original.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field (bits 7:0): number of registers transferred.  */
  int num_regs = ((unsigned int)initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (num_regs <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Classify the addressing mode from the P/U/W bits (bits 24:21,
	 extracted via the shift pair; D bit masked out with 0xd).  */
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int)initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int)initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-register chunks needed to split.  */
      int chunks = (num_regs%8) ? (num_regs/8 + 1) : (num_regs/8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang) &&
		  !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the register in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk<chunks; ++chunk)
	{
	  /* The last chunk transfers the remaining registers; all
	     earlier chunks transfer exactly 8.  */
	  if (is_ia_nobang || is_ia_bang)
	    {
	      current_stub_contents =
		push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				    create_instruction_vldmia
				    (base_reg,
				     /*wback= .  */1,
				     chunks - (chunk + 1) ?
				     8 : num_regs - chunk * 8,
				     first_reg + chunk * 8));
	    }
	  else if (is_db_bang)
	    {
	      current_stub_contents =
		push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				    create_instruction_vldmdb
				    (base_reg,
				     chunks - (chunk + 1) ?
				     8 : num_regs - chunk * 8,
				     first_reg + chunk * 8));
	    }
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_regs));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
16860
16861 static void
16862 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
16863 bfd * output_bfd,
16864 const insn32 wrong_insn,
16865 const bfd_byte *const wrong_insn_addr,
16866 bfd_byte *const stub_contents)
16867 {
16868 if (is_thumb2_ldmia (wrong_insn))
16869 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
16870 wrong_insn, wrong_insn_addr,
16871 stub_contents);
16872 else if (is_thumb2_ldmdb (wrong_insn))
16873 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
16874 wrong_insn, wrong_insn_addr,
16875 stub_contents);
16876 else if (is_thumb2_vldm (wrong_insn))
16877 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
16878 wrong_insn, wrong_insn_addr,
16879 stub_contents);
16880 }
16881
16882 /* End of stm32l4xx work-around. */
16883
16884
16885 /* Do code byteswapping. Return FALSE afterwards so that the section is
16886 written out as normal. */
16887
static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  /* Output VMA of the start of this section; erratum node VMAs are
     converted to section-relative offsets against this.  */
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Patch VFP11 erratum branches/veneers directly into CONTENTS.  */
  if (errcount != 0)
    {
      /* XORing the byte index with 3 mirrors byte order within a
	 32-bit word, so the stores below produce the right in-memory
	 order on big-endian output too.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a +/- 32 MB (26-bit signed) range.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Patch STM32L4XX erratum branches and generate the replacement
     veneers into CONTENTS.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a +/- 16 MB (25-bit signed) range.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    (*_bfd_error_handler)
		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
			 "Jump out of range by %d bytes. "
			 "Cannot encode branch instruction. "),
		       output_bfd,
		       stm32l4xx_errnode->vma - 4,
		       out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* veneer_r points at the copy of the original (wrong)
		   instruction, i.e. the address the veneer branches
		   back relative to.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		/* NOTE(review): the parenthesization below makes the
		   `>` comparison part of the subtracted expression
		   rather than selecting between the two veneer sizes;
		   this looks unintended -- confirm against upstream
		   intent before relying on this range check.  */
		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
					     "veneer."), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* For an .ARM.exidx section, apply the recorded unwind-table edits
     (deletions and inserted EXIDX_CANTUNWIND markers) while copying to
     a freshly allocated buffer, then write that buffer out.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      /* NOTE(review): bfd_malloc result is not checked for NULL, and
	 edited_contents is not freed before returning -- worth
	 confirming whether this is acceptable in this code path.  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: we wrote the section ourselves; the generic writer must
	 not write it again.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  /* Byte-swap code regions according to the mapping symbols ('a' for
     ARM words, 't' for Thumb halfwords, 'd' for data).  */
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The map is no longer needed; release it and mark it consumed.
     NOTE(review): mapcount is unsigned, so -1 here presumably acts as
     a "freed" sentinel -- confirm against the struct's other users.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
17234
17235 /* Mangle thumb function symbols as we read them in. */
17236
17237 static bfd_boolean
17238 elf32_arm_swap_symbol_in (bfd * abfd,
17239 const void *psrc,
17240 const void *pshn,
17241 Elf_Internal_Sym *dst)
17242 {
17243 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17244 return FALSE;
17245
17246 /* New EABI objects mark thumb function symbols by setting the low bit of
17247 the address. */
17248 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17249 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17250 {
17251 if (dst->st_value & 1)
17252 {
17253 dst->st_value &= ~(bfd_vma) 1;
17254 dst->st_target_internal = ST_BRANCH_TO_THUMB;
17255 }
17256 else
17257 dst->st_target_internal = ST_BRANCH_TO_ARM;
17258 }
17259 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17260 {
17261 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17262 dst->st_target_internal = ST_BRANCH_TO_THUMB;
17263 }
17264 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17265 dst->st_target_internal = ST_BRANCH_LONG;
17266 else
17267 dst->st_target_internal = ST_BRANCH_UNKNOWN;
17268
17269 return TRUE;
17270 }
17271
17272
17273 /* Mangle thumb function symbols as we write them out. */
17274
17275 static void
17276 elf32_arm_swap_symbol_out (bfd *abfd,
17277 const Elf_Internal_Sym *src,
17278 void *cdst,
17279 void *shndx)
17280 {
17281 Elf_Internal_Sym newsym;
17282
17283 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17284 of the address set, as per the new EABI. We do this unconditionally
17285 because objcopy does not set the elf header flags until after
17286 it writes out the symbol table. */
17287 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
17288 {
17289 newsym = *src;
17290 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17291 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17292 if (newsym.st_shndx != SHN_UNDEF)
17293 {
17294 /* Do this only for defined symbols. At link type, the static
17295 linker will simulate the work of dynamic linker of resolving
17296 symbols and will carry over the thumbness of found symbols to
17297 the output symbol table. It's not clear how it happens, but
17298 the thumbness of undefined symbols can well be different at
17299 runtime, and writing '1' for them will be confusing for users
17300 and possibly for dynamic linker itself.
17301 */
17302 newsym.st_value |= 1;
17303 }
17304
17305 src = &newsym;
17306 }
17307 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17308 }
17309
17310 /* Add the PT_ARM_EXIDX program header. */
17311
17312 static bfd_boolean
17313 elf32_arm_modify_segment_map (bfd *abfd,
17314 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17315 {
17316 struct elf_segment_map *m;
17317 asection *sec;
17318
17319 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17320 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17321 {
17322 /* If there is already a PT_ARM_EXIDX header, then we do not
17323 want to add another one. This situation arises when running
17324 "strip"; the input binary already has the header. */
17325 m = elf_seg_map (abfd);
17326 while (m && m->p_type != PT_ARM_EXIDX)
17327 m = m->next;
17328 if (!m)
17329 {
17330 m = (struct elf_segment_map *)
17331 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17332 if (m == NULL)
17333 return FALSE;
17334 m->p_type = PT_ARM_EXIDX;
17335 m->count = 1;
17336 m->sections[0] = sec;
17337
17338 m->next = elf_seg_map (abfd);
17339 elf_seg_map (abfd) = m;
17340 }
17341 }
17342
17343 return TRUE;
17344 }
17345
17346 /* We may add a PT_ARM_EXIDX program header. */
17347
17348 static int
17349 elf32_arm_additional_program_headers (bfd *abfd,
17350 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17351 {
17352 asection *sec;
17353
17354 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17355 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17356 return 1;
17357 else
17358 return 0;
17359 }
17360
17361 /* Hook called by the linker routine which adds symbols from an object
17362 file. */
17363
17364 static bfd_boolean
17365 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17366 Elf_Internal_Sym *sym, const char **namep,
17367 flagword *flagsp, asection **secp, bfd_vma *valp)
17368 {
17369 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17370 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
17371 && (abfd->flags & DYNAMIC) == 0
17372 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17373 elf_tdata (info->output_bfd)->has_gnu_symbols = elf_gnu_symbol_any;
17374
17375 if (elf32_arm_hash_table (info) == NULL)
17376 return FALSE;
17377
17378 if (elf32_arm_hash_table (info)->vxworks_p
17379 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17380 flagsp, secp, valp))
17381 return FALSE;
17382
17383 return TRUE;
17384 }
17385
/* We use this to override swap_symbol_in and swap_symbol_out.  */
/* Positional initializer for struct elf_size_info (see elf-bfd.h);
   the field comments below follow that structure's declaration
   order.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external reloc.  */
  32, 2,	/* Arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: strips the Thumb bit.  */
  elf32_arm_swap_symbol_out,	/* Overridden: re-applies the Thumb bit.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
17416
17417 static bfd_vma
17418 read_code32 (const bfd *abfd, const bfd_byte *addr)
17419 {
17420 /* V7 BE8 code is always little endian. */
17421 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17422 return bfd_getl32 (addr);
17423
17424 return bfd_get_32 (abfd, addr);
17425 }
17426
17427 static bfd_vma
17428 read_code16 (const bfd *abfd, const bfd_byte *addr)
17429 {
17430 /* V7 BE8 code is always little endian. */
17431 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17432 return bfd_getl16 (addr);
17433
17434 return bfd_get_16 (abfd, addr);
17435 }
17436
17437 /* Return size of plt0 entry starting at ADDR
17438 or (bfd_vma) -1 if size can not be determined. */
17439
17440 static bfd_vma
17441 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17442 {
17443 bfd_vma first_word;
17444 bfd_vma plt0_size;
17445
17446 first_word = read_code32 (abfd, addr);
17447
17448 if (first_word == elf32_arm_plt0_entry[0])
17449 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17450 else if (first_word == elf32_thumb2_plt0_entry[0])
17451 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17452 else
17453 /* We don't yet handle this PLT format. */
17454 return (bfd_vma) -1;
17455
17456 return plt0_size;
17457 }
17458
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
      return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary: skip over it before matching
     the ARM entry proper.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add so that the comparison below is
     independent of the entry's GOT displacement.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* Note: this "else" deliberately binds to the last "if" of whichever
     preprocessor branch was compiled in above.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
17498
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
/* Build synthetic "foo@plt" symbols, one per .rel.plt relocation, so
   that disassembly of PLT stubs shows meaningful names.  Returns the
   number of symbols stored in *RET, or -1 on error.  The caller owns
   the single bfd_malloc'd block returned through *RET.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  /* Only executables and shared objects have a PLT.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really is a reloc section tied to the
     dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache so the BFD owns (and later frees) the contents.  */
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* NOTE(review): assumes hdr->sh_entsize != 0; presumably guaranteed
     by the SHT_REL/SHT_RELA check above — confirm.  */
  count = relplt->size / hdr->sh_entsize;
  /* First pass: compute the size of one block holding both the
     asymbols and their name strings.  */
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    /* NOTE(review): *ret stays allocated on this error path; the
       caller is expected to free it — verify against callers.  */
    return -1;

  /* Second pass: fill in the symbols.  Names are packed after the
     asymbol array inside the same allocation.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      /* Append "+0xADDEND" (leading zeros stripped) for non-zero
	 addends.  */
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
17615
17616 #define ELF_ARCH bfd_arch_arm
17617 #define ELF_TARGET_ID ARM_ELF_DATA
17618 #define ELF_MACHINE_CODE EM_ARM
17619 #ifdef __QNXTARGET__
17620 #define ELF_MAXPAGESIZE 0x1000
17621 #else
17622 #define ELF_MAXPAGESIZE 0x10000
17623 #endif
17624 #define ELF_MINPAGESIZE 0x1000
17625 #define ELF_COMMONPAGESIZE 0x1000
17626
17627 #define bfd_elf32_mkobject elf32_arm_mkobject
17628
17629 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
17630 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
17631 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
17632 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
17633 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
17634 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
17635 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
17636 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
17637 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
17638 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
17639 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
17640 #define bfd_elf32_bfd_final_link elf32_arm_final_link
17641 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
17642
17643 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
17644 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
17645 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
17646 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
17647 #define elf_backend_check_relocs elf32_arm_check_relocs
17648 #define elf_backend_relocate_section elf32_arm_relocate_section
17649 #define elf_backend_write_section elf32_arm_write_section
17650 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
17651 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
17652 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
17653 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
17654 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
17655 #define elf_backend_always_size_sections elf32_arm_always_size_sections
17656 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
17657 #define elf_backend_post_process_headers elf32_arm_post_process_headers
17658 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
17659 #define elf_backend_object_p elf32_arm_object_p
17660 #define elf_backend_fake_sections elf32_arm_fake_sections
17661 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
17662 #define elf_backend_final_write_processing elf32_arm_final_write_processing
17663 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
17664 #define elf_backend_size_info elf32_arm_size_info
17665 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
17666 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
17667 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
17668 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
17669 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
17670
17671 #define elf_backend_can_refcount 1
17672 #define elf_backend_can_gc_sections 1
17673 #define elf_backend_plt_readonly 1
17674 #define elf_backend_want_got_plt 1
17675 #define elf_backend_want_plt_sym 0
17676 #define elf_backend_may_use_rel_p 1
17677 #define elf_backend_may_use_rela_p 0
17678 #define elf_backend_default_use_rela_p 0
17679
17680 #define elf_backend_got_header_size 12
17681 #define elf_backend_extern_protected_data 1
17682
17683 #undef elf_backend_obj_attrs_vendor
17684 #define elf_backend_obj_attrs_vendor "aeabi"
17685 #undef elf_backend_obj_attrs_section
17686 #define elf_backend_obj_attrs_section ".ARM.attributes"
17687 #undef elf_backend_obj_attrs_arg_type
17688 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
17689 #undef elf_backend_obj_attrs_section_type
17690 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
17691 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
17692 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
17693
17694 #include "elf32-target.h"
17695
17696 /* Native Client targets. */
17697
17698 #undef TARGET_LITTLE_SYM
17699 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
17700 #undef TARGET_LITTLE_NAME
17701 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
17702 #undef TARGET_BIG_SYM
17703 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
17704 #undef TARGET_BIG_NAME
17705 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
17706
17707 /* Like elf32_arm_link_hash_table_create -- but overrides
17708 appropriately for NaCl. */
17709
17710 static struct bfd_link_hash_table *
17711 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
17712 {
17713 struct bfd_link_hash_table *ret;
17714
17715 ret = elf32_arm_link_hash_table_create (abfd);
17716 if (ret)
17717 {
17718 struct elf32_arm_link_hash_table *htab
17719 = (struct elf32_arm_link_hash_table *) ret;
17720
17721 htab->nacl_p = 1;
17722
17723 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
17724 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
17725 }
17726 return ret;
17727 }
17728
17729 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
17730 really need to use elf32_arm_modify_segment_map. But we do it
17731 anyway just to reduce gratuitous differences with the stock ARM backend. */
17732
17733 static bfd_boolean
17734 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
17735 {
17736 return (elf32_arm_modify_segment_map (abfd, info)
17737 && nacl_modify_segment_map (abfd, info));
17738 }
17739
17740 static void
17741 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
17742 {
17743 elf32_arm_final_write_processing (abfd, linker);
17744 nacl_final_write_processing (abfd, linker);
17745 }
17746
17747 static bfd_vma
17748 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
17749 const arelent *rel ATTRIBUTE_UNUSED)
17750 {
17751 return plt->vma
17752 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
17753 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
17754 }
17755
17756 #undef elf32_bed
17757 #define elf32_bed elf32_arm_nacl_bed
17758 #undef bfd_elf32_bfd_link_hash_table_create
17759 #define bfd_elf32_bfd_link_hash_table_create \
17760 elf32_arm_nacl_link_hash_table_create
17761 #undef elf_backend_plt_alignment
17762 #define elf_backend_plt_alignment 4
17763 #undef elf_backend_modify_segment_map
17764 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
17765 #undef elf_backend_modify_program_headers
17766 #define elf_backend_modify_program_headers nacl_modify_program_headers
17767 #undef elf_backend_final_write_processing
17768 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
17769 #undef bfd_elf32_get_synthetic_symtab
17770 #undef elf_backend_plt_sym_val
17771 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
17772
17773 #undef ELF_MINPAGESIZE
17774 #undef ELF_COMMONPAGESIZE
17775
17776
17777 #include "elf32-target.h"
17778
17779 /* Reset to defaults. */
17780 #undef elf_backend_plt_alignment
17781 #undef elf_backend_modify_segment_map
17782 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
17783 #undef elf_backend_modify_program_headers
17784 #undef elf_backend_final_write_processing
17785 #define elf_backend_final_write_processing elf32_arm_final_write_processing
17786 #undef ELF_MINPAGESIZE
17787 #define ELF_MINPAGESIZE 0x1000
17788 #undef ELF_COMMONPAGESIZE
17789 #define ELF_COMMONPAGESIZE 0x1000
17790
17791
17792 /* VxWorks Targets. */
17793
17794 #undef TARGET_LITTLE_SYM
17795 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
17796 #undef TARGET_LITTLE_NAME
17797 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
17798 #undef TARGET_BIG_SYM
17799 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
17800 #undef TARGET_BIG_NAME
17801 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
17802
17803 /* Like elf32_arm_link_hash_table_create -- but overrides
17804 appropriately for VxWorks. */
17805
17806 static struct bfd_link_hash_table *
17807 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
17808 {
17809 struct bfd_link_hash_table *ret;
17810
17811 ret = elf32_arm_link_hash_table_create (abfd);
17812 if (ret)
17813 {
17814 struct elf32_arm_link_hash_table *htab
17815 = (struct elf32_arm_link_hash_table *) ret;
17816 htab->use_rel = 0;
17817 htab->vxworks_p = 1;
17818 }
17819 return ret;
17820 }
17821
17822 static void
17823 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
17824 {
17825 elf32_arm_final_write_processing (abfd, linker);
17826 elf_vxworks_final_write_processing (abfd, linker);
17827 }
17828
17829 #undef elf32_bed
17830 #define elf32_bed elf32_arm_vxworks_bed
17831
17832 #undef bfd_elf32_bfd_link_hash_table_create
17833 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
17834 #undef elf_backend_final_write_processing
17835 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
17836 #undef elf_backend_emit_relocs
17837 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
17838
17839 #undef elf_backend_may_use_rel_p
17840 #define elf_backend_may_use_rel_p 0
17841 #undef elf_backend_may_use_rela_p
17842 #define elf_backend_may_use_rela_p 1
17843 #undef elf_backend_default_use_rela_p
17844 #define elf_backend_default_use_rela_p 1
17845 #undef elf_backend_want_plt_sym
17846 #define elf_backend_want_plt_sym 1
17847 #undef ELF_MAXPAGESIZE
17848 #define ELF_MAXPAGESIZE 0x1000
17849
17850 #include "elf32-target.h"
17851
17852
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE (and reports an error) on
   a hard incompatibility; soft mismatches only warn.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 is a hard error.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float argument passing convention must agree.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP vs FPA instruction sets must agree.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
18076
18077
18078 /* Symbian OS Targets. */
18079
18080 #undef TARGET_LITTLE_SYM
18081 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
18082 #undef TARGET_LITTLE_NAME
18083 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
18084 #undef TARGET_BIG_SYM
18085 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
18086 #undef TARGET_BIG_NAME
18087 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
18088
18089 /* Like elf32_arm_link_hash_table_create -- but overrides
18090 appropriately for Symbian OS. */
18091
18092 static struct bfd_link_hash_table *
18093 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18094 {
18095 struct bfd_link_hash_table *ret;
18096
18097 ret = elf32_arm_link_hash_table_create (abfd);
18098 if (ret)
18099 {
18100 struct elf32_arm_link_hash_table *htab
18101 = (struct elf32_arm_link_hash_table *)ret;
18102 /* There is no PLT header for Symbian OS. */
18103 htab->plt_header_size = 0;
18104 /* The PLT entries are each one instruction and one word. */
18105 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18106 htab->symbian_p = 1;
18107 /* Symbian uses armv5t or above, so use_blx is always true. */
18108 htab->use_blx = 1;
18109 htab->root.is_relocatable_executable = 1;
18110 }
18111 return ret;
18112 }
18113
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel terminating the table.  */
  { NULL,                             0, 0, 0,            0 }
};
18134
18135 static void
18136 elf32_arm_symbian_begin_write_processing (bfd *abfd,
18137 struct bfd_link_info *link_info)
18138 {
18139 /* BPABI objects are never loaded directly by an OS kernel; they are
18140 processed by a postlinker first, into an OS-specific format. If
18141 the D_PAGED bit is set on the file, BFD will align segments on
18142 page boundaries, so that an OS can directly map the file. With
18143 BPABI objects, that just results in wasted space. In addition,
18144 because we clear the D_PAGED bit, map_sections_to_segments will
18145 recognize that the program headers should not be mapped into any
18146 loadable segment. */
18147 abfd->flags &= ~D_PAGED;
18148 elf32_arm_begin_write_processing (abfd, link_info);
18149 }
18150
18151 static bfd_boolean
18152 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18153 struct bfd_link_info *info)
18154 {
18155 struct elf_segment_map *m;
18156 asection *dynsec;
18157
18158 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18159 segment. However, because the .dynamic section is not marked
18160 with SEC_LOAD, the generic ELF code will not create such a
18161 segment. */
18162 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18163 if (dynsec)
18164 {
18165 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18166 if (m->p_type == PT_DYNAMIC)
18167 break;
18168
18169 if (m == NULL)
18170 {
18171 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18172 m->next = elf_seg_map (abfd);
18173 elf_seg_map (abfd) = m;
18174 }
18175 }
18176
18177 /* Also call the generic arm routine. */
18178 return elf32_arm_modify_segment_map (abfd, info);
18179 }
18180
18181 /* Return address for Ith PLT stub in section PLT, for relocation REL
18182 or (bfd_vma) -1 if it should not be included. */
18183
18184 static bfd_vma
18185 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18186 const arelent *rel ATTRIBUTE_UNUSED)
18187 {
18188 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18189 }
18190
18191
18192 #undef elf32_bed
18193 #define elf32_bed elf32_arm_symbian_bed
18194
18195 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18196 will process them and then discard them. */
18197 #undef ELF_DYNAMIC_SEC_FLAGS
18198 #define ELF_DYNAMIC_SEC_FLAGS \
18199 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18200
18201 #undef elf_backend_emit_relocs
18202
18203 #undef bfd_elf32_bfd_link_hash_table_create
18204 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
18205 #undef elf_backend_special_sections
18206 #define elf_backend_special_sections elf32_arm_symbian_special_sections
18207 #undef elf_backend_begin_write_processing
18208 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
18209 #undef elf_backend_final_write_processing
18210 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18211
18212 #undef elf_backend_modify_segment_map
18213 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18214
18215 /* There is no .got section for BPABI objects, and hence no header. */
18216 #undef elf_backend_got_header_size
18217 #define elf_backend_got_header_size 0
18218
18219 /* Similarly, there is no .got.plt section. */
18220 #undef elf_backend_want_got_plt
18221 #define elf_backend_want_got_plt 0
18222
18223 #undef elf_backend_plt_sym_val
18224 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
18225
18226 #undef elf_backend_may_use_rel_p
18227 #define elf_backend_may_use_rel_p 1
18228 #undef elf_backend_may_use_rela_p
18229 #define elf_backend_may_use_rela_p 0
18230 #undef elf_backend_default_use_rela_p
18231 #define elf_backend_default_use_rela_p 0
18232 #undef elf_backend_want_plt_sym
18233 #define elf_backend_want_plt_sym 0
18234 #undef ELF_MAXPAGESIZE
18235 #define ELF_MAXPAGESIZE 0x8000
18236
18237 #include "elf32-target.h"
18238