/* elf32-spu.c revision 1.1.1.12 (source-browser export header).  */
      1 /* SPU specific support for 32-bit ELF
      2 
      3    Copyright (C) 2006-2024 Free Software Foundation, Inc.
      4 
      5    This file is part of BFD, the Binary File Descriptor library.
      6 
      7    This program is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3 of the License, or
     10    (at your option) any later version.
     11 
     12    This program is distributed in the hope that it will be useful,
     13    but WITHOUT ANY WARRANTY; without even the implied warranty of
     14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15    GNU General Public License for more details.
     16 
     17    You should have received a copy of the GNU General Public License along
     18    with this program; if not, write to the Free Software Foundation, Inc.,
     19    51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
     20 
     21 #include "sysdep.h"
     22 #include "libiberty.h"
     23 #include "bfd.h"
     24 #include "bfdlink.h"
     25 #include "libbfd.h"
     26 #include "elf-bfd.h"
     27 #include "elf/spu.h"
     28 #include "elf32-spu.h"
     29 
     30 /* All users of this file have bfd_octets_per_byte (abfd, sec) == 1.  */
     31 #define OCTETS_PER_BYTE(ABFD, SEC) 1
     32 
     33 /* We use RELA style relocs.  Don't define USE_REL.  */
     34 
     35 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
     36 					   void *, asection *,
     37 					   bfd *, char **);
     38 
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.

   HOWTO argument order here: type, rightshift, size, bitsize,
   pc_relative, bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,	   0, 0,  0, false,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 false, 0, 0x00000000, false),
  HOWTO (R_SPU_ADDR10,	   4, 4, 10, false, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 false, 0, 0x00ffc000, false),
  HOWTO (R_SPU_ADDR16,	   2, 4, 16, false,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR16_HI, 16, 4, 16, false,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR16_LO,  0, 4, 16, false,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR18,	   0, 4, 18, false,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 false, 0, 0x01ffff80, false),
  HOWTO (R_SPU_ADDR32,	   0, 4, 32, false,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 false, 0, 0xffffffff, false),
  HOWTO (R_SPU_REL16,	   2, 4, 16,  true,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 false, 0, 0x007fff80, true),
  HOWTO (R_SPU_ADDR7,	   0, 4,  7, false, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 false, 0, 0x001fc000, false),
  /* The two 9-bit pc-relative relocs need a special function because
     their bits are split across two fields in the insn word.  */
  HOWTO (R_SPU_REL9,	   2, 4,  9,  true,  0, complain_overflow_signed,
	 spu_elf_rel9,		"SPU_REL9",
	 false, 0, 0x0180007f, true),
  HOWTO (R_SPU_REL9I,	   2, 4,  9,  true,  0, complain_overflow_signed,
	 spu_elf_rel9,		"SPU_REL9I",
	 false, 0, 0x0000c07f, true),
  HOWTO (R_SPU_ADDR10I,	   0, 4, 10, false, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 false, 0, 0x00ffc000, false),
  HOWTO (R_SPU_ADDR16I,	   0, 4, 16, false,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 false, 0, 0x007fff80, false),
  HOWTO (R_SPU_REL32,	   0, 4, 32, true,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 false, 0, 0xffffffff, true),
  HOWTO (R_SPU_ADDR16X,	   0, 4, 16, false,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 false, 0, 0x007fff80, false),
  HOWTO (R_SPU_PPU32,	   0, 4, 32, false,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 false, 0, 0xffffffff, false),
  HOWTO (R_SPU_PPU64,	   0, 8, 64, false,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 false, 0, -1, false),
  HOWTO (R_SPU_ADD_PIC,	   0, 0,  0, false,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 false, 0, 0x00000000, false),
};
     98 
/* Sections given special treatment by the backend: ._ea (writable
   PROGBITS) and .toe (alloc-only NOBITS).  The table is terminated by
   a NULL entry.  */

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
    104 
    105 static enum elf_spu_reloc_type
    106 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
    107 {
    108   switch (code)
    109     {
    110     default:
    111       return (enum elf_spu_reloc_type) -1;
    112     case BFD_RELOC_NONE:
    113       return R_SPU_NONE;
    114     case BFD_RELOC_SPU_IMM10W:
    115       return R_SPU_ADDR10;
    116     case BFD_RELOC_SPU_IMM16W:
    117       return R_SPU_ADDR16;
    118     case BFD_RELOC_SPU_LO16:
    119       return R_SPU_ADDR16_LO;
    120     case BFD_RELOC_SPU_HI16:
    121       return R_SPU_ADDR16_HI;
    122     case BFD_RELOC_SPU_IMM18:
    123       return R_SPU_ADDR18;
    124     case BFD_RELOC_SPU_PCREL16:
    125       return R_SPU_REL16;
    126     case BFD_RELOC_SPU_IMM7:
    127       return R_SPU_ADDR7;
    128     case BFD_RELOC_SPU_IMM8:
    129       return R_SPU_NONE;
    130     case BFD_RELOC_SPU_PCREL9a:
    131       return R_SPU_REL9;
    132     case BFD_RELOC_SPU_PCREL9b:
    133       return R_SPU_REL9I;
    134     case BFD_RELOC_SPU_IMM10:
    135       return R_SPU_ADDR10I;
    136     case BFD_RELOC_SPU_IMM16:
    137       return R_SPU_ADDR16I;
    138     case BFD_RELOC_32:
    139       return R_SPU_ADDR32;
    140     case BFD_RELOC_32_PCREL:
    141       return R_SPU_REL32;
    142     case BFD_RELOC_SPU_PPU32:
    143       return R_SPU_PPU32;
    144     case BFD_RELOC_SPU_PPU64:
    145       return R_SPU_PPU64;
    146     case BFD_RELOC_SPU_ADD_PIC:
    147       return R_SPU_ADD_PIC;
    148     }
    149 }
    150 
    151 static bool
    152 spu_elf_info_to_howto (bfd *abfd,
    153 		       arelent *cache_ptr,
    154 		       Elf_Internal_Rela *dst)
    155 {
    156   enum elf_spu_reloc_type r_type;
    157 
    158   r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
    159   /* PR 17512: file: 90c2a92e.  */
    160   if (r_type >= R_SPU_max)
    161     {
    162       /* xgettext:c-format */
    163       _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
    164 			  abfd, r_type);
    165       bfd_set_error (bfd_error_bad_value);
    166       return false;
    167     }
    168   cache_ptr->howto = &elf_howto_table[(int) r_type];
    169   return true;
    170 }
    171 
    172 static reloc_howto_type *
    173 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
    174 			   bfd_reloc_code_real_type code)
    175 {
    176   enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
    177 
    178   if (r_type == (enum elf_spu_reloc_type) -1)
    179     return NULL;
    180 
    181   return elf_howto_table + r_type;
    182 }
    183 
    184 static reloc_howto_type *
    185 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
    186 			   const char *r_name)
    187 {
    188   unsigned int i;
    189 
    190   for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    191     if (elf_howto_table[i].name != NULL
    192 	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
    193       return &elf_howto_table[i];
    194 
    195   return NULL;
    196 }
    197 
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These carry a 9-bit
   signed word offset whose bits are split across two insn fields,
   so the generic reloc machinery cannot handle them.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);

  /* Get symbol value.  Common symbols contribute no value of their own.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  NOTE(review): the reloc's offset within the
     section is not subtracted here — presumably already folded into
     the addend; confirm against the assembler's reloc emission.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Convert to words; the field holds a 9-bit signed word offset,
     i.e. [-256, 255].  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
    246 
    247 static bool
    248 spu_elf_new_section_hook (bfd *abfd, asection *sec)
    249 {
    250   struct _spu_elf_section_data *sdata;
    251 
    252   sdata = bfd_zalloc (abfd, sizeof (*sdata));
    253   if (sdata == NULL)
    254     return false;
    255   sec->used_by_bfd = sdata;
    256 
    257   return _bfd_elf_new_section_hook (abfd, sec);
    258 }
    259 
/* Set up overlay info for executables.  For each PT_LOAD segment
   flagged PF_OVERLAY, number the overlay (num_ovl) and its buffer
   (num_buf), and record both in the SPU section data of every
   section contained in the segment.  */

static bool
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    /* Overlays sharing the same 256k-aligned region (low 18
	       vaddr bits equal) share a buffer; start a new buffer
	       otherwise.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every non-empty section inside this segment.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (shdr->bfd_section != NULL
		    && ELF_SECTION_SIZE (shdr, phdr) != 0
		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return true;
}
    299 
    300 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
    301    strip --strip-unneeded will not remove them.  */
    302 
    303 static void
    304 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
    305 {
    306   if (sym->name != NULL
    307       && sym->section != bfd_abs_section_ptr
    308       && startswith (sym->name, "_EAR_"))
    309     sym->flags |= BSF_KEEP;
    310 }
    311 
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   overlay and stub bookkeeping for this target.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Linker parameters supplied via spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points: load/br-handler and
     return/call-handler (indexed like entry_names).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
    357 
/* Hijack the generic got fields for overlay stub accounting.  One
   entry per (overlay, addend) pair; the union holds either the reloc
   addend or, later, the branch address the stub returns to.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};
    370 
/* Return the SPU link hash table for link info P, or NULL if the
   hash table in use is not an SPU_ELF_DATA ELF hash table.  */
#define spu_hash_table(p) \
  ((is_elf_hash_table ((p)->hash)					\
    && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA)		\
   ? (struct spu_link_hash_table *) (p)->hash : NULL)
    375 
/* One edge in the call graph: a call (or tail branch) from some
   function to FUN.  */
struct call_info
{
  /* The callee.  */
  struct function_info *fun;
  /* Next call made by the same caller.  */
  struct call_info *next;
  /* Number of times this call site was seen.  */
  unsigned int count;
  unsigned int max_depth;
  /* Tail call rather than a normal call.  */
  unsigned int is_tail : 1;
  /* Callee is a section pasted after the caller.  */
  unsigned int is_pasted : 1;
  /* Edge removed to break a cycle during traversal.  */
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
    387 
/* Per-function (or per function-part) info built during call graph
   analysis.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in a hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
    435 
/* Per-section table of function_info records.  */
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  (C89-style trailing
     array; allocated with extra space beyond fun[0].)  */
  struct function_info fun[1];
};
    444 
    445 static struct function_info *find_function (asection *, bfd_vma,
    446 					    struct bfd_link_info *);
    447 
    448 /* Create a spu ELF linker hash table.  */
    449 
    450 static struct bfd_link_hash_table *
    451 spu_elf_link_hash_table_create (bfd *abfd)
    452 {
    453   struct spu_link_hash_table *htab;
    454 
    455   htab = bfd_zmalloc (sizeof (*htab));
    456   if (htab == NULL)
    457     return NULL;
    458 
    459   if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
    460 				      _bfd_elf_link_hash_newfunc,
    461 				      sizeof (struct elf_link_hash_entry)))
    462     {
    463       free (htab);
    464       return NULL;
    465     }
    466 
    467   htab->elf.init_got_refcount.refcount = 0;
    468   htab->elf.init_got_refcount.glist = NULL;
    469   htab->elf.init_got_offset.offset = 0;
    470   htab->elf.init_got_offset.glist = NULL;
    471   return &htab->elf.root;
    472 }
    473 
    474 void
    475 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
    476 {
    477   bfd_vma max_branch_log2;
    478 
    479   struct spu_link_hash_table *htab = spu_hash_table (info);
    480   htab->params = params;
    481   htab->line_size_log2 = bfd_log2 (htab->params->line_size);
    482   htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
    483 
    484   /* For the software i-cache, we provide a "from" list whose size
    485      is a power-of-two number of quadwords, big enough to hold one
    486      byte per outgoing branch.  Compute this number here.  */
    487   max_branch_log2 = bfd_log2 (htab->params->max_branch);
    488   htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
    489 }
    490 
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need that
   result.  Returns false only if reading local symbols fails.  */

static bool
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or beyond sh_info refer to global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined (or weakly defined) symbols have a section.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      /* Read the local symbol table on first use; prefer symbols
	 already cached in the section header contents.  */
      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return false;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return true;
}
    560 
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   Also creates the .fixup section when emit_fixups is requested.
   Returns false on allocation/section-creation failure.  */

bool
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* Does any input already supply the SPUNAME note?  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      /* This should really be SEC_LINKER_CREATED, but then we'd need
	 to write out the section ourselves.  */
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s, 4))
	return false;
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
	 proper section type.  */
      elf_section_type (s) = SHT_NOTE;

      /* Note layout: three 32-bit words (namesz, descsz, type),
	 then the name and the descriptor, each padded to a
	 multiple of 4 bytes.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (s, size))
	return false;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return false;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      /* The .fixup section lives in the dynobj, creating one if
	 needed.  */
      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (s, 2))
	return false;
      htab->sfixup = s;
    }

  return true;
}
    633 
    634 /* qsort predicate to sort sections by vma.  */
    635 
    636 static int
    637 sort_sections (const void *a, const void *b)
    638 {
    639   const asection *const *s1 = a;
    640   const asection *const *s2 = b;
    641   bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
    642 
    643   if (delta != 0)
    644     return delta < 0 ? -1 : 1;
    645 
    646   return (*s1)->index - (*s2)->index;
    647 }
    648 
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.

   Two flavours: for the software i-cache, overlay sections are cache
   lines within a fixed cache area; otherwise overlays are detected by
   overlapping vmas.  On success htab->ovl_sec takes ownership of the
   sorted section array.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  /* Overlay manager entry symbols, indexed by [entry][ovly_flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* NOTE(review): the error-return paths below leave alloc_sec
     unfreed; only the success path hands it to htab->ovl_sec.
     Confirm whether the leak on the error paths matters here.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      /* Cache area starts at the previous section; its size is
		 num_lines * line_size.  */
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (!startswith (s->name, ".ovl.init"))
	    {
	      /* num_buf is the 1-based cache line; sections that share
		 a line get successive set ids.  */
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "does not start on a cache line\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "is larger than a cache line\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      /* NOTE(review): this reports alloc_sec[i-1] rather than
		 the offending section s — looks intentional upstream,
		 but verify which section the message should name.  */
	      info->callbacks->einfo (_("%X%P: overlay section %pA "
					"is not in cache area\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* First overlap with s0 starts a new overlay region.  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (!startswith (s0->name, ".ovl.init"))
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (!startswith (s->name, ".ovl.init"))
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  /* All overlays in a buffer must share a start vma.  */
		  if (s0->vma != s->vma)
		    {
		      /* xgettext:c-format */
		      info->callbacks->einfo (_("%X%P: overlay sections %pA "
						"and %pA do not start at the "
						"same address\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Ensure the overlay manager entry symbols exist as (at least)
     undefined references so later passes can use them.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
    847 
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction words with all operand fields zero, used when
   building overlay stub code.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
    858 
/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bool
is_branch (const unsigned char *insn)
{
  /* Opcode byte matches 001x00xx and bit 8 of the word is clear.  */
  bool opcode_match = (insn[0] & 0xec) == 0x20;
  bool bit8_clear = (insn[1] & 0x80) == 0;

  return opcode_match && bit8_clear;
}
    874 
/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bool
is_indirect_branch (const unsigned char *insn)
{
  /* Opcode byte matches 001x0101 and bit 8 of the word is clear.  */
  bool opcode_match = (insn[0] & 0xef) == 0x25;
  bool bit8_clear = (insn[1] & 0x80) == 0;

  return opcode_match && bit8_clear;
}
    890 
/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bool
is_hint (const unsigned char *insn)
{
  /* Compare the top six bits against 000100.  */
  return (insn[0] >> 2) == 0x04;
}
    900 
    901 /* True if INPUT_SECTION might need overlay stubs.  */
    902 
    903 static bool
    904 maybe_needs_stubs (asection *input_section)
    905 {
    906   /* No stubs for debug sections and suchlike.  */
    907   if ((input_section->flags & SEC_ALLOC) == 0)
    908     return false;
    909 
    910   /* No stubs for link-once sections that will be discarded.  */
    911   if (input_section->output_section == bfd_abs_section_ptr)
    912     return false;
    913 
    914   /* Don't create stubs for .eh_frame references.  */
    915   if (strcmp (input_section->name, ".eh_frame") == 0)
    916     return false;
    917 
    918   return true;
    919 }
    920 
/* Classification of the stub (if any) required for a reloc,
   returned by needs_ovl_stub.  */

enum _stub_type
{
  no_stub,		/* No stub required.  */
  call_ovl_stub,	/* Overlay stub for a function call.  */
  br000_ovl_stub,	/* Overlay branch stubs; the three bits in the
			   name are the branch's lrlive value (see
			   needs_ovl_stub and build_stub).  */
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,		/* Stub placed in the non-overlay area.  */
  stub_error		/* Failure while examining the reloc.  */
};
    936 
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bool branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* Only targets in an output section tagged with SPU overlay data
     can need a stub.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (startswith (h->root.root.string, "setjmp")
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = false;
  hint = false;
  call = false;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* These relocs appear on instructions, so examine the insn to
	 see whether it is a branch or branch hint.  Read the four
	 bytes from the section if the caller didn't pass CONTENTS.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* brasl or brsl, ie. a branch that sets the link register.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);

	    }
	}
    }

  /* Soft-icache only stubs branches, and non-branch references to
     data sections need no stub at all.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      /* Extract the lrlive field carried by the branch insn.  */
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	/* Encode the lrlive value in the stub type.  */
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
   1071 
   1072 static bool
   1073 count_stub (struct spu_link_hash_table *htab,
   1074 	    bfd *ibfd,
   1075 	    asection *isec,
   1076 	    enum _stub_type stub_type,
   1077 	    struct elf_link_hash_entry *h,
   1078 	    const Elf_Internal_Rela *irela)
   1079 {
   1080   unsigned int ovl = 0;
   1081   struct got_entry *g, **head;
   1082   bfd_vma addend;
   1083 
   1084   /* If this instruction is a branch or call, we need a stub
   1085      for it.  One stub per function per overlay.
   1086      If it isn't a branch, then we are taking the address of
   1087      this function so need a stub in the non-overlay area
   1088      for it.  One stub per function.  */
   1089   if (stub_type != nonovl_stub)
   1090     ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
   1091 
   1092   if (h != NULL)
   1093     head = &h->got.glist;
   1094   else
   1095     {
   1096       if (elf_local_got_ents (ibfd) == NULL)
   1097 	{
   1098 	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
   1099 			       * sizeof (*elf_local_got_ents (ibfd)));
   1100 	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
   1101 	  if (elf_local_got_ents (ibfd) == NULL)
   1102 	    return false;
   1103 	}
   1104       head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
   1105     }
   1106 
   1107   if (htab->params->ovly_flavour == ovly_soft_icache)
   1108     {
   1109       htab->stub_count[ovl] += 1;
   1110       return true;
   1111     }
   1112 
   1113   addend = 0;
   1114   if (irela != NULL)
   1115     addend = irela->r_addend;
   1116 
   1117   if (ovl == 0)
   1118     {
   1119       struct got_entry *gnext;
   1120 
   1121       for (g = *head; g != NULL; g = g->next)
   1122 	if (g->addend == addend && g->ovl == 0)
   1123 	  break;
   1124 
   1125       if (g == NULL)
   1126 	{
   1127 	  /* Need a new non-overlay area stub.  Zap other stubs.  */
   1128 	  for (g = *head; g != NULL; g = gnext)
   1129 	    {
   1130 	      gnext = g->next;
   1131 	      if (g->addend == addend)
   1132 		{
   1133 		  htab->stub_count[g->ovl] -= 1;
   1134 		  free (g);
   1135 		}
   1136 	    }
   1137 	}
   1138     }
   1139   else
   1140     {
   1141       for (g = *head; g != NULL; g = g->next)
   1142 	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
   1143 	  break;
   1144     }
   1145 
   1146   if (g == NULL)
   1147     {
   1148       g = bfd_malloc (sizeof *g);
   1149       if (g == NULL)
   1150 	return false;
   1151       g->ovl = ovl;
   1152       g->addend = addend;
   1153       g->stub_addr = (bfd_vma) -1;
   1154       g->next = *head;
   1155       *head = g;
   1156 
   1157       htab->stub_count[ovl] += 1;
   1158     }
   1159 
   1160   return true;
   1161 }
   1162 
   1163 /* Support two sizes of overlay stubs, a slower more compact stub of two
   1164    instructions, and a faster stub of four instructions.
   1165    Soft-icache stubs are four or eight words.  */
   1166 
   1167 static unsigned int
   1168 ovl_stub_size (struct spu_elf_params *params)
   1169 {
   1170   return 16 << params->ovly_flavour >> params->compact_stub;
   1171 }
   1172 
   1173 static unsigned int
   1174 ovl_stub_size_log2 (struct spu_elf_params *params)
   1175 {
   1176   return 4 + params->ovly_flavour - params->compact_stub;
   1177 }
   1178 
   1179 /* Two instruction overlay stubs look like:
   1180 
   1181    brsl $75,__ovly_load
   1182    .word target_ovl_and_address
   1183 
   1184    ovl_and_address is a word with the overlay number in the top 14 bits
   1185    and local store address in the bottom 18 bits.
   1186 
   1187    Four instruction overlay stubs look like:
   1188 
   1189    ila $78,ovl_number
   1190    lnop
   1191    ila $79,target_address
   1192    br __ovly_load
   1193 
   1194    Software icache stubs are:
   1195 
   1196    .word target_index
   1197    .word target_ia;
   1198    .word lrlive_branchlocalstoreaddr;
   1199    brasl $75,__icache_br_handler
   1200    .quad xor_pattern
   1201 */
   1202 
/* Emit one overlay stub.  ISEC and IRELA identify the referencing
   reloc (both NULL for _SPUEAR_ stubs), H the global symbol called if
   any, DEST plus DEST_SEC the branch target.  The stub instructions
   are written into htab->stub_sec[ovl] at its current size, which is
   then advanced.  Returns false on error (also setting
   htab->stub_err for misaligned addresses).  */

static bool
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache: record every branch location in a fresh entry.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return false;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made for this reference.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      if (g->ovl == 0 && ovl != 0)
	return true;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return true;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  /* TO is the overlay manager entry (__ovly_load flavour).  */
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All three addresses must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return false;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-instruction stub: ila/lnop/ila/br (see comment above).  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-instruction stub: brsl plus a data word holding the
	 overlay number (top 14 bits) and target address (low 18).  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine the lrlive value to record in the stub.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay stubs go via the second overlay manager entry.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      /* Emit the four-word soft-icache stub (see comment above).  */
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a "NNNNNNNN.ovl_call.SYM[+addend]" symbol on the stub
	 so it shows up in maps and debuggers.  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return false;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      if (h == NULL)
	return false;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return true;
}
   1480 
   1481 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   1482    symbols.  */
   1483 
   1484 static bool
   1485 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
   1486 {
   1487   /* Symbols starting with _SPUEAR_ need a stub because they may be
   1488      invoked by the PPU.  */
   1489   struct bfd_link_info *info = inf;
   1490   struct spu_link_hash_table *htab = spu_hash_table (info);
   1491   asection *sym_sec;
   1492 
   1493   if ((h->root.type == bfd_link_hash_defined
   1494        || h->root.type == bfd_link_hash_defweak)
   1495       && h->def_regular
   1496       && startswith (h->root.root.string, "_SPUEAR_")
   1497       && (sym_sec = h->root.u.def.section) != NULL
   1498       && sym_sec->output_section != bfd_abs_section_ptr
   1499       && spu_elf_section_data (sym_sec->output_section) != NULL
   1500       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
   1501 	  || htab->params->non_overlay_stubs))
   1502     {
   1503       return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
   1504     }
   1505 
   1506   return true;
   1507 }
   1508 
   1509 static bool
   1510 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
   1511 {
   1512   /* Symbols starting with _SPUEAR_ need a stub because they may be
   1513      invoked by the PPU.  */
   1514   struct bfd_link_info *info = inf;
   1515   struct spu_link_hash_table *htab = spu_hash_table (info);
   1516   asection *sym_sec;
   1517 
   1518   if ((h->root.type == bfd_link_hash_defined
   1519        || h->root.type == bfd_link_hash_defweak)
   1520       && h->def_regular
   1521       && startswith (h->root.root.string, "_SPUEAR_")
   1522       && (sym_sec = h->root.u.def.section) != NULL
   1523       && sym_sec->output_section != bfd_abs_section_ptr
   1524       && spu_elf_section_data (sym_sec->output_section) != NULL
   1525       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
   1526 	  || htab->params->non_overlay_stubs))
   1527     {
   1528       return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
   1529 			 h->root.u.def.value, sym_sec);
   1530     }
   1531 
   1532   return true;
   1533 }
   1534 
/* Size (BUILD false) or build (BUILD true) overlay stubs: walk every
   reloc of every section of every SPU input bfd, asking
   needs_ovl_stub about each, then either count_stub or build_stub it.
   Returns false on error.  */

static bool
process_stubs (struct bfd_link_info *info, bool build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* Error exits free whatever we own at this point;
		     relocs cached by the section data are kept.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (symtab_hdr->contents != (unsigned char *) local_syms)
		    free (local_syms);
		  return false;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily create the per-overlay stub counters.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return true;
}
   1661 
/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass: count the stubs needed.  */
  if (!process_stubs (info, false))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* Create one .stub section for the non-overlay area (index 0)
	 and one per overlay, sized from the counts just gathered.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  /* Table of entry points, filled in later.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
   1777 
   1778 /* Called from ld to place overlay manager data sections.  This is done
   1779    after the overlay manager itself is loaded, mainly so that the
   1780    linker's htab->init section is placed after any other .ovl.init
   1781    sections.  */
   1782 
   1783 void
   1784 spu_elf_place_overlay_data (struct bfd_link_info *info)
   1785 {
   1786   struct spu_link_hash_table *htab = spu_hash_table (info);
   1787   unsigned int i;
   1788 
   1789   if (htab->stub_sec != NULL)
   1790     {
   1791       (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
   1792 
   1793       for (i = 0; i < htab->num_overlays; ++i)
   1794 	{
   1795 	  asection *osec = htab->ovl_sec[i];
   1796 	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
   1797 	  (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
   1798 	}
   1799     }
   1800 
   1801   if (htab->params->ovly_flavour == ovly_soft_icache)
   1802     (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
   1803 
   1804   if (htab->ovtab != NULL)
   1805     {
   1806       const char *ovout = ".data";
   1807       if (htab->params->ovly_flavour == ovly_soft_icache)
   1808 	ovout = ".bss";
   1809       (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
   1810     }
   1811 
   1812   if (htab->toe != NULL)
   1813     (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
   1814 }
   1815 
   1816 /* Functions to handle embedded spu_ovl.o object.  */
   1817 
/* bfd_openr_iovec "open" hook for the built-in overlay manager: the
   stream cookie passed in IS the open handle, so just return it.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
   1823 
   1824 static file_ptr
   1825 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
   1826 	       void *stream,
   1827 	       void *buf,
   1828 	       file_ptr nbytes,
   1829 	       file_ptr offset)
   1830 {
   1831   struct _ovl_stream *os;
   1832   size_t count;
   1833   size_t max;
   1834 
   1835   os = (struct _ovl_stream *) stream;
   1836   max = (const char *) os->end - (const char *) os->start;
   1837 
   1838   if ((ufile_ptr) offset >= max)
   1839     return 0;
   1840 
   1841   count = nbytes;
   1842   if (count > max - offset)
   1843     count = max - offset;
   1844 
   1845   memcpy (buf, (const char *) os->start + offset, count);
   1846   return count;
   1847 }
   1848 
   1849 static int
   1850 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
   1851 	      void *stream,
   1852 	      struct stat *sb)
   1853 {
   1854   struct _ovl_stream *os = (struct _ovl_stream *) stream;
   1855 
   1856   memset (sb, 0, sizeof (*sb));
   1857   sb->st_size = (const char *) os->end - (const char *) os->start;
   1858   return 0;
   1859 }
   1860 
/* Open the built-in overlay manager object embedded in STREAM as a
   BFD via the iovec hooks above, storing the result in *OVL_BFD.
   Returns true on success.  */

bool
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,
			      ovl_mgr_stat);
  return *ovl_bfd != NULL;
}
   1873 
   1874 static unsigned int
   1875 overlay_index (asection *sec)
   1876 {
   1877   if (sec == NULL
   1878       || sec->output_section == bfd_abs_section_ptr)
   1879     return 0;
   1880   return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
   1881 }
   1882 
   1883 /* Define an STT_OBJECT symbol.  */
   1884 
   1885 static struct elf_link_hash_entry *
   1886 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
   1887 {
   1888   struct elf_link_hash_entry *h;
   1889 
   1890   h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
   1891   if (h == NULL)
   1892     return NULL;
   1893 
   1894   if (h->root.type != bfd_link_hash_defined
   1895       || !h->def_regular)
   1896     {
   1897       h->root.type = bfd_link_hash_defined;
   1898       h->root.u.def.section = htab->ovtab;
   1899       h->type = STT_OBJECT;
   1900       h->ref_regular = 1;
   1901       h->def_regular = 1;
   1902       h->ref_regular_nonweak = 1;
   1903       h->non_elf = 0;
   1904     }
   1905   else if (h->root.u.def.section->owner != NULL)
   1906     {
   1907       /* xgettext:c-format */
   1908       _bfd_error_handler (_("%pB is not allowed to define %s"),
   1909 			  h->root.u.def.section->owner,
   1910 			  h->root.root.string);
   1911       bfd_set_error (bfd_error_bad_value);
   1912       return NULL;
   1913     }
   1914   else
   1915     {
   1916       _bfd_error_handler (_("you are not allowed to define %s in a script"),
   1917 			  h->root.root.string);
   1918       bfd_set_error (bfd_error_bad_value);
   1919       return NULL;
   1920     }
   1921 
   1922   return h;
   1923 }
   1924 
   1925 /* Fill in all stubs and the overlay tables.  */
   1926 
/* Fill in all stubs and the overlay tables.  Returns false (with
   bfd_error set) on any consistency failure or allocation failure.
   Steps: sanity-check the overlay entry symbols, build the stub
   section contents, then emit either the soft-icache symbol set or
   the classic _ovly_table/_ovly_buf_table, and finally _EAR_.  */

static bool
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The overlay manager entry symbols must not themselves be
	 placed in an overlay section.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  _bfd_error_handler (_("%s in overlay section"),
				      h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return false;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate stub contents; stash the sized value in rawsize and
	 reset size to 0 so process_stubs can re-accumulate it while
	 writing, then verify the two agree below.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return false;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, true);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  _bfd_error_handler (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}

      /* The build pass must reproduce exactly the sizes computed in
	 the sizing pass.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      _bfd_error_handler  (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return true;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return false;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Lay out the soft-icache arrays inside .ovtab, defining a
	 symbol for each array and an absolute symbol for each size
	 and geometry constant the runtime needs.  OFF tracks the
	 running offset within .ovtab.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return false;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return false;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return false;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return false;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return false;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      /* Each _ovly_table entry is 16 bytes:
		 vma, size (rounded up to 16), file_off, buf.  */
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* Embedded-address record marker in the .toe section.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return false;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return true;
}
   2169 
   2170 /* Check that all loadable section VMAs lie in the range
   2171    LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
   2172 
   2173 asection *
   2174 spu_elf_check_vma (struct bfd_link_info *info)
   2175 {
   2176   struct elf_segment_map *m;
   2177   unsigned int i;
   2178   struct spu_link_hash_table *htab = spu_hash_table (info);
   2179   bfd *abfd = info->output_bfd;
   2180   bfd_vma hi = htab->params->local_store_hi;
   2181   bfd_vma lo = htab->params->local_store_lo;
   2182 
   2183   htab->local_store = hi + 1 - lo;
   2184 
   2185   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
   2186     if (m->p_type == PT_LOAD)
   2187       for (i = 0; i < m->count; i++)
   2188 	if (m->sections[i]->size != 0
   2189 	    && (m->sections[i]->vma < lo
   2190 		|| m->sections[i]->vma > hi
   2191 		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
   2192 	  return m->sections[i];
   2193 
   2194   return NULL;
   2195 }
   2196 
   2197 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
   2198    Search for stack adjusting insns, and return the sp delta.
   2199    If a store of lr is found save the instruction offset to *LR_STORE.
   2200    If a stack adjusting instruction is found, save that offset to
   2201    *SP_ADJUST.  */
   2202 
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.
   This works by abstract-interpreting the prologue: REG models the
   128 SPU registers (one 32-bit value each, initially zero) and each
   recognized instruction updates the model until sp (register 1) is
   decremented, a branch is reached, or the section ends.  Returns 0
   if no stack adjustment was found.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  int32_t reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      uint32_t imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Target and first source register fields, common to the
	 instruction forms decoded below.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp change means an epilogue, not a
		 prologue adjustment.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes rb - ra.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load family: track the loaded constant.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR halfword lower: completes an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Form-select mask: expand each immediate bit to a byte.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* AND byte immediate: replicate the byte across the word.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
   2338 
   2339 /* qsort predicate to sort symbols by section and value.  */
   2340 
/* File-scope state for the sort_syms comparator: qsort gives the
   comparator no user data, so the symbol array base and the parallel
   per-symbol section array are communicated through these.  They must
   be set before qsort is invoked with sort_syms.  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

/* qsort comparator ordering symbol POINTERS (into sort_syms_syms) by:
   section index, then value, then descending size (so a larger
   enclosing symbol sorts before contained ones), with the pointer
   itself as the final tie-break to keep the sort deterministic.  */
static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  /* Recover each symbol's section via its index in the base array.  */
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  /* Note the operands are swapped: bigger size sorts earlier.  */
  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
   2368 
   2369 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   2370    entries for section SEC.  */
   2371 
   2372 static struct spu_elf_stack_info *
   2373 alloc_stack_info (asection *sec, int max_fun)
   2374 {
   2375   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   2376   bfd_size_type amt;
   2377 
   2378   amt = sizeof (struct spu_elf_stack_info);
   2379   amt += (max_fun - 1) * sizeof (struct function_info);
   2380   sec_data->u.i.stack_info = bfd_zmalloc (amt);
   2381   if (sec_data->u.i.stack_info != NULL)
   2382     sec_data->u.i.stack_info->max_fun = max_fun;
   2383   return sec_data->u.i.stack_info;
   2384 }
   2385 
   2386 /* Add a new struct function_info describing a (part of a) function
   2387    starting at SYM_H.  Keep the array sorted by address.  */
   2388 
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.
   SYM_H is an Elf_Internal_Sym * when GLOBAL is false, otherwise an
   elf_link_hash_entry *.  IS_FUNC marks symbols typed as functions.
   Returns the (possibly pre-existing) entry, or NULL on allocation
   failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bool global,
		       bool is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract the symbol's offset and size from whichever form
     SYM_H takes.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = true;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = true;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  if (sinfo->num_fun >= sinfo->max_fun)
    {
      /* Grow the array by 50% plus a little.  On realloc failure the
	 old block stays valid and referenced via sec_data, so nothing
	 leaks; we just report the error with NULL.  */
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up to keep the array sorted, then fill in
     the new entry at position i+1.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
   2480 
   2481 /* Return the name of FUN.  */
   2482 
   2483 static const char *
   2484 func_name (struct function_info *fun)
   2485 {
   2486   asection *sec;
   2487   bfd *ibfd;
   2488   Elf_Internal_Shdr *symtab_hdr;
   2489 
   2490   while (fun->start != NULL)
   2491     fun = fun->start;
   2492 
   2493   if (fun->global)
   2494     return fun->u.h->root.root.string;
   2495 
   2496   sec = fun->sec;
   2497   if (fun->u.sym->st_name == 0)
   2498     {
   2499       size_t len = strlen (sec->name);
   2500       char *name = bfd_malloc (len + 10);
   2501       if (name == NULL)
   2502 	return "(null)";
   2503       sprintf (name, "%s+%lx", sec->name,
   2504 	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
   2505       return name;
   2506     }
   2507   ibfd = sec->owner;
   2508   symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
   2509   return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
   2510 }
   2511 
   2512 /* Read the instruction at OFF in SEC.  Return true iff the instruction
   2513    is a nop, lnop, or stop 0 (all zero insn).  */
   2514 
   2515 static bool
   2516 is_nop (asection *sec, bfd_vma off)
   2517 {
   2518   unsigned char insn[4];
   2519 
   2520   if (off + 4 > sec->size
   2521       || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
   2522     return false;
   2523   if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
   2524     return true;
   2525   if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
   2526     return true;
   2527   return false;
   2528 }
   2529 
   2530 /* Extend the range of FUN to cover nop padding up to LIMIT.
   2531    Return TRUE iff some instruction other than a NOP was found.  */
   2532 
   2533 static bool
   2534 insns_at_end (struct function_info *fun, bfd_vma limit)
   2535 {
   2536   bfd_vma off = (fun->hi + 3) & -4;
   2537 
   2538   while (off < limit && is_nop (fun->sec, off))
   2539     off += 4;
   2540   if (off < limit)
   2541     {
   2542       fun->hi = off;
   2543       return true;
   2544     }
   2545   fun->hi = limit;
   2546   return false;
   2547 }
   2548 
   2549 /* Check and fix overlapping function ranges.  Return TRUE iff there
   2550    are gaps in the current info we have about functions in SEC.  */
   2551 
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.
   Overlaps are repaired by truncating the earlier function; a range
   extending past the section is clipped to the section size.  Both
   cases emit a warning via the linker callbacks.  */

static bool
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bool gaps = false;

  if (sinfo == NULL)
    return false;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	/* xgettext:c-format */
	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = true;

  /* Also check the section's leading and trailing edges.  */
  if (sinfo->num_fun == 0)
    gaps = true;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = true;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = true;
    }
  return gaps;
}
   2595 
   2596 /* Search current function info for a function that contains address
   2597    OFFSET in section SEC.  */
   2598 
   2599 static struct function_info *
   2600 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
   2601 {
   2602   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   2603   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
   2604   int lo, hi, mid;
   2605 
   2606   lo = 0;
   2607   hi = sinfo->num_fun;
   2608   while (lo < hi)
   2609     {
   2610       mid = (lo + hi) / 2;
   2611       if (offset < sinfo->fun[mid].lo)
   2612 	hi = mid;
   2613       else if (offset >= sinfo->fun[mid].hi)
   2614 	lo = mid + 1;
   2615       else
   2616 	return &sinfo->fun[mid];
   2617     }
   2618   /* xgettext:c-format */
   2619   info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
   2620 			  sec, offset);
   2621   bfd_set_error (bfd_error_bad_value);
   2622   return NULL;
   2623 }
   2624 
   2625 /* Add CALLEE to CALLER call list if not already present.  Return TRUE
   2626    if CALLEE was new.  If this function return FALSE, CALLEE should
   2627    be freed.  */
   2628 
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function return FALSE, CALLEE should
   be freed.
   When CALLEE's target already appears in the list, the existing
   entry absorbs CALLEE's call count and is moved to the list head
   (most-recently-seen first).  */

static bool
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A real (non-tail) call means the target is a genuine
	       function entry, not a continuation of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = true;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return false;
      }
  callee->next = caller->call_list;
  caller->call_list = callee;
  return true;
}
   2656 
   2657 /* Copy CALL and insert the copy into CALLER.  */
   2658 
   2659 static bool
   2660 copy_callee (struct function_info *caller, const struct call_info *call)
   2661 {
   2662   struct call_info *callee;
   2663   callee = bfd_malloc (sizeof (*callee));
   2664   if (callee == NULL)
   2665     return false;
   2666   *callee = *call;
   2667   if (!insert_callee (caller, callee))
   2668     free (callee);
   2669   return true;
   2670 }
   2671 
   2672 /* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
   2673    overlay stub sections.  */
   2674 
   2675 static bool
   2676 interesting_section (asection *s)
   2677 {
   2678   return (s->output_section != bfd_abs_section_ptr
   2679 	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
   2680 	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
   2681 	  && s->size != 0);
   2682 }
   2683 
   2684 /* Rummage through the relocs for SEC, looking for function calls.
   2685    If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   2686    mark destination symbols on calls as being functions.  Also
   2687    look at branches, which may be tail calls or go to hot/cold
   2688    section part of same function.  */
   2689 
static bool
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Emit the "call to non-code section" warning at most once.  */
  static bool warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return true;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return false;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bool nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      /* Only REL16/ADDR16 relocs can sit on branch instructions;
	 everything else is treated as a data-style reference.  */
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return false;

      /* Ignore relocs against absolute or discarded symbols.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = false;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  /* Fetch the instruction under the reloc to see whether it
	     really is a branch, and if so what kind.  */
	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return false;
	  if (is_branch (insn))
	    {
	      /* 0x31/0x33 opcode bytes are the branch-and-set-link
		 forms, ie. function calls.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the branch-hint priority field from the
		 instruction encoding.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%pB(%pA+0x%v): call to non-code section"
			 " %pB(%pA), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = true;
		  continue;
		}
	    }
	  else
	    {
	      /* Not a branch after all; treat like a data reloc,
		 except that branch hints are of no interest.  */
	      nonbranch = true;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Compute the destination address within SYM_SEC.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record that the destination is (the
	     start of, or part of) a function.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* A non-zero addend means the branch target isn't the
		 symbol itself, so fake up a symbol at the target.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return false;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, false, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, true, is_call);
	  if (fun == NULL)
	    return false;
	  /* Free the fake symbol unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

	  /* Second pass: build the call graph edge CALLER -> CALLEE.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return false;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return false;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return false;
      callee->is_tail = !is_call;
      callee->is_pasted = false;
      callee->broken_cycle = false;
      callee->priority = priority;
      /* Function-pointer references don't count as calls.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = true;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = true;
		}
	    }
	}
    }

  return true;
}
   2900 
   2901 /* Handle something like .init or .fini, which has a piece of a function.
   2902    These sections are pasted together to form a single function.  */
   2903 
static bool
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Create a fake symbol covering the whole section so the section
     itself can be treated as one function fragment.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return false;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, false, false);
  if (!fun)
    return false;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record a fake "pasted" tail call from the preceding
		 fragment to this one so the pieces stay together.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return false;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = true;
	      callee->is_pasted = true;
	      callee->broken_cycle = false;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return true;
	    }
	  break;
	}
      /* Remember the last function seen in sections placed before SEC
	 in the output section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return true;
}
   2960 
   2961 /* Map address ranges in code sections to functions.  */
   2962 
static bool
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bool gaps = false;

  /* Count input bfds so we can size the per-bfd work arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return false;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return false;

  /* Phase 1: for each SPU input, read symbols, select the interesting
     ones, and install properly typed function symbols.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all; any interesting section here is by
	     definition not fully covered by function symbols.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = true;
		  break;
		}
	  continue;
	}

      /* Don't use cached symbols since the generic ELF linker
	 code only reads local symbols, and we need globals too.  */
      free (symtab_hdr->contents);
      symtab_hdr->contents = NULL;
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return false;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return false;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return false;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so later passes can walk without a count.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of symbols belonging to the same section,
	     and size the section's stack_info array accordingly.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return false;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, false, true))
		return false;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, false))
	      return false;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check ranges; reloc marking above may have filled
	     some of the gaps.  */
	  gaps = false;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, false, false))
		    return false;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's hi bound is
		       the next function's lo bound.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return false;
	      }
	}
    }

  /* Release the per-bfd work arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return true;
}
   3200 
   3201 /* Iterate over all function_info we have collected, calling DOIT on
   3202    each node if ROOT_ONLY is false.  Only call DOIT on root nodes
   3203    if ROOT_ONLY.  */
   3204 
   3205 static bool
   3206 for_each_node (bool (*doit) (struct function_info *,
   3207 			     struct bfd_link_info *,
   3208 			     void *),
   3209 	       struct bfd_link_info *info,
   3210 	       void *param,
   3211 	       int root_only)
   3212 {
   3213   bfd *ibfd;
   3214 
   3215   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3216     {
   3217       extern const bfd_target spu_elf32_vec;
   3218       asection *sec;
   3219 
   3220       if (ibfd->xvec != &spu_elf32_vec)
   3221 	continue;
   3222 
   3223       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3224 	{
   3225 	  struct _spu_elf_section_data *sec_data;
   3226 	  struct spu_elf_stack_info *sinfo;
   3227 
   3228 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
   3229 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
   3230 	    {
   3231 	      int i;
   3232 	      for (i = 0; i < sinfo->num_fun; ++i)
   3233 		if (!root_only || !sinfo->fun[i].non_root)
   3234 		  if (!doit (&sinfo->fun[i], info, param))
   3235 		    return false;
   3236 	    }
   3237 	}
   3238     }
   3239   return true;
   3240 }
   3241 
   3242 /* Transfer call info attached to struct function_info entries for
   3243    all of a given function's sections to the first entry.  */
   3244 
   3245 static bool
   3246 transfer_calls (struct function_info *fun,
   3247 		struct bfd_link_info *info ATTRIBUTE_UNUSED,
   3248 		void *param ATTRIBUTE_UNUSED)
   3249 {
   3250   struct function_info *start = fun->start;
   3251 
   3252   if (start != NULL)
   3253     {
   3254       struct call_info *call, *call_next;
   3255 
   3256       while (start->start != NULL)
   3257 	start = start->start;
   3258       for (call = fun->call_list; call != NULL; call = call_next)
   3259 	{
   3260 	  call_next = call->next;
   3261 	  if (!insert_callee (start, call))
   3262 	    free (call);
   3263 	}
   3264       fun->call_list = NULL;
   3265     }
   3266   return true;
   3267 }
   3268 
   3269 /* Mark nodes in the call graph that are called by some other node.  */
   3270 
   3271 static bool
   3272 mark_non_root (struct function_info *fun,
   3273 	       struct bfd_link_info *info ATTRIBUTE_UNUSED,
   3274 	       void *param ATTRIBUTE_UNUSED)
   3275 {
   3276   struct call_info *call;
   3277 
   3278   if (fun->visit1)
   3279     return true;
   3280   fun->visit1 = true;
   3281   for (call = fun->call_list; call; call = call->next)
   3282     {
   3283       call->fun->non_root = true;
   3284       mark_non_root (call->fun, 0, 0);
   3285     }
   3286   return true;
   3287 }
   3288 
   3289 /* Remove cycles from the call graph.  Set depth of nodes.  */
   3290 
static bool
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  /* visit2 marks nodes ever reached; marking flags nodes currently
     on the DFS stack, used below to detect back edges.  */
  fun->visit2 = true;
  fun->marking = true;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted fragments belong to the same function, so they don't
	 deepen the call chain.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return false;
	  /* Propagate the deepest chain found back up via PARAM.  */
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge to a node on the current DFS stack: a cycle.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = true;
	}
      callp = &call->next;
    }
  fun->marking = false;
  *(unsigned int *) param = max_depth;
  return true;
}
   3339 
   3340 /* Check that we actually visited all nodes in remove_cycles.  If we
   3341    didn't, then there is some cycle in the call graph not attached to
   3342    any root node.  Arbitrarily choose a node in the cycle as a new
   3343    root and break the cycle.  */
   3344 
   3345 static bool
   3346 mark_detached_root (struct function_info *fun,
   3347 		    struct bfd_link_info *info,
   3348 		    void *param)
   3349 {
   3350   if (fun->visit2)
   3351     return true;
   3352   fun->non_root = false;
   3353   *(unsigned int *) param = 0;
   3354   return remove_cycles (fun, info, param);
   3355 }
   3356 
   3357 /* Populate call_list for each function.  */
   3358 
   3359 static bool
   3360 build_call_tree (struct bfd_link_info *info)
   3361 {
   3362   bfd *ibfd;
   3363   unsigned int depth;
   3364 
   3365   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3366     {
   3367       extern const bfd_target spu_elf32_vec;
   3368       asection *sec;
   3369 
   3370       if (ibfd->xvec != &spu_elf32_vec)
   3371 	continue;
   3372 
   3373       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3374 	if (!mark_functions_via_relocs (sec, info, true))
   3375 	  return false;
   3376     }
   3377 
   3378   /* Transfer call info from hot/cold section part of function
   3379      to main entry.  */
   3380   if (!spu_hash_table (info)->params->auto_overlay
   3381       && !for_each_node (transfer_calls, info, 0, false))
   3382     return false;
   3383 
   3384   /* Find the call graph root(s).  */
   3385   if (!for_each_node (mark_non_root, info, 0, false))
   3386     return false;
   3387 
   3388   /* Remove cycles from the call graph.  We start from the root node(s)
   3389      so that we break cycles in a reasonable place.  */
   3390   depth = 0;
   3391   if (!for_each_node (remove_cycles, info, &depth, true))
   3392     return false;
   3393 
   3394   return for_each_node (mark_detached_root, info, &depth, false);
   3395 }
   3396 
   3397 /* qsort predicate to sort calls by priority, max_depth then count.  */
   3398 
static int
sort_calls (const void *a, const void *b)
{
  struct call_info *const *c1 = a;
  struct call_info *const *c2 = b;
  int delta;

  /* Higher priority first.  */
  delta = (*c2)->priority - (*c1)->priority;
  if (delta != 0)
    return delta;

  /* Then deeper call chains first.  */
  delta = (*c2)->max_depth - (*c1)->max_depth;
  if (delta != 0)
    return delta;

  /* Then higher call counts first.  */
  delta = (*c2)->count - (*c1)->count;
  if (delta != 0)
    return delta;

  /* Tie-break on the elements' positions within the array being
     sorted, making qsort behave stably.  NOTE(review): the pointer
     difference is truncated to int, but both pointers lie within one
     small array, so the sign is preserved.  */
  return (char *) c1 - (char *) c2;
}
   3420 
struct _mos_param {
  /* Largest size in bytes of any single function (text plus any
     associated rodata) marked for overlaying by
     mark_overlay_section.  */
  unsigned int max_overlay_size;
};
   3424 
   3425 /* Set linker_mark and gc_mark on any sections that we will put in
   3426    overlays.  These flags are used by the generic ELF linker, but we
   3427    won't be continuing on to bfd_elf_final_link so it is OK to use
   3428    them.  linker_mark is clear before we get here.  Set segment_mark
   3429    on sections that are part of a pasted function (excluding the last
   3430    section).
   3431 
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections, since
   those may be shared by several functions rather than belonging to
   any single one.
   3434 
   3435    Sort the call graph so that the deepest nodes will be visited
   3436    first.  */
   3437 
static bool
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return true;

  fun->visit4 = true;
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || startswith (fun->sec->name, ".text.ia.")
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return false;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (startswith (fun->sec->name, ".text."))
	    {
	      /* ".text.FOO" -> ".rodata.FOO": swap the 5-char ".text"
		 prefix for the 7-char ".rodata" one (hence len + 3,
		 covering the extra 2 chars plus the NUL; len - 4 copies
		 ".FOO" and the terminator).  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return false;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (startswith (fun->sec->name, ".gnu.linkonce.t."))
	    {
	      /* ".gnu.linkonce.t.FOO" -> ".gnu.linkonce.r.FOO";
		 index 14 is the 't'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return false;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      /* For group sections, look for the rodata inside the
		 same group; otherwise search the whole bfd.  */
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if text+rodata would exceed
		     the soft-icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      /* Track the largest overlay candidate seen.  */
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list (priority, depth, count) so the deepest
     chains are visited first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return false;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return false;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || startswith (fun->sec->output_section->name, ".ovl.init"))
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return true;
}
   3591 
   3592 /* If non-zero then unmark functions called from those within sections
   3593    that we need to unmark.  Unfortunately this isn't reliable since the
   3594    call graph cannot know the destination of function pointer calls.  */
   3595 #define RECURSE_UNMARK 0
   3596 
struct _uos_param {
  /* Input section whose functions must not be placed in overlays.  */
  asection *exclude_input_section;
  /* Output section whose contents must not be placed in overlays.  */
  asection *exclude_output_section;
  /* Non-zero while within the call subtree of an excluded function;
     only consulted when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
   3602 
   3603 /* Undo some of mark_overlay_section's work.  */
   3604 
   3605 static bool
   3606 unmark_overlay_section (struct function_info *fun,
   3607 			struct bfd_link_info *info,
   3608 			void *param)
   3609 {
   3610   struct call_info *call;
   3611   struct _uos_param *uos_param = param;
   3612   unsigned int excluded = 0;
   3613 
   3614   if (fun->visit5)
   3615     return true;
   3616 
   3617   fun->visit5 = true;
   3618 
   3619   excluded = 0;
   3620   if (fun->sec == uos_param->exclude_input_section
   3621       || fun->sec->output_section == uos_param->exclude_output_section)
   3622     excluded = 1;
   3623 
   3624   if (RECURSE_UNMARK)
   3625     uos_param->clearing += excluded;
   3626 
   3627   if (RECURSE_UNMARK ? uos_param->clearing : excluded)
   3628     {
   3629       fun->sec->linker_mark = 0;
   3630       if (fun->rodata)
   3631 	fun->rodata->linker_mark = 0;
   3632     }
   3633 
   3634   for (call = fun->call_list; call != NULL; call = call->next)
   3635     if (!call->broken_cycle
   3636 	&& !unmark_overlay_section (call->fun, info, param))
   3637       return false;
   3638 
   3639   if (RECURSE_UNMARK)
   3640     uos_param->clearing -= excluded;
   3641   return true;
   3642 }
   3643 
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Space available for "library" (non-overlay) code.  */
  unsigned int lib_size;
  /* Output cursor: filled with (text, rodata) section pairs.  */
  asection **lib_sections;
};
   3648 
   3649 /* Add sections we have marked as belonging to overlays to an array
   3650    for consideration as non-overlay sections.  The array consist of
   3651    pairs of sections, (text,rodata), for functions in the call graph.  */
   3652 
   3653 static bool
   3654 collect_lib_sections (struct function_info *fun,
   3655 		      struct bfd_link_info *info,
   3656 		      void *param)
   3657 {
   3658   struct _cl_param *lib_param = param;
   3659   struct call_info *call;
   3660   unsigned int size;
   3661 
   3662   if (fun->visit6)
   3663     return true;
   3664 
   3665   fun->visit6 = true;
   3666   if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
   3667     return true;
   3668 
   3669   size = fun->sec->size;
   3670   if (fun->rodata)
   3671     size += fun->rodata->size;
   3672 
   3673   if (size <= lib_param->lib_size)
   3674     {
   3675       *lib_param->lib_sections++ = fun->sec;
   3676       fun->sec->gc_mark = 0;
   3677       if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
   3678 	{
   3679 	  *lib_param->lib_sections++ = fun->rodata;
   3680 	  fun->rodata->gc_mark = 0;
   3681 	}
   3682       else
   3683 	*lib_param->lib_sections++ = NULL;
   3684     }
   3685 
   3686   for (call = fun->call_list; call != NULL; call = call->next)
   3687     if (!call->broken_cycle)
   3688       collect_lib_sections (call->fun, info, param);
   3689 
   3690   return true;
   3691 }
   3692 
   3693 /* qsort predicate to sort sections by call count.  */
   3694 
   3695 static int
   3696 sort_lib (const void *a, const void *b)
   3697 {
   3698   asection *const *s1 = a;
   3699   asection *const *s2 = b;
   3700   struct _spu_elf_section_data *sec_data;
   3701   struct spu_elf_stack_info *sinfo;
   3702   int delta;
   3703 
   3704   delta = 0;
   3705   if ((sec_data = spu_elf_section_data (*s1)) != NULL
   3706       && (sinfo = sec_data->u.i.stack_info) != NULL)
   3707     {
   3708       int i;
   3709       for (i = 0; i < sinfo->num_fun; ++i)
   3710 	delta -= sinfo->fun[i].call_count;
   3711     }
   3712 
   3713   if ((sec_data = spu_elf_section_data (*s2)) != NULL
   3714       && (sinfo = sec_data->u.i.stack_info) != NULL)
   3715     {
   3716       int i;
   3717       for (i = 0; i < sinfo->num_fun; ++i)
   3718 	delta += sinfo->fun[i].call_count;
   3719     }
   3720 
   3721   if (delta != 0)
   3722     return delta;
   3723 
   3724   return s1 - s2;
   3725 }
   3726 
   3727 /* Remove some sections from those marked to be in overlays.  Choose
   3728    those that are called from many places, likely library functions.  */
   3729 
   3730 static unsigned int
   3731 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
   3732 {
   3733   bfd *ibfd;
   3734   asection **lib_sections;
   3735   unsigned int i, lib_count;
   3736   struct _cl_param collect_lib_param;
   3737   struct function_info dummy_caller;
   3738   struct spu_link_hash_table *htab;
   3739 
   3740   memset (&dummy_caller, 0, sizeof (dummy_caller));
   3741   lib_count = 0;
   3742   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3743     {
   3744       extern const bfd_target spu_elf32_vec;
   3745       asection *sec;
   3746 
   3747       if (ibfd->xvec != &spu_elf32_vec)
   3748 	continue;
   3749 
   3750       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3751 	if (sec->linker_mark
   3752 	    && sec->size < lib_size
   3753 	    && (sec->flags & SEC_CODE) != 0)
   3754 	  lib_count += 1;
   3755     }
   3756   lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
   3757   if (lib_sections == NULL)
   3758     return (unsigned int) -1;
   3759   collect_lib_param.lib_size = lib_size;
   3760   collect_lib_param.lib_sections = lib_sections;
   3761   if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
   3762 		      true))
   3763     return (unsigned int) -1;
   3764   lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
   3765 
   3766   /* Sort sections so that those with the most calls are first.  */
   3767   if (lib_count > 1)
   3768     qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
   3769 
   3770   htab = spu_hash_table (info);
   3771   for (i = 0; i < lib_count; i++)
   3772     {
   3773       unsigned int tmp, stub_size;
   3774       asection *sec;
   3775       struct _spu_elf_section_data *sec_data;
   3776       struct spu_elf_stack_info *sinfo;
   3777 
   3778       sec = lib_sections[2 * i];
   3779       /* If this section is OK, its size must be less than lib_size.  */
   3780       tmp = sec->size;
   3781       /* If it has a rodata section, then add that too.  */
   3782       if (lib_sections[2 * i + 1])
   3783 	tmp += lib_sections[2 * i + 1]->size;
   3784       /* Add any new overlay call stubs needed by the section.  */
   3785       stub_size = 0;
   3786       if (tmp < lib_size
   3787 	  && (sec_data = spu_elf_section_data (sec)) != NULL
   3788 	  && (sinfo = sec_data->u.i.stack_info) != NULL)
   3789 	{
   3790 	  int k;
   3791 	  struct call_info *call;
   3792 
   3793 	  for (k = 0; k < sinfo->num_fun; ++k)
   3794 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
   3795 	      if (call->fun->sec->linker_mark)
   3796 		{
   3797 		  struct call_info *p;
   3798 		  for (p = dummy_caller.call_list; p; p = p->next)
   3799 		    if (p->fun == call->fun)
   3800 		      break;
   3801 		  if (!p)
   3802 		    stub_size += ovl_stub_size (htab->params);
   3803 		}
   3804 	}
   3805       if (tmp + stub_size < lib_size)
   3806 	{
   3807 	  struct call_info **pp, *p;
   3808 
   3809 	  /* This section fits.  Mark it as non-overlay.  */
   3810 	  lib_sections[2 * i]->linker_mark = 0;
   3811 	  if (lib_sections[2 * i + 1])
   3812 	    lib_sections[2 * i + 1]->linker_mark = 0;
   3813 	  lib_size -= tmp + stub_size;
   3814 	  /* Call stubs to the section we just added are no longer
   3815 	     needed.  */
   3816 	  pp = &dummy_caller.call_list;
   3817 	  while ((p = *pp) != NULL)
   3818 	    if (!p->fun->sec->linker_mark)
   3819 	      {
   3820 		lib_size += ovl_stub_size (htab->params);
   3821 		*pp = p->next;
   3822 		free (p);
   3823 	      }
   3824 	    else
   3825 	      pp = &p->next;
   3826 	  /* Add new call stubs to dummy_caller.  */
   3827 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
   3828 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
   3829 	    {
   3830 	      int k;
   3831 	      struct call_info *call;
   3832 
   3833 	      for (k = 0; k < sinfo->num_fun; ++k)
   3834 		for (call = sinfo->fun[k].call_list;
   3835 		     call;
   3836 		     call = call->next)
   3837 		  if (call->fun->sec->linker_mark)
   3838 		    {
   3839 		      struct call_info *callee;
   3840 		      callee = bfd_malloc (sizeof (*callee));
   3841 		      if (callee == NULL)
   3842 			return (unsigned int) -1;
   3843 		      *callee = *call;
   3844 		      if (!insert_callee (&dummy_caller, callee))
   3845 			free (callee);
   3846 		    }
   3847 	    }
   3848 	}
   3849     }
   3850   while (dummy_caller.call_list != NULL)
   3851     {
   3852       struct call_info *call = dummy_caller.call_list;
   3853       dummy_caller.call_list = call->next;
   3854       free (call);
   3855     }
   3856   for (i = 0; i < 2 * lib_count; i++)
   3857     if (lib_sections[i])
   3858       lib_sections[i]->gc_mark = 1;
   3859   free (lib_sections);
   3860   return lib_size;
   3861 }
   3862 
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bool
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bool added_fun;
  asection ***ovly_sections = param;

  /* Visit each call graph node only once.  */
  if (fun->visit7)
    return true;

  fun->visit7 = true;
  /* Descend the first non-pasted, non-cycle callee before adding FUN
     itself, so the deepest section lands first in the array.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return false;
	break;
      }

  added_fun = false;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append the (text, rodata) pair for FUN, clearing gc_mark so
	 neither section can be added twice.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = true;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      /* Follow the chain of pasted continuations, clearing
		 gc_mark on each so they are skipped later.  */
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* segment_mark set implies a pasted callee exists.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit everything else FUN calls.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return false;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit the other functions defined in FUN's section, so
	 they are grouped into the same overlay region.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return false;
	}
    }

  return true;
}
   3949 
/* Parameter/result block for sum_stack.  */
struct _sum_stack_param {
  /* Out: cumulative stack requirement of the node just visited.  */
  size_t cum_stack;
  /* Out: maximum cumulative stack over all root functions.  */
  size_t overall_stack;
  /* In: if true, define a __stack_* symbol for each function.  */
  bool emit_stack_syms;
};
   3955 
/* Descend the call graph for FUN, accumulating total stack required.  */

static bool
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bool has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  /* Results are returned through sum_stack_param->cum_stack.  If FUN
     was already visited, fun->stack has already been converted to the
     cumulative figure (see below), so just report it.  */
  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return true;

  /* Find the callee with the largest stack requirement; MAX remembers
     it for the report below.  */
  has_call = false;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = true;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return false;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = true;

  /* Roots of the call graph contribute to the overall maximum.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying, this pass is only gathering sizes; skip
     the reporting and symbol emission below.  */
  if (htab->params->auto_overlay)
    return true;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      /* Report local and cumulative stack usage, and the callees,
	 flagging the one on the maximum-stack path with '*'.  */
      if (!fun->non_root)
	info->callbacks->info ("  %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo ("   %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* Define an absolute symbol __stack_<func> (global symbols) or
	 __stack_<secid>_<func> (locals) whose value is the cumulative
	 stack requirement.  18 = strlen("__stack_") + 8 hex digits
	 + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return false;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      /* Only define the symbol if the user hasn't already.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return true;
}
   4071 
   4072 /* SEC is part of a pasted function.  Return the call_info for the
   4073    next section of this function.  */
   4074 
   4075 static struct call_info *
   4076 find_pasted_call (asection *sec)
   4077 {
   4078   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   4079   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
   4080   struct call_info *call;
   4081   int k;
   4082 
   4083   for (k = 0; k < sinfo->num_fun; ++k)
   4084     for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
   4085       if (call->is_pasted)
   4086 	return call;
   4087   abort ();
   4088   return 0;
   4089 }
   4090 
   4091 /* qsort predicate to sort bfds by file name.  */
   4092 
   4093 static int
   4094 sort_bfds (const void *a, const void *b)
   4095 {
   4096   bfd *const *abfd1 = a;
   4097   bfd *const *abfd2 = b;
   4098 
   4099   return filename_cmp (bfd_get_filename (*abfd1), bfd_get_filename (*abfd2));
   4100 }
   4101 
   4102 static unsigned int
   4103 print_one_overlay_section (FILE *script,
   4104 			   unsigned int base,
   4105 			   unsigned int count,
   4106 			   unsigned int ovlynum,
   4107 			   unsigned int *ovly_map,
   4108 			   asection **ovly_sections,
   4109 			   struct bfd_link_info *info)
   4110 {
   4111   unsigned int j;
   4112 
   4113   for (j = base; j < count && ovly_map[j] == ovlynum; j++)
   4114     {
   4115       asection *sec = ovly_sections[2 * j];
   4116 
   4117       if (fprintf (script, "   %s%c%s (%s)\n",
   4118 		   (sec->owner->my_archive != NULL
   4119 		    ? bfd_get_filename (sec->owner->my_archive) : ""),
   4120 		   info->path_separator,
   4121 		   bfd_get_filename (sec->owner),
   4122 		   sec->name) <= 0)
   4123 	return -1;
   4124       if (sec->segment_mark)
   4125 	{
   4126 	  struct call_info *call = find_pasted_call (sec);
   4127 	  while (call != NULL)
   4128 	    {
   4129 	      struct function_info *call_fun = call->fun;
   4130 	      sec = call_fun->sec;
   4131 	      if (fprintf (script, "   %s%c%s (%s)\n",
   4132 			   (sec->owner->my_archive != NULL
   4133 			    ? bfd_get_filename (sec->owner->my_archive) : ""),
   4134 			   info->path_separator,
   4135 			   bfd_get_filename (sec->owner),
   4136 			   sec->name) <= 0)
   4137 		return -1;
   4138 	      for (call = call_fun->call_list; call; call = call->next)
   4139 		if (call->is_pasted)
   4140 		  break;
   4141 	    }
   4142 	}
   4143     }
   4144 
   4145   for (j = base; j < count && ovly_map[j] == ovlynum; j++)
   4146     {
   4147       asection *sec = ovly_sections[2 * j + 1];
   4148       if (sec != NULL
   4149 	  && fprintf (script, "   %s%c%s (%s)\n",
   4150 		      (sec->owner->my_archive != NULL
   4151 		       ? bfd_get_filename (sec->owner->my_archive) : ""),
   4152 		      info->path_separator,
   4153 		      bfd_get_filename (sec->owner),
   4154 		      sec->name) <= 0)
   4155 	return -1;
   4156 
   4157       sec = ovly_sections[2 * j];
   4158       if (sec->segment_mark)
   4159 	{
   4160 	  struct call_info *call = find_pasted_call (sec);
   4161 	  while (call != NULL)
   4162 	    {
   4163 	      struct function_info *call_fun = call->fun;
   4164 	      sec = call_fun->rodata;
   4165 	      if (sec != NULL
   4166 		  && fprintf (script, "   %s%c%s (%s)\n",
   4167 			      (sec->owner->my_archive != NULL
   4168 			       ? bfd_get_filename (sec->owner->my_archive) : ""),
   4169 			      info->path_separator,
   4170 			      bfd_get_filename (sec->owner),
   4171 			      sec->name) <= 0)
   4172 		return -1;
   4173 	      for (call = call_fun->call_list; call; call = call->next)
   4174 		if (call->is_pasted)
   4175 		  break;
   4176 	    }
   4177 	}
   4178     }
   4179 
   4180   return j;
   4181 }
   4182 
   4183 /* Handle --auto-overlay.  */
   4184 
   4185 static void
   4186 spu_elf_auto_overlay (struct bfd_link_info *info)
   4187 {
   4188   bfd *ibfd;
   4189   bfd **bfd_arr;
   4190   struct elf_segment_map *m;
   4191   unsigned int fixed_size, lo, hi;
   4192   unsigned int reserved;
   4193   struct spu_link_hash_table *htab;
   4194   unsigned int base, i, count, bfd_count;
   4195   unsigned int region, ovlynum;
   4196   asection **ovly_sections, **ovly_p;
   4197   unsigned int *ovly_map;
   4198   FILE *script;
   4199   unsigned int total_overlay_size, overlay_size;
   4200   const char *ovly_mgr_entry;
   4201   struct elf_link_hash_entry *h;
   4202   struct _mos_param mos_param;
   4203   struct _uos_param uos_param;
   4204   struct function_info dummy_caller;
   4205 
   4206   /* Find the extents of our loadable image.  */
   4207   lo = (unsigned int) -1;
   4208   hi = 0;
   4209   for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
   4210     if (m->p_type == PT_LOAD)
   4211       for (i = 0; i < m->count; i++)
   4212 	if (m->sections[i]->size != 0)
   4213 	  {
   4214 	    if (m->sections[i]->vma < lo)
   4215 	      lo = m->sections[i]->vma;
   4216 	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
   4217 	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
   4218 	  }
   4219   fixed_size = hi + 1 - lo;
   4220 
   4221   if (!discover_functions (info))
   4222     goto err_exit;
   4223 
   4224   if (!build_call_tree (info))
   4225     goto err_exit;
   4226 
   4227   htab = spu_hash_table (info);
   4228   reserved = htab->params->auto_overlay_reserved;
   4229   if (reserved == 0)
   4230     {
   4231       struct _sum_stack_param sum_stack_param;
   4232 
   4233       sum_stack_param.emit_stack_syms = 0;
   4234       sum_stack_param.overall_stack = 0;
   4235       if (!for_each_node (sum_stack, info, &sum_stack_param, true))
   4236 	goto err_exit;
   4237       reserved = (sum_stack_param.overall_stack
   4238 		  + htab->params->extra_stack_space);
   4239     }
   4240 
   4241   /* No need for overlays if everything already fits.  */
   4242   if (fixed_size + reserved <= htab->local_store
   4243       && htab->params->ovly_flavour != ovly_soft_icache)
   4244     {
   4245       htab->params->auto_overlay = 0;
   4246       return;
   4247     }
   4248 
   4249   uos_param.exclude_input_section = 0;
   4250   uos_param.exclude_output_section
   4251     = bfd_get_section_by_name (info->output_bfd, ".interrupt");
   4252 
   4253   ovly_mgr_entry = "__ovly_load";
   4254   if (htab->params->ovly_flavour == ovly_soft_icache)
   4255     ovly_mgr_entry = "__icache_br_handler";
   4256   h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
   4257 			    false, false, false);
   4258   if (h != NULL
   4259       && (h->root.type == bfd_link_hash_defined
   4260 	  || h->root.type == bfd_link_hash_defweak)
   4261       && h->def_regular)
   4262     {
   4263       /* We have a user supplied overlay manager.  */
   4264       uos_param.exclude_input_section = h->root.u.def.section;
   4265     }
   4266   else
   4267     {
   4268       /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
   4269 	 builtin version to .text, and will adjust .text size.  */
   4270       fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
   4271     }
   4272 
   4273   /* Mark overlay sections, and find max overlay section size.  */
   4274   mos_param.max_overlay_size = 0;
   4275   if (!for_each_node (mark_overlay_section, info, &mos_param, true))
   4276     goto err_exit;
   4277 
   4278   /* We can't put the overlay manager or interrupt routines in
   4279      overlays.  */
   4280   uos_param.clearing = 0;
   4281   if ((uos_param.exclude_input_section
   4282        || uos_param.exclude_output_section)
   4283       && !for_each_node (unmark_overlay_section, info, &uos_param, true))
   4284     goto err_exit;
   4285 
   4286   bfd_count = 0;
   4287   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   4288     ++bfd_count;
   4289   bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
   4290   if (bfd_arr == NULL)
   4291     goto err_exit;
   4292 
   4293   /* Count overlay sections, and subtract their sizes from "fixed_size".  */
   4294   count = 0;
   4295   bfd_count = 0;
   4296   total_overlay_size = 0;
   4297   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   4298     {
   4299       extern const bfd_target spu_elf32_vec;
   4300       asection *sec;
   4301       unsigned int old_count;
   4302 
   4303       if (ibfd->xvec != &spu_elf32_vec)
   4304 	continue;
   4305 
   4306       old_count = count;
   4307       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   4308 	if (sec->linker_mark)
   4309 	  {
   4310 	    if ((sec->flags & SEC_CODE) != 0)
   4311 	      count += 1;
   4312 	    fixed_size -= sec->size;
   4313 	    total_overlay_size += sec->size;
   4314 	  }
   4315 	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
   4316 		 && sec->output_section->owner == info->output_bfd
   4317 		 && startswith (sec->output_section->name, ".ovl.init"))
   4318 	  fixed_size -= sec->size;
   4319       if (count != old_count)
   4320 	bfd_arr[bfd_count++] = ibfd;
   4321     }
   4322 
   4323   /* Since the overlay link script selects sections by file name and
   4324      section name, ensure that file names are unique.  */
   4325   if (bfd_count > 1)
   4326     {
   4327       bool ok = true;
   4328 
   4329       qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
   4330       for (i = 1; i < bfd_count; ++i)
   4331 	if (filename_cmp (bfd_get_filename (bfd_arr[i - 1]),
   4332 			  bfd_get_filename (bfd_arr[i])) == 0)
   4333 	  {
   4334 	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
   4335 	      {
   4336 		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
   4337 		  /* xgettext:c-format */
   4338 		  info->callbacks->einfo (_("%s duplicated in %s\n"),
   4339 					  bfd_get_filename (bfd_arr[i]),
   4340 					  bfd_get_filename (bfd_arr[i]->my_archive));
   4341 		else
   4342 		  info->callbacks->einfo (_("%s duplicated\n"),
   4343 					  bfd_get_filename (bfd_arr[i]));
   4344 		ok = false;
   4345 	      }
   4346 	  }
   4347       if (!ok)
   4348 	{
   4349 	  info->callbacks->einfo (_("sorry, no support for duplicate "
   4350 				    "object files in auto-overlay script\n"));
   4351 	  bfd_set_error (bfd_error_bad_value);
   4352 	  goto err_exit;
   4353 	}
   4354     }
   4355   free (bfd_arr);
   4356 
   4357   fixed_size += reserved;
   4358   fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
   4359   if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
   4360     {
   4361       if (htab->params->ovly_flavour == ovly_soft_icache)
   4362 	{
   4363 	  /* Stubs in the non-icache area are bigger.  */
   4364 	  fixed_size += htab->non_ovly_stub * 16;
   4365 	  /* Space for icache manager tables.
   4366 	     a) Tag array, one quadword per cache line.
   4367 	     - word 0: ia address of present line, init to zero.  */
   4368 	  fixed_size += 16 << htab->num_lines_log2;
   4369 	  /* b) Rewrite "to" list, one quadword per cache line.  */
   4370 	  fixed_size += 16 << htab->num_lines_log2;
   4371 	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
   4372 		to a power-of-two number of full quadwords) per cache line.  */
   4373 	  fixed_size += 16 << (htab->fromelem_size_log2
   4374 			       + htab->num_lines_log2);
   4375 	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
   4376 	  fixed_size += 16;
   4377 	}
   4378       else
   4379 	{
   4380 	  /* Guess number of overlays.  Assuming overlay buffer is on
   4381 	     average only half full should be conservative.  */
   4382 	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
   4383 		     / (htab->local_store - fixed_size));
   4384 	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
   4385 	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
   4386 	}
   4387     }
   4388 
   4389   if (fixed_size + mos_param.max_overlay_size > htab->local_store)
   4390     /* xgettext:c-format */
   4391     info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
   4392 			      "size of 0x%v exceeds local store\n"),
   4393 			    (bfd_vma) fixed_size,
   4394 			    (bfd_vma) mos_param.max_overlay_size);
   4395 
   4396   /* Now see if we should put some functions in the non-overlay area.  */
   4397   else if (fixed_size < htab->params->auto_overlay_fixed)
   4398     {
   4399       unsigned int max_fixed, lib_size;
   4400 
   4401       max_fixed = htab->local_store - mos_param.max_overlay_size;
   4402       if (max_fixed > htab->params->auto_overlay_fixed)
   4403 	max_fixed = htab->params->auto_overlay_fixed;
   4404       lib_size = max_fixed - fixed_size;
   4405       lib_size = auto_ovl_lib_functions (info, lib_size);
   4406       if (lib_size == (unsigned int) -1)
   4407 	goto err_exit;
   4408       fixed_size = max_fixed - lib_size;
   4409     }
   4410 
   4411   /* Build an array of sections, suitably sorted to place into
   4412      overlays.  */
   4413   ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
   4414   if (ovly_sections == NULL)
   4415     goto err_exit;
   4416   ovly_p = ovly_sections;
   4417   if (!for_each_node (collect_overlays, info, &ovly_p, true))
   4418     goto err_exit;
   4419   count = (size_t) (ovly_p - ovly_sections) / 2;
   4420   ovly_map = bfd_malloc (count * sizeof (*ovly_map));
   4421   if (ovly_map == NULL)
   4422     goto err_exit;
   4423 
   4424   memset (&dummy_caller, 0, sizeof (dummy_caller));
   4425   overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
   4426   if (htab->params->line_size != 0)
   4427     overlay_size = htab->params->line_size;
   4428   base = 0;
   4429   ovlynum = 0;
   4430   while (base < count)
   4431     {
   4432       unsigned int size = 0, rosize = 0, roalign = 0;
   4433 
   4434       for (i = base; i < count; i++)
   4435 	{
   4436 	  asection *sec, *rosec;
   4437 	  unsigned int tmp, rotmp;
   4438 	  unsigned int num_stubs;
   4439 	  struct call_info *call, *pasty;
   4440 	  struct _spu_elf_section_data *sec_data;
   4441 	  struct spu_elf_stack_info *sinfo;
   4442 	  unsigned int k;
   4443 
   4444 	  /* See whether we can add this section to the current
   4445 	     overlay without overflowing our overlay buffer.  */
   4446 	  sec = ovly_sections[2 * i];
   4447 	  tmp = align_power (size, sec->alignment_power) + sec->size;
   4448 	  rotmp = rosize;
   4449 	  rosec = ovly_sections[2 * i + 1];
   4450 	  if (rosec != NULL)
   4451 	    {
   4452 	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
   4453 	      if (roalign < rosec->alignment_power)
   4454 		roalign = rosec->alignment_power;
   4455 	    }
   4456 	  if (align_power (tmp, roalign) + rotmp > overlay_size)
   4457 	    break;
   4458 	  if (sec->segment_mark)
   4459 	    {
   4460 	      /* Pasted sections must stay together, so add their
   4461 		 sizes too.  */
   4462 	      pasty = find_pasted_call (sec);
   4463 	      while (pasty != NULL)
   4464 		{
   4465 		  struct function_info *call_fun = pasty->fun;
   4466 		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
   4467 			 + call_fun->sec->size);
   4468 		  if (call_fun->rodata)
   4469 		    {
   4470 		      rotmp = (align_power (rotmp,
   4471 					    call_fun->rodata->alignment_power)
   4472 			       + call_fun->rodata->size);
   4473 		      if (roalign < rosec->alignment_power)
   4474 			roalign = rosec->alignment_power;
   4475 		    }
   4476 		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
   4477 		    if (pasty->is_pasted)
   4478 		      break;
   4479 		}
   4480 	    }
   4481 	  if (align_power (tmp, roalign) + rotmp > overlay_size)
   4482 	    break;
   4483 
   4484 	  /* If we add this section, we might need new overlay call
   4485 	     stubs.  Add any overlay section calls to dummy_call.  */
   4486 	  pasty = NULL;
   4487 	  sec_data = spu_elf_section_data (sec);
   4488 	  sinfo = sec_data->u.i.stack_info;
   4489 	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
   4490 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
   4491 	      if (call->is_pasted)
   4492 		{
   4493 		  BFD_ASSERT (pasty == NULL);
   4494 		  pasty = call;
   4495 		}
   4496 	      else if (call->fun->sec->linker_mark)
   4497 		{
   4498 		  if (!copy_callee (&dummy_caller, call))
   4499 		    goto err_exit;
   4500 		}
   4501 	  while (pasty != NULL)
   4502 	    {
   4503 	      struct function_info *call_fun = pasty->fun;
   4504 	      pasty = NULL;
   4505 	      for (call = call_fun->call_list; call; call = call->next)
   4506 		if (call->is_pasted)
   4507 		  {
   4508 		    BFD_ASSERT (pasty == NULL);
   4509 		    pasty = call;
   4510 		  }
   4511 		else if (!copy_callee (&dummy_caller, call))
   4512 		  goto err_exit;
   4513 	    }
   4514 
   4515 	  /* Calculate call stub size.  */
   4516 	  num_stubs = 0;
   4517 	  for (call = dummy_caller.call_list; call; call = call->next)
   4518 	    {
   4519 	      unsigned int stub_delta = 1;
   4520 
   4521 	      if (htab->params->ovly_flavour == ovly_soft_icache)
   4522 		stub_delta = call->count;
   4523 	      num_stubs += stub_delta;
   4524 
   4525 	      /* If the call is within this overlay, we won't need a
   4526 		 stub.  */
   4527 	      for (k = base; k < i + 1; k++)
   4528 		if (call->fun->sec == ovly_sections[2 * k])
   4529 		  {
   4530 		    num_stubs -= stub_delta;
   4531 		    break;
   4532 		  }
   4533 	    }
   4534 	  if (htab->params->ovly_flavour == ovly_soft_icache
   4535 	      && num_stubs > htab->params->max_branch)
   4536 	    break;
   4537 	  if (align_power (tmp, roalign) + rotmp
   4538 	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
   4539 	    break;
   4540 	  size = tmp;
   4541 	  rosize = rotmp;
   4542 	}
   4543 
   4544       if (i == base)
   4545 	{
   4546 	  /* xgettext:c-format */
   4547 	  info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
   4548 				  ovly_sections[2 * i]->owner,
   4549 				  ovly_sections[2 * i],
   4550 				  ovly_sections[2 * i + 1] ? " + rodata" : "");
   4551 	  bfd_set_error (bfd_error_bad_value);
   4552 	  goto err_exit;
   4553 	}
   4554 
   4555       while (dummy_caller.call_list != NULL)
   4556 	{
   4557 	  struct call_info *call = dummy_caller.call_list;
   4558 	  dummy_caller.call_list = call->next;
   4559 	  free (call);
   4560 	}
   4561 
   4562       ++ovlynum;
   4563       while (base < i)
   4564 	ovly_map[base++] = ovlynum;
   4565     }
   4566 
   4567   script = htab->params->spu_elf_open_overlay_script ();
   4568 
   4569   if (htab->params->ovly_flavour == ovly_soft_icache)
   4570     {
   4571       if (fprintf (script, "SECTIONS\n{\n") <= 0)
   4572 	goto file_err;
   4573 
   4574       if (fprintf (script,
   4575 		   " . = ALIGN (%u);\n"
   4576 		   " .ovl.init : { *(.ovl.init) }\n"
   4577 		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
   4578 		   htab->params->line_size) <= 0)
   4579 	goto file_err;
   4580 
   4581       base = 0;
   4582       ovlynum = 1;
   4583       while (base < count)
   4584 	{
   4585 	  unsigned int indx = ovlynum - 1;
   4586 	  unsigned int vma, lma;
   4587 
   4588 	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
   4589 	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
   4590 
   4591 	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
   4592 			       ": AT (LOADADDR (.ovl.init) + %u) {\n",
   4593 		       ovlynum, vma, lma) <= 0)
   4594 	    goto file_err;
   4595 
   4596 	  base = print_one_overlay_section (script, base, count, ovlynum,
   4597 					    ovly_map, ovly_sections, info);
   4598 	  if (base == (unsigned) -1)
   4599 	    goto file_err;
   4600 
   4601 	  if (fprintf (script, "  }\n") <= 0)
   4602 	    goto file_err;
   4603 
   4604 	  ovlynum++;
   4605 	}
   4606 
   4607       if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
   4608 		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
   4609 	goto file_err;
   4610 
   4611       if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
   4612 	goto file_err;
   4613     }
   4614   else
   4615     {
   4616       if (fprintf (script, "SECTIONS\n{\n") <= 0)
   4617 	goto file_err;
   4618 
   4619       if (fprintf (script,
   4620 		   " . = ALIGN (16);\n"
   4621 		   " .ovl.init : { *(.ovl.init) }\n"
   4622 		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
   4623 	goto file_err;
   4624 
   4625       for (region = 1; region <= htab->params->num_lines; region++)
   4626 	{
   4627 	  ovlynum = region;
   4628 	  base = 0;
   4629 	  while (base < count && ovly_map[base] < ovlynum)
   4630 	    base++;
   4631 
   4632 	  if (base == count)
   4633 	    break;
   4634 
   4635 	  if (region == 1)
   4636 	    {
   4637 	      /* We need to set lma since we are overlaying .ovl.init.  */
   4638 	      if (fprintf (script,
   4639 			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
   4640 		goto file_err;
   4641 	    }
   4642 	  else
   4643 	    {
   4644 	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
   4645 		goto file_err;
   4646 	    }
   4647 
   4648 	  while (base < count)
   4649 	    {
   4650 	      if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
   4651 		goto file_err;
   4652 
   4653 	      base = print_one_overlay_section (script, base, count, ovlynum,
   4654 						ovly_map, ovly_sections, info);
   4655 	      if (base == (unsigned) -1)
   4656 		goto file_err;
   4657 
   4658 	      if (fprintf (script, "  }\n") <= 0)
   4659 		goto file_err;
   4660 
   4661 	      ovlynum += htab->params->num_lines;
   4662 	      while (base < count && ovly_map[base] < ovlynum)
   4663 		base++;
   4664 	    }
   4665 
   4666 	  if (fprintf (script, " }\n") <= 0)
   4667 	    goto file_err;
   4668 	}
   4669 
   4670       if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
   4671 	goto file_err;
   4672     }
   4673 
   4674   free (ovly_map);
   4675   free (ovly_sections);
   4676 
   4677   if (fclose (script) != 0)
   4678     goto file_err;
   4679 
   4680   if (htab->params->auto_overlay & AUTO_RELINK)
   4681     (*htab->params->spu_elf_relink) ();
   4682 
   4683   xexit (0);
   4684 
   4685  file_err:
   4686   bfd_set_error (bfd_error_system_call);
   4687  err_exit:
   4688   info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
   4689   xexit (1);
   4690 }
   4691 
   4692 /* Provide an estimate of total stack required.  */
   4693 
   4694 static bool
   4695 spu_elf_stack_analysis (struct bfd_link_info *info)
   4696 {
   4697   struct spu_link_hash_table *htab;
   4698   struct _sum_stack_param sum_stack_param;
   4699 
   4700   if (!discover_functions (info))
   4701     return false;
   4702 
   4703   if (!build_call_tree (info))
   4704     return false;
   4705 
   4706   htab = spu_hash_table (info);
   4707   if (htab->params->stack_analysis)
   4708     {
   4709       info->callbacks->info (_("Stack size for call graph root nodes.\n"));
   4710       info->callbacks->minfo (_("\nStack size for functions.  "
   4711 				"Annotations: '*' max stack, 't' tail call\n"));
   4712     }
   4713 
   4714   sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
   4715   sum_stack_param.overall_stack = 0;
   4716   if (!for_each_node (sum_stack, info, &sum_stack_param, true))
   4717     return false;
   4718 
   4719   if (htab->params->stack_analysis)
   4720     info->callbacks->info (_("Maximum stack required is 0x%v\n"),
   4721 			   (bfd_vma) sum_stack_param.overall_stack);
   4722   return true;
   4723 }
   4724 
   4725 /* Perform a final link.  */
   4726 
   4727 static bool
   4728 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
   4729 {
   4730   struct spu_link_hash_table *htab = spu_hash_table (info);
   4731 
   4732   if (htab->params->auto_overlay)
   4733     spu_elf_auto_overlay (info);
   4734 
   4735   if ((htab->params->stack_analysis
   4736        || (htab->params->ovly_flavour == ovly_soft_icache
   4737 	   && htab->params->lrlive_analysis))
   4738       && !spu_elf_stack_analysis (info))
   4739     info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
   4740 
   4741   if (!spu_elf_build_stubs (info))
   4742     info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
   4743 
   4744   return bfd_elf_final_link (output_bfd, info);
   4745 }
   4746 
   4747 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
   4748    and !info->emitrelocations.  Returns a count of special relocs
   4749    that need to be emitted.  */
   4750 
   4751 static unsigned int
   4752 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
   4753 {
   4754   Elf_Internal_Rela *relocs;
   4755   unsigned int count = 0;
   4756 
   4757   relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
   4758 				      info->keep_memory);
   4759   if (relocs != NULL)
   4760     {
   4761       Elf_Internal_Rela *rel;
   4762       Elf_Internal_Rela *relend = relocs + sec->reloc_count;
   4763 
   4764       for (rel = relocs; rel < relend; rel++)
   4765 	{
   4766 	  int r_type = ELF32_R_TYPE (rel->r_info);
   4767 	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
   4768 	    ++count;
   4769 	}
   4770 
   4771       if (elf_section_data (sec)->relocs != relocs)
   4772 	free (relocs);
   4773     }
   4774 
   4775   return count;
   4776 }
   4777 
/* Functions for adding fixup records to .fixup */

/* Each fixup record occupies one 32-bit word.  */
#define FIXUP_RECORD_SIZE 4

/* Store 32-bit record ADDR into slot INDEX of the .fixup section.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back the 32-bit record at slot INDEX of the .fixup section.  */
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
   4788 
   4789 /* Store OFFSET in .fixup.  This assumes it will be called with an
   4790    increasing OFFSET.  When this OFFSET fits with the last base offset,
   4791    it just sets a bit, otherwise it adds a new fixup record.  */
   4792 static void
   4793 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
   4794 		    bfd_vma offset)
   4795 {
   4796   struct spu_link_hash_table *htab = spu_hash_table (info);
   4797   asection *sfixup = htab->sfixup;
   4798   bfd_vma qaddr = offset & ~(bfd_vma) 15;
   4799   bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
   4800   if (sfixup->reloc_count == 0)
   4801     {
   4802       FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
   4803       sfixup->reloc_count++;
   4804     }
   4805   else
   4806     {
   4807       bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
   4808       if (qaddr != (base & ~(bfd_vma) 15))
   4809 	{
   4810 	  if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
   4811 	    _bfd_error_handler (_("fatal error while creating .fixup"));
   4812 	  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
   4813 	  sfixup->reloc_count++;
   4814 	}
   4815       else
   4816 	FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
   4817     }
   4818 }
   4819 
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns false on error, true on success, and 2 when R_SPU_PPU*
   relocs were kept so that _bfd_elf_link_output_relocs emits them.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = true;
  bool emit_these_relocs = false;
  bool is_ea_sym;
  bool stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* True when this section might branch into an overlay and so may
     need its branch targets redirected to overlay call stubs.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bool unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = false;
      h = NULL;
      sym = NULL;
      sec = NULL;
      /* Resolve the symbol this reloc is against: local symbols via
	 the section tables, globals via the linker hash table.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  if (sym_hashes == NULL)
	    return false;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  /* Follow indirect and warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = true;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      bool err;

	      err = (info->unresolved_syms_in_objects == RM_DIAGNOSE
		     && !info->warn_unresolved_syms)
		|| ELF_ST_VISIBILITY (h->other) != STV_DEFAULT;

	      info->callbacks->undefined_symbol
		(info, h->root.root.string, input_bfd,
		 input_section, rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry recorded earlier for this branch; for
	     soft-icache the entry is matched by branch address, for
	     normal overlays by addend and overlay index.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc at the stub and drop the addend, which
	     was already folded into the stub's target.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record every R_SPU_ADDR32 location in .fixup when requested,
	 so a loader can relocate the image.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are not applied here; they are kept and emitted
	     into the output for the PPU-side loader.  */
	  emit_these_relocs = true;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = true;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%s+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_section_name (input_section),
	     (uint64_t) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = false;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = false;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  /* Squash the reloc array down to just the PPU relocs that must
     survive into the output, and shrink the section header to match.  */
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* Tell the caller (via return value 2) that relocs remain.  */
      ret = 2;
    }

  return ret;
}
   5131 
/* Nothing to do when finishing dynamic sections on SPU; succeed
   unconditionally.  */

static bool
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return true;
}
   5138 
   5139 /* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
   5140 
   5141 static int
   5142 spu_elf_output_symbol_hook (struct bfd_link_info *info,
   5143 			    const char *sym_name ATTRIBUTE_UNUSED,
   5144 			    Elf_Internal_Sym *sym,
   5145 			    asection *sym_sec ATTRIBUTE_UNUSED,
   5146 			    struct elf_link_hash_entry *h)
   5147 {
   5148   struct spu_link_hash_table *htab = spu_hash_table (info);
   5149 
   5150   if (!bfd_link_relocatable (info)
   5151       && htab->stub_sec != NULL
   5152       && h != NULL
   5153       && (h->root.type == bfd_link_hash_defined
   5154 	  || h->root.type == bfd_link_hash_defweak)
   5155       && h->def_regular
   5156       && startswith (h->root.root.string, "_SPUEAR_"))
   5157     {
   5158       struct got_entry *g;
   5159 
   5160       for (g = h->got.glist; g != NULL; g = g->next)
   5161 	if (htab->params->ovly_flavour == ovly_soft_icache
   5162 	    ? g->br_addr == g->stub_addr
   5163 	    : g->addend == 0 && g->ovl == 0)
   5164 	  {
   5165 	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
   5166 			     (htab->stub_sec[0]->output_section->owner,
   5167 			      htab->stub_sec[0]->output_section));
   5168 	    sym->st_value = g->stub_addr;
   5169 	    break;
   5170 	  }
   5171     }
   5172 
   5173   return 1;
   5174 }
   5175 
/* Nonzero when building a plugin; spu_elf_init_file_header checks
   this to mark the output ET_DYN.  */
static int spu_plugin = 0;

/* Record the plugin setting VAL for later use by the file header
   hook.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
   5183 
   5184 /* Set ELF header e_type for plugins.  */
   5185 
   5186 static bool
   5187 spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
   5188 {
   5189   if (!_bfd_elf_init_file_header (abfd, info))
   5190     return false;
   5191 
   5192   if (spu_plugin)
   5193     {
   5194       Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
   5195 
   5196       i_ehdrp->e_type = ET_DYN;
   5197     }
   5198   return true;
   5199 }
   5200 
   5201 /* We may add an extra PT_LOAD segment for .toe.  We also need extra
   5202    segments for overlays.  */
   5203 
   5204 static int
   5205 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
   5206 {
   5207   int extra = 0;
   5208   asection *sec;
   5209 
   5210   if (info != NULL)
   5211     {
   5212       struct spu_link_hash_table *htab = spu_hash_table (info);
   5213       extra = htab->num_overlays;
   5214     }
   5215 
   5216   if (extra)
   5217     ++extra;
   5218 
   5219   sec = bfd_get_section_by_name (abfd, ".toe");
   5220   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
   5221     ++extra;
   5222 
   5223   return extra;
   5224 }
   5225 
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bool
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return true;

  /* Split any multi-section PT_LOAD segment at the first section that
     is .toe or belongs to an overlay: the sections after it go into a
     new following segment, the sections before it keep the original
     segment, and the section itself gets a segment of its own.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Move the trailing sections into a new segment.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return false;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If the special section wasn't first, keep the leading
	       sections in M and insert a one-section segment for S.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return false;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
	{
	  if (!first_load)
	    first_load = p;
	  if ((*p)->count == 1
	      && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	    {
	      /* Unlink this overlay segment from the main list and
		 append it to the overlay list.  */
	      m = *p;
	      m->no_sort_lma = 1;
	      *p = m->next;
	      *p_overlay = m;
	      p_overlay = &m->next;
	      continue;
	    }
	}
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
	/* It doesn't really make sense for someone to include the ELF
	   file header into an spu image, but if they do the code that
	   assigns p_offset needs to see the segment containing the
	   header first.  */
	p = &(*p)->next;
      *p_overlay = *p;
      *p = m_overlay;
    }

  return true;
}
   5331 
   5332 /* Tweak the section type of .note.spu_name.  */
   5333 
   5334 static bool
   5335 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
   5336 		       Elf_Internal_Shdr *hdr,
   5337 		       asection *sec)
   5338 {
   5339   if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
   5340     hdr->sh_type = SHT_NOTE;
   5341   return true;
   5342 }
   5343 
/* Tweak phdrs before writing them out.  Marks overlay segments
   PF_OVERLAY, records their file offsets in the overlay tables, and
   pads PT_LOAD p_filesz/p_memsz to 16-byte multiples when safe.  */

static bool
spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  if (info != NULL)
    {
      const struct elf_backend_data *bed;
      struct elf_obj_tdata *tdata;
      Elf_Internal_Phdr *phdr, *last;
      struct spu_link_hash_table *htab;
      unsigned int count;
      unsigned int i;

      bed = get_elf_backend_data (abfd);
      tdata = elf_tdata (abfd);
      phdr = tdata->phdr;
      count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
      htab = spu_hash_table (info);
      if (htab->num_overlays != 0)
	{
	  struct elf_segment_map *m;
	  unsigned int o;

	  /* Walk segment map and phdr array in parallel; they were
	     built in the same order.  */
	  for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	    if (m->count != 0
		&& ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
		    != 0))
	      {
		/* Mark this as an overlay header.  */
		phdr[i].p_flags |= PF_OVERLAY;

		if (htab->ovtab != NULL && htab->ovtab->size != 0
		    && htab->params->ovly_flavour != ovly_soft_icache)
		  {
		    bfd_byte *p = htab->ovtab->contents;
		    unsigned int off = o * 16 + 8;

		    /* Write file_off into _ovly_table.  */
		    bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
		  }
	      }
	  /* Soft-icache has its file offset put in .ovl.init.  */
	  if (htab->init != NULL && htab->init->size != 0)
	    {
	      bfd_vma val
		= elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	      bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	    }
	}

      /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
	 of 16.  This should always be possible when using the standard
	 linker scripts, but don't create overlapping segments if
	 someone is playing games with linker scripts.  */
      /* First pass (highest address downwards): verify that padding
	 each segment would not run into the next one; bail out of the
	 loop early (leaving i != -1) if it would.  */
      last = NULL;
      for (i = count; i-- != 0; )
	if (phdr[i].p_type == PT_LOAD)
	  {
	    unsigned adjust;

	    adjust = -phdr[i].p_filesz & 15;
	    if (adjust != 0
		&& last != NULL
		&& (phdr[i].p_offset + phdr[i].p_filesz
		    > last->p_offset - adjust))
	      break;

	    adjust = -phdr[i].p_memsz & 15;
	    if (adjust != 0
		&& last != NULL
		&& phdr[i].p_filesz != 0
		&& phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
		&& phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	      break;

	    if (phdr[i].p_filesz != 0)
	      last = &phdr[i];
	  }

      /* Second pass: only if the check loop ran to completion do the
	 actual rounding.  */
      if (i == (unsigned int) -1)
	for (i = count; i-- != 0; )
	  if (phdr[i].p_type == PT_LOAD)
	    {
	      unsigned adjust;

	      adjust = -phdr[i].p_filesz & 15;
	      phdr[i].p_filesz += adjust;

	      adjust = -phdr[i].p_memsz & 15;
	      phdr[i].p_memsz += adjust;
	    }
    }

  return _bfd_elf_modify_headers (abfd, info);
}
   5441 
   5442 bool
   5443 spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
   5444 {
   5445   struct spu_link_hash_table *htab = spu_hash_table (info);
   5446   if (htab->params->emit_fixups)
   5447     {
   5448       asection *sfixup = htab->sfixup;
   5449       int fixup_count = 0;
   5450       bfd *ibfd;
   5451       size_t size;
   5452 
   5453       for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   5454 	{
   5455 	  asection *isec;
   5456 
   5457 	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
   5458 	    continue;
   5459 
   5460 	  /* Walk over each section attached to the input bfd.  */
   5461 	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
   5462 	    {
   5463 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
   5464 	      bfd_vma base_end;
   5465 
   5466 	      /* If there aren't any relocs, then there's nothing more
   5467 		 to do.  */
   5468 	      if ((isec->flags & SEC_ALLOC) == 0
   5469 		  || (isec->flags & SEC_RELOC) == 0
   5470 		  || isec->reloc_count == 0)
   5471 		continue;
   5472 
   5473 	      /* Get the relocs.  */
   5474 	      internal_relocs =
   5475 		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
   5476 					   info->keep_memory);
   5477 	      if (internal_relocs == NULL)
   5478 		return false;
   5479 
   5480 	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
   5481 		 relocations.  They are stored in a single word by
   5482 		 saving the upper 28 bits of the address and setting the
   5483 		 lower 4 bits to a bit mask of the words that have the
   5484 		 relocation.  BASE_END keeps track of the next quadword. */
   5485 	      irela = internal_relocs;
   5486 	      irelaend = irela + isec->reloc_count;
   5487 	      base_end = 0;
   5488 	      for (; irela < irelaend; irela++)
   5489 		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
   5490 		    && irela->r_offset >= base_end)
   5491 		  {
   5492 		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
   5493 		    fixup_count++;
   5494 		  }
   5495 	    }
   5496 	}
   5497 
   5498       /* We always have a NULL fixup as a sentinel */
   5499       size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
   5500       if (!bfd_set_section_size (sfixup, size))
   5501 	return false;
   5502       sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
   5503       if (sfixup->contents == NULL)
   5504 	return false;
   5505     }
   5506   return true;
   5507 }
   5508 
/* BFD target-vector identification.  The SPU is big-endian only, so
   only the big-endian symbols are provided.  */
#define TARGET_BIG_SYM		spu_elf32_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

/* Reloc handling and per-section/per-symbol hooks, implemented
   earlier in this file.  */
#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

/* Program-header and final-link hooks, used mainly for overlay
   support, implemented earlier in this file.  */
#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_headers		spu_elf_modify_headers
#define elf_backend_init_file_header		spu_elf_init_file_header
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

/* Generate the target vector from the definitions above.  */
#include "elf32-target.h"
   5540